value1 + (value2 - value1) * amount
.
+ Passing amount a value of 0 will cause value1 to be returned, a value of 1 will cause value2 to be returned.
+ See ((1 - amount) * value1) + (value2 * amount)
.
+ Passing amount a value of 0 will cause value1 to be returned, a value of 1 will cause value2 to be returned.
+ This method does not have the floating point precision issue that true
.
+ On OpenGL platforms, it is true
if both framebuffer sRGB
+ and texture sRGB are supported.
+
+ Vector3[] positions = new Vector3[numVertices];
+ vertexBuffer.SetData(0, positions, 0, numVertices, vertexBuffer.VertexDeclaration.VertexStride);
+
+
+ Continuing from the previous example, if you want to set only the texture coordinate component of the vertex data,
+ you would call this method as follows (note the use of
+ Vector2[] texCoords = new Vector2[numVertices];
+ vertexBuffer.SetData(12, texCoords, 0, numVertices, vertexBuffer.VertexDeclaration.VertexStride);
+
+
+ using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ {
+ using (var raw = System.IO.File.Create(fileToCompress + ".zlib"))
+ {
+ using (Stream compressor = new ZlibStream(raw, CompressionMode.Compress))
+ {
+ byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ int n;
+ while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ {
+ compressor.Write(buffer, 0, n);
+ }
+ }
+ }
+ }
+
+
+ Using input As Stream = File.OpenRead(fileToCompress)
+ Using raw As FileStream = File.Create(fileToCompress & ".zlib")
+ Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress)
+ Dim buffer As Byte() = New Byte(4096) {}
+ Dim n As Integer = -1
+ Do While (n <> 0)
+ If (n > 0) Then
+ compressor.Write(buffer, 0, n)
+ End If
+ n = input.Read(buffer, 0, buffer.Length)
+ Loop
+ End Using
+ End Using
+ End Using
+
+
+ using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ {
+ using (var raw = System.IO.File.Create(fileToCompress + ".zlib"))
+ {
+ using (Stream compressor = new ZlibStream(raw,
+ CompressionMode.Compress,
+ CompressionLevel.BestCompression))
+ {
+ byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ int n;
+ while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ {
+ compressor.Write(buffer, 0, n);
+ }
+ }
+ }
+ }
+
+
+
+ Using input As Stream = File.OpenRead(fileToCompress)
+ Using raw As FileStream = File.Create(fileToCompress & ".zlib")
+ Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
+ Dim buffer As Byte() = New Byte(4096) {}
+ Dim n As Integer = -1
+ Do While (n <> 0)
+ If (n > 0) Then
+ compressor.Write(buffer, 0, n)
+ End If
+ n = input.Read(buffer, 0, buffer.Length)
+ Loop
+ End Using
+ End Using
+ End Using
+
+
+ using (var output = System.IO.File.Create(fileToCompress + ".zlib"))
+ {
+ using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ {
+ using (Stream compressor = new ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true))
+ {
+ byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ int n;
+ while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ {
+ compressor.Write(buffer, 0, n);
+ }
+ }
+ }
+ // can write additional data to the output stream here
+ }
+
+
+ Using output As FileStream = File.Create(fileToCompress & ".zlib")
+ Using input As Stream = File.OpenRead(fileToCompress)
+ Using compressor As Stream = New ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True)
+ Dim buffer As Byte() = New Byte(4096) {}
+ Dim n As Integer = -1
+ Do While (n <> 0)
+ If (n > 0) Then
+ compressor.Write(buffer, 0, n)
+ End If
+ n = input.Read(buffer, 0, buffer.Length)
+ Loop
+ End Using
+ End Using
+ ' can write additional data to the output stream here.
+ End Using
+
+
+ private void InflateBuffer()
+ {
+ int bufferSize = 1024;
+ byte[] buffer = new byte[bufferSize];
+ ZlibCodec decompressor = new ZlibCodec();
+
+ Console.WriteLine("\n============================================");
+ Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length);
+ MemoryStream ms = new MemoryStream(DecompressedBytes);
+
+ int rc = decompressor.InitializeInflate();
+
+ decompressor.InputBuffer = CompressedBytes;
+ decompressor.NextIn = 0;
+ decompressor.AvailableBytesIn = CompressedBytes.Length;
+
+ decompressor.OutputBuffer = buffer;
+
+ // pass 1: inflate
+ do
+ {
+ decompressor.NextOut = 0;
+ decompressor.AvailableBytesOut = buffer.Length;
+ rc = decompressor.Inflate(FlushType.None);
+
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new Exception("inflating: " + decompressor.Message);
+
+ ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut);
+ }
+ while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
+
+ // pass 2: finish and flush
+ do
+ {
+ decompressor.NextOut = 0;
+ decompressor.AvailableBytesOut = buffer.Length;
+ rc = decompressor.Inflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ throw new Exception("inflating: " + decompressor.Message);
+
+ if (buffer.Length - decompressor.AvailableBytesOut > 0)
+ ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut);
+ }
+ while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
+
+ decompressor.EndInflate();
+ }
+
+
+
+ int bufferSize = 40000;
+ byte[] CompressedBytes = new byte[bufferSize];
+ byte[] DecompressedBytes = new byte[bufferSize];
+
+ ZlibCodec compressor = new ZlibCodec();
+
+ compressor.InitializeDeflate(CompressionLevel.Default);
+
+ compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress);
+ compressor.NextIn = 0;
+ compressor.AvailableBytesIn = compressor.InputBuffer.Length;
+
+ compressor.OutputBuffer = CompressedBytes;
+ compressor.NextOut = 0;
+ compressor.AvailableBytesOut = CompressedBytes.Length;
+
+ while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize)
+ {
+ compressor.Deflate(FlushType.None);
+ }
+
+ while (true)
+ {
+ int rc= compressor.Deflate(FlushType.Finish);
+ if (rc == ZlibConstants.Z_STREAM_END) break;
+ }
+
+ compressor.EndDeflate();
+
+
+
+ private void DeflateBuffer(CompressionLevel level)
+ {
+ int bufferSize = 1024;
+ byte[] buffer = new byte[bufferSize];
+ ZlibCodec compressor = new ZlibCodec();
+
+ Console.WriteLine("\n============================================");
+ Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length);
+ MemoryStream ms = new MemoryStream();
+
+ int rc = compressor.InitializeDeflate(level);
+
+ compressor.InputBuffer = UncompressedBytes;
+ compressor.NextIn = 0;
+ compressor.AvailableBytesIn = UncompressedBytes.Length;
+
+ compressor.OutputBuffer = buffer;
+
+ // pass 1: deflate
+ do
+ {
+ compressor.NextOut = 0;
+ compressor.AvailableBytesOut = buffer.Length;
+ rc = compressor.Deflate(FlushType.None);
+
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new Exception("deflating: " + compressor.Message);
+
+ ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut);
+ }
+ while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+
+ // pass 2: finish and flush
+ do
+ {
+ compressor.NextOut = 0;
+ compressor.AvailableBytesOut = buffer.Length;
+ rc = compressor.Deflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ throw new Exception("deflating: " + compressor.Message);
+
+ if (buffer.Length - compressor.AvailableBytesOut > 0)
+ ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
+ }
+ while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+
+ compressor.EndDeflate();
+
+ ms.Seek(0, SeekOrigin.Begin);
+ CompressedBytes = new byte[compressor.TotalBytesOut];
+ ms.Read(CompressedBytes, 0, CompressedBytes.Length);
+ }
+
+
+ var adler = Adler.Adler32(0, null, 0, 0);
+ adler = Adler.Adler32(adler, buffer, index, length);
+
+
+ using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ {
+ using (var raw = System.IO.File.Create(outputFile))
+ {
+ using (Stream compressor = new GZipStream(raw, CompressionMode.Compress))
+ {
+ byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ int n;
+ while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ {
+ compressor.Write(buffer, 0, n);
+ }
+ }
+ }
+ }
+
+
+ Dim outputFile As String = (fileToCompress & ".compressed")
+ Using input As Stream = File.OpenRead(fileToCompress)
+ Using raw As FileStream = File.Create(outputFile)
+ Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress)
+ Dim buffer As Byte() = New Byte(4096) {}
+ Dim n As Integer = -1
+ Do While (n <> 0)
+ If (n > 0) Then
+ compressor.Write(buffer, 0, n)
+ End If
+ n = input.Read(buffer, 0, buffer.Length)
+ Loop
+ End Using
+ End Using
+ End Using
+
+
+ private void GunZipFile(string filename)
+ {
+ if (!filename.EndsWith(".gz"))
+ throw new ArgumentException("filename");
+ var DecompressedFile = filename.Substring(0,filename.Length-3);
+ byte[] working = new byte[WORKING_BUFFER_SIZE];
+ int n= 1;
+ using (System.IO.Stream input = System.IO.File.OpenRead(filename))
+ {
+ using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
+ {
+ using (var output = System.IO.File.Create(DecompressedFile))
+ {
+ while (n !=0)
+ {
+ n= decompressor.Read(working, 0, working.Length);
+ if (n > 0)
+ {
+ output.Write(working, 0, n);
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+
+ Private Sub GunZipFile(ByVal filename as String)
+ If Not (filename.EndsWith(".gz")) Then
+ Throw New ArgumentException("filename")
+ End If
+ Dim DecompressedFile as String = filename.Substring(0,filename.Length-3)
+ Dim working(WORKING_BUFFER_SIZE) as Byte
+ Dim n As Integer = 1
+ Using input As Stream = File.OpenRead(filename)
+ Using decompressor As Stream = new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, True)
+ Using output As Stream = File.Create(DecompressedFile)
+ Do
+ n= decompressor.Read(working, 0, working.Length)
+ If n > 0 Then
+ output.Write(working, 0, n)
+ End If
+ Loop While (n > 0)
+ End Using
+ End Using
+ End Using
+ End Sub
+
+
+ using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ {
+ using (var raw = System.IO.File.Create(fileToCompress + ".gz"))
+ {
+ using (Stream compressor = new GZipStream(raw,
+ CompressionMode.Compress,
+ CompressionLevel.BestCompression))
+ {
+ byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ int n;
+ while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ {
+ compressor.Write(buffer, 0, n);
+ }
+ }
+ }
+ }
+
+
+
+ Using input As Stream = File.OpenRead(fileToCompress)
+ Using raw As FileStream = File.Create(fileToCompress & ".gz")
+ Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
+ Dim buffer As Byte() = New Byte(4096) {}
+ Dim n As Integer = -1
+ Do While (n <> 0)
+ If (n > 0) Then
+ compressor.Write(buffer, 0, n)
+ End If
+ n = input.Read(buffer, 0, buffer.Length)
+ Loop
+ End Using
+ End Using
+ End Using
+
+
+ using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ {
+ using (var raw = System.IO.File.Create(outputFile))
+ {
+ using (Stream compressor = new GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, true))
+ {
+ byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ int n;
+ while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ {
+ compressor.Write(buffer, 0, n);
+ }
+ }
+ }
+ }
+
+
+ Dim outputFile As String = (fileToCompress & ".compressed")
+ Using input As Stream = File.OpenRead(fileToCompress)
+ Using raw As FileStream = File.Create(outputFile)
+ Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, True)
+ Dim buffer As Byte() = New Byte(4096) {}
+ Dim n As Integer = -1
+ Do While (n <> 0)
+ If (n > 0) Then
+ compressor.Write(buffer, 0, n)
+ End If
+ n = input.Read(buffer, 0, buffer.Length)
+ Loop
+ End Using
+ End Using
+ End Using
+
+
+ byte[] working = new byte[WORKING_BUFFER_SIZE];
+ using (System.IO.Stream input = System.IO.File.OpenRead(_CompressedFile))
+ {
+ using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
+ {
+ using (var output = System.IO.File.Create(_DecompressedFile))
+ {
+ int n;
+ while ((n= decompressor.Read(working, 0, working.Length)) !=0)
+ {
+ output.Write(working, 0, n);
+ }
+ }
+ }
+ }
+
+ The
A display sub-system is often referred to as a video card, however, on some machines the display sub-system is part of the mother board.
To enumerate the display sub-systems, use
Windows Phone 8: This API is supported.
+An
Windows Phone 8: This API is supported.
+Sets application-defined data to the object and associates that data with a
A
The size of the object's data.
A reference to the object's data.
Returns one of the DXGI_ERROR values.
SetPrivateData makes a copy of the specified data and stores it with the object.
Private data that SetPrivateData stores in the object occupies the same storage space as private data that is stored by associated Direct3D objects (for example, by a Microsoft Direct3D 11 device through
The debug layer reports memory leaks by outputting a list of object interface references along with their friendly names. The default friendly name is "<unnamed>". You can set the friendly name so that you can determine if the corresponding object interface reference caused the leak. To set the friendly name, use the SetPrivateData method and the well-known private data
static const char c_szName[] = "My name"; + hr = pContext->SetPrivateData(, sizeof( c_szName ) - 1, c_szName ); +
You can use
Windows Phone 8: This API is supported.
+Set an interface in the object's private data.
+A
The interface to set.
Returns one of the following DXGI_ERROR.
This API associates an interface reference with the object.
When the interface is set its reference count is incremented. When the data are overwritten (by calling SPD or SPDI with the same
Windows Phone 8: This API is supported.
+Get a reference to the object's data.
+A
The size of the data.
Pointer to the data.
Returns one of the following DXGI_ERROR.
If the data returned is a reference to an
You can pass GUID_DeviceType in the Name parameter of GetPrivateData to retrieve the device type from the display adapter object (
To get the type of device on which the display adapter was created
On Windows 7 or earlier, this type is either a value from
Windows Phone 8: This API is supported.
+Gets the parent of the object.
+The ID of the requested interface.
The address of a reference to the parent object.
Returns one of the DXGI_ERROR values.
Windows Phone 8: This API is supported.
+Enumerate adapter (video card) outputs.
+The index of the output.
The address of a reference to an
A code that indicates success or failure (see DXGI_ERROR). Will return
Note: If you call this API in a Session 0 process, it returns
When the EnumOutputs method succeeds and fills the ppOutput parameter with the address of the reference to the output interface, EnumOutputs increments the output interface's reference count. To avoid a memory leak, when you finish using the output interface, call the Release method to decrement the reference count.
EnumOutputs first returns the output on which the desktop primary is displayed. This output corresponds with an index of zero. EnumOutputs then returns other outputs.
Windows Phone 8: This API is supported.
+Gets a DXGI 1.0 description of an adapter (or video card).
+A reference to a
Returns
Graphics apps can use the DXGI API to retrieve an accurate set of graphics memory values on systems that have Windows Display Driver Model (WDDM) drivers. The following are the critical steps involved.
HasWDDMDriver() + { LPDIRECT3DCREATE9EX pD3D9Create9Ex =null ; HMODULE hD3D9 =null ; hD3D9 = LoadLibrary( L"d3d9.dll" ); if (null == hD3D9 ) { return false; } // /* Try to createinterface (also known as a DX9L interface). This interface can only be created if the driver is a WDDM driver. */ // pD3D9Create9Ex = (LPDIRECT3DCREATE9EX) GetProcAddress( hD3D9, "Direct3DCreate9Ex" ); return pD3D9Create9Ex != null ; + }
* pDXGIDevice; + hr = g_pd3dDevice->QueryInterface(__uuidof( ), (void **)&pDXGIDevice); + * pDXGIAdapter; + pDXGIDevice->GetAdapter(&pDXGIAdapter); + adapterDesc; + pDXGIAdapter->GetDesc(&adapterDesc);
Windows Phone 8: This API is supported.
+Checks whether the system supports a device interface for a graphics component.
+The
The user mode driver version of InterfaceName. This is returned only if the interface is supported. This parameter can be
Note: You can use CheckInterfaceSupport only to check whether a Direct3D 10.x interface is supported, and only on Windows Vista SP1 and later versions of the operating system. If you try to use CheckInterfaceSupport to check whether a Direct3D 11.x and later version interface is supported, CheckInterfaceSupport returns
Windows Phone 8: This API is supported.
+Gets a DXGI 1.0 description of an adapter (or video card).
+Graphics apps can use the DXGI API to retrieve an accurate set of graphics memory values on systems that have Windows Display Driver Model (WDDM) drivers. The following are the critical steps involved.
HasWDDMDriver() + { LPDIRECT3DCREATE9EX pD3D9Create9Ex =null ; HMODULE hD3D9 =null ; hD3D9 = LoadLibrary( L"d3d9.dll" ); if (null == hD3D9 ) { return false; } // /* Try to createinterface (also known as a DX9L interface). This interface can only be created if the driver is a WDDM driver. */ // pD3D9Create9Ex = (LPDIRECT3DCREATE9EX) GetProcAddress( hD3D9, "Direct3DCreate9Ex" ); return pD3D9Create9Ex != null ; + }
* pDXGIDevice; + hr = g_pd3dDevice->QueryInterface(__uuidof( ), (void **)&pDXGIDevice); + * pDXGIAdapter; + pDXGIDevice->GetAdapter(&pDXGIAdapter); + adapterDesc; + pDXGIAdapter->GetDesc(&adapterDesc);
Windows Phone 8: This API is supported.
+An
The
The Direct3D create device functions return a Direct3D device object. This Direct3D device object implements the
* pDXGIDevice; + hr = g_pd3dDevice->QueryInterface(__uuidof( ), (void **)&pDXGIDevice); +
Windows Phone 8: This API is supported.
+Returns the adapter for the specified device.
+The address of an
Returns
If the GetAdapter method succeeds, the reference count on the adapter interface will be incremented. To avoid a memory leak, be sure to release the interface when you are finished using it.
Windows Phone 8: This API is supported.
+Returns a surface. This method is used internally and you should not call it directly in your application.
+A reference to a
The number of surfaces to create.
A DXGI_USAGE flag that specifies how the surface is expected to be used.
An optional reference to a
The address of an
Returns
The CreateSurface method creates a buffer to exchange data between one or more devices. It is used internally, and you should not directly call it.
The runtime automatically creates an
Windows Phone 8: This API is supported.
+Gets the residency status of an array of resources.
+An array of
An array of
The number of resources in the ppResources argument array and pResidencyStatus argument array.
Returns
The information returned by the pResidencyStatus argument array describes the residency status at the time that the QueryResourceResidency method was called.
Note: The residency status will constantly change.
If you call the QueryResourceResidency method during a device removed state, the pResidencyStatus argument will return the
Note: This method should not be called every frame as it incurs a non-trivial amount of overhead.
Windows Phone 8: This API is supported.
+Gets the residency status of an array of resources.
+An array of
An array of
The number of resources in the ppResources argument array and pResidencyStatus argument array.
Returns
The information returned by the pResidencyStatus argument array describes the residency status at the time that the QueryResourceResidency method was called.
Note: The residency status will constantly change.
If you call the QueryResourceResidency method during a device removed state, the pResidencyStatus argument will return the
Note: This method should not be called every frame as it incurs a non-trivial amount of overhead.
Windows Phone 8: This API is supported.
+Sets the GPU thread priority.
+A value that specifies the required GPU thread priority. This value must be between -7 and 7, inclusive, where 0 represents normal priority.
Return
The values for the Priority parameter function as follows:
To use the SetGPUThreadPriority method, you should have a comprehensive understanding of GPU scheduling. You should profile your application to ensure that it behaves as intended. If used inappropriately, the SetGPUThreadPriority method can impede rendering speed and result in a poor user experience.
Windows Phone 8: This API is supported.
+Gets the GPU thread priority.
+A reference to a variable that receives a value that indicates the current GPU thread priority. The value will be between -7 and 7, inclusive, where 0 represents normal priority.
Return
Windows Phone 8: This API is supported.
+Returns the adapter for the specified device.
+If the GetAdapter method succeeds, the reference count on the adapter interface will be incremented. To avoid a memory leak, be sure to release the interface when you are finished using it.
Windows Phone 8: This API is supported.
+Gets or sets the GPU thread priority.
+Windows Phone 8: This API is supported.
+Inherited from objects that are tied to the device so that they can retrieve a reference to it.
+Windows Phone 8: This API is supported.
+Retrieves the device.
+The reference id for the device.
The address of a reference to the device.
A code that indicates success or failure (see DXGI_ERROR).
The type of interface that is returned can be any interface published by the device. For example, it could be an
Windows Phone 8: This API is supported.
+An
Create a factory by calling CreateDXGIFactory.
Because you can create a Direct3D device without creating a swap chain, you might need to retrieve the factory that is used to create the device in order to create a swap chain.
+ You can request the
* pDXGIDevice; + hr = g_pd3dDevice->QueryInterface(__uuidof( ), (void **)&pDXGIDevice); * pDXGIAdapter; + hr = pDXGIDevice->GetParent(__uuidof( ), (void **)&pDXGIAdapter); * pIDXGIFactory; + pDXGIAdapter->GetParent(__uuidof( ), (void **)&pIDXGIFactory); +
Windows Phone 8: This API is supported.
+Enumerates the adapters (video cards).
+The index of the adapter to enumerate.
The address of a reference to an
Returns
When you create a factory, the factory enumerates the set of adapters that are available in the system. Therefore, if you change the adapters in a system, you must destroy and recreate the
When the EnumAdapters method succeeds and fills the ppAdapter parameter with the address of the reference to the adapter interface, EnumAdapters increments the adapter interface's reference count. When you finish using the adapter interface, call the Release method to decrement the reference count before you destroy the reference.
EnumAdapters first returns the adapter with the output on which the desktop primary is displayed. This adapter corresponds with an index of zero. EnumAdapters next returns other adapters with outputs. EnumAdapters finally returns adapters without outputs.
Windows Phone 8: This API is supported.
+Allows DXGI to monitor an application's message queue for the alt-enter key sequence (which causes the application to switch from windowed to full screen or vice versa).
+The handle of the window that is to be monitored. This parameter can be
One or more of the following values:
Note: If you call this API in a Session 0 process, it returns
The combination of WindowHandle and Flags informs DXGI to stop monitoring window messages for the previously-associated window.
If the application switches to full-screen mode, DXGI will choose a full-screen resolution to be the smallest supported resolution that is larger or the same size as the current back buffer size.
Applications can make some changes to make the transition from windowed to full screen more efficient. For example, on a WM_SIZE message, the application should release any outstanding swap-chain back buffers, call
While windowed, the application can, if it chooses, restrict the size of its window's client area to sizes to which it is comfortable rendering. A fully flexible application would make no such restriction, but UI elements or other design considerations can, of course, make this flexibility untenable. If the application further chooses to restrict its window's client area to just those that match supported full-screen resolutions, the application can field WM_SIZING, then check against
Applications that want to handle mode changes or Alt+Enter themselves should call MakeWindowAssociation with the
Get the window through which the user controls the transition to and from full screen.
+A reference to a window handle.
Note: If you call this API in a Session 0 process, it returns
[Starting with Direct3D 11.1, we recommend not to use CreateSwapChain anymore to create a swap chain. Instead, use CreateSwapChainForHwnd, CreateSwapChainForCoreWindow, or CreateSwapChainForComposition depending on how you want to create the swap chain.]
Creates a swap chain.
+
Note: If you call this API in a Session 0 process, it returns
If you attempt to create a swap chain in full-screen mode, and full-screen mode is unavailable, the swap chain will be created in windowed mode and
If the buffer width or the buffer height is zero, the sizes will be inferred from the output window size in the swap-chain description.
Because the target output can't be chosen explicitly when the swap chain is created, we recommend not to create a full-screen swap chain. This can reduce presentation performance if the swap chain size and the output window size do not match. Here are two ways to ensure that the sizes match:
If the swap chain is in full-screen mode, before you release it you must use SetFullscreenState to switch it to windowed mode. For more information about releasing a swap chain, see the "Destroying a Swap Chain" section of DXGI Overview.
After the runtime renders the initial frame in full screen, the runtime might unexpectedly exit full screen during a call to
// Detect if newly created full-screen swap chain isn't actually full screen. +* pTarget; bFullscreen; + if (SUCCEEDED(pSwapChain->GetFullscreenState(&bFullscreen, &pTarget))) + { pTarget->Release(); + } + else bFullscreen = ; + // If not full screen, enable full screen again. + if (!bFullscreen) + { ShowWindow(hWnd, SW_MINIMIZE); ShowWindow(hWnd, SW_RESTORE); pSwapChain->SetFullscreenState(TRUE, null ); + } +
You can specify
However, to use stereo presentation and to change resize behavior for the flip model, applications must use the IDXGIFactory2::CreateSwapChainForHwnd method. Otherwise, the back-buffer contents implicitly scale to fit the presentation target size; that is, you can't turn off scaling.
+Create an adapter interface that represents a software adapter.
+Handle to the software adapter's dll. HMODULE can be obtained with GetModuleHandle or LoadLibrary.
Address of a reference to an adapter (see
A software adapter is a DLL that implements the entirety of a device driver interface, plus emulation, if necessary, of kernel-mode graphics components for Windows. Details on implementing a software adapter can be found in the Windows Vista Driver Development Kit. This is a very complex development task, and is not recommended for general readers.
Calling this method will increment the module's reference count by one. The reference count can be decremented by calling FreeLibrary.
The typical calling scenario is to call LoadLibrary, pass the handle to CreateSoftwareAdapter, then immediately call FreeLibrary on the DLL and forget the DLL's HMODULE. Since the software adapter calls FreeLibrary when it is destroyed, the lifetime of the DLL will now be owned by the adapter, and the application is free of any further consideration of its lifetime.
Windows Phone 8: This API is supported.
+The
This interface is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
To create a factory, call the CreateDXGIFactory1 function.
Because you can create a Direct3D device without creating a swap chain, you might need to retrieve the factory that is used to create the device in order to create a swap chain.
+ You can request the
* pDXGIDevice; + hr = g_pd3dDevice->QueryInterface(__uuidof( ), (void **)&pDXGIDevice); * pDXGIAdapter; + hr = pDXGIDevice->GetParent(__uuidof( ), (void **)&pDXGIAdapter); * pIDXGIFactory; + pDXGIAdapter->GetParent(__uuidof( ), (void **)&pIDXGIFactory); +
Windows Phone 8: This API is supported.
+Enumerates both adapters (video cards) with or without outputs.
+The index of the adapter to enumerate.
The address of a reference to an
Returns
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
When you create a factory, the factory enumerates the set of adapters that are available in the system. Therefore, if you change the adapters in a system, you must destroy and recreate the
When the EnumAdapters1 method succeeds and fills the ppAdapter parameter with the address of the reference to the adapter interface, EnumAdapters1 increments the adapter interface's reference count. When you finish using the adapter interface, call the Release method to decrement the reference count before you destroy the reference.
EnumAdapters1 first returns the adapter with the output on which the desktop primary is displayed. This adapter corresponds with an index of zero. EnumAdapters1 next returns other adapters with outputs. EnumAdapters1 finally returns adapters without outputs.
Windows Phone 8: This API is supported.
+Informs an application of the possible need to re-enumerate adapters.
+IsCurrent returns
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Windows Phone 8: This API is supported.
+Informs an application of the possible need to re-enumerate adapters.
+This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Windows Phone 8: This API is supported.
+Identifies the type of DXGI adapter.
+The
Specifies no flags.
Value always set to 0. This flag is reserved.
Options for enumerating display modes.
+These flag options are used in
These flag options are also used in IDXGIOutput1::GetDisplayModeList1 to enumerate display modes.
+Flags that indicate how the back buffers should be rotated to fit the physical rotation of a monitor.
+Unspecified rotation.
Specifies no rotation.
Specifies 90 degrees of rotation.
Specifies 180 degrees of rotation.
Specifies 270 degrees of rotation.
Flags indicating how an image is stretched to fit a given monitor's resolution.
+Unspecified scaling.
Specifies no scaling. The image is centered on the display. This flag is typically used for a fixed-dot-pitch display (such as an LED display).
Specifies stretched scaling.
Flags indicating the method the raster uses to create an image on a surface.
+Scanline order is unspecified.
The image is created from the first scanline to the last without skipping any.
The image is created beginning with the upper field.
The image is created beginning with the lower field.
Status codes that can be returned by DXGI functions.
+The
#define _FACDXGI 0x87a
#define MAKE_DXGI_STATUS(code) MAKE_HRESULT(0, _FACDXGI, code)
For example,
#define DXGI_STATUS_OCCLUDED MAKE_DXGI_STATUS(1)
Resource data formats which includes fully-typed and typeless formats. There is a list of format modifiers at the bottom of the page, that more fully describes each format type.
+A few formats have additional restrictions.
The following topics provide lists of the formats that particular hardware feature levels support:
For a list of the DirectXMath types that map to
The format is not known.
A four-component, 128-bit typeless format that supports 32 bits per channel including alpha. 1
A four-component, 128-bit floating-point format that supports 32 bits per channel including alpha. 1
A four-component, 128-bit unsigned-integer format that supports 32 bits per channel including alpha. 1
A four-component, 128-bit signed-integer format that supports 32 bits per channel including alpha. 1
A three-component, 96-bit typeless format that supports 32 bits per color channel.
A three-component, 96-bit floating-point format that supports 32 bits per color channel.
A three-component, 96-bit unsigned-integer format that supports 32 bits per color channel.
A three-component, 96-bit signed-integer format that supports 32 bits per color channel.
A four-component, 64-bit typeless format that supports 16 bits per channel including alpha.
A four-component, 64-bit floating-point format that supports 16 bits per channel including alpha.
A four-component, 64-bit unsigned-normalized-integer format that supports 16 bits per channel including alpha.
A four-component, 64-bit unsigned-integer format that supports 16 bits per channel including alpha.
A four-component, 64-bit signed-normalized-integer format that supports 16 bits per channel including alpha.
A four-component, 64-bit signed-integer format that supports 16 bits per channel including alpha.
A two-component, 64-bit typeless format that supports 32 bits for the red channel and 32 bits for the green channel.
A two-component, 64-bit floating-point format that supports 32 bits for the red channel and 32 bits for the green channel.
A two-component, 64-bit unsigned-integer format that supports 32 bits for the red channel and 32 bits for the green channel.
A two-component, 64-bit signed-integer format that supports 32 bits for the red channel and 32 bits for the green channel.
A two-component, 64-bit typeless format that supports 32 bits for the red channel, 8 bits for the green channel, and 24 bits are unused.
A 32-bit floating-point component, and two unsigned-integer components (with an additional 32 bits). This format supports 32-bit depth, 8-bit stencil, and 24 bits are unused.
A 32-bit floating-point component, and two typeless components (with an additional 32 bits). This format supports 32-bit red channel, 8 bits are unused, and 24 bits are unused.
A 32-bit typeless component, and two unsigned-integer components (with an additional 32 bits). This format has 32 bits unused, 8 bits for green channel, and 24 bits are unused.
A four-component, 32-bit typeless format that supports 10 bits for each color and 2 bits for alpha.
A four-component, 32-bit unsigned-normalized-integer format that supports 10 bits for each color and 2 bits for alpha.
A four-component, 32-bit unsigned-integer format that supports 10 bits for each color and 2 bits for alpha.
Three partial-precision floating-point numbers encoded into a single 32-bit value (a variant of s10e5, which is sign bit, 10-bit mantissa, and 5-bit biased (15) exponent). There are no sign bits, and there is a 5-bit biased (15) exponent for each channel, 6-bit mantissa for R and G, and a 5-bit mantissa for B, as shown in the following illustration.
A four-component, 32-bit typeless format that supports 8 bits per channel including alpha.
A four-component, 32-bit unsigned-normalized-integer format that supports 8 bits per channel including alpha.
A four-component, 32-bit unsigned-normalized integer sRGB format that supports 8 bits per channel including alpha.
A four-component, 32-bit unsigned-integer format that supports 8 bits per channel including alpha.
A four-component, 32-bit signed-normalized-integer format that supports 8 bits per channel including alpha.
A four-component, 32-bit signed-integer format that supports 8 bits per channel including alpha.
A two-component, 32-bit typeless format that supports 16 bits for the red channel and 16 bits for the green channel.
A two-component, 32-bit floating-point format that supports 16 bits for the red channel and 16 bits for the green channel.
A two-component, 32-bit unsigned-normalized-integer format that supports 16 bits each for the green and red channels.
A two-component, 32-bit unsigned-integer format that supports 16 bits for the red channel and 16 bits for the green channel.
A two-component, 32-bit signed-normalized-integer format that supports 16 bits for the red channel and 16 bits for the green channel.
A two-component, 32-bit signed-integer format that supports 16 bits for the red channel and 16 bits for the green channel.
A single-component, 32-bit typeless format that supports 32 bits for the red channel.
A single-component, 32-bit floating-point format that supports 32 bits for depth.
A single-component, 32-bit floating-point format that supports 32 bits for the red channel.
A single-component, 32-bit unsigned-integer format that supports 32 bits for the red channel.
A single-component, 32-bit signed-integer format that supports 32 bits for the red channel.
A two-component, 32-bit typeless format that supports 24 bits for the red channel and 8 bits for the green channel.
A 32-bit z-buffer format that supports 24 bits for depth and 8 bits for stencil.
A 32-bit format, that contains a 24 bit, single-component, unsigned-normalized integer, with an additional typeless 8 bits. This format has 24 bits red channel and 8 bits unused.
A 32-bit format, that contains a 24 bit, single-component, typeless format, with an additional 8 bit unsigned integer component. This format has 24 bits unused and 8 bits green channel.
A two-component, 16-bit typeless format that supports 8 bits for the red channel and 8 bits for the green channel.
A two-component, 16-bit unsigned-normalized-integer format that supports 8 bits for the red channel and 8 bits for the green channel.
A two-component, 16-bit unsigned-integer format that supports 8 bits for the red channel and 8 bits for the green channel.
A two-component, 16-bit signed-normalized-integer format that supports 8 bits for the red channel and 8 bits for the green channel.
A two-component, 16-bit signed-integer format that supports 8 bits for the red channel and 8 bits for the green channel.
A single-component, 16-bit typeless format that supports 16 bits for the red channel.
A single-component, 16-bit floating-point format that supports 16 bits for the red channel.
A single-component, 16-bit unsigned-normalized-integer format that supports 16 bits for depth.
A single-component, 16-bit unsigned-normalized-integer format that supports 16 bits for the red channel.
A single-component, 16-bit unsigned-integer format that supports 16 bits for the red channel.
A single-component, 16-bit signed-normalized-integer format that supports 16 bits for the red channel.
A single-component, 16-bit signed-integer format that supports 16 bits for the red channel.
A single-component, 8-bit typeless format that supports 8 bits for the red channel.
A single-component, 8-bit unsigned-normalized-integer format that supports 8 bits for the red channel.
A single-component, 8-bit unsigned-integer format that supports 8 bits for the red channel.
A single-component, 8-bit signed-normalized-integer format that supports 8 bits for the red channel.
A single-component, 8-bit signed-integer format that supports 8 bits for the red channel.
A single-component, 8-bit unsigned-normalized-integer format for alpha only.
A single-component, 1-bit unsigned-normalized integer format that supports 1 bit for the red channel. 2.
Three partial-precision floating-point numbers encoded into a single 32-bit value all sharing the same 5-bit exponent (variant of s10e5, which is sign bit, 10-bit mantissa, and 5-bit biased (15) exponent). There is no sign bit, and there is a shared 5-bit biased (15) exponent and a 9-bit mantissa for each channel, as shown in the following illustration. 2.
A four-component, 32-bit unsigned-normalized-integer format. This packed RGB format is analogous to the UYVY format. Each 32-bit block describes a pair of pixels: (R8, G8, B8) and (R8, G8, B8) where the R8/B8 values are repeated, and the G8 values are unique to each pixel. 3
Width must be even.
A four-component, 32-bit unsigned-normalized-integer format. This packed RGB format is analogous to the YUY2 format. Each 32-bit block describes a pair of pixels: (R8, G8, B8) and (R8, G8, B8) where the R8/B8 values are repeated, and the G8 values are unique to each pixel. 3
Width must be even.
Four-component typeless block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component block-compression format for sRGB data. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component typeless block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component block-compression format for sRGB data. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component typeless block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Four-component block-compression format for sRGB data. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
One-component typeless block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
One-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
One-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Two-component typeless block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Two-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Two-component block-compression format. For information about block-compression formats, see Texture Block Compression in Direct3D 11.
A three-component, 16-bit unsigned-normalized-integer format that supports 5 bits for blue, 6 bits for green, and 5 bits for red.
Direct3D 10 through Direct3D 11: This value is defined for DXGI. However, Direct3D 10, 10.1, or 11 devices do not support this format.
Direct3D 11.1: This value is not supported until Windows 8.
A four-component, 16-bit unsigned-normalized-integer format that supports 5 bits for each color channel and 1-bit alpha.
Direct3D 10 through Direct3D 11: This value is defined for DXGI. However, Direct3D 10, 10.1, or 11 devices do not support this format.
Direct3D 11.1: This value is not supported until Windows 8.
A four-component, 32-bit unsigned-normalized-integer format that supports 8 bits for each color channel and 8-bit alpha.
A four-component, 32-bit unsigned-normalized-integer format that supports 8 bits for each color channel and 8 bits unused.
A four-component, 32-bit 2.8-biased fixed-point format that supports 10 bits for each color channel and 2-bit alpha.
A four-component, 32-bit typeless format that supports 8 bits for each channel including alpha. 4
A four-component, 32-bit unsigned-normalized standard RGB format that supports 8 bits for each channel including alpha. 4
A four-component, 32-bit typeless format that supports 8 bits for each color channel, and 8 bits are unused. 4
A four-component, 32-bit unsigned-normalized standard RGB format that supports 8 bits for each color channel, and 8 bits are unused. 4
A typeless block-compression format. 4 For information about block-compression formats, see Texture Block Compression in Direct3D 11.
A block-compression format. 4 For information about block-compression formats, see Texture Block Compression in Direct3D 11.
A block-compression format. 4 For information about block-compression formats, see Texture Block Compression in Direct3D 11.
A typeless block-compression format. 4 For information about block-compression formats, see Texture Block Compression in Direct3D 11.
A block-compression format. 4 For information about block-compression formats, see Texture Block Compression in Direct3D 11.
A block-compression format. 4 For information about block-compression formats, see Texture Block Compression in Direct3D 11.
Identifies how to perform a present operation.
+Specifies whether to perform a bit-block transfer (bitblt) data to the primary surface.
Setting this member is equivalent to setting the first bit of the 32-bit Value member (0x00000001).
Specifies whether to flip to a new surface.
Setting this member is equivalent to setting the second bit of the 32-bit Value member (0x00000002).
Supported in Windows 8 and later versions.
Specifies that, if a stereo present operation must be reduced to mono, the present operation should show the view from the right eye.
If this member is not set, the default mono present operation shows the view from the left eye.
Setting this member is equivalent to setting the third bit of the 32-bit Value member (0x00000004).
Identifies how to perform a present operation.
+Specifies whether to perform a bit-block transfer (bitblt) data to the primary surface.
Setting this member is equivalent to setting the first bit of the 32-bit Value member (0x00000001).
Specifies whether to flip to a new surface.
Setting this member is equivalent to setting the second bit of the 32-bit Value member (0x00000002).
Supported in Windows 8 and later versions.
Specifies that, if a stereo present operation must be reduced to mono, the present operation should show the view from the right eye.
If this member is not set, the default mono present operation shows the view from the left eye.
Setting this member is equivalent to setting the third bit of the 32-bit Value member (0x00000004).
Flags indicating the memory location of a resource.
+The resource is located in video memory.
At least some of the resource is located in CPU memory.
At least some of the resource has been paged out to the hard drive.
Identifies the importance of a resource's content when you call the IDXGIDevice2::OfferResources method to offer the resource.
+Priority determines how likely the operating system is to discard an offered resource. Resources offered with lower priority are discarded first.
+Options for swap-chain behavior.
+This enumeration is used by the
This enumeration is also used by the DXGI_SWAP_CHAIN_DESC1 structure.
You don't need to set DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY for swap chains that you create in full-screen mode with the
Swap chains that you create with the IDXGIFactory2::CreateSwapChainForHwnd, IDXGIFactory2::CreateSwapChainForCoreWindow, and IDXGIFactory2::CreateSwapChainForComposition methods are not protected if DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY is not set and are protected if DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY is set. When swap chains are protected, screen scraping is prevented and, in full-screen mode, presented content is not accessible through the desktop duplication APIs.
When you call
Options for swap-chain behavior.
+This enumeration is used by the
This enumeration is also used by the DXGI_SWAP_CHAIN_DESC1 structure.
You don't need to set DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY for swap chains that you create in full-screen mode with the
Swap chains that you create with the IDXGIFactory2::CreateSwapChainForHwnd, IDXGIFactory2::CreateSwapChainForCoreWindow, and IDXGIFactory2::CreateSwapChainForComposition methods are not protected if DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY is not set and are protected if DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY is set. When swap chains are protected, screen scraping is prevented and, in full-screen mode, presented content is not accessible through the desktop duplication APIs.
When you call
Set this flag to turn off automatic image rotation; that is, do not perform a rotation when transferring the contents of the front buffer to the monitor. Use this flag to avoid a bandwidth penalty when an application expects to handle rotation. This option is valid only during full-screen mode.
Set this flag to enable an application to switch modes by calling
Set this flag to enable an application to render using GDI on a swap chain or a surface. This will allow the application to call
Options for handling pixels in a display surface after calling IDXGISwapChain1::Present1.
+This enumeration is used by the
This enumeration is also used by the DXGI_SWAP_CHAIN_DESC1 structure.
To use multisampling with
The primary difference between presentation models is how back-buffer contents get to the Desktop Window Manager (DWM) for composition. In the bitblt model, which is used with the
When you call IDXGISwapChain1::Present1 on a flip model swap chain (DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL) with 0 specified in the SyncInterval parameter, IDXGISwapChain1::Present1's behavior is the same as the behavior of Direct3D 9Ex's
Regardless of whether the flip model is more efficient, an application still might choose the bitblt model because the bitblt model is the only way to mix GDI and DirectX presentation. In the flip model, the application must create the swap chain with
For more info about the flip-model swap chain and optimizing presentation, see Enhancing presentation with the flip model, dirty rectangles, and scrolled areas.
+Flags for surface and resource creation options.
+Each flag is defined as an unsigned integer.
#define DXGI_CPU_ACCESS_NONE ( 0 )
#define DXGI_CPU_ACCESS_DYNAMIC ( 1 )
#define DXGI_CPU_ACCESS_READ_WRITE ( 2 )
#define DXGI_CPU_ACCESS_SCRATCH ( 3 )
#define DXGI_CPU_ACCESS_FIELD 15
#define DXGI_USAGE_SHADER_INPUT ( 1L << (0 + 4) )
#define DXGI_USAGE_RENDER_TARGET_OUTPUT ( 1L << (1 + 4) )
#define DXGI_USAGE_BACK_BUFFER ( 1L << (2 + 4) )
#define DXGI_USAGE_SHARED ( 1L << (3 + 4) )
#define DXGI_USAGE_READ_ONLY ( 1L << (4 + 4) )
#define DXGI_USAGE_DISCARD_ON_PRESENT ( 1L << (5 + 4) )
#define DXGI_USAGE_UNORDERED_ACCESS ( 1L << (6 + 4) )
typedef UINT DXGI_USAGE;
These flag options are used in a call to the
These flag options are also used by the
Allows DXGI to monitor an application's message queue for the alt-enter key sequence (which causes the application to switch from windowed to full screen or vice versa).
+Note: If you call this API in a Session 0 process, it returns
The combination of WindowHandle and Flags informs DXGI to stop monitoring window messages for the previously-associated window.
If the application switches to full-screen mode, DXGI will choose a full-screen resolution to be the smallest supported resolution that is larger or the same size as the current back buffer size.
Applications can make some changes to make the transition from windowed to full screen more efficient. For example, on a WM_SIZE message, the application should release any outstanding swap-chain back buffers, call
While windowed, the application can, if it chooses, restrict the size of its window's client area to sizes to which it is comfortable rendering. A fully flexible application would make no such restriction, but UI elements or other design considerations can, of course, make this flexibility untenable. If the application further chooses to restrict its window's client area to just those that match supported full-screen resolutions, the application can field WM_SIZING, then check against
Applications that want to handle mode changes or Alt+Enter themselves should call MakeWindowAssociation with the
The handle of the window that is to be monitored. This parameter can be
One or more of the following values: +
Creates a DXGI 1.1 factory that you can use to generate other DXGI objects.
+The globally unique identifier (
Address of a reference to an
Returns
Use a DXGI 1.1 factory to generate objects that enumerate adapters, create swap chains, and associate a window with the alt+enter key sequence for toggling to and from the full-screen display mode.
If the CreateDXGIFactory1 function succeeds, the reference count on the
This entry point is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Note: Do not mix the use of DXGI 1.0 (
Note: CreateDXGIFactory1 fails if your app's DllMain function calls it. For more info about how DXGI responds from DllMain, see DXGI Responses from DLLMain.
Note: Starting with Windows 8, all DXGI factories (regardless if they were created with CreateDXGIFactory or CreateDXGIFactory1) enumerate adapters identically. The enumeration order of adapters, which you retrieve with
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Creates a DXGI 1.0 factory that you can use to generate other DXGI objects.
+The globally unique identifier (
Address of a reference to an
Returns
Use a DXGI factory to generate objects that enumerate adapters, create swap chains, and associate a window with the alt+enter key sequence for toggling to and from the fullscreen display mode.
If the CreateDXGIFactory function succeeds, the reference count on the
Note: Do not mix the use of DXGI 1.0 (
Note: CreateDXGIFactory fails if your app's DllMain function calls it. For more info about how DXGI responds from DllMain, see DXGI Responses from DLLMain.
Note: Starting with Windows 8, all DXGI factories (regardless if they were created with CreateDXGIFactory or CreateDXGIFactory1) enumerate adapters identically. The enumeration order of adapters, which you retrieve with
The CreateDXGIFactory function does not exist for Windows Store apps. Instead, Windows Store apps use the CreateDXGIFactory1 function.
+The
This interface is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
A display sub-system is often referred to as a video card, however, on some machines the display sub-system is part of the mother board.
To enumerate the display sub-systems, use
Windows Phone 8: This API is supported.
+Gets a DXGI 1.1 description of an adapter (or video card).
+A reference to a
Returns
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Use the GetDesc1 method to get a DXGI 1.1 description of an adapter. To get a DXGI 1.0 description, use the
Windows Phone 8: This API is supported.
+Gets a DXGI 1.1 description of an adapter (or video card).
+This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Use the GetDesc1 method to get a DXGI 1.1 description of an adapter. To get a DXGI 1.0 description, use the
Windows Phone 8: This API is supported.
+An
This interface is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
The
The Direct3D create device functions return a Direct3D device object. This Direct3D device object implements the
IDXGIDevice1 * pDXGIDevice;
hr = g_pd3dDevice->QueryInterface(__uuidof(IDXGIDevice1), (void **)&pDXGIDevice);
Windows Phone 8: This API is supported.
+Sets the number of frames that the system is allowed to queue for rendering.
+The maximum number of back buffer frames that a driver can queue. The value defaults to 3, but can range from 1 to 16. A value of 0 will reset latency to the default. For multi-head devices, this value is specified per-head.
Returns
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Frame latency is the number of frames that are allowed to be stored in a queue before submission for rendering. Latency is often used to control how the CPU chooses between responding to user input and frames that are in the render queue. It is often beneficial for applications that have no user input (for example, video playback) to queue more than 3 frames of data.
Windows Phone 8: This API is supported.
+Gets the number of frames that the system is allowed to queue for rendering.
+This value is set to the number of frames that can be queued for render. This value defaults to 3, but can range from 1 to 16.
Returns
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Frame latency is the number of frames that are allowed to be stored in a queue before submission for rendering. Latency is often used to control how the CPU chooses between responding to user input and frames that are in the render queue. It is often beneficial for applications that have no user input (for example, video playback) to queue more than 3 frames of data.
Windows Phone 8: This API is supported.
+Gets or sets the number of frames that the system is allowed to queue for rendering.
+This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Frame latency is the number of frames that are allowed to be stored in a queue before submission for rendering. Latency is often used to control how the CPU chooses between responding to user input and frames that are in the render queue. It is often beneficial for applications that have no user input (for example, video playback) to queue more than 3 frames of data.
Windows Phone 8: This API is supported.
+Represents a keyed mutex, which allows exclusive access to a shared resource that is used by multiple devices.
+The
An
For information about creating a keyed mutex, see the
Windows Phone 8: This API is supported.
+Using a key, acquires exclusive rendering access to a shared resource.
+A value that indicates which device to give access to. This method will succeed when the device that currently owns the surface calls the
The time-out interval, in milliseconds. This method will return if the interval elapses, and the keyed mutex has not been released using the specified Key. If this value is set to zero, the AcquireSync method will test to see if the keyed mutex has been released and returns immediately. If this value is set to INFINITE, the time-out interval will never elapse.
Return
If the owning device attempted to create another keyed mutex on the same shared resource, AcquireSync returns E_FAIL.
AcquireSync can also return the following DWORD constants. Therefore, you should explicitly check for these constants. If you only use the SUCCEEDED macro on the return value to determine if AcquireSync succeeded, you will not catch these constants.
The AcquireSync method creates a lock to a surface that is shared between multiple devices, allowing only one device to render to a surface at a time. This method uses a key to determine which device currently has exclusive access to the surface.
When a surface is created using the
To acquire a reference to the keyed mutex object of a shared resource, call the QueryInterface method of the resource and pass in the UUID of the
The AcquireSync method uses the key as follows, depending on the state of the surface:
Windows Phone 8: This API is supported.
+Using a key, releases exclusive rendering access to a shared resource.
+A value that indicates which device to give access to. This method succeeds when the device that currently owns the surface calls the ReleaseSync method using the same value. This value can be any UINT64 value.
Returns
If the device attempted to release a keyed mutex that is not valid or owned by the device, ReleaseSync returns E_FAIL.
The ReleaseSync method releases a lock to a surface that is shared between multiple devices. This method uses a key to determine which device currently has exclusive access to the surface.
When a surface is created using the
After you call the ReleaseSync method, the shared resource is unset from the rendering pipeline.
To acquire a reference to the keyed mutex object of a shared resource, call the QueryInterface method of the resource and pass in the UUID of the
Windows Phone 8: This API is supported.
+An
To see the outputs available, use
Windows Phone 8: This API is supported.
+Get a description of the output.
+A reference to the output description (see
Returns a code that indicates success or failure.
On a high DPI desktop, GetDesc returns the visualized screen size unless the app is marked high DPI aware. For info about writing DPI-aware Win32 apps, see High DPI.
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use GetDisplayModeList anymore to retrieve the matching display mode. Instead, use IDXGIOutput1::GetDisplayModeList1, which supports stereo display mode.]
Gets the display modes that match the requested format and other input options.
+Returns one of the following DXGI_ERROR. It is rare, but possible, that the display modes available can change immediately after calling this method, in which case
In general, when switching from windowed to full-screen mode, a swap chain automatically chooses a display mode that meets (or exceeds) the resolution, color depth and refresh rate of the swap chain. To exercise more control over the display mode, use this API to poll the set of display modes that are validated against monitor capabilities, or all modes that match the desktop (if the desktop settings are not validated against the monitor).
As shown, this API is designed to be called twice. First to get the number of modes available, and second to return a description of the modes.
UINT num = 0;
DXGI_FORMAT format = DXGI_FORMAT_R32G32B32A32_FLOAT;
UINT flags = DXGI_ENUM_MODES_INTERLACED;
pOutput->GetDisplayModeList( format, flags, &num, 0);
...
DXGI_MODE_DESC * pDescs = new DXGI_MODE_DESC[num];
pOutput->GetDisplayModeList( format, flags, &num, pDescs);
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use FindClosestMatchingMode anymore to find the display mode that most closely matches the requested display mode. Instead, use IDXGIOutput1::FindClosestMatchingMode1, which supports stereo display mode.]
Finds the display mode that most closely matches the requested display mode.
+Returns one of the following DXGI_ERROR.
FindClosestMatchingMode behaves similarly to the IDXGIOutput1::FindClosestMatchingMode1 except FindClosestMatchingMode considers only the mono display modes. IDXGIOutput1::FindClosestMatchingMode1 considers only stereo modes if you set the Stereo member in the DXGI_MODE_DESC1 structure that pModeToMatch points to, and considers only mono modes if Stereo is not set.
IDXGIOutput1::FindClosestMatchingMode1 returns a matched display-mode set with only stereo modes or only mono modes. + FindClosestMatchingMode behaves as though you specified the input mode as mono.
Windows Phone 8: This API is supported.
+Halt a thread until the next vertical blank occurs.
+Returns one of the following DXGI_ERROR.
A vertical blank occurs when the raster moves from the lower right corner to the upper left corner to begin drawing the next frame.
Windows Phone 8: This API is supported.
+Takes ownership of an output.
+A reference to the
Set to TRUE to enable other threads or applications to take ownership of the device; otherwise, set to
Returns one of the DXGI_ERROR values.
When you are finished with the output, call
TakeOwnership should not be called directly by applications, since results will be unpredictable. It is called implicitly by the DXGI swap chain object during full-screen transitions, and should not be used as a substitute for swap-chain methods.
+Releases ownership of the output.
+If you are not using a swap chain, get access to an output by calling
Gets a description of the gamma-control capabilities.
+A reference to a description of the gamma-control capabilities (see
Returns one of the DXGI_ERROR values.
Note: Calling this method is only supported while in full-screen mode.
For info about using gamma correction, see Using gamma correction.
+Sets the gamma controls.
+A reference to a
Returns one of the DXGI_ERROR values.
Note: Calling this method is only supported while in full-screen mode.
For info about using gamma correction, see Using gamma correction.
+Gets the gamma control settings.
+An array of gamma control settings (see
Returns one of the DXGI_ERROR values.
Note: Calling this method is only supported while in full-screen mode.
For info about using gamma correction, see Using gamma correction.
+Changes the display mode.
+A reference to a surface (see
Returns one of the DXGI_ERROR values.
This method should only be called between
[Starting with Direct3D 11.1, we recommend not to use GetDisplaySurfaceData anymore to retrieve the current display surface. Instead, use IDXGIOutput1::GetDisplaySurfaceData1, which supports stereo display mode.]
Gets a copy of the current display surface.
+Returns one of the DXGI_ERROR values.
Use
Gets statistics about recently rendered frames.
+A reference to frame statistics (see
If this function succeeds, it returns
This API is similar to
Note: Calling this method is only supported while in full-screen mode.
+ UINT num = 0;
+ DXGI_FORMAT format = DXGI_FORMAT_R32G32B32A32_FLOAT;
+ UINT flags = DXGI_ENUM_MODES_INTERLACED; pOutput->GetDisplayModeList( format, flags, &num, 0); ... DXGI_MODE_DESC * pDescs = new DXGI_MODE_DESC[num];
+ pOutput->GetDisplayModeList( format, flags, &num, pDescs);
+
+
+ Get a description of the output.
+On a high DPI desktop, GetDesc returns the visualized screen size unless the app is marked high DPI aware. For info about writing DPI-aware Win32 apps, see High DPI.
Windows Phone 8: This API is supported.
+Gets a description of the gamma-control capabilities.
+
Note: Calling this method is only supported while in full-screen mode.
For info about using gamma correction, see Using gamma correction.
+Gets or sets the gamma control settings.
+
Note: Calling this method is only supported while in full-screen mode.
For info about using gamma correction, see Using gamma correction.
+Gets statistics about recently rendered frames.
+This API is similar to
Note: Calling this method is only supported while in full-screen mode.
+An
To find out what type of memory a resource is currently located in, use
You can retrieve the
IDXGIResource * pDXGIResource; hr = g_pd3dTexture2D->QueryInterface(__uuidof(IDXGIResource), (void **)&pDXGIResource);
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use GetSharedHandle anymore to retrieve the handle to a shared resource. Instead, use IDXGIResource1::CreateSharedHandle to get a handle for sharing. To use IDXGIResource1::CreateSharedHandle, you must create the resource as shared and specify that it uses NT handles (that is, you set the D3D11_RESOURCE_MISC_SHARED_NTHANDLE flag). We also recommend that you create shared resources that use NT handles so you can use CloseHandle, DuplicateHandle, and so on on those shared resources.]
Gets the handle to a shared resource.
+Returns one of the DXGI_ERROR values.
GetSharedHandle returns a handle for the resource that you created as shared (that is, you set the
The creator of a shared resource must not destroy the resource until all intended entities have opened the resource. The validity of the handle is tied to the lifetime of the underlying video memory. If no resource objects exist on any devices that refer to this resource, the handle is no longer valid. To extend the lifetime of the handle and video memory, you must open the shared resource on a device.
GetSharedHandle can also return handles for resources that were passed into
GetSharedHandle fails if the resource to which it wants to get a handle is not shared.
Windows Phone 8: This API is supported.
+Get the expected resource usage.
+A reference to a usage flag (see DXGI_USAGE). For Direct3D 10, a surface can be used as a shader input or a render-target output.
Returns one of the following DXGI_ERROR.
Windows Phone 8: This API is supported.
+Set the priority for evicting the resource from memory.
+The priority is one of the following values:
Value | Meaning |
---|---|
| The resource is unused and can be evicted as soon as another resource requires the memory that the resource occupies. |
| The eviction priority of the resource is low. The placement of the resource is not critical, and minimal work is performed to find a location for the resource. For example, if a GPU can render with a vertex buffer from either local or non-local memory with little difference in performance, that vertex buffer is low priority. Other more critical resources (for example, a render target or texture) can then occupy the faster memory. |
| The eviction priority of the resource is normal. The placement of the resource is important, but not critical, for performance. The resource is placed in its preferred location instead of a low-priority resource. |
| The eviction priority of the resource is high. The resource is placed in its preferred location instead of a low-priority or normal-priority resource. |
| The resource is evicted from memory only if there is no other way of resolving the memory requirement. |
?
Returns one of the following DXGI_ERROR.
The eviction priority is a memory-management variable that is used by DXGI for determining how to populate overcommitted memory.
You can set priority levels other than the defined values when appropriate. For example, you can set a resource with a priority level of 0x78000001 to indicate that the resource is slightly above normal.
Windows Phone 8: This API is supported.
+Get the eviction priority.
+A reference to the eviction priority, which determines when a resource can be evicted from memory.
The following defined values are possible.
Value | Meaning |
---|---|
| The resource is unused and can be evicted as soon as another resource requires the memory that the resource occupies. |
| The eviction priority of the resource is low. The placement of the resource is not critical, and minimal work is performed to find a location for the resource. For example, if a GPU can render with a vertex buffer from either local or non-local memory with little difference in performance, that vertex buffer is low priority. Other more critical resources (for example, a render target or texture) can then occupy the faster memory. |
| The eviction priority of the resource is normal. The placement of the resource is important, but not critical, for performance. The resource is placed in its preferred location instead of a low-priority resource. |
| The eviction priority of the resource is high. The resource is placed in its preferred location instead of a low-priority or normal-priority resource. |
| The resource is evicted from memory only if there is no other way of resolving the memory requirement. |
?
Returns one of the following DXGI_ERROR.
The eviction priority is a memory-management variable that is used by DXGI to determine how to manage overcommitted memory.
Priority levels other than the defined values are used when appropriate. For example, a resource with a priority level of 0x78000001 indicates that the resource is slightly above normal.
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use GetSharedHandle anymore to retrieve the handle to a shared resource. Instead, use IDXGIResource1::CreateSharedHandle to get a handle for sharing. To use IDXGIResource1::CreateSharedHandle, you must create the resource as shared and specify that it uses NT handles (that is, you set the D3D11_RESOURCE_MISC_SHARED_NTHANDLE flag). We also recommend that you create shared resources that use NT handles so you can use CloseHandle, DuplicateHandle, and so on on those shared resources.]
Gets the handle to a shared resource.
+GetSharedHandle returns a handle for the resource that you created as shared (that is, you set the
The creator of a shared resource must not destroy the resource until all intended entities have opened the resource. The validity of the handle is tied to the lifetime of the underlying video memory. If no resource objects exist on any devices that refer to this resource, the handle is no longer valid. To extend the lifetime of the handle and video memory, you must open the shared resource on a device.
GetSharedHandle can also return handles for resources that were passed into
GetSharedHandle fails if the resource to which it wants to get a handle is not shared.
Windows Phone 8: This API is supported.
+Get the expected resource usage.
+Windows Phone 8: This API is supported.
+Gets or sets the eviction priority.
+The eviction priority is a memory-management variable that is used by DXGI to determine how to manage overcommitted memory.
Priority levels other than the defined values are used when appropriate. For example, a resource with a priority level of 0x78000001 indicates that the resource is slightly above normal.
Windows Phone 8: This API is supported.
+The
An image-data object is a 2D section of memory, commonly called a surface. To get the surface from an output, call
The runtime automatically creates an
Windows Phone 8: This API is supported.
+Get a description of the surface.
+A reference to the surface description (see
Returns
Windows Phone 8: This API is supported.
+Get a reference to the data contained in the surface, and deny GPU access to the surface.
+A reference to the surface data (see
CPU read-write flags. These flags can be combined with a logical OR.
Returns
Use
Windows Phone 8: This API is supported.
+Invalidate the reference to the surface retrieved by
Returns
Windows Phone 8: This API is supported.
+Get a description of the surface.
+Windows Phone 8: This API is supported.
+The
This interface is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
An image-data object is a 2D section of memory, commonly called a surface. To get the surface from an output, call
Any object that supports
The runtime automatically creates an
Returns a device context (DC) that allows you to render to a Microsoft DirectX Graphics Infrastructure (DXGI) surface using Windows Graphics Device Interface (GDI).
+A Boolean value that specifies whether to preserve Direct3D contents in the GDI DC. TRUE directs the runtime not to preserve Direct3D contents in the GDI DC; that is, the runtime discards the Direct3D contents.
A reference to an
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
After you use the GetDC method to retrieve a DC, you can render to the DXGI surface by using GDI. The GetDC method readies the surface for GDI rendering and allows inter-operation between DXGI and GDI technologies.
Keep the following in mind when using this method:
You can also call GetDC on the back buffer at index 0 of a swap chain by obtaining an
+IDXGISwapChain * g_pSwapChain = null; IDXGISurface1 * g_pSurface1 = null; ... //Set up the device and swap chain ... g_pSwapChain->GetBuffer(0, __uuidof(IDXGISurface1), (void**) &g_pSurface1); g_pSurface1->GetDC( FALSE, &g_hDC ); ... //Draw on the DC using GDI ... //When finished drawing, release the DC g_pSurface1->ReleaseDC( null );
Releases the GDI device context (DC) that is associated with the current surface and allows you to use Direct3D to render.
+A reference to a
You can pass a reference to an empty
If this method succeeds, it returns
This method is not supported by DXGI 1.0, which shipped in Windows Vista and Windows Server 2008. DXGI 1.1 support is required, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista with Service Pack 2 (SP2) (KB 971644) and Windows Server 2008 (KB 971512).
Use the ReleaseDC method to release the DC and indicate that your application finished all GDI rendering to this surface. You must call the ReleaseDC method before you can use Direct3D to perform additional rendering.
Prior to resizing buffers you must release all outstanding DCs.
+An
You can create a swap chain by
+ calling IDXGIFactory2::CreateSwapChainForHwnd, IDXGIFactory2::CreateSwapChainForCoreWindow, or IDXGIFactory2::CreateSwapChainForComposition. You can also create a swap chain when you call
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use Present anymore to present a rendered image. Instead, use IDXGISwapChain1::Present1. For more info, see Remarks.]
Presents a rendered image to the user.
+Possible return values include:
Note: The Present method can return either
Starting with Direct3D 11.1, we recommend to instead use IDXGISwapChain1::Present1 because you can then use dirty rectangles and the scroll rectangle in the swap chain presentation and as such use less memory bandwidth and as a result less system power. For more info about using dirty rectangles and the scroll rectangle in swap chain presentation, see Using dirty rectangles and the scroll rectangle in swap chain presentation.
For the best performance when flipping swap-chain buffers in a full-screen application, see Full-Screen Application Performance Hints.
Because calling Present might cause the render thread to wait on the message-pump thread, be careful when calling this method in an application that uses multiple threads. For more details, see Multithreading Considerations.
Differences between Direct3D 9 and Direct3D 10: Specifying |
?
For flip presentation model swap chains that you create with the DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL value set, a successful presentation unbinds back buffer 0 from the graphics pipeline, except for when you pass the
For info about how data values change when you present content to the screen, see Converting data for the color space.
+Accesses one of the swap-chain's back buffers.
+A zero-based buffer index.
If the swap chain's swap effect is
If the swap chain's swap effect is either
The type of interface used to manipulate the buffer.
A reference to a back-buffer interface.
Returns one of the following DXGI_ERROR.
Windows Phone 8: This API is supported.
+Sets the display state to windowed or full screen.
+A Boolean value that specifies whether to set the display state to windowed or full screen. TRUE for full screen, and
If you pass TRUE to the Fullscreen parameter to set the display state to full screen, you can optionally set this parameter to a reference to an
This methods returns:
When this error is returned, an application can continue to run in windowed mode and try to switch to full-screen mode later.
DXGI may change the display state of a swap chain in response to end user or system requests.
We recommend that you create a windowed swap chain and allow the end user to change the swap chain to full screen through SetFullscreenState; that is, do not set the Windowed member of
Get the state associated with full-screen mode.
+A reference to a boolean whose value is either:
A reference to the output target (see
Returns one of the following DXGI_ERROR.
When the swap chain is in full-screen mode, a reference to the target output will be returned and its reference count will be incremented.
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use GetDesc anymore to get a description of the swap chain. Instead, use IDXGISwapChain1::GetDesc1.]
Get a description of the swap chain.
+Returns one of the following DXGI_ERROR.
Windows Phone 8: This API is supported.
+Changes the swap chain's back buffer size, format, and number of buffers. This should be called when the application window is resized.
+The number of buffers in the swap chain (including all back and front buffers). This number can be different from the number of buffers with which you created the swap chain. This number can't be greater than DXGI_MAX_SWAP_CHAIN_BUFFERS. Set this number to zero to preserve the existing number of buffers in the swap chain. You can't specify less than two buffers for the flip presentation model.
New width of the back buffer. If you specify zero, DXGI will use the width of the client area of the target window. You can't specify the width as zero if you called the IDXGIFactory2::CreateSwapChainForComposition method to create the swap chain for a composition surface.
New height of the back buffer. If you specify zero, DXGI will use the height of the client area of the target window. You can't specify the height as zero if you called the IDXGIFactory2::CreateSwapChainForComposition method to create the swap chain for a composition surface.
A
A combination of
Returns
You can't resize a swap chain unless you release all outstanding references to its back buffers. You must release all of its direct and indirect references on the back buffers in order for ResizeBuffers to succeed.
Direct references are held by the application after it calls AddRef on a resource.
Indirect references are held by views to a resource, binding a view of the resource to a device context, a command list that used the resource, a command list that used a view to that resource, a command list that executed another command list that used the resource, and so on.
Before you call ResizeBuffers, ensure that the application releases all references (by calling the appropriate number of Release invocations) on the resources, any views to the resource, and any command lists that use either the resources or views, and ensure that neither the resource nor a view is still bound to a device context. You can use
For swap chains that you created with
We recommend that you call ResizeBuffers when a client window is resized (that is, when an application receives a WM_SIZE message).
The only difference between ResizeBuffers in Windows 8 and ResizeBuffers in Windows 7 is with flip presentation model swap chains that you create with the DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL value set. In Windows 8, you must call ResizeBuffers to realize a transition between full-screen mode and windowed mode; otherwise, your next call to the Present method fails.
+Resizes the output target.
+A reference to a
Returns a code that indicates success or failure.
ResizeTarget resizes the target window when the swap chain is in windowed mode, and changes the display mode on the target output when the swap chain is in full-screen mode. Therefore, apps can call ResizeTarget to resize the target window (rather than a Microsoft Win32 API such as SetWindowPos) without knowledge of the swap chain display mode.
If a Windows Store app calls ResizeTarget, it fails with
You cannot call ResizeTarget on a windowless swap chain that you created with IDXGIFactory2::CreateSwapChainForComposition.
Apps must still call
Get the output (the display monitor) that contains the majority of the client area of the target window.
+A reference to the output interface (see
Returns one of the following DXGI_ERROR.
If the method succeeds, the output interface will be filled and its reference count incremented. When you are finished with it, be sure to release the interface to avoid a memory leak.
The output is also owned by the adapter on which the swap chain's device was created.
You cannot call GetContainingOutput on a windowless swap chain that you created with IDXGIFactory2::CreateSwapChainForComposition.
Windows Phone 8: This API is supported.
+Gets performance statistics about the last render frame.
+A reference to a
Returns one of the DXGI_ERROR values.
You cannot use GetFrameStatistics for swap chains that both use the bit-block transfer (bitblt) presentation model and draw in windowed mode.
You can only use GetFrameStatistics for swap chains that either use the flip presentation model or draw in full-screen mode. You set the DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL value in the SwapEffect member of the DXGI_SWAP_CHAIN_DESC1 structure to specify that the swap chain uses the flip presentation model.
+Gets the number of times that
Returns one of the DXGI_ERROR values.
For info about presentation statistics for a frame, see
Windows Phone 8: This API is supported.
+[Starting with Direct3D 11.1, we recommend not to use GetDesc anymore to get a description of the swap chain. Instead, use IDXGISwapChain1::GetDesc1.]
Get a description of the swap chain.
+Windows Phone 8: This API is supported.
+Get the output (the display monitor) that contains the majority of the client area of the target window.
+If the method succeeds, the output interface will be filled and its reference count incremented. When you are finished with it, be sure to release the interface to avoid a memory leak.
The output is also owned by the adapter on which the swap chain's device was created.
You cannot call GetContainingOutput on a windowless swap chain that you created with IDXGIFactory2::CreateSwapChainForComposition.
Windows Phone 8: This API is supported.
+Gets performance statistics about the last render frame.
+You cannot use GetFrameStatistics for swap chains that both use the bit-block transfer (bitblt) presentation model and draw in windowed mode.
You can only use GetFrameStatistics for swap chains that either use the flip presentation model or draw in full-screen mode. You set the DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL value in the SwapEffect member of the DXGI_SWAP_CHAIN_DESC1 structure to specify that the swap chain uses the flip presentation model.
+Gets the number of times that
For info about presentation statistics for a frame, see
Windows Phone 8: This API is supported.
+Describes an adapter (or video card) by using DXGI 1.0.
+The
A string that contains the adapter description. On feature level 9 graphics hardware, GetDesc returns "Software Adapter" for the description string.
The PCI ID of the hardware vendor. On feature level 9 graphics hardware, GetDesc returns zeros for the PCI ID of the hardware vendor.
The PCI ID of the hardware device. On feature level 9 graphics hardware, GetDesc returns zeros for the PCI ID of the hardware device.
The PCI ID of the sub system. On feature level 9 graphics hardware, GetDesc returns zeros for the PCI ID of the sub system.
The PCI ID of the revision number of the adapter. On feature level 9 graphics hardware, GetDesc returns zeros for the PCI ID of the revision number of the adapter.
The number of bytes of dedicated video memory that are not shared with the CPU.
The number of bytes of dedicated system memory that are not shared with the CPU. This memory is allocated from available system memory at boot time.
The number of bytes of shared system memory. This is the maximum value of system memory that may be consumed by the adapter during operation. Any incidental memory consumed by the driver as it manages and uses video memory is additional.
A unique value that identifies the adapter. See
Describes an adapter (or video card) using DXGI 1.1.
+The
A string that contains the adapter description. On feature level 9 graphics hardware, GetDesc1 returns "Software Adapter" for the description string.
The PCI ID of the hardware vendor. On feature level 9 graphics hardware, GetDesc1 returns zeros for the PCI ID of the hardware vendor.
The PCI ID of the hardware device. On feature level 9 graphics hardware, GetDesc1 returns zeros for the PCI ID of the hardware device.
The PCI ID of the sub system. On feature level 9 graphics hardware, GetDesc1 returns zeros for the PCI ID of the sub system.
The PCI ID of the revision number of the adapter. On feature level 9 graphics hardware, GetDesc1 returns zeros for the PCI ID of the revision number of the adapter.
The number of bytes of dedicated video memory that are not shared with the CPU.
The number of bytes of dedicated system memory that are not shared with the CPU. This memory is allocated from available system memory at boot time.
The number of bytes of shared system memory. This is the maximum value of system memory that may be consumed by the adapter during operation. Any incidental memory consumed by the driver as it manages and uses video memory is additional.
A unique value that identifies the adapter. See
A value of the
Describes timing and presentation statistics for a frame.
+You initialize the
You can only use
The values in the PresentCount and PresentRefreshCount members indicate information about when a frame was presented on the display screen. You can use these values to determine whether a glitch occurred. The values in the SyncRefreshCount and SyncQPCTime members indicate timing information that you can use for audio and video synchronization or very precise animation. If the swap chain draws in full-screen mode, these values are based on when the computer booted. + If the swap chain draws in windowed mode, these values are based on when the swap chain is created.
+A value that represents the running total count of times that an image was presented to the monitor since the computer booted.
Note: The number of times that an image was presented to the monitor is not necessarily the same as the number of times that you called
A value that represents the running total count of v-blanks at which the last image was presented to the monitor and that have happened since the computer booted (for windowed mode, since the swap chain was created).
A value that represents the running total count of v-blanks when the scheduler last sampled the machine time by calling QueryPerformanceCounter and that have happened since the computer booted (for windowed mode, since the swap chain was created).
A value that represents the high-resolution performance counter timer. This value is the same as the value returned by the QueryPerformanceCounter function.
Reserved. Always returns 0.
Controls the settings of a gamma curve.
+The
For info about using gamma correction, see Using gamma correction.
+A
A
An array of
Controls the gamma capabilities of an adapter.
+To get a list of the capabilities for controlling gamma correction, call
For info about using gamma correction, see Using gamma correction.
+True if scaling and offset operations are supported during gamma correction; otherwise, false.
A value describing the maximum range of the control-point positions.
A value describing the minimum range of the control-point positions.
A value describing the number of control points in the array.
An array of values describing control points; the maximum length of control points is 1025.
Describes a mapped rectangle that is used to access a surface.
+The
A value that describes the width, in bytes, of the surface.
A reference to the image buffer of the surface.
Describes a display mode.
+The following format values are valid for display modes and when you create a bit-block transfer (bitblt) model swap chain. The valid values depend on the feature level that you are working with.
Feature level >= 9.1
Feature level >= 10.0
Feature level >= 11.0
You can pass one of these format values to
Starting with Windows 8 for a flip model swap chain (that is, a swap chain that has the DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL value set in the SwapEffect member of
Because of the relaxed render target creation rules that Direct3D 11 has for back buffers, applications can create a
A value that describes the resolution width. If you specify the width as zero when you call the
A value describing the resolution height. If you specify the height as zero when you call the
A
A
A member of the
A member of the
Describes an output or physical connection between the adapter (video card) and a device.
+The
A string that contains the name of the output device.
A
True if the output is attached to the desktop; otherwise, false.
A member of the
An
Represents a rational number.
+The
An unsigned integer value representing the numerator (top) of the rational number.
An unsigned integer value representing the denominator (bottom) of the rational number.
Describes multi-sampling parameters for a resource.
+The default sampler mode, with no anti-aliasing, has a count of 1 and a quality level of 0.
If multi-sample antialiasing is being used, all bound render targets and depth buffers must have the same sample counts and quality levels.
Differences between Direct3D 10.0 and Direct3D 10.1 and between Direct3D 10.0 and Direct3D 11: Direct3D 10.1 has defined two standard quality levels: Direct3D 11 has defined two standard quality levels: |
?
+The number of multisamples per pixel.
The image quality level. The higher the quality, the lower the performance. The valid range is between zero and one less than the level returned by
For Direct3D 10.1 and Direct3D 11, you can use two special quality level values. For more information about these quality level values, see Remarks.
Represents a handle to a shared resource.
+To create a shared surface, pass a shared-resource handle into the
A handle to a shared resource.
Describes a surface.
+A value describing the surface width.
A value describing the surface height.
A member of the
A member of the
Describes a swap chain.
+In full-screen mode, there is a dedicated front buffer; in windowed mode, the desktop is the front buffer.
If you create a swap chain with one buffer, specifying
For performance information about flipping swap-chain buffers in full-screen application, see Full-Screen Application Performance Hints.
+A
A
A member of the DXGI_USAGE enumerated type that describes the surface usage and CPU access options for the back buffer. The back buffer can be used for shader input or render-target output.
A value that describes the number of buffers in the swap chain. When you call
An
A Boolean value that specifies whether the output is in windowed mode. TRUE if the output is in windowed mode; otherwise,
We recommend that you create a windowed swap chain and allow the end user to change the swap chain to full screen through
For more information about choosing windowed versus full screen, see
A member of the
A member of the
Indicates whether the specified matrix is invertible.
+The matrix to test.
true if the matrix was inverted; otherwise, false.
Windows Phone 8.1: This API is supported.
+Tries to invert the specified matrix.
+The matrix to invert.
true if the matrix was inverted; otherwise, false.
Windows Phone 8.1: This API is supported.
+Creates a skew transformation that has the specified x-axis angle, y-axis angle, and center point.
+The x-axis skew angle, which is measured in degrees counterclockwise from the y-axis.
The y-axis skew angle, which is measured in degrees counterclockwise from the x-axis.
The center point of the skew operation.
When this method returns, contains the skew transformation. You must allocate storage for this parameter.
Windows Phone 8.1: This API is supported.
+Creates a factory object that can be used to create Direct2D resources.
+The threading model of the factory and the resources it creates.
A reference to the IID of
The level of detail provided to the debugging layer.
When this method returns, contains the address to a reference to the new factory.
If this function succeeds, it returns
The
Windows Phone 8.1: This API is supported.
+Creates a rotation transformation that rotates by the specified angle about the specified point.
+The clockwise rotation angle, in degrees.
The point about which to rotate.
When this method returns, contains the new rotation transformation. You must allocate storage for this parameter.
Rotation occurs in the plane of the 2-D surface.
Windows Phone 8.1: This API is supported.
+A built-in implementation of the
Handles loading font file resources of a particular type from a font file reference key into a font file stream object.
+The font file loader interface is recommended to be implemented by a singleton object. Note that font file loader implementations must not register themselves with DirectWrite factory inside their constructors and must not unregister themselves in their destructors, because registration and unregistration operations increment and decrement the object reference count respectively. Instead, registration and unregistration of font file loaders with DirectWrite factory should be performed outside of the font file loader implementation as a separate step.
+Handles loading font file resources of a particular type from a font file reference key into a font file stream object.
+The font file loader interface is recommended to be implemented by a singleton object. Note that font file loader implementations must not register themselves with DirectWrite factory inside their constructors and must not unregister themselves in their destructors, because registration and unregistration operations increment and decrement the object reference count respectively. Instead, registration and unregistration of font file loaders with DirectWrite factory should be performed outside of the font file loader implementation as a separate step.
+Creates a font file stream object that encapsulates an open file resource.
+A reference to a font file reference key that uniquely identifies the font file resource within the scope of the font loader being used. The buffer allocated for this key must at least be the size, in bytes, specified by fontFileReferenceKeySize.
The size of font file reference key, in bytes.
When this method returns, contains the address of a reference to the newly created
If this method succeeds, it returns
The resource is closed when the last reference to fontFileStream is released.
+Obtains the absolute font file path from the font file reference key.
+The font file reference key that uniquely identifies the local font file within the scope of the font loader being used.
If this method succeeds, the absolute font file path from the font file reference key.
Obtains the last write time of the file from the font file reference key.
+The font file reference key that uniquely identifies the local font file within the scope of the font loader being used.
The time of the last font file modification.
Obtains the length of the absolute file path from the font file reference key.
+Font file reference key that uniquely identifies the local font file within the scope of the font loader being used.
Size of font file reference key in bytes.
Length of the file path string, not including the terminated
Obtains the absolute font file path from the font file reference key.
+The font file reference key that uniquely identifies the local font file within the scope of the font loader being used.
The size of font file reference key in bytes.
The character array that receives the local file path.
The length of the file path character array.
If this method succeeds, it returns
Obtains the last write time of the file from the font file reference key.
+The font file reference key that uniquely identifies the local font file within the scope of the font loader being used.
The size of font file reference key in bytes.
The time of the last font file modification.
Contains the content bounds, mask information, opacity settings, and other options for a layer resource.
+The content bounds of the layer. Content outside these bounds is not guaranteed to render.
The geometric mask specifies the area of the layer that is composited into the render target.
A value that specifies the antialiasing mode for the geometricMask.
A value that specifies the transform that is applied to the geometric mask when composing the layer.
An opacity value that is applied uniformly to all resources in the layer when compositing to the target.
A brush that is used to modify the opacity of the layer. The brush + is mapped to the layer, and the alpha channel of each mapped brush pixel is multiplied against the corresponding layer pixel.
A value that specifies whether the layer intends to render text with ClearType antialiasing.
Specifies the identifiers of the metadata items in an 8BIM IPTC digest metadata block.
+[VT_LPSTR] A name that identifies the 8BIM block.
[VT_BLOB] The embedded IPTC digest value.
Specifies the identifiers of the metadata items in an 8BIM IPTC block.
+[VT_LPSTR] A name that identifies the 8BIM block.
[VT_UNKNOWN] The IPTC block embedded in this 8BIM IPTC block.
Specifies the identifiers of the metadata items in an 8BIMResolutionInfo block.
+[VT_LPSTR] A name that identifies the 8BIM block.
[VT_UI4] The horizontal resolution of the image.
[VT_UI2] The units that the horizontal resolution is specified in; a 1 indicates pixels per inch and a 2 indicates pixels per centimeter.
[VT_UI2] The units that the image width is specified in; a 1 indicates inches, a 2 indicates centimeters, a 3 indicates points, a 4 specifies picas, and a 5 specifies columns.
[VT_UI4] The vertical resolution of the image.
[VT_UI2] The units that the vertical resolution is specified in; a 1 indicates pixels per inch and a 2 indicates pixels per centimeter.
[VT_UI2] The units that the image height is specified in; a 1 indicates inches, a 2 indicates centimeters, a 3 indicates points, a 4 specifies picas, and a 5 specifies columns.
Specifies the desired alpha channel usage.
+Use alpha channel.
Use a pre-multiplied alpha channel.
Ignore alpha channel.
Specifies the desired cache usage.
+The CreateBitmap of the
Do not cache the bitmap.
Cache the bitmap when needed.
Cache the bitmap at initialization.
Specifies the capabilities of the decoder.
+Decoder recognizes the image was encoded with an encoder produced by the same vendor.
Decoder can decode all the images within an image container.
Decoder can decode some of the images within an image container.
Decoder can enumerate the metadata blocks within a container format.
Decoder can find and decode a thumbnail.
Specifies the type of dither algorithm to apply when converting between image formats.
+A solid color algorithm without dither.
A solid color algorithm without dither.
A 4x4 ordered dither algorithm.
An 8x8 ordered dither algorithm.
A 16x16 ordered dither algorithm.
A 4x4 spiral dither algorithm.
An 8x8 spiral dither algorithm.
A 4x4 dual spiral dither algorithm.
An 8x8 dual spiral dither algorithm.
An error diffusion algorithm.
Specifies the cache options available for an encoder.
+The encoder is cached in memory. This option is not supported.
The encoder is cached to a temporary file. This option is not supported.
The encoder is not cached.
Specifies the sampling or filtering mode to use when scaling an image.
+A nearest neighbor interpolation algorithm. Also known as nearest pixel or point interpolation.
The output pixel is assigned the value of the pixel that the point falls within. No other pixels are considered.
A bilinear interpolation algorithm.
The output pixel values are computed as a weighted average of the nearest four pixels in a 2x2 grid.
A bicubic interpolation algorithm.
Destination pixel values are computed as a weighted average of the nearest sixteen pixels in a 4x4 grid.
A Fant resampling algorithm.
Destination pixel values are computed as a weighted average of all the pixels that map to the new pixel.
Specifies access to an
Specifies the type of palette used for an indexed image format.
+An arbitrary custom palette provided by caller.
An optimal palette generated using a median-cut algorithm. Derived from the colors in an image.
A black and white palette.
A palette that has its 8-color on-off primaries and the 16 system colors added. With duplicates removed, 16 colors are available.
A palette that has 3 intensity levels of each primary: 27-color on-off primaries and the 16 system colors added. With duplicates removed, 35 colors are available.
A palette that has 4 intensity levels of each primary: 64-color on-off primaries and the 16 system colors added. With duplicates removed, 72 colors are available.
A palette that has 5 intensity levels of each primary: 125-color on-off primaries and the 16 system colors added. With duplicates removed, 133 colors are available.
A palette that has 6 intensity levels of each primary: 216-color on-off primaries and the 16 system colors added. With duplicates removed, 224 colors are available. This is the same as WICBitmapPaletteFixedHalftoneWeb.
A palette that has 6 intensity levels of each primary: 216-color on-off primaries and the 16 system colors added. With duplicates removed, 224 colors are available. This is the same as
A palette that has its 252-color on-off primaries and the 16 system colors added. With duplicates removed, 256 colors are available.
A palette that has its 256-color on-off primaries and the 16 system colors added. With duplicates removed, 256 colors are available.
A palette that has 4 shades of gray.
A palette that has 16 shades of gray.
A palette that has 256 shades of gray.
Specifies the flip and rotation transforms.
+A rotation of 0 degrees.
A clockwise rotation of 90 degrees.
A clockwise rotation of 180 degrees.
A clockwise rotation of 270 degrees.
A horizontal flip. Pixels are flipped around the vertical y-axis.
A vertical flip. Pixels are flipped around the horizontal x-axis.
Specifies the color context types.
+An uninitialized color context.
A color context that is a full ICC color profile.
A color context that is one of a number of set color spaces (sRGB, AdobeRGB) that are defined in the EXIF specification.
Specifies component enumeration options.
+Enumerate any components that are not disabled. Because this value is 0x0, it is always included with the other options.
Force a read of the registry before enumerating components.
Include disabled components in the enumeration. The set of disabled components is disjoint with the set of default enumerated components
Include unsigned components in the enumeration. This option has no effect.
At the end of component enumeration, filter out any components that are not Windows provided.
Specifies the component signing status.
+A signed component.
An unsigned component.
A component is safe.
Components that do not have a binary component to sign, such as a pixel format, should return this value.
A component has been disabled.
Specifies the type of Windows Imaging Component (WIC) component.
+A WIC decoder.
A WIC encoder.
A WIC pixel converter.
A WIC metadata reader.
A WIC metadata writer.
A WIC pixel format.
All WIC components.
Specifies decode options.
+Cache metadata when needed.
Cache metadata when decoder is loaded.
Specifies the application extension metadata properties for a Graphics Interchange Format (GIF) image.
+[VT_UI1 | VT_VECTOR] Indicates a string that identifies the application.
[VT_UI1 | VT_VECTOR] Indicates data that is exposed by the application.
Specifies the comment extension metadata properties for a Graphics Interchange Format (GIF) image.
+[VT_LPSTR] Indicates the comment text.
Specifies the graphic control extension metadata properties that define the transitions between each frame animation for Graphics Interchange Format (GIF) images.
+[VT_UI1] Indicates the disposal requirements. 0 - no disposal, 1 - do not dispose, 2 - restore to background color, 3 - restore to previous.
[VT_BOOL] Indicates the user input flag. TRUE if user input should advance to the next frame; otherwise,
[VT_BOOL] Indicates the transparency flag. TRUE if a transparent color is in the color table for this frame; otherwise,
[VT_UI2] Indicates how long to display the next frame before advancing to the next frame, in units of 1/100th of a second.
[VT_UI1] Indicates which color in the palette should be treated as transparent.
Specifies the image descriptor metadata properties for Graphics Interchange Format (GIF) frames.
+[VT_UI2] Indicates the X offset at which to locate this frame within the logical screen.
[VT_UI2] Indicates the Y offset at which to locate this frame within the logical screen.
[VT_UI2] Indicates width of this frame, in pixels.
[VT_UI2] Indicates height of this frame, in pixels.
[VT_BOOL] Indicates the local color table flag. TRUE if a local color table is present; otherwise,
[VT_BOOL] Indicates the interlace flag. TRUE if image is interlaced; otherwise,
[VT_BOOL] Indicates the sorted color table flag. TRUE if the color table is sorted from most frequently to least frequently used color; otherwise,
[VT_UI1] Indicates the value used to calculate the number of bytes contained in the global color table.
To calculate the actual size of the color table, raise 2 to the value of the field + 1.
Specifies the logical screen descriptor properties for Graphics Interchange Format (GIF) metadata.
+[VT_UI1 | VT_VECTOR] Indicates the signature property.
[VT_UI2] Indicates the width in pixels.
[VT_UI2] Indicates the height in pixels.
[VT_BOOL] Indicates the global color table flag. TRUE if a global color table is present; otherwise,
[VT_UI1] Indicates the color resolution in bits per pixel.
[VT_BOOL] Indicates the sorted color table flag. TRUE if the table is sorted; otherwise,
[VT_UI1] Indicates the value used to calculate the number of bytes contained in the global color table.
To calculate the actual size of the color table, raise 2 to the value of the field + 1.
[VT_UI1] Indicates the index within the color table to use for the background (pixels not defined in the image).
[VT_UI1] Indicates the factor used to compute an approximation of the aspect ratio.
Specifies the JPEG chrominance table property.
+[VT_UI2|VT_VECTOR] Indicates the metadata property is a chrominance table.
Specifies the JPEG comment properties.
+Indicates the metadata property is comment text.
Specifies the JPEG luminance table property.
+[VT_UI2|VT_VECTOR] Indicates the metadata property is a luminance table.
Specifies the JPEG YCrCB subsampling options.
+The native JPEG encoder uses
The default subsampling option.
Subsampling option that uses both horizontal and vertical decimation.
Subsampling option that uses horizontal decimation.
Subsampling option that uses no decimation.
Specifies named white balances for raw images.
+The default white balance.
A daylight white balance.
A cloudy white balance.
A shade white balance.
A tungsten white balance.
A fluorescent white balance.
Daylight white balance.
A flash white balance.
A custom white balance. This is typically used when using a picture (grey-card) as white balance.
An automatic balance.
An "as shot" white balance.
Specifies the Portable Network Graphics (PNG) background (bKGD) chunk metadata properties.
+Indicates the background color. There are three possible types, depending on the image's pixel format.
Specifies the index of the background color in an image with an indexed pixel format.
Specifies the background color in a grayscale image.
Specifies the background color in an RGB image as three USHORT values: {0xRRRR, 0xGGGG, 0xBBBB}.
Specifies the Portable Network Graphics (PNG) cHRM chunk metadata properties for CIE XYZ chromaticity.
+[VT_UI4] Indicates the whitepoint x value ratio.
[VT_UI4] Indicates the whitepoint y value ratio.
[VT_UI4] Indicates the red x value ratio.
[VT_UI4] Indicates the red y value ratio.
[VT_UI4] Indicates the green x value ratio.
[VT_UI4] Indicates the green y value ratio.
[VT_UI4] Indicates the blue x value ratio.
[VT_UI4] Indicates the blue y value ratio.
Specifies the Portable Network Graphics (PNG) filters available for compression optimization.
+Indicates an unspecified PNG filter. This enables WIC to algorithmically choose the best filtering option for the image.
Indicates no PNG filter.
Indicates a PNG sub filter.
Indicates a PNG up filter.
Indicates a PNG average filter.
Indicates a PNG paeth filter.
Indicates a PNG adaptive filter. This enables WIC to choose the best filtering mode on a per-scanline basis.
Specifies the Portable Network Graphics (PNG) gAMA chunk metadata properties.
+[VT_UI4] Indicates the gamma value.
Specifies the Portable Network Graphics (PNG) hIST chunk metadata properties.
+[VT_VECTOR | VT_UI2] Indicates the approximate usage frequency of each color in the color palette.
Specifies the Portable Network Graphics (PNG) iCCP chunk metadata properties.
+[VT_LPSTR] Indicates the International Color Consortium (ICC) profile name.
[VT_VECTOR | VT_UI1] Indicates the embedded ICC profile.
Specifies the Portable Network Graphics (PNG) iTXT chunk metadata properties.
+[VT_LPSTR] Indicates the keywords in the iTXT metadata chunk.
[VT_UI1] Indicates whether the text in the iTXT chunk is compressed. 1 if the text is compressed; otherwise, 0.
[VT_LPSTR] Indicates the human language used by the translated keyword and the text.
[VT_LPWSTR] Indicates a translation of the keyword into the language indicated by the language tag.
[VT_LPWSTR] Indicates additional text in the iTXT metadata chunk.
Specifies the Portable Network Graphics (PNG) sRGB chunk metadata properties.
+[VT_UI1] Indicates the rendering intent for an sRGB color space image. The rendering intents have the following meaning.
Value | Meaning |
---|---|
0 | Perceptual |
1 | Relative colorimetric |
2 | Saturation |
3 | Absolute colorimetric |
?
Specifies the Portable Network Graphics (PNG) tIME chunk metadata properties.
+[VT_UI2] Indicates the year of the last modification.
[VT_UI1] Indicates the month of the last modification.
[VT_UI1] Indicates day of the last modification.
[VT_UI1] Indicates the hour of the last modification.
[VT_UI1] Indicates the minute of the last modification.
[VT_UI1] Indicates the second of the last modification.
Specifies when the progress notification callback should be called.
+The callback should be called when codec operations begin.
The callback should be called when codec operations end.
The callback should be called frequently to report status.
The callback should be called on all available progress notifications.
Specifies the progress operations to receive notifications for.
+Receive copy pixel operation.
Receive write pixel operation.
Receive all progress operations available.
Specifies the capability support of a raw image.
+The capability is not supported.
The capability supports only get operations.
The capability supports get and set operations.
Specifies the parameter set used by a raw codec.
+An as shot parameter set.
A user adjusted parameter set.
A codec adjusted parameter set.
Specifies the render intent of the next CopyPixels call.
+Specifies the rotation capabilities of the codec.
+Rotation is not supported.
Set operations for rotation are not supported.
90 degree rotations are supported.
All rotation angles are supported.
Specifies the access level of a Windows Graphics Device Interface (GDI) section.
+Indicates a read only access level.
Indicates a read/write access level.
Specifies the Tagged Image File Format (TIFF) compression options.
+Indicates a suitable compression algorithm based on the image and pixel format.
Indicates no compression.
Indicates a CCITT3 compression algorithm. This algorithm is only valid for 1bpp pixel formats.
Indicates a CCITT4 compression algorithm. This algorithm is only valid for 1bpp pixel formats.
Indicates a LZW compression algorithm.
Indicates a RLE compression algorithm. This algorithm is only valid for 1bpp pixel formats.
Indicates a ZIP compression algorithm.
Indicates an LZWH differencing algorithm.
Defines methods that add the concept of writeability and static in-memory representations of bitmaps to
Because of the internal memory representation implied by the
Exposes methods that refers to a source from which pixels are retrieved, but cannot be written back to.
+This interface provides a common way of accessing and linking together bitmaps, decoders, format converters, and scalers. Components that implement this interface can be connected together in a graph to pull imaging data through.
This interface defines only the notion of readability or being able to produce pixels. Modifying or writing to a bitmap is considered to be a specialization specific to bitmaps which have storage and is defined in the descendant interface
Retrieves the pixel width and height of the bitmap.
+A reference that receives the pixel width of the bitmap.
A reference that receives the pixel height of the bitmap
If this method succeeds, it returns
Retrieves the pixel format of the bitmap source.
+Receives the pixel format
If this method succeeds, it returns
The pixel format returned by this method is not necessarily the pixel format the image is stored as. The codec may perform a format conversion from the storage pixel format to an output pixel format.
+Retrieves the sampling rate between pixels and physical world measurements.
+A reference that receives the x-axis dpi resolution.
A reference that receives the y-axis dpi resolution.
If this method succeeds, it returns
Some formats, such as GIF and ICO, do not have full DPI support. For GIF, this method calculates the DPI values from the aspect ratio, using a base DPI of (96.0, 96.0). The ICO format does not support DPI at all, and the method always returns (96.0,96.0) for ICO images.
Additionally, WIC itself does not transform images based on the DPI values in an image. It is up to the caller to transform an image based on the resolution returned.
+Retrieves the color table for indexed pixel formats.
+An
Returns one of the following values.
Return code | Description |
---|---|
| The palette was unavailable. |
| The palette was successfully copied. |
?
If the
Instructs the object to produce pixels.
+The rectangle to copy. A
The stride of the bitmap
The size of the buffer.
A reference to the buffer.
If this method succeeds, it returns
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
Instructs the object to produce pixels.
+The rectangle to copy. A
The stride of the bitmap
A reference to the buffer.
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
+Instructs the object to produce pixels.
+The stride of the bitmap
A reference to the buffer.
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
+Instructs the object to produce pixels.
+The stride of the bitmap
A reference to the buffer.
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
+Instructs the object to produce pixels.
+The rectangle to copy. A
If this method succeeds, it returns
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
Instructs the object to produce pixels.
+If this method succeeds, it returns
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
+Instructs the object to produce pixels.
+If this method succeeds, it returns
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
Instructs the object to produce pixels.
+If this method succeeds, it returns
CopyPixels is one of the two main image processing routines (the other being Lock) triggering the actual processing. It instructs the object to produce pixels according to its algorithm - this may involve decoding a portion of a JPEG stored on disk, copying a block of memory, or even analytically computing a complex gradient. The algorithm is completely dependent on the object implementing the interface.
The caller can restrict the operation to a rectangle of interest (ROI) using the prc parameter. The ROI sub-rectangle must be fully contained in the bounds of the bitmap. Specifying a
The caller controls the memory management and must provide an output buffer (pbBuffer) for the results of the copy along with the buffer's bounds (cbBufferSize). The cbStride parameter defines the count of bytes between two vertically adjacent pixels in the output buffer. The caller must ensure that there is sufficient buffer to complete the call based on the width, height and pixel format of the bitmap and the sub-rectangle provided to the copy method.
If the caller needs to perform numerous copies of an expensive
The callee must only write to the first (prc->Width*bitsperpixel+7)/8 bytes of each line of the output buffer (in this case, a line is a consecutive string of cbStride bytes).
Retrieves the pixel format of the bitmap source..
+The pixel format returned by this method is not necessarily the pixel format the image is stored as. The codec may perform a format conversion from the storage pixel format to an output pixel format.
+Retrieves the pixel width and height of the bitmap.
+Provides access to a rectangular area of the bitmap.
+The rectangle to be accessed.
The access mode you wish to obtain for the lock. This is a bitwise combination of
Value | Meaning |
---|---|
The read access lock. | |
The write access lock. |
?
A reference that receives the locked memory location.
Locks are exclusive for writing but can be shared for reading. You cannot call CopyPixels while the
Provides access for palette modifications.
+The palette to use for conversion.
If this method succeeds, it returns
Changes the physical resolution of the image.
+The horizontal resolution.
The vertical resolution.
If this method succeeds, it returns
This method has no effect on the actual pixels or samples stored in the bitmap. Instead the interpretation of the sampling rate is modified. This means that a 96 DPI image which is 96 pixels wide is one inch. If the physical resolution is modified to 48 DPI, then the bitmap is considered to be 2 inches wide but has the same number of pixels. If the resolution is less than REAL_EPSILON (1.192092896e-07F) the error code
Provides access to a rectangular area of the bitmap.
+The access mode you wish to obtain for the lock. This is a bitwise combination of
Value | Meaning |
---|---|
The read access lock. | |
The write access lock. |
?
A reference that receives the locked memory location.
Locks are exclusive for writing but can be shared for reading. You cannot call CopyPixels while the
Provides access to a rectangular area of the bitmap.
+The rectangle to be accessed.
The access mode you wish to obtain for the lock. This is a bitwise combination of
Value | Meaning |
---|---|
The read access lock. | |
The write access lock. |
?
A reference that receives the locked memory location.
Locks are exclusive for writing but can be shared for reading. You cannot call CopyPixels while the
Provides access for palette modifications.
+Exposes methods that produce a clipped version of the input bitmap for a specified rectangular region of interest.
+Initializes the bitmap clipper with the provided parameters.
+The input bitmap source.
The rectangle of the bitmap source to clip.
If this method succeeds, it returns
Initializes the bitmap clipper with the provided parameters.
+The input bitmap source.
The rectangle of the bitmap source to clip.
If this method succeeds, it returns
Exposes methods that provide information about a particular codec.
+Exposes methods that provide component information.
+Retrieves the component's
If this method succeeds, it returns
Retrieves the component's class identifier (CLSID)
+A reference that receives the component's CLSID.
If this method succeeds, it returns
Retrieves the signing status of the component.
+A reference that receives the
If this method succeeds, it returns
Signing is unused by WIC. Therefore, all components
This function can be used to determine whether a component has no binary component or has been added to the disabled components list in the registry.
+Retrieves the name of component's author.
+The size of the wzAuthor buffer.
A reference that receives the name of the component's author. The locale of the string depends on the value that the codec wrote to the registry at install time. For built-in components, these strings are always in English.
A reference that receives the actual length of the component's authors name. The author name is optional; if an author name is not specified by the component, the length returned is 0.
If this method succeeds, it returns
If cchAuthor is 0 and wzAuthor is
Retrieves the vendor
A reference that receives the component's vendor
If this method succeeds, it returns
Retrieves the component's version.
+The size of the wzVersion buffer.
A reference that receives a culture invariant string of the component's version.
A reference that receives the actual length of the component's version. The version is optional; if a value is not specified by the component, the length returned is 0.
If this method succeeds, it returns
All built-in components return "1.0.0.0", except for pixel formats, which do not have a version.
If cchAuthor is 0 and wzAuthor is
Retrieves the component's specification version.
+The size of the wzSpecVersion buffer.
When this method returns, contains a culture invariant string of the component's specification version. The version form is NN.NN.NN.NN.
A reference that receives the actual length of the component's specification version. The specification version is optional; if a value is not specified by the component, the length returned is 0.
If this method succeeds, it returns
All built-in components return "1.0.0.0", except for pixel formats, which do not have a spec version.
If cchAuthor is 0 and wzAuthor is
Retrieves the component's friendly name, which is a human-readable display name for the component.
+The size of the wzFriendlyName buffer.
A reference that receives the friendly name of the component. The locale of the string depends on the value that the codec wrote to the registry at install time. For built-in components, these strings are always in English.
A reference that receives the actual length of the component's friendly name.
If this method succeeds, it returns
If cchFriendlyName is 0 and wzFriendlyName is
Retrieves the component's
Retrieves the component's class identifier (CLSID)
+Retrieves the signing status of the component.
+Signing is unused by WIC. Therefore, all components
This function can be used to determine whether a component has no binary component or has been added to the disabled components list in the registry.
+Retrieves the vendor
Retrieves the container
Receives the container
If this method succeeds, it returns
Retrieves the pixel formats the codec supports.
+The size of the pguidPixelFormats array. Use 0
on first call to determine the needed array size.
Receives the supported pixel formats. Use
on first call to determine needed array size.
The array size needed to retrieve all supported pixel formats.
If this method succeeds, it returns
The usage pattern for this method is a two call process. The first call retrieves the array size needed to retrieve all the supported pixel formats by calling it with cFormats set to 0
and pguidPixelFormats set to
. This call sets pcActual to the array size needed. Once the needed array size is determined, a second GetPixelFormats call with pguidPixelFormats set to an array of the appropriate size will retrieve the pixel formats.
Retrieves the color management version number the codec supports.
+The size of the version buffer. Use 0
on first call to determine needed buffer size.
Receives the color management version number. Use
on first call to determine needed buffer size.
The actual buffer size needed to retrieve the full color management version number.
If this method succeeds, it returns
The usage pattern for this method is a two call process. The first call retrieves the buffer size needed to retrieve the full color management version number by calling it with cchColorManagementVersion set to 0
and wzColorManagementVersion set to
. This call sets pcchActual to the buffer size needed. Once the needed buffer size is determined, a second GetColorManagementVersion call with cchColorManagementVersion set to the buffer size and wzColorManagementVersion set to a buffer of the appropriate size will retrieve the color management version number.
Retrieves the name of the device manufacturer associated with the codec.
+The size of the device manufacturer's name. Use 0
on first call to determine needed buffer size.
Receives the device manufacturer's name. Use
on first call to determine needed buffer size.
The actual buffer size needed to retrieve the device manufacturer's name.
If this method succeeds, it returns
The usage pattern for this method is a two call process. The first call retrieves the buffer size needed to retrieve the device manufacturer's name by calling it with cchDeviceManufacturer set to 0
and wzDeviceManufacturer set to
. This call sets pcchActual to the buffer size needed. Once the needed buffer size is determined, a second GetDeviceManufacturer call with cchDeviceManufacturer set to the buffer size and wzDeviceManufacturer set to a buffer of the appropriate size will retrieve the device manufacturer's name.
Retrieves a comma delimited list of device models associated with the codec.
+The size of the device models buffer. Use 0
on first call to determine needed buffer size.
Receives a comma delimited list of device model names associated with the codec. Use
on first call to determine needed buffer size.
The actual buffer size needed to retrieve all of the device model names.
If this method succeeds, it returns
The usage pattern for this method is a two call process. The first call retrieves the buffer size needed to retrieve all of the device model names by calling it with cchDeviceModels set to 0
and wzDeviceModels set to
. This call sets pcchActual to the buffer size needed. Once the needed buffer size is determined, a second GetDeviceModels call with cchDeviceModels set to the buffer size and wzDeviceModels set to a buffer of the appropriate size will retrieve the device model names.
Retrieves a comma delimited sequence of mime types associated with the codec.
+The size of the mime types buffer. Use 0
on first call to determine needed buffer size.
Receives the mime types associated with the codec. Use
on first call to determine needed buffer size.
The actual buffer size needed to retrieve all mime types associated with the codec.
If this method succeeds, it returns
The usage pattern for this method is a two call process. The first call retrieves the buffer size needed to retrieve all mime types associated with the codec by calling it with cchMimeTypes set to 0
and wzMimeTypes set to
. This call sets pcchActual to the buffer size needed. Once the needed buffer size is determined, a second GetMimeTypes call with cchMimeTypes set to the buffer size and wzMimeTypes set to a buffer of the appropriate size will retrieve the mime types.
Retrieves a comma delimited list of the file name extensions associated with the codec.
+The size of the file name extension buffer. Use 0
on first call to determine needed buffer size.
Receives a comma delimited list of file name extensions associated with the codec. Use
on first call to determine needed buffer size.
The actual buffer size needed to retrieve all file name extensions associated with the codec.
If this method succeeds, it returns
The default extension for an image encoder is the first item in the list of returned extensions.
The usage pattern for this method is a two call process. The first call retrieves the buffer size needed to retrieve all file name extensions associated with the codec by calling it with cchFileExtensions set to 0
and wzFileExtensions set to
. This call sets pcchActual to the buffer size needed. Once the needed buffer size is determined, a second GetFileExtensions call with cchFileExtensions set to the buffer size and wzFileExtensions set to a buffer of the appropriate size will retrieve the file name extensions.
Retrieves a value indicating whether the codec supports animation.
+Receives TRUE if the codec supports images with timing information; otherwise,
If this method succeeds, it returns
Retrieves a value indicating whether the codec supports chromakeys.
+Receives TRUE if the codec supports chromakeys; otherwise,
If this method succeeds, it returns
Retrieves a value indicating whether the codec supports lossless formats.
+Receives TRUE if the codec supports lossless formats; otherwise,
If this method succeeds, it returns
Retrieves a value indicating whether the codec supports multi frame images.
+Receives TRUE if the codec supports multi frame images; otherwise,
If this method succeeds, it returns
Retrieves a value indicating whether the given mime type matches the mime type of the codec.
+The mime type to compare.
Receives TRUE if the mime types match; otherwise,
Note: The Windows provided codecs do not implement this method and return E_NOTIMPL.
+Retrieves the container
Retrieves a value indicating whether the codec supports animation.
+Retrieves a value indicating whether the codec supports chromakeys.
+Retrieves a value indicating whether the codec supports lossless formats.
+Retrieves a value indicating whether the codec supports multi frame images.
+Exposes methods used for progress notification for encoders and decoders.
+This interface is not supported by the Windows provided codecs.
+Registers a progress notification callback function.
+A function reference to the application defined progress notification callback function. See ProgressNotificationCallback for the callback signature.
A reference to component data for the callback method.
The
If this method succeeds, it returns
Applications can only register a single callback. Subsequent registration calls will replace the previously registered callback. To unregister a callback, pass in
Progress is reported in an increasing order between 0.0 and 1.0. If dwProgressFlags includes
Exposes methods that represent a decoder.
The interface provides access to the decoder's properties such as global thumbnails (if supported), frames, and palette.
+There are a number of concrete implementations of this interface representing each of the standard decoders provided by the platform including bitmap (BMP), Portable Network Graphics (PNG), icon (ICO), Joint Photographic Experts Group (JPEG), Graphics Interchange Format (GIF), Tagged Image File Format (TIFF), and Microsoft Windows Digital Photo (WDP). The following table includes the class identifier (CLSID) for each native decoder.
CLSID Name | CLSID |
---|---|
0x6b462062, 0x7cbf, 0x400d, 0x9f, 0xdb, 0x81, 0x3d, 0xd1, 0xf, 0x27, 0x78 | |
0x389ea17b, 0x5078, 0x4cde, 0xb6, 0xef, 0x25, 0xc1, 0x51, 0x75, 0xc7, 0x51 | |
0xc61bfcdf, 0x2e0f, 0x4aad, 0xa8, 0xd7, 0xe0, 0x6b, 0xaf, 0xeb, 0xcd, 0xfe | |
0x9456a480, 0xe88b, 0x43ea, 0x9e, 0x73, 0xb, 0x2d, 0x9b, 0x71, 0xb1, 0xca | |
0x381dda3c, 0x9ce9, 0x4834, 0xa2, 0x3e, 0x1f, 0x98, 0xf8, 0xfc, 0x52, 0xbe | |
0xb54e85d9, 0xfe23, 0x499f, 0x8b, 0x88, 0x6a, 0xce, 0xa7, 0x13, 0x75, 0x2b | |
0xa26cec36, 0x234c, 0x4950, 0xae, 0x16, 0xe3, 0x4a, 0xac, 0xe7, 0x1d, 0x0d |
?
This interface may be sub-classed to provide support for third party codecs as part of the extensibility model. See the AITCodec Sample CODEC.
Codecs written as TIFF container formats that are not registered will decode as a TIFF image. Client applications should check for a zero frame count to determine if the codec is valid.
+Retrieves the capabilities of the decoder based on the specified stream.
+The stream to retrieve the decoder capabilities from.
The
Custom decoder implementations should save the current position of the specified
Initializes the decoder with the provided stream.
+The stream to use for initialization.
The stream contains the encoded pixels which are decoded each time the CopyPixels method on the
The
If this method succeeds, it returns
Retrieves the image's container format.
+A reference that receives the image's container format
If this method succeeds, it returns
Retrieves an
If this method succeeds, it returns
Copies the decoder's
If this method succeeds, it returns
CopyPalette returns a global palette (a palette that applies to all the frames in the image) if there is one; otherwise, it returns
Retrieves the metadata query reader from the decoder.
+Receives a reference to the decoder's
If this method succeeds, it returns
If an image format does not support container-level metadata, this will return
Retrieves a preview image, if supported.
+Receives a reference to the preview bitmap if supported.
If this method succeeds, it returns
Not all formats support previews. Only the native Microsoft Windows Digital Photo (WDP) codec supports previews.
+Retrieves the
If this method succeeds, it returns
Retrieves the
If this method succeeds, it returns
Retrieves a bitmap thumbnail of the image, if one exists
+Receives a reference to the
If this method succeeds, it returns
The returned thumbnail can be of any size, so the caller should scale the thumbnail to the desired size. The only Windows provided image formats that support thumbnails are JPEG, TIFF, and JPEG-XR. If the thumbnail is not available, this will return
Retrieves the total number of frames in the image.
+A reference that receives the total number of frames in the image.
If this method succeeds, it returns
Retrieves the specified frame of the image.
+The particular frame to retrieve.
A reference that receives a reference to the
Retrieves the image's container format.
+Retrieves an
Retrieves the metadata query reader from the decoder.
+If an image format does not support container-level metadata, this will return
Retrieves a preview image, if supported.
+Not all formats support previews. Only the native Microsoft Windows Digital Photo (WDP) codec supports previews.
+Retrieves a bitmap thumbnail of the image, if one exists
+The returned thumbnail can be of any size, so the caller should scale the thumbnail to the desired size. The only Windows provided image formats that support thumbnails are JPEG, TIFF, and JPEG-XR. If the thumbnail is not available, this will return
Retrieves the total number of frames in the image.
+Exposes methods that provide information about a decoder.
+Retrieves the file pattern signatures supported by the decoder.
+The array size of the pPatterns array.
Receives a list of
Receives the number of patterns the decoder supports.
Receives the actual buffer size needed to retrieve all pattern signatures supported by the decoder.
If this method succeeds, it returns
To retrieve all pattern signatures, this method should first be called with pPatterns set to
to retrieve the actual buffer size needed through pcbPatternsActual. Once the needed buffer size is known, allocate a buffer of the needed size and call GetPatterns again with the allocated buffer.
Retrieves a value that indicates whether the codec recognizes the pattern within a specified stream.
+The stream to pattern match within.
A reference that receives TRUE if the patterns match; otherwise,
Creates a new
If this method succeeds, it returns
Defines methods for setting an encoder's properties such as thumbnails, frames, and palettes.
+There are a number of concrete implementations of this interface representing each of the standard encoders provided by the platform including bitmap (BMP), Portable Network Graphics (PNG), Joint Photographic Experts Group (JPEG), Graphics Interchange Format (GIF), Tagged Image File Format (TIFF), and Microsoft Windows Digital Photo (WDP). The following table includes the class identifier (CLSID) for each native encoder.
CLSID Name | CLSID |
---|---|
0x69be8bb4, 0xd66d, 0x47c8, 0x86, 0x5a, 0xed, 0x15, 0x89, 0x43, 0x37, 0x82 | |
0x27949969, 0x876a, 0x41d7, 0x94, 0x47, 0x56, 0x8f, 0x6a, 0x35, 0xa4, 0xdc | |
0x1a34f5c1, 0x4a5a, 0x46dc, 0xb6, 0x44, 0x1f, 0x45, 0x67, 0xe7, 0xa6, 0x76 | |
0x114f5598, 0xb22, 0x40a0, 0x86, 0xa1, 0xc8, 0x3e, 0xa4, 0x95, 0xad, 0xbd | |
0x0131be10, 0x2001, 0x4c5f, 0xa9, 0xb0, 0xcc, 0x88, 0xfa, 0xb6, 0x4c, 0xe8 | |
0xac4ce3cb, 0xe1c1, 0x44cd, 0x82, 0x15, 0x5a, 0x16, 0x65, 0x50, 0x9e, 0xc2 |
?
Additionally this interface may be sub-classed to provide support for third party codecs as part of the extensibility model. See the AITCodec Sample CODEC.
+Proxy function for the Initialize method.
+If this function succeeds, it returns
Retrieves the encoder's container format.
+A reference that receives the encoder's container format
If this method succeeds, it returns
Retrieves an
If this method succeeds, it returns
Sets the
If this method succeeds, it returns
Sets the
If this method succeeds, it returns
Sets the global palette for the image.
+The
Returns
Returns
Only GIF images support an optional global palette, and you must set the global palette before adding any frames to the image. You only need to set the palette for indexed pixel formats. +
+Sets the global thumbnail for the image.
+The
Returns
Returns
Sets the global preview for the image.
+The
Returns
Returns
Creates a new
If this method succeeds, it returns
The parameter ppIEncoderOptions can be used to receive an
Note: Do not pass in a reference to an initialized
Otherwise, you can pass
See Encoding Overview for an example of how to set encoder options.
For formats that support encoding multiple frames (for example, TIFF, JPEG-XR), you can work on only one frame at a time. This means that you must call
Commits all changes for the image and closes the stream.
+If this method succeeds, it returns
To finalize an image, both the frame Commit and the encoder Commit must be called. However, only call the encoder Commit method after all frames have been committed.
After the encoder has been committed, it can't be re-initialized or reused with another stream. A new encoder interface must be created, for example, with
For the encoder Commit to succeed, you must at a minimum call
Retrieves a metadata query writer for the encoder.
+When this method returns, contains a reference to the encoder's metadata query writer.
If this method succeeds, it returns
Retrieves the encoder's container format.
+Retrieves an
Sets the global palette for the image.
+Only GIF images support an optional global palette, and you must set the global palette before adding any frames to the image. You only need to set the palette for indexed pixel formats. +
+Sets the global thumbnail for the image.
+Sets the global preview for the image.
+Retrieves a metadata query writer for the encoder.
+Exposes methods that provide information about an encoder.
+Creates a new
If this method succeeds, it returns
Exposes methods that produce a flipped (horizontal or vertical) and/or rotated (by 90 degree increments) bitmap source. Rotations are done before the flip.
+IWICBitmapFlipRotator requests data on a per-pixel basis, while WIC codecs provide data on a per-scanline basis. This causes the flip rotator object to exhibit O(n²) behavior if there is no buffering. This occurs because each pixel in the transformed image requires an entire scanline to be decoded in the file. It is recommended that you buffer the image using
Initializes the bitmap flip rotator with the provided parameters.
+The input bitmap source.
The
If this method succeeds, it returns
Defines methods for decoding individual image frames of an encoded file.
+Retrieves a metadata query reader for the frame.
+When this method returns, contains a reference to the frame's metadata query reader.
If this method succeeds, it returns
For image formats with one frame (JPG, PNG, JPEG-XR), the frame-level query reader of the first frame is used to access all image metadata, and the decoder-level query reader isn't used. For formats with more than one frame (GIF, TIFF), the frame-level query reader for a given frame is used to access metadata specific to that frame, and in the case of GIF a decoder-level metadata reader will be present. If the decoder doesn't support metadata (BMP, ICO), this will return
Retrieves the
If this method succeeds, it returns
If
The ppIColorContexts array must be filled with valid data: each
Retrieves the
If this method succeeds, it returns
If
The ppIColorContexts array must be filled with valid data: each
Retrieves a small preview of the frame, if supported by the codec.
+A reference that receives a reference to the
If this method succeeds, it returns
Not all formats support thumbnails. Joint Photographic Experts Group (JPEG), Tagged Image File Format (TIFF), and Microsoft Windows Digital Photo (WDP) support thumbnails.
+Retrieves a metadata query reader for the frame.
+For image formats with one frame (JPG, PNG, JPEG-XR), the frame-level query reader of the first frame is used to access all image metadata, and the decoder-level query reader isn't used. For formats with more than one frame (GIF, TIFF), the frame-level query reader for a given frame is used to access metadata specific to that frame, and in the case of GIF a decoder-level metadata reader will be present. If the decoder doesn't support metadata (BMP, ICO), this will return
Retrieves a small preview of the frame, if supported by the codec.
+Not all formats support thumbnails. Joint Photographic Experts Group (JPEG), Tagged Image File Format (TIFF), and Microsoft Windows Digital Photo (WDP) support thumbnails.
+Represents an encoder's individual image frames.
+Initializes the frame encoder using the given properties.
+The set of properties to use for
If this method succeeds, it returns
If you don't want any encoding options, pass
For a complete list of encoding options supported by the Windows-provided codecs, see Native WIC Codecs.
+Sets the output image dimensions for the frame.
+The width of the output image.
The height of the output image.
If this method succeeds, it returns
Sets the physical resolution of the output image.
+The horizontal resolution value.
The vertical resolution value.
If this method succeeds, it returns
Windows Imaging Component (WIC) doesn't perform any special processing as a result of DPI resolution values. For example, data returned from
Requests that the encoder use the specified pixel format.
+On input, the requested pixel format
Possible return values include the following.
Return code | Description |
---|---|
| Success. |
| The |
?
Sets a given number
If this method succeeds, it returns
Setting color contexts is unsupported. This function will return
Setting at most one color context is supported, and additional color contexts will be ignored. This context must be a
Setting up to one
In TIFF and JPEG-XR, the
Sets a given number
If this method succeeds, it returns
Setting color contexts is unsupported. This function will return
Setting at most one color context is supported, and additional color contexts will be ignored. This context must be a
Setting up to one
In TIFF and JPEG-XR, the
Sets the
If this method succeeds, it returns
This method doesn't fail if called on a frame whose pixel format is set to a non-indexed pixel format. If the target pixel format is a non-indexed format, the palette will be ignored.
If you already called
The palette must be specified before your first call to WritePixels/WriteSource. Doing so will cause WriteSource to use the specified palette when converting the source image to the encoder pixel format. If no palette is specified, a palette will be generated on the first call to WriteSource. +
+Sets the frame thumbnail if supported by the codec.
+The bitmap source to use as the thumbnail.
Returns
Returns
We recommend that you call SetThumbnail before calling WritePixels or WriteSource. The thumbnail won't be added to the encoded file if SetThumbnail is called after a call to WritePixels or WriteSource.
Setting thumbnails is unsupported. This function will return
Setting the thumbnail is supported. The source image will be re-encoded as either an 8bpp or 24bpp JPEG and will be written to the JPEG's APP1 metadata block. +
Setting the thumbnail is supported. The source image will be re-encoded as a TIFF and will be written to the frame's SubIFD block.
Setting the thumbnail is supported. The source image will be re-encoded as an additional 8bpp or 24bpp frame. +
Encodes a bitmap source.
+The bitmap source to encode.
The size rectangle of the bitmap source.
If this method succeeds, it returns
If SetSize is not called prior to calling WriteSource, the size given in prc is used if not
If SetPixelFormat is not called prior to calling WriteSource, the pixel format of the
If SetResolution is not called prior to calling WriteSource, the pixel format of pIBitmapSource is used.
If SetPalette is not called prior to calling WriteSource, the target pixel format is indexed, and the pixel format of pIBitmapSource matches the encoder frame's pixel format, then the pIBitmapSource pixel format is used.
When encoding a GIF image, if the global palette is set and the frame level palette is not set directly by the user or by a custom independent software vendor (ISV) GIF codec, WriteSource will use the global palette to encode the frame even when pIBitmapSource has a frame level palette.
Starting with Windows Vista, repeated WriteSource calls can be made as long as the total accumulated source rect height is the same as set through SetSize.
Starting with Windows 8.1, the source rect must be at least the dimensions set through SetSize. If the source rect width exceeds the SetSize width, extra pixels on the right side are ignored. If the source rect height exceeds the remaining unfilled height, extra scan lines on the bottom are ignored. +
+Commits the frame to the image.
+If this method succeeds, it returns
After the frame Commit has been called, you can't use or reinitialize the
To finalize the image, both the frame Commit and the encoder Commit must be called. However, only call the encoder Commit method after all frames have been committed.
+Gets the metadata query writer for the encoder frame.
+When this method returns, contains a reference to metadata query writer for the encoder frame.
If this method succeeds, it returns
If you are setting metadata on the frame, you must do this before you use
Encodes the frame scanlines.
+The number of lines to encode.
Successive WritePixels calls are assumed to be sequential scanline access in the output image.
+Encodes the frame scanlines.
+The number of lines to encode.
Successive WritePixels calls are assumed to be sequential scanline access in the output image.
+Encodes the frame scanlines.
+The number of lines to encode.
The stride of the image pixels.
A reference to the pixel buffer.
Successive WritePixels calls are assumed to be sequential scanline access in the output image.
+Encodes a bitmap source.
+The bitmap source to encode.
If SetSize is not called prior to calling WriteSource, the size given in prc is used if not
If SetPixelFormat is not called prior to calling WriteSource, the pixel format of the
If SetResolution is not called prior to calling WriteSource, the pixel format of pIBitmapSource is used.
If SetPalette is not called prior to calling WriteSource, the target pixel format is indexed, and the pixel format of pIBitmapSource matches the encoder frame's pixel format, then the pIBitmapSource pixel format is used.
When encoding a GIF image, if the global palette is set and the frame level palette is not set directly by the user or by a custom independent software vendor (ISV) GIF codec, WriteSource will use the global palette to encode the frame even when pIBitmapSource has a frame level palette.
Windows Vista: The source rect width must match the width set through SetSize. Repeated WriteSource calls can be made as long as the total accumulated source rect height is the same as set through SetSize.
+Encodes a bitmap source.
+The bitmap source to encode.
The size rectangle of the bitmap source.
If SetSize is not called prior to calling WriteSource, the size given in prc is used if not
If SetPixelFormat is not called prior to calling WriteSource, the pixel format of the
If SetResolution is not called prior to calling WriteSource, the pixel format of pIBitmapSource is used.
If SetPalette is not called prior to calling WriteSource, the target pixel format is indexed, and the pixel format of pIBitmapSource matches the encoder frame's pixel format, then the pIBitmapSource pixel format is used.
When encoding a GIF image, if the global palette is set and the frame level palette is not set directly by the user or by a custom independent software vendor (ISV) GIF codec, WriteSource will use the global palette to encode the frame even when pIBitmapSource has a frame level palette.
Windows Vista: The source rect width must match the width set through SetSize. Repeated WriteSource calls can be made as long as the total accumulated source rect height is the same as set through SetSize.
+Sets the
This method doesn't fail if called on a frame whose pixel format is set to a non-indexed pixel format. If the target pixel format is a non-indexed format, the palette will be ignored.
If you already called
The palette must be specified before your first call to WritePixels/WriteSource. Doing so will cause WriteSource to use the specified palette when converting the source image to the encoder pixel format. If no palette is specified, a palette will be generated on the first call to WriteSource. +
+Sets the frame thumbnail if supported by the codec.
+We recommend that you call SetThumbnail before calling WritePixels or WriteSource. The thumbnail won't be added to the encoded file if SetThumbnail is called after a call to WritePixels or WriteSource.
Setting thumbnails is unsupported. This function will return
Setting the thumbnail is supported. The source image will be re-encoded as either an 8bpp or 24bpp JPEG and will be written to the JPEG's APP1 metadata block. +
Setting the thumbnail is supported. The source image will be re-encoded as a TIFF and will be written to the frame's SubIFD block.
Setting the thumbnail is supported. The source image will be re-encoded as an additional 8bpp or 24bpp frame. +
Gets the metadata query writer for the encoder frame.
+If you are setting metadata on the frame, you must do this before you use
Exposes methods that support the Lock method.
+The bitmap lock is simply an abstraction for a rectangular memory window into the bitmap. For the simplest case, a system memory bitmap, this is simply a reference to the top left corner of the rectangle and a stride value.
To release the exclusive lock set by Lock method and the associated
Retrieves the width and height, in pixels, of the locked rectangle.
+A reference that receives the width of the locked rectangle.
A reference that receives the height of the locked rectangle.
If this method succeeds, it returns
Provides access to the stride value for the memory.
+If this method succeeds, it returns
Note the stride value is specific to the
Gets the reference to the top left pixel in the locked rectangle.
+A reference that receives the size of the buffer.
A reference that receives a reference to the top left pixel in the locked rectangle.
The reference provided by this method should not be used outside of the lifetime of the lock itself.
GetDataPointer is not available in multi-threaded apartment applications.
+Gets the pixel format for the locked area of pixels. This can be used to compute the number of bytes-per-pixel in the locked area.
+A reference that receives the pixel format
If this method succeeds, it returns
Provides access to the stride value for the memory.
+ Note the stride value is specific to the
Gets the pixel format for the locked area of pixels. This can be used to compute the number of bytes-per-pixel in the locked area.
+Represents a resized version of the input bitmap using a resampling or filtering algorithm.
+Images can be scaled to larger sizes; however, even with sophisticated scaling algorithms, there is only so much information in the image and artifacts tend to worsen the more you scale up.
The scaler will reapply the resampling algorithm every time CopyPixels is called. If the scaled image is to be animated, the scaled image should be created once and cached in a new bitmap, after which the
The scaler is optimized to use the minimum amount of memory required to scale the image correctly. The scaler may be used to produce parts of the image incrementally (banding) by calling CopyPixels with different rectangles representing the output bands of the image. Resampling typically requires overlapping rectangles from the source image and thus may need to request the same pixels from the source bitmap multiple times. Requesting scanlines out-of-order from some image decoders can have a significant performance penalty. Because of this reason, the scaler is optimized to handle consecutive horizontal bands of scanlines (rectangle width equal to the bitmap width). In this case the accumulator from the previous vertically adjacent rectangle is re-used to avoid duplicate scanline requests from the source. This implies that banded output from the scaler may have better performance if the bands are requested sequentially. Of course if the scaler is simply used to produce a single rectangle output, this concern is eliminated because the scaler will internally request scanlines in the correct order.
+Initializes the bitmap scaler with the provided parameters.
+The input bitmap source.
The destination width.
The destination height.
The
If this method succeeds, it returns
Copies pixel data using the supplied input parameters.
+The rectangle of pixels to copy.
The width to scale the source bitmap. This parameter must equal the value obtainable through
The height to scale the source bitmap. This parameter must equal the value obtainable through
The
This
The desired rotation or flip to perform prior to the pixel copy.
The transform must be an operation supported by an DoesSupportTransform call.
If a dstTransform is specified, nStride is the transformed stride and is based on the pguidDstFormat pixel format, not the original source's pixel format.
The stride of the destination buffer.
The size of the destination buffer.
The output buffer.
If this method succeeds, it returns
Returns the closest dimensions the implementation can natively scale to given the desired dimensions.
+The desired width. A reference that receives the closest supported width.
The desired height. A reference that receives the closest supported height.
If this method succeeds, it returns
The Windows provided codecs provide the following support for native scaling: +
Retrieves the closest pixel format to which the implementation of
If this method succeeds, it returns
The Windows provided codecs provide the following support:
Determines whether a specific transform option is supported natively by the implementation of the
If this method succeeds, it returns
The Windows provided codecs provide the following level of support:
Exposes methods for color management.
+A Color Context is an abstraction for a color profile. The profile can either be loaded from a file (like "sRGB Color Space Profile.icm"), read from a memory buffer, or can be defined by an EXIF color space. The system color profile directory can be obtained by calling GetColorDirectory.
Once a color context has been initialized, it cannot be re-initialized.
+Initializes the color context from the given file.
+The name of the file.
If this method succeeds, it returns
Once a color context has been initialized, it can't be re-initialized. +
+Initializes the color context from a memory block.
+The buffer used to initialize the
The size of the pbBuffer buffer.
If this method succeeds, it returns
Once a color context has been initialized, it can't be re-initialized. +
+Initializes the color context using an Exchangeable Image File (EXIF) color space.
+The value of the EXIF color space.
Value | Meaning |
---|---|
| A sRGB color space. |
| An Adobe RGB color space. |
?
If this method succeeds, it returns
Once a color context has been initialized, it can't be re-initialized. +
+Retrieves the color context type.
+A reference that receives the
If this method succeeds, it returns
Retrieves the color context profile.
+The size of the pbBuffer buffer.
A reference that receives the color context profile.
A reference that receives the actual buffer size needed to retrieve the entire color context profile.
If this method succeeds, it returns
Only use this method if the context type is
Calling this method with pbBuffer set to
Retrieves the Exchangeable Image File (EXIF) color space color context.
+A reference that receives the EXIF color space color context.
Value | Meaning |
---|---|
| A sRGB color space. |
| An Adobe RGB color space. |
| Unused. |
?
If this method succeeds, it returns
This method should only be used when
Retrieves the color context type.
+Retrieves the Exchangeable Image File (EXIF) color space color context.
+This method should only be used when
Exposes methods that transforms an
A
Once initialized, a color transform cannot be reinitialized. Because of this, a color transform cannot be used with multiple sources or varying parameters.
+Initializes an
If this method succeeds, it returns
The currently supported formats for the pIContextSource and pixelFmtDest parameters are: +
In order to get correct behavior from a color transform, the input and output pixel formats must be compatible with the source and destination color profiles. For example, an sRGB destination color profile will produce incorrect results when used with a CMYK destination pixel format.
+Exposes methods that provide access to the capabilities of a raw codec format.
+Retrieves information about which capabilities are supported for a raw image.
+A reference that receives
If this method succeeds, it returns
It is recommended that a codec report that a capability is supported even if the results at the outer range limits are not of perfect quality.
+Sets the desired
If this method succeeds, it returns
Gets the current set of parameters.
+A reference that receives a reference to the current set of parameters.
If this method succeeds, it returns
Sets the exposure compensation stop value.
+The exposure compensation value. The value range for exposure compensation is -5.0 through +5.0, which equates to 10 full stops.
If this method succeeds, it returns
It is recommended that a codec report that this method is supported even if the results at the outer range limits are not of perfect quality.
+Gets the exposure compensation stop value of the raw image.
+A reference that receives the exposure compensation stop value. The default is the "as-shot" setting.
If this method succeeds, it returns
Sets the white point RGB values.
+The red white point value.
The green white point value.
The blue white point value.
If this method succeeds, it returns
Due to other white point setting methods (e.g. SetWhitePointKelvin), care must be taken by codec implementers to ensure proper interoperability. For instance, if the caller sets via a named white point then the codec implementer may wish to disable reading back the corresponding Kelvin temperature. In specific cases where the codec implementer wishes to deny a given action because of previous calls,
Gets the white point RGB values.
+A reference that receives the red white point value.
A reference that receives the green white point value.
A reference that receives the blue white point value.
If this method succeeds, it returns
Sets the named white point of the raw file.
+A bitwise combination of the enumeration values.
If this method succeeds, it returns
If the named white points are not supported by the raw image or the raw file contains named white points that are not supported by this API, the codec implementer should still mark this capability as supported.
If the named white points are not supported by the raw image, a best effort should be made to adjust the image to the named white point even when it isn't a pre-defined white point of the raw file.
If the raw file contains named white points not supported by this API, the codec implementer should support the named white points in the API.
Due to other white point setting methods (e.g. SetWhitePointKelvin), care must be taken by codec implementers to ensure proper interoperability. For instance, if the caller sets via a named white point then the codec implementer may wish to disable reading back the corresponding Kelvin temperature. In specific cases where the codec implementer wishes to deny a given action because of previous calls,
Gets the named white point of the raw image.
+A reference that receives the bitwise combination of the enumeration values.
If this method succeeds, it returns
If the named white points are not supported by the raw image or the raw file contains named white points that are not supported by this API, the codec implementer should still mark this capability as supported.
If the named white points are not supported by the raw image, a best effort should be made to adjust the image to the named white point even when it isn't a pre-defined white point of the raw file.
If the raw file contains named white points not supported by this API, the codec implementer should support the named white points in
Sets the white point Kelvin value.
+The white point Kelvin value. Acceptable Kelvin values are 1,500 through 30,000.
If this method succeeds, it returns
Codec implementers should faithfully adjust the color temperature within the range supported natively by the raw image. For values outside the native support range, the codec implementer should provide a best effort representation of the image at that color temperature.
Codec implementers should return
Codec implementers must ensure proper interoperability with other white point setting methods such as SetWhitePointRGB. For example, if the caller sets the white point via SetNamedWhitePoint then the codec implementer may want to disable reading back the corresponding Kelvin temperature. In specific cases where the codec implementer wants to deny a given action because of previous calls,
Gets the white point Kelvin temperature of the raw image.
+A reference that receives the white point Kelvin temperature of the raw image. The default is the "as-shot" setting value.
If this method succeeds, it returns
Gets the information about the current Kelvin range of the raw image.
+A reference that receives the minimum Kelvin temperature.
A reference that receives the maximum Kelvin temperature.
A reference that receives the Kelvin step value.
If this method succeeds, it returns
Sets the contrast value of the raw image.
+The contrast value of the raw image. The default value is the "as-shot" setting. The value range for contrast is 0.0 through 1.0. The 0.0 lower limit represents no contrast applied to the image, while the 1.0 upper limit represents the highest amount of contrast that can be applied.
If this method succeeds, it returns
The codec implementer must determine what the upper range value represents and must determine how to map the value to their image processing routines.
+Gets the contrast value of the raw image.
+A reference that receives the contrast value of the raw image. The default value is the "as-shot" setting. The value range for contrast is 0.0 through 1.0. The 0.0 lower limit represents no contrast applied to the image, while the 1.0 upper limit represents the highest amount of contrast that can be applied.
If this method succeeds, it returns
Sets the desired gamma value.
+The desired gamma value.
If this method succeeds, it returns
Gets the current gamma setting of the raw image.
+A reference that receives the current gamma setting.
If this method succeeds, it returns
Sets the sharpness value of the raw image.
+The sharpness value of the raw image. The default value is the "as-shot" setting. The value range for sharpness is 0.0 through 1.0. The 0.0 lower limit represents no sharpening applied to the image, while the 1.0 upper limit represents the highest amount of sharpness that can be applied.
If this method succeeds, it returns
The codec implementer must determine what the upper range value represents and must determine how to map the value to their image processing routines.
+Gets the sharpness value of the raw image.
+A reference that receives the sharpness value of the raw image. The default value is the "as-shot" setting. The value range for sharpness is 0.0 through 1.0. The 0.0 lower limit represents no sharpening applied to the image, while the 1.0 upper limit represents the highest amount of sharpness that can be applied.
If this method succeeds, it returns
Sets the saturation value of the raw image.
+The saturation value of the raw image. The value range for saturation is 0.0 through 1.0. A value of 0.0 represents an image with a fully de-saturated image, while a value of 1.0 represents the highest amount of saturation that can be applied.
If this method succeeds, it returns
The codec implementer must determine what the upper range value represents and must determine how to map the value to their image processing routines.
+Gets the saturation value of the raw image.
+A reference that receives the saturation value of the raw image. The default value is the "as-shot" setting. The value range for saturation is 0.0 through 1.0. A value of 0.0 represents an image with a fully de-saturated image, while a value of 1.0 represents the highest amount of saturation that can be applied.
If this method succeeds, it returns
Sets the tint value of the raw image.
+The tint value of the raw image. The default value is the "as-shot" setting if it exists or 0.0. The value range for sharpness is -1.0 through +1.0. The -1.0 lower limit represents a full green bias to the image, while the 1.0 upper limit represents a full magenta bias.
If this method succeeds, it returns
The codec implementer must determine what the outer range values represent and must determine how to map the values to their image processing routines.
+Gets the tint value of the raw image.
+A reference that receives the tint value of the raw image. The default value is the "as-shot" setting if it exists or 0.0. The value range for sharpness is -1.0 through +1.0. The -1.0 lower limit represents a full green bias to the image, while the 1.0 upper limit represents a full magenta bias.
If this method succeeds, it returns
Sets the noise reduction value of the raw image.
+The noise reduction value of the raw image. The default value is the "as-shot" setting if it exists or 0.0. The value range for noise reduction is 0.0 through 1.0. The 0.0 lower limit represents no noise reduction applied to the image, while the 1.0 upper limit represents highest noise reduction amount that can be applied.
If this method succeeds, it returns
The codec implementer must determine what the upper range value represents and must determine how to map the value to their image processing routines.
+Gets the noise reduction value of the raw image.
+A reference that receives the noise reduction value of the raw image. The default value is the "as-shot" setting if it exists or 0.0. The value range for noise reduction is 0.0 through 1.0. The 0.0 lower limit represents no noise reduction applied to the image, while the 1.0 upper limit represents the highest noise reduction amount that can be applied.
If this method succeeds, it returns
Sets the destination color context.
+The destination color context.
If this method succeeds, it returns
Sets the tone curve for the raw image.
+The size of the pToneCurve structure.
The desired tone curve.
If this method succeeds, it returns
Gets the tone curve of the raw image.
+The size of the pToneCurve buffer.
A reference that receives the
A reference that receives the size needed to obtain the tone curve structure.
If this method succeeds, it returns
Sets the desired rotation angle.
+The desired rotation angle.
If this method succeeds, it returns
Gets the current rotation angle.
+A reference that receives the current rotation angle.
If this method succeeds, it returns
Sets the current
If this method succeeds, it returns
Gets the current
If this method succeeds, it returns
Sets the notification callback method.
+Pointer to the notification callback method.
If this method succeeds, it returns
Gets the current set of parameters.
+Gets or sets the exposure compensation stop value of the raw image.
+Gets or sets the named white point of the raw image.
+If the named white points are not supported by the raw image or the raw file contains named white points that are not supported by this API, the codec implementer should still mark this capability as supported.
If the named white points are not supported by the raw image, a best effort should be made to adjust the image to the named white point even when it isn't a pre-defined white point of the raw file.
If the raw file contains named white points not supported by this API, the codec implementer should support the named white points in
Gets or sets the white point Kelvin temperature of the raw image.
+Gets or sets the contrast value of the raw image.
+Gets or sets the current gamma setting of the raw image.
+Gets or sets the sharpness value of the raw image.
+Gets or sets the saturation value of the raw image.
+Gets or sets the tint value of the raw image.
+Gets or sets the noise reduction value of the raw image.
+Sets the destination color context.
+Gets or sets the current rotation angle.
+Gets or sets the current
Sets the notification callback method.
+Exposes a callback method for raw image change notifications.
+An application-defined callback method used for raw image parameter change notifications.
+A set of
If this method succeeds, it returns
Exposes methods that provide enumeration services for individual metadata items.
+Skips to given number of objects.
+The number of objects to skip.
If this method succeeds, it returns
Resets the current position to the beginning of the enumeration.
+If this method succeeds, it returns
Creates a copy of the current
If this method succeeds, it returns
Exposes methods used for in-place metadata editing. A fast metadata encoder enables you to add and remove metadata to an image without having to fully re-encode the image.
+ A decoder must be created using the
Not all metadata formats support fast metadata encoding. The native metadata handlers that support metadata are IFD, Exif, XMP, and GPS.
If a fast metadata encoder fails, the image will need to be fully re-encoded to add the metadata.
+Finalizes metadata changes to the image stream.
+If this method succeeds, it returns
If the commit fails and returns
If the commit fails for any reason, you will need to re-encode the image to ensure the new metadata is added to the image.
+Retrieves a metadata query writer for fast metadata encoding.
+When this method returns, contains a reference to the fast metadata encoder's metadata query writer.
If this method succeeds, it returns
Retrieves a metadata query writer for fast metadata encoding.
+ Represents an
Initializes the format converter.
+The input bitmap to convert
The destination pixel format
The
The palette to use for conversion.
The alpha threshold to use for conversion.
The palette translation type to use for conversion.
If this method succeeds, it returns
If you do not have a predefined palette, you must first create one. Use InitializeFromBitmap to create the palette object, then pass it in along with your other parameters.
dither, pIPalette, alphaThresholdPercent, and paletteTranslate are used to mitigate color loss when converting to a reduced bit-depth format. For conversions that do not need these settings, the following parameters values should be used: dither set to
The basic algorithm involved when using an ordered dither requires a fixed palette, found in the
If colors in pIPalette do not closely match those in paletteTranslate, the mapping may produce undesirable results.
When converting a bitmap which has an alpha channel, such as a Portable Network Graphics (PNG), to 8bpp, the alpha channel is normally ignored. Any pixels which were transparent in the original bitmap show up as black in the final output because both transparent and black have pixel values of zero in the respective formats.
Some 8bpp content can contain an alpha color; for instance, the Graphics Interchange Format (GIF) format allows for a single palette entry to be used as a transparent color. For this type of content, alphaThresholdPercent specifies what percentage of transparency should map to the transparent color. Because the alpha value is directly proportional to the opacity (not transparency) of a pixel, the alphaThresholdPercent indicates what level of opacity is mapped to the fully transparent color. For instance, 9.8% implies that any pixel with an alpha value of less than 25 will be mapped to the transparent color. A value of 100% maps all pixels which are not fully opaque to the transparent color. Note that the palette should provide a transparent color. If it does not, the 'transparent' color will be the one closest to zero - often black.
+Determines if the source pixel format can be converted to the destination pixel format.
+The source pixel format.
The destination pixel format.
A reference that receives a value indicating whether the source pixel format can be converted to the destination pixel format.
Exposes methods that provide information about a pixel format converter.
+Retrieves a list of GUIDs that signify which pixel formats the converter supports.
+The size of the pPixelFormatGUIDs array.
Pointer to a
The actual array size needed to retrieve all pixel formats supported by the converter.
If this method succeeds, it returns
The format converter does not necessarily guarantee symmetricality with respect to conversion; that is, a converter may be able to convert FROM a particular format without actually being able to convert TO a particular format. In order to test symmetricality, use CanConvert.
To determine the number of pixel formats a converter can handle, set cFormats to 0
and pPixelFormatGUIDs to
. The converter will fill pcActual with the number of formats supported by that converter.
Creates a new
If this method succeeds, it returns
Exposes methods used to create components for the Windows Imaging Component (WIC) such as decoders, encoders and pixel format converters.
+Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of the
If this method succeeds, it returns
When a decoder is created using this method, the file handle must remain alive during the lifetime of the decoder.
+Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of
If this method succeeds, it returns
Other values may be available for both guidContainerFormat and pguidVendor depending on the installed WIC-enabled encoders. The values listed are those that are natively supported by the operating system.
+Creates a new instance of the
If this method succeeds, it returns
Other values may be available for both guidContainerFormat and pguidVendor depending on the installed WIC-enabled encoders. The values listed are those that are natively supported by the operating system.
+Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of an
If this method succeeds, it returns
Creates a new instance of an
If this method succeeds, it returns
Creates a new instance of an
If this method succeeds, it returns
Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of the
If this method succeeds, it returns
Creates a new instance of the
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates a
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Providing a rectangle that is larger than the source will produce undefined results.
This method always creates a separate copy of the source image, similar to the cache option
Creates an
If this method succeeds, it returns
The size of the
The stride of the destination bitmap will equal the stride of the source data, regardless of the width and height specified.
The pixelFormat parameter defines the pixel format for both the input data and the output bitmap.
+Creates an
If this method succeeds, it returns
For a non-palletized bitmap, set
Creates an
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Component types must be enumerated separately. Combinations of component types and
Creates a new instance of the fast metadata encoder based on the given
If this method succeeds, it returns
The Windows provided codecs do not support fast metadata encoding at the decoder level, and only support fast metadata encoding at the frame level. To create a fast metadata encoder from a frame, see CreateFastMetadataEncoderFromFrameDecode.
+Creates a new instance of the fast metadata encoder based on the given image frame.
+The
When this method returns, contains a reference to a new fast metadata encoder.
If this method succeeds, it returns
For a list of supported metadata formats for fast metadata encoding, see WIC Metadata Overview.
+Creates a new instance of a query writer.
+The
The
When this method returns, contains a reference to a new
If this method succeeds, it returns
Creates a new instance of a query writer based on the given query reader. The query writer will be pre-populated with metadata from the query reader.
+The
The
When this method returns, contains a reference to a new metadata writer.
If this method succeeds, it returns
Exposes methods for retrieving metadata blocks and items from a decoder or its image frames using a metadata query expression.
+A metadata query reader uses metadata query expressions to access embedded metadata. For more information on the metadata query language, see the Metadata Query Language Overview.
The benefit of the query reader is the ability to access a metadata item in a single step. +
The query reader also provides the way to traverse the whole set of metadata hierarchy with the help of the GetEnumerator method. + However, it is not recommended to use this method since IWICMetadataBlockReader and IWICMetadataReader provide a more convenient and cheaper way. +
+Gets the metadata query readers container format.
+Pointer that receives the container format
If this method succeeds, it returns
Retrieves the current path relative to the root metadata block.
+The length of the wzNamespace buffer.
Pointer that receives the current namespace location.
The actual buffer length that was needed to retrieve the current namespace location.
If this method succeeds, it returns
If you pass
If the query reader is relative to the top of the metadata hierarchy, it will return a single-char string.
If the query reader is relative to a nested metadata block, this method will return the path to the current query reader.
+Retrieves the metadata block or item identified by a metadata query expression.
+The query expression to the requested metadata block or item.
When this method returns, contains the metadata block or item requested.
If this method succeeds, it returns
GetMetadataByName uses metadata query expressions to access embedded metadata. For more information on the metadata query language, see the Metadata Query Language Overview.
If multiple blocks or items exist that are expressed by the same query expression, the first metadata block or item found will be returned.
+Gets an enumerator of all metadata items at the current relative location within the metadata hierarchy.
+A reference to a variable that receives a reference to the
The retrieved enumerator only contains query strings for the metadata blocks and items in the current level of the hierarchy. +
+Gets the metadata query readers container format.
+Exposes methods for setting or removing metadata blocks and items to an encoder or its image frames using a metadata query expression.
+A metadata query writer uses metadata query expressions to set or remove metadata. For more information on the metadata query language, see the Metadata Query Language Overview.
+Sets a metadata item to a specific location.
+The name of the metadata item.
The metadata to set.
If this method succeeds, it returns
SetMetadataByName uses metadata query expressions to remove metadata. For more information on the metadata query language, see the Metadata Query Language Overview.
If the value set is a nested metadata block then use variant type VT_UNKNOWN
and pvarValue pointing to the
Removes a metadata item from a specific location using a metadata query expression.
+The name of the metadata item to remove.
If this method succeeds, it returns
RemoveMetadataByName uses metadata query expressions to remove metadata. For more information on the metadata query language, see the Metadata Query Language Overview.
If the metadata item is a metadata block, it is removed from the metadata hierarchy.
+Exposes methods for accessing and building a color table, primarily for indexed pixel formats.
+If the
InitializeFromBitmap's fAddTransparentColor parameter will add a transparent color to the end of the color collection if its size is less than 256, otherwise index 255 will be replaced with the transparent color. If a pre-defined palette type is used, it will change to BitmapPaletteTypeCustom since it no longer matches the predefined palette.
The palette interface is an auxiliary imaging interface in that it does not directly concern bitmaps and pixels; rather it provides indexed color translation for indexed bitmaps. For an indexed pixel format with M bits per pixel, the number of colors in the palette must not be greater than 2^M.
Traditionally the basic operation of the palette is to provide a translation from a byte (or smaller) index into a 32bpp color value. This is often accomplished by a 256 entry table of color values.
+Initializes the palette to one of the pre-defined palettes specified by
If this method succeeds, it returns
If a transparent color is added to a palette, the palette is no longer predefined and is returned as
Initializes a palette to the custom color entries provided.
+Pointer to the color array.
The number of colors in pColors.
If this method succeeds, it returns
If a transparent color is required, provide it as part of the custom entries. To add a transparent value to the palette, its alpha value must be 0 (0x00RRGGBB). +
The entry count is limited to 256.
+Initializes a palette using a computed optimized values based on the reference bitmap.
+Pointer to the source bitmap.
The number of colors to initialize the palette with.
A value to indicate whether to add a transparent color.
If this method succeeds, it returns
The resulting palette contains the specified number of colors which best represent the colors present in the bitmap. The algorithm operates on the opaque RGB color value of each pixel in the reference bitmap and hence ignores any alpha values. If a transparent color is required, set the fAddTransparentColor parameter to TRUE and one fewer optimized color will be computed, reducing the colorCount, and a fully transparent color entry will be added.
+Initialize the palette based on a given palette.
+Pointer to the source palette.
If this method succeeds, it returns
Retrieves the
If this method succeeds, it returns
WICBitmapPaletteCustom is used for palettes initialized from both InitializeCustom and InitializeFromBitmap. No distinction is made between optimized and custom palettes.
+Retrieves the number of colors in the color table.
+Pointer that receives the number of colors in the color table.
If this method succeeds, it returns
Fills out the supplied color array with the colors from the internal color table. The color array should be sized according to the return results from GetColorCount.
+If this method succeeds, it returns
Retrieves a value that describes whether the palette is black and white.
+A reference to a variable that receives a boolean value that indicates whether the palette is black and white. TRUE indicates that the palette is black and white; otherwise,
If this method succeeds, it returns
A palette is considered to be black and white only if it contains exactly two entries, one full black (0xFF000000) and one full white (0xFFFFFFFF). +
+Retrieves a value that describes whether a palette is grayscale.
+A reference to a variable that receives a boolean value that indicates whether the palette is grayscale. TRUE indicates that the palette is grayscale; otherwise
If this method succeeds, it returns
A palette is considered grayscale only if, for every entry, the alpha value is 0xFF and the red, green and blue values match. +
+Indicates whether the palette contains an entry that is non-opaque (that is, an entry with an alpha that is less than 1). +
+Pointer that receives TRUE
if the palette contains a transparent color; otherwise,
.
If this method succeeds, it returns
Various image formats support alpha in different ways. PNG has full alpha support by supporting partially transparent palette entries. GIF stores colors as 24bpp, without alpha, but allows one palette entry to be specified as fully transparent. If a palette has multiple fully transparent entries (0x00RRGGBB), GIF will use the last one as its transparent index. +
+Retrieves the
WICBitmapPaletteCustom is used for palettes initialized from both InitializeCustom and InitializeFromBitmap. No distinction is made between optimized and custom palettes.
+Retrieves the number of colors in the color table.
+Retrieves a value that describes whether the palette is black and white.
+A palette is considered to be black and white only if it contains exactly two entries, one full black (0xFF000000) and one full white (0xFFFFFFFF). +
+Retrieves a value that describes whether a palette is grayscale.
+A palette is considered grayscale only if, for every entry, the alpha value is 0xFF and the red, green and blue values match. +
+Exposes methods that provide information about a pixel format.
+Gets the pixel format
Pointer that receives the pixel format
If this method succeeds, it returns
Gets the pixel format's
If this method succeeds, it returns
The returned color context is the default color space for the pixel format. However, if an
Gets the bits per pixel (BPP) of the pixel format.
+Pointer that receives the BPP of the pixel format.
If this method succeeds, it returns
Gets the number of channels the pixel format contains.
+Pointer that receives the channel count.
If this method succeeds, it returns
Gets the pixel format's channel mask.
+The index to the channel mask to retrieve.
The size of the pbMaskBuffer buffer.
Pointer to the mask buffer.
The actual buffer size needed to obtain the channel mask.
If this method succeeds, it returns
If 0 and
Gets the pixel format
Gets the pixel format's
The returned color context is the default color space for the pixel format. However, if an
Gets the bits per pixel (BPP) of the pixel format.
+Gets the number of channels the pixel format contains.
+Extends
Returns whether the format supports transparent pixels.
+Returns TRUE if the pixel format supports transparency; otherwise,
If this method succeeds, it returns
An indexed pixel format will not return TRUE even though it may have some transparency support. +
+Returns the
If this method succeeds, it returns
Returns whether the format supports transparent pixels.
+An indexed pixel format will not return TRUE even though it may have some transparency support. +
+Notify method is documented only for compliance; its use is not recommended and may be altered or unavailable in the future. Instead, use RegisterProgressNotification. +
+If this method succeeds, it returns
Exposes methods for obtaining information about and controlling progressive decoding.
+Images can only be progressively decoded if they were progressively encoded. Progressive images automatically start at the highest (best quality) progressive level. The caller must manually set the decoder to a lower progressive level.
E_NOTIMPL is returned if the codec does not support progressive level decoding.
+Gets the number of levels of progressive decoding supported by the CODEC.
+Indicates the number of levels supported by the CODEC.
If this method succeeds, it returns
Users should not use this function to iterate through the progressive levels of a progressive JPEG image. JPEG progressive levels are determined by the image and do not have a fixed level count. Using this method will force the application to wait for all progressive levels to be downloaded before it can return. Instead, applications should use the following code to iterate through the progressive levels of a progressive JPEG image.
+Gets the decoder's current progressive level.
+Indicates the current level specified.
If this method succeeds, it returns
The level always defaults to the highest progressive level. In order to decode a lower progressive level, SetCurrentLevel must first be called.
+Specifies the level to retrieve on the next call to CopyPixels.
+If this method succeeds, it returns
A call does not have to request every level supported. If a caller requests level 1, without having previously requested level 0, the bits returned by the next call to CopyPixels will include both levels.
If the requested level is invalid, the error returned is
Gets the number of levels of progressive decoding supported by the CODEC.
+Users should not use this function to iterate through the progressive levels of a progressive JPEG image. JPEG progressive levels are determined by the image and do not have a fixed level count. Using this method will force the application to wait for all progressive levels to be downloaded before it can return. Instead, applications should use the following code to iterate through the progressive levels of a progressive JPEG image.
+Gets or sets the decoder's current progressive level.
+The level always defaults to the highest progressive level. In order to decode a lower progressive level, SetCurrentLevel must first be called.
+Represents a Windows Imaging Component (WIC) stream for referencing imaging and metadata content.
+Decoders and metadata handlers are expected to create sub streams of whatever stream they hold when handing off control for embedded metadata to another metadata handler. If the stream is not restricted then use MAXLONGLONG as the max size and offset 0.
The
Initializes a stream from another stream. Access rights are inherited from the underlying stream.
+The stream used to initialize this stream.
If this method succeeds, it returns
Initializes a stream from a particular file.
+The file used to initialize the stream.
The desired file access mode.
Value | Meaning
---|---
GENERIC_READ | Read access.
GENERIC_WRITE | Write access.
If this method succeeds, it returns
The
Initializes a stream to treat a block of memory as a stream. The stream cannot grow beyond the buffer size.
+Pointer to the buffer used to initialize the stream.
The size of buffer.
If this method succeeds, it returns
This method should be avoided whenever possible. The caller is responsible for ensuring the memory block is valid for the lifetime of the stream when using InitializeFromMemory. A workaround for this behavior is to create an
If you require a growable memory stream, use CreateStreamOnHGlobal.
+Initializes the stream as a substream of another stream.
+Pointer to the input stream.
The stream offset used to create the new stream.
The maximum size of the stream.
If this method succeeds, it returns
The stream functions with its own stream position, independent of the underlying stream but restricted to a region. All seek positions are relative to the sub region. It is allowed, though not recommended, to have multiple writable sub streams overlapping the same range.
+Contains members that identify a pattern within an image file which can be used to identify a particular format.
+The offset the pattern is located in the file.
The pattern length.
The actual pattern.
The pattern mask.
The end of the stream.
Defines raw codec capabilities.
+Size of the
The codec's major version.
The codec's minor version.
The
The
The
The
The
The
The
The
The
The
The
The
The
The
The
Represents a raw image tone curve.
+The number of tone curve points.
The array of tone curve points.
Represents a raw image tone curve point.
+The tone curve input.
The tone curve output.
Represents an object that can receive drawing commands. Interfaces that inherit from
Your application should create render targets once and hold onto them for the life of the application or until the render target's EndDraw method returns the
Represents a Direct2D drawing resource.
+Retrieves the factory associated with this resource.
+When this method returns, contains a reference to a reference to the factory that created this resource. This parameter is passed uninitialized.
Retrieves the factory associated with this resource.
+Creates a Direct2D bitmap from a reference to in-memory source data.
+The dimension of the bitmap to create in pixels.
A reference to the memory location of the image data, or
The byte count of each scanline, which is equal to (the image width in pixels × the number of bytes per pixel) + memory padding. If srcData is
The pixel format and dots per inch (DPI) of the bitmap to create.
When this method returns, contains a reference to a reference to the new bitmap. This parameter is passed uninitialized.
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Before Direct2D can load a WIC bitmap, that bitmap must be converted to a supported pixel format and alpha mode. For a list of supported pixel formats and alpha modes, see Supported Pixel Formats and Alpha Modes.
+Creates an
If this method succeeds, it returns
The CreateSharedBitmap method is useful for efficiently reusing bitmap data and can also be used to provide interoperability with Direct3D.
+Creates an
If this method succeeds, it returns
Creates a new
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates a new bitmap render target for use during intermediate offscreen drawing that is compatible with the current render target and has the same pixel format (but not alpha mode) as the current render target.
+The desired size of the new render target in device-independent pixels. The pixel size is computed from the desired size using the parent target DPI. If the desiredSize maps to an integer-pixel size, the DPI of the compatible render target is the same as the DPI of the parent target. If desiredSize maps to a fractional-pixel size, the pixel size is rounded up to the nearest integer and the DPI for the compatible render target is slightly higher than the DPI of the parent render target. In all cases, the coordinate (desiredSize.width, desiredSize.height) maps to the lower-right corner of the compatible render target.
When this method returns, contains the address of a reference to a new bitmap render target. This parameter is passed uninitialized.
When this method returns, contains the address of a reference to a new bitmap render target. This parameter is passed uninitialized.
If this method succeeds, it returns
The bitmap render target created by this method is not compatible with GDI.
+Creates a layer resource that can be used with this render target and its compatible render targets. The new layer has the specified initial size.
+The initial size of the layer in device-independent pixels, or
When the method returns, contains a reference to a reference to the new layer. This parameter is passed uninitialized.
If this method succeeds, it returns
Regardless of whether a size is initially specified, the layer automatically resizes as needed.
+Create a mesh that uses triangles to describe a shape.
+When this method returns, contains a reference to a reference to the new mesh.
If this method succeeds, it returns
To populate a mesh, use its Open method to obtain an
Draws a line between the specified points using the specified stroke style.
+The start point of the line, in device-independent pixels.
The end point of the line, in device-independent pixels.
The brush used to paint the line's stroke.
The width of the stroke, in device-independent pixels. The value must be greater than or equal to 0.0f. If this parameter isn't specified, it defaults to 1.0f. The stroke is centered on the line.
The style of stroke to paint, or
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawLine) failed, check the result returned by the
Draws the outline of a rectangle that has the specified dimensions and stroke style.
+The dimensions of the rectangle to draw, in device-independent pixels.
The brush used to paint the rectangle's stroke.
The width of the stroke, in device-independent pixels. The value must be greater than or equal to 0.0f. If this parameter isn't specified, it defaults to 1.0f. The stroke is centered on the line.
The style of stroke to paint, or
When this method fails, it does not return an error code. To determine whether a drawing method (such as DrawRectangle) failed, check the result returned by the
Paints the interior of the specified rectangle.
+The dimension of the rectangle to paint, in device-independent pixels.
The brush used to paint the rectangle's interior.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as FillRectangle) failed, check the result returned by the
Draws the outline of the specified rounded rectangle using the specified stroke style.
+The dimensions of the rounded rectangle to draw, in device-independent pixels.
The brush used to paint the rounded rectangle's outline.
The width of the stroke, in device-independent pixels. The value must be greater than or equal to 0.0f. If this parameter isn't specified, it defaults to 1.0f. The stroke is centered on the line.
The style of the rounded rectangle's stroke, or
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawRoundedRectangle) failed, check the result returned by the
Paints the interior of the specified rounded rectangle.
+The dimensions of the rounded rectangle to paint, in device independent pixels.
The brush used to paint the interior of the rounded rectangle.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as FillRoundedRectangle) failed, check the result returned by the
Draws the outline of the specified ellipse using the specified stroke style.
+The position and radius of the ellipse to draw, in device-independent pixels.
The brush used to paint the ellipse's outline.
The width of the stroke, in device-independent pixels. The value must be greater than or equal to 0.0f. If this parameter isn't specified, it defaults to 1.0f. The stroke is centered on the line.
The style of stroke to apply to the ellipse's outline, or
The DrawEllipse method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawEllipse) failed, check the result returned by the
Paints the interior of the specified ellipse.
+The position and radius, in device-independent pixels, of the ellipse to paint.
The brush used to paint the interior of the ellipse.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as FillEllipse) failed, check the result returned by the
Draws the outline of the specified geometry using the specified stroke style.
+The geometry to draw.
The brush used to paint the geometry's stroke.
The width of the stroke, in device-independent pixels. The value must be greater than or equal to 0.0f. If this parameter isn't specified, it defaults to 1.0f. The stroke is centered on the line.
The style of stroke to apply to the geometry's outline, or
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawGeometry) failed, check the result returned by the
Paints the interior of the specified geometry.
+The geometry to paint.
The brush used to paint the geometry's interior.
The opacity mask to apply to the geometry, or
If the opacityBrush parameter is not
When this method fails, it does not return an error code. To determine whether a drawing operation (such as FillGeometry) failed, check the result returned by the
Paints the interior of the specified mesh.
+The mesh to paint.
The brush used to paint the mesh.
The current antialias mode of the render target must be
FillMesh does not expect a particular winding order for the triangles in the
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as FillMesh) failed, check the result returned by the
For this method to work properly, the render target must be using the
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as FillOpacityMask) failed, check the result returned by the
Draws the specified bitmap after scaling it to the size of the specified rectangle.
+The bitmap to render.
The size and position, in device-independent pixels in the render target's coordinate space, of the area to which the bitmap is drawn. If the rectangle is not well-ordered, nothing is drawn, but the render target does not enter an error state.
A value between 0.0f and 1.0f, inclusive, that specifies the opacity value to be applied to the bitmap; this value is multiplied against the alpha values of the bitmap's contents. Default is 1.0f.
The interpolation mode to use if the bitmap is scaled or rotated by the drawing operation. The default value is
The size and position, in device-independent pixels in the bitmap's coordinate space, of the area within the bitmap to draw;
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawBitmap) failed, check the result returned by the
Draws the specified text using the format information provided by an
To create an
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawText) failed, check the result returned by the
Draws the formatted text described by the specified
When drawing the same text repeatedly, using the DrawTextLayout method is more efficient than using the DrawText method because the text doesn't need to be formatted and the layout processed with each call.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawTextLayout) failed, check the result returned by the
Draws the specified glyphs.
+The origin, in device-independent pixels, of the glyphs' baseline.
The glyphs to render.
The brush used to paint the specified glyphs.
A value that indicates how glyph metrics are used to measure text when it is formatted. The default value is
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as DrawGlyphRun) failed, check the result returned by the
Gets the current transform of the render target.
+When this returns, contains the current transform of the render target. This parameter is passed uninitialized.
Sets the antialiasing mode of the render target. The antialiasing mode applies to all subsequent drawing operations, excluding text and glyph drawing operations.
+The antialiasing mode for future drawing operations.
To specify the antialiasing mode for text and glyph operations, use the SetTextAntialiasMode method.
+Retrieves the current antialiasing mode for nontext drawing operations.
+The current antialiasing mode for nontext drawing operations.
Specifies the antialiasing mode to use for subsequent text and glyph drawing operations.
+The antialiasing mode to use for subsequent text and glyph drawing operations.
Gets the current antialiasing mode for text and glyph drawing operations.
+The current antialiasing mode for text and glyph drawing operations.
Specifies text rendering options to be applied to all subsequent text and glyph drawing operations.
+The text rendering options to be applied to all subsequent text and glyph drawing operations;
If the settings specified by textRenderingParams are incompatible with the render target's text antialiasing mode (specified by SetTextAntialiasMode), subsequent text and glyph drawing operations will fail and put the render target into an error state.
+Retrieves the render target's current text rendering options.
+When this method returns, textRenderingParamscontains the address of a reference to the render target's current text rendering options.
If the settings specified by textRenderingParams are incompatible with the render target's text antialiasing mode (specified by SetTextAntialiasMode), subsequent text and glyph drawing operations will fail and put the render target into an error state.
+Specifies a label for subsequent drawing operations.
+A label to apply to subsequent drawing operations.
A label to apply to subsequent drawing operations.
The labels specified by this method are printed by debug error messages. If no tag is set, the default value for each tag is 0.
+Gets the label for subsequent drawing operations.
+When this method returns, contains the first label for subsequent drawing operations. This parameter is passed uninitialized. If
When this method returns, contains the second label for subsequent drawing operations. This parameter is passed uninitialized. If
If the same address is passed for both parameters, both parameters receive the value of the second tag.
+Adds the specified layer to the render target so that it receives all subsequent drawing operations until PopLayer is called.
+The PushLayer method allows a caller to begin redirecting rendering to a layer. All rendering operations are valid in a layer. The location of the layer is affected by the world transform set on the render target.
Each PushLayer must have a matching PopLayer call. If there are more PopLayer calls than PushLayer calls, the render target is placed into an error state. If Flush is called before all outstanding layers are popped, the render target is placed into an error state, and an error is returned. The error state can be cleared by a call to EndDraw.
A particular
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as PushLayer) failed, check the result returned by the
Stops redirecting drawing operations to the layer that is specified by the last PushLayer call.
+A PopLayer must match a previous PushLayer call.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as PopLayer) failed, check the result returned by the
Executes all pending drawing commands.
+When this method returns, contains the tag for drawing operations that caused errors or 0 if there were no errors. This parameter is passed uninitialized.
When this method returns, contains the tag for drawing operations that caused errors or 0 if there were no errors. This parameter is passed uninitialized.
If the method succeeds, it returns
This command does not flush the device that is associated with the render target.
Calling this method resets the error state of the render target.
+Saves the current drawing state to the specified
Sets the render target's drawing state to that of the specified
Specifies a rectangle to which all subsequent drawing operations are clipped.
+The size and position of the clipping area, in device-independent pixels.
The antialiasing mode that is used to draw the edges of clip rects that have subpixel boundaries, and to blend the clip with the scene contents. The blending is performed once when the PopAxisAlignedClip method is called, and does not apply to each primitive within the layer.
The clipRect is transformed by the current world transform set on the render target. After the transform is applied to the clipRect that is passed in, the axis-aligned bounding box for the clipRect is computed. For efficiency, the contents are clipped to this axis-aligned bounding box and not to the original clipRect that is passed in.
The following diagrams show how a rotation transform is applied to the render target, the resulting clipRect, and a calculated axis-aligned bounding box.
Assume the rectangle in the following illustration is a render target that is aligned to the screen pixels.
Apply a rotation transform to the render target. In the following illustration, the black rectangle represents the original render target and the red dashed rectangle represents the transformed render target.
After calling PushAxisAlignedClip, the rotation transform is applied to the clipRect. In the following illustration, the blue rectangle represents the transformed clipRect.
The axis-aligned bounding box is calculated. The green dashed rectangle represents the bounding box in the following illustration. All contents are clipped to this axis-aligned bounding box.
Note: If rendering operations fail or if PopAxisAlignedClip is not called, clip rects may cause some artifacts on the render target. PopAxisAlignedClip can be considered a drawing operation that is designed to fix the borders of a clipping region. Without this call, the borders of a clipped area may be not antialiased or otherwise corrected.
The PushAxisAlignedClip and PopAxisAlignedClip must match. Otherwise, the error state is set. For the render target to continue receiving new commands, you can call Flush to clear the error.
A PushAxisAlignedClip and PopAxisAlignedClip pair can occur around or within a PushLayer and PopLayer, but cannot overlap. For example, the sequence of PushAxisAlignedClip, PushLayer, PopLayer, PopAxisAlignedClip is valid, but the sequence of PushAxisAlignedClip, PushLayer, PopAxisAlignedClip, PopLayer is invalid.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as PushAxisAlignedClip) failed, check the result returned by the
Removes the last axis-aligned clip from the render target. After this method is called, the clip is no longer applied to subsequent drawing operations.
+A PushAxisAlignedClip/PopAxisAlignedClip pair can occur around or within a PushLayer/PopLayer pair, but may not overlap. For example, a PushAxisAlignedClip, PushLayer, PopLayer, PopAxisAlignedClip sequence is valid, but a PushAxisAlignedClip, PushLayer, PopAxisAlignedClip, PopLayer sequence is not.
PopAxisAlignedClip must be called once for every call to PushAxisAlignedClip.
For an example, see How to Clip with an Axis-Aligned Clip Rectangle.
This method doesn't return an error code if it fails. To determine whether a drawing operation (such as PopAxisAlignedClip) failed, check the result returned by the
Clears the drawing area to the specified color.
+The color to which the drawing area is cleared, or
Direct2D interprets the clearColor as straight alpha (not premultiplied). If the render target's alpha mode is
If the render target has an active clip (specified by PushAxisAlignedClip), the clear command is applied only to the area within the clip region.
+Initiates drawing on this render target.
+Drawing operations can only be issued between a BeginDraw and EndDraw call.
BeginDraw and EndDraw are used to indicate that a render target is in use by the Direct2D system. Different implementations of
The BeginDraw method must be called before rendering operations can be called, though state-setting and state-retrieval operations can be performed even outside of BeginDraw/EndDraw.
After BeginDraw is called, a render target will normally build up a batch of rendering commands, but defer processing of these commands until either an internal buffer is full, the Flush method is called, or until EndDraw is called. The EndDraw method causes any batched drawing operations to complete, and then returns an
If EndDraw is called without a matched call to BeginDraw, it returns an error indicating that BeginDraw must be called before EndDraw. Calling BeginDraw twice on a render target puts the target into an error state where nothing further is drawn, and returns an appropriate
Ends drawing operations on the render target and indicates the current error state and associated tags.
+When this method returns, contains the tag for drawing operations that caused errors or 0 if there were no errors. This parameter is passed uninitialized.
When this method returns, contains the tag for drawing operations that caused errors or 0 if there were no errors. This parameter is passed uninitialized.
If the method succeeds, it returns
Drawing operations can only be issued between a BeginDraw and EndDraw call.
BeginDraw and EndDraw are used to indicate that a render target is in use by the Direct2D system. Different implementations of
The BeginDraw method must be called before rendering operations can be called, though state-setting and state-retrieval operations can be performed even outside of BeginDraw/EndDraw.
After BeginDraw is called, a render target will normally build up a batch of rendering commands, but defer processing of these commands until either an internal buffer is full, the Flush method is called, or until EndDraw is called. The EndDraw method causes any batched drawing operations to complete, and then returns an
If EndDraw is called without a matched call to BeginDraw, it returns an error indicating that BeginDraw must be called before EndDraw. Calling BeginDraw twice on a render target puts the target into an error state where nothing further is drawn, and returns an appropriate
Retrieves the pixel format and alpha mode of the render target.
+The pixel format and alpha mode of the render target.
Sets the dots per inch (DPI) of the render target.
+A value greater than or equal to zero that specifies the horizontal DPI of the render target.
A value greater than or equal to zero that specifies the vertical DPI of the render target.
This method specifies the mapping from pixel space to device-independent space for the render target. If both dpiX and dpiY are 0, the factory-read system DPI is chosen. If one parameter is zero and the other unspecified, the DPI is not changed.
For
Return the render target's dots per inch (DPI).
+When this method returns, contains the horizontal DPI of the render target. This parameter is passed uninitialized.
When this method returns, contains the vertical DPI of the render target. This parameter is passed uninitialized.
This method indicates the mapping from pixel space to device-independent space for the render target.
For
Returns the size of the render target in device-independent pixels.
+The current size of the render target in device-independent pixels.
Returns the size of the render target in device pixels.
+The size of the render target in device pixels.
Gets the maximum size, in device-dependent units (pixels), of any one bitmap dimension supported by the render target.
+The maximum size, in pixels, of any one bitmap dimension supported by the render target.
This method returns the maximum texture size of the Direct3D device.
Note: The software renderer and WARP devices return the value of 16 megapixels (16*1024*1024). You can create a Direct2D texture that is this size, but not a Direct3D texture that is this size.
+Indicates whether the render target supports the specified properties.
+The render target properties to test.
TRUE if the specified render target properties are supported by this render target; otherwise,
This method does not evaluate the DPI settings specified by the renderTargetProperties parameter.
+Gets or sets the current transform of the render target.
+Retrieves or sets the current antialiasing mode for nontext drawing operations.
+Gets or sets the current antialiasing mode for text and glyph drawing operations.
+Retrieves or sets the render target's current text rendering options.
+If the settings specified by textRenderingParams are incompatible with the render target's text antialiasing mode (specified by SetTextAntialiasMode), subsequent text and glyph drawing operations will fail and put the render target into an error state.
+Retrieves the pixel format and alpha mode of the render target.
+Returns the size of the render target in device-independent pixels.
+Returns the size of the render target in device pixels.
+Gets the maximum size, in device-dependent units (pixels), of any one bitmap dimension supported by the render target.
+This method returns the maximum texture size of the Direct3D device.
Note: The software renderer and WARP devices return the value of 16 megapixels (16*1024*1024). You can create a Direct2D texture that is this size, but not a Direct3D texture that is this size.
+Encapsulates a 32-bit device independent bitmap and device context, which can be used for rendering glyphs.
+You create an
if (SUCCEEDED(hr)) + { hr = g_pGdiInterop->CreateBitmapRenderTarget(hdc, r.right, r.bottom, &g_pBitmapRenderTarget); + } +
STDMETHODIMP GdiTextRenderer::DrawGlyphRun( __maybenull void* clientDrawingContext, FLOAT baselineOriginX, FLOAT baselineOriginY, DWRITE_MEASURING_MODE measuringMode, __in DWRITE_GLYPH_RUN const* glyphRun, __in DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription, IUnknown* clientDrawingEffect )
+ { HRESULT hr = S_OK; // Pass on the drawing call to the render target to do the real work. RECT dirtyRect = {0}; hr = pRenderTarget_->DrawGlyphRun( baselineOriginX, baselineOriginY, measuringMode, glyphRun, pRenderingParams_, RGB(0,200,255), &dirtyRect ); return hr;
+ }
+
+ The baselineOriginX, baselineOriginY, measuringMode, and glyphRun parameters are provided (as arguments) when the callback method is invoked. The renderingParams, textColor and blackBoxRect are not. Default rendering params can be retrieved by using the Draws a run of glyphs to a bitmap target at the specified position.
+The horizontal position of the baseline origin, in DIPs, relative to the upper-left corner of the DIB.
The vertical position of the baseline origin, in DIPs, relative to the upper-left corner of the DIB.
The measuring method for glyphs in the run, used with the other properties to determine the rendering mode.
The structure containing the properties of the glyph run.
The object that controls rendering behavior.
The foreground color of the text.
The optional rectangle that receives the bounding box (in pixels not DIPs) of all the pixels affected by drawing the glyph run. The black box rectangle may extend beyond the dimensions of the bitmap.
If this method succeeds, it returns
You can use the
STDMETHODIMP GdiTextRenderer::DrawGlyphRun( __maybenull void* clientDrawingContext, FLOAT baselineOriginX, FLOAT baselineOriginY,measuringMode, __in const* glyphRun, __in const* glyphRunDescription, * clientDrawingEffect ) + { hr = ; // Pass on the drawing call to the render target to do the real work. dirtyRect = {0}; hr = pRenderTarget_->DrawGlyphRun( baselineOriginX, baselineOriginY, measuringMode, glyphRun, pRenderingParams_, RGB(0,200,255), &dirtyRect ); return hr; + } +
The baselineOriginX, baselineOriginY, measuringMode, and glyphRun parameters are provided (as arguments) when the callback method is invoked. The renderingParams, textColor and blackBoxRect are not.
Default rendering params can be retrieved by using the
Gets a handle to the memory device context.
+Returns a device context handle to the memory device context.
An application can use the device context to draw using GDI functions. An application can obtain the bitmap handle (
Note that this method takes no parameters and returns an
memoryHdc = g_pBitmapRenderTarget->GetMemoryDC(); +
The
Gets the number of bitmap pixels per DIP.
+The number of bitmap pixels per DIP.
A DIP (device-independent pixel) is 1/96 inch. Therefore, this value is the number of pixels per inch divided by 96.
+Sets the number of bitmap pixels per DIP (device-independent pixel). A DIP is 1/96 inch, so this value is the number of pixels per inch divided by 96.
+A value that specifies the number of pixels per DIP.
If this method succeeds, it returns
Gets the transform that maps abstract coordinates to DIPs. By default this is the identity transform. Note that this is unrelated to the world transform of the underlying device context.
+When this method returns, contains a transform matrix.
If this method succeeds, it returns
Sets the transform that maps abstract coordinates to DIPs (device-independent pixels). This does not affect the world transform of the underlying device context.
+ Specifies the new transform. This parameter can be
If this method succeeds, it returns
Gets the dimensions of the target bitmap.
+Returns the width and height of the bitmap in pixels.
If this method succeeds, it returns
Resizes the bitmap.
+The new bitmap width, in pixels.
The new bitmap height, in pixels.
If this method succeeds, it returns
Gets a handle to the memory device context.
+ An application can use the device context to draw using GDI functions. An application can obtain the bitmap handle (
Note that this method takes no parameters and returns an
memoryHdc = g_pBitmapRenderTarget->GetMemoryDC(); +
The
Gets or sets the number of bitmap pixels per DIP.
+A DIP (device-independent pixel) is 1/96 inch. Therefore, this value is the number of pixels per inch divided by 96.
+Gets or sets the transform that maps abstract coordinates to DIPs. By default this is the identity transform. Note that this is unrelated to the world transform of the underlying device context.
+Gets the dimensions of the target bitmap.
+Wraps an application-defined inline graphic, allowing DWrite to query metrics as if the graphic were a glyph inline with the text.
+Wraps an application-defined inline graphic, allowing DWrite to query metrics as if the graphic were a glyph inline with the text.
+ The application implemented rendering callback (
If this method succeeds, it returns
If this method succeeds, it returns
The overhangs should be returned relative to the reported size of the object (see
If this method succeeds, it returns
Layout uses this to determine the line-breaking behavior of the inline object among the text.
+When this method returns, contains a value which indicates the line-breaking condition between the object and the content immediately preceding it.
When this method returns, contains a value which indicates the line-breaking condition between the object and the content immediately following it.
If this method succeeds, it returns
Used to create all subsequent DirectWrite objects. This interface is the root factory interface for all DirectWrite objects.
+ Create an
if (SUCCEEDED(hr)) + { hr =( , __uuidof( ), reinterpret_cast< **>(&pDWriteFactory_) ); + }
An
Gets an object which represents the set of installed fonts.
+If this parameter is nonzero, the function performs an immediate check for changes to the set of installed fonts. If this parameter is
When this method returns, contains the address of a reference to the system font collection object, or
Creates a font collection using a custom font collection loader.
+An application-defined font collection loader, which must have been previously registered using RegisterFontCollectionLoader.
The key used by the loader to identify a collection of font files. The buffer allocated for this key should at least be the size of collectionKeySize.
The size, in bytes, of the collection key.
Contains an address of a reference to the system font collection object if the method succeeds, or
If this method succeeds, it returns
Registers a custom font collection loader with the factory object.
+Pointer to a
If this method succeeds, it returns
This function registers a font collection loader with DirectWrite. The font collection loader interface, which should be implemented by a singleton object, handles enumerating font files in a font collection given a particular type of key. A given instance can only be registered once. Succeeding attempts will return an error, indicating that it has already been registered. Note that font file loader implementations must not register themselves with DirectWrite inside their constructors, and must not unregister themselves inside their destructors, because registration and unregistration operations increment and decrement the object reference count respectively. Instead, registration and unregistration with DirectWrite of font file loaders should be performed outside of the font file loader implementation.
+Unregisters a custom font collection loader that was previously registered using RegisterFontCollectionLoader.
+If this method succeeds, it returns
Creates a font file reference object from a local font file.
+An array of characters that contains the absolute file path for the font file. Subsequent operations on the constructed object may fail if the user provided filePath doesn't correspond to a valid file on the disk.
The last modified time of the input file path. If the parameter is omitted, the function will access the font file to obtain its last write time. You should specify this value to avoid extra disk access. Subsequent operations on the constructed object may fail if the user provided lastWriteTime doesn't match the file on the disk.
When this method returns, contains an address of a reference to the newly created font file reference object, or
If this method succeeds, it returns
Creates a reference to an application-specific font file resource.
+A font file reference key that uniquely identifies the font file resource during the lifetime of fontFileLoader.
The size of the font file reference key in bytes.
The font file loader that will be used by the font system to load data from the file identified by fontFileReferenceKey.
Contains an address of a reference to the newly created font file object when this method succeeds, or
If this method succeeds, it returns
This function is provided for cases when an application or a document needs to use a private font without having to install it on the system. fontFileReferenceKey has to be unique only in the scope of the fontFileLoader used in this call.
+Creates an object that represents a font face.
+A value that indicates the type of file format of the font face.
The number of font files, in element count, required to represent the font face.
A font file object representing the font face. Because
The zero-based index of a font face, in cases when the font files contain a collection of font faces. If the font files contain a single face, this value should be zero.
A value that indicates which, if any, font face simulation flags for algorithmic means of making text bold or italic are applied to the current font face.
When this method returns, contains an address of a reference to the newly created font face object, or
If this method succeeds, it returns
Creates an object that represents a font face.
+A value that indicates the type of file format of the font face.
The number of font files, in element count, required to represent the font face.
A font file object representing the font face. Because
The zero-based index of a font face, in cases when the font files contain a collection of font faces. If the font files contain a single face, this value should be zero.
A value that indicates which, if any, font face simulation flags for algorithmic means of making text bold or italic are applied to the current font face.
When this method returns, contains an address of a reference to the newly created font face object, or
If this method succeeds, it returns
Creates a rendering parameters object with default settings for the primary monitor. Different monitors may have different rendering parameters, for more information see the How to Add Support for Multiple Monitors topic.
+Standard
Creates a rendering parameters object with default settings for the specified monitor. In most cases, this is the preferred way to create a rendering parameters object.
+A handle for the specified monitor.
When this method returns, contains an address of a reference to the rendering parameters object created by this method.
If this method succeeds, it returns
Creates a rendering parameters object with the specified properties.
+The gamma level to be set for the new rendering parameters object.
The enhanced contrast level to be set for the new rendering parameters object.
The ClearType level to be set for the new rendering parameters object.
Represents the internal structure of a device pixel (that is, the physical arrangement of red, green, and blue color components) that is assumed for purposes of rendering text.
A value that represents the method (for example, ClearType natural quality) for rendering glyphs.
When this method returns, contains an address of a reference to the newly created rendering parameters object.
If this method succeeds, it returns
Registers a font file loader with DirectWrite.
+Pointer to a
If this method succeeds, it returns
This function registers a font file loader with DirectWrite. The font file loader interface, which should be implemented by a singleton object, handles loading font file resources of a particular type from a key. A given instance can only be registered once. Succeeding attempts will return an error, indicating that it has already been registered. Note that font file loader implementations must not register themselves with DirectWrite inside their constructors, and must not unregister themselves inside their destructors, because registration and unregistration operations increment and decrement the object reference count respectively. Instead, registration and unregistration with DirectWrite of font file loaders should be performed outside of the font file loader implementation.
+Unregisters a font file loader that was previously registered with the DirectWrite font system using RegisterFontFileLoader.
+If this method succeeds, it returns
This function unregisters font file loader callbacks with the DirectWrite font system. You should implement the font file loader interface by a singleton object. Note that font file loader implementations must not register themselves with DirectWrite inside their constructors and must not unregister themselves in their destructors, because registration and unregistration operations increment and decrement the object reference count respectively. Instead, registration and unregistration of font file loaders with DirectWrite should be performed outside of the font file loader implementation.
+Creates a text format object used for text layout.
+An array of characters that contains the name of the font family
A reference to a font collection object. When this is
A value that indicates the font weight for the text object created by this method.
A value that indicates the font style for the text object created by this method.
A value that indicates the font stretch for the text object created by this method.
The logical size of the font in DIP ("device-independent pixel") units. A DIP equals 1/96 inch.
An array of characters that contains the locale name.
When this method returns, contains an address of a reference to a newly created text format object, or
If this method succeeds, it returns
Creates a typography object for use in a text layout.
+When this method returns, contains the address of a reference to a newly created typography object, or
If this method succeeds, it returns
Creates an object that is used for interoperability with GDI.
+When this method returns, contains an address of a reference to a GDI interop object if successful, or
If this method succeeds, it returns
Takes a string, text format, and associated constraints, and produces an object that represents the fully analyzed and formatted result.
+An array of characters that contains the string to create a new
The number of characters in the string.
A reference to an object that indicates the format to apply to the string.
The width of the layout box.
The height of the layout box.
When this method returns, contains an address of a reference to the resultant text layout object.
If this method succeeds, it returns
Takes a string, format, and associated constraints, and produces an object representing the result, formatted for a particular display resolution and measuring mode.
+An array of characters that contains the string to create a new
The length of the string, in character count.
The text formatting object to apply to the string.
The width of the layout box.
The height of the layout box.
The number of physical pixels per DIP (device independent pixel). For example, if rendering onto a 96 DPI device pixelsPerDip is 1. If rendering onto a 120 DPI device pixelsPerDip is 1.25 (120/96).
An optional transform applied to the glyphs and their positions. This transform is applied after the scaling specified by the font size and pixels per DIP.
Instructs the text layout to use the same metrics as GDI bi-level text when set to
When this method returns, contains an address to the reference of the resultant text layout object.
If this method succeeds, it returns
The resulting text layout should only be used for the intended resolution, and for cases where text scalability is desired CreateTextLayout should be used instead.
+Creates an inline object for trimming, using an ellipsis as the omission sign.
+A text format object, created with CreateTextFormat, used for text layout.
When this method returns, contains an address of a reference to the omission (that is, ellipsis trimming) sign created by this method.
If this method succeeds, it returns
The ellipsis will be created using the current settings of the format, including base font, style, and any effects. Alternate omission signs can be created by the application by implementing
Returns an interface for performing text analysis.
+When this method returns, contains an address of a reference to the newly created text analyzer object.
If this method succeeds, it returns
Creates a number substitution object using a locale name, substitution method, and an indicator whether to ignore user overrides (use NLS defaults for the given culture instead).
+A value that specifies how to apply number substitution on digits and related punctuation.
The name of the locale to be used in the numberSubstitution object.
A Boolean flag that indicates whether to ignore user overrides.
When this method returns, contains an address to a reference to the number substitution object created by this method.
If this method succeeds, it returns
Creates a glyph run analysis object, which encapsulates information used to render a glyph run.
+A structure that contains the properties of the glyph run (font face, advances, and so on).
Number of physical pixels per DIP (device independent pixel). For example, if rendering onto a 96 DPI bitmap then pixelsPerDip is 1. If rendering onto a 120 DPI bitmap then pixelsPerDip is 1.25.
Optional transform applied to the glyphs and their positions. This transform is applied after the scaling specified by the emSize and pixelsPerDip.
A value that specifies the rendering mode, which must be one of the raster rendering modes (that is, not default and not outline).
Specifies the measuring mode to use with glyphs.
The horizontal position (X-coordinate) of the baseline origin, in DIPs.
Vertical position (Y-coordinate) of the baseline origin, in DIPs.
When this method returns, contains an address of a reference to the newly created glyph run analysis object.
If this method succeeds, it returns
The glyph run analysis object contains the results of analyzing the glyph run, including the positions of all the glyphs and references to all of the rasterized glyphs in the font cache.
+Creates an object that is used for interoperability with GDI.
+An object that encapsulates a set of fonts, such as the set of fonts installed on the system, or the set of fonts in a particular directory. The font collection API can be used to discover what font families and fonts are available, and to obtain some metadata about the fonts.
+The
* pFontCollection = null ; // Get the system font collection. + if (SUCCEEDED(hr)) + { hr = pDWriteFactory->GetSystemFontCollection(&pFontCollection); + } +
To determine what fonts are available on the system, get a reference to the system font collection. You can then use the
#include <dwrite.h> + #include <string.h> + #include <stdio.h> + #include <new> // SafeRelease inline function. + template <class T> inline void SafeRelease(T **ppT) + { if (*ppT) { (*ppT)->Release(); *ppT =+null ; } + } void wmain() + {* pDWriteFactory = null ;hr = ( , __uuidof( ), reinterpret_cast< **>(&pDWriteFactory) ); * pFontCollection = null ; // Get the system font collection. if (SUCCEEDED(hr)) { hr = pDWriteFactory->GetSystemFontCollection(&pFontCollection); } UINT32 familyCount = 0; // Get the number of font families in the collection. if (SUCCEEDED(hr)) { familyCount = pFontCollection->GetFontFamilyCount(); } for (UINT32 i = 0; i < familyCount; ++i) {* pFontFamily = null ; // Get the font family. if (SUCCEEDED(hr)) { hr = pFontCollection->GetFontFamily(i, &pFontFamily); }* pFamilyNames = null ; // Get a list of localized strings for the family name. if (SUCCEEDED(hr)) { hr = pFontFamily->GetFamilyNames(&pFamilyNames); } UINT32 index = 0;exists = false; wchar_t localeName[LOCALE_NAME_MAX_LENGTH]; if (SUCCEEDED(hr)) { // Get the default locale for this user. int defaultLocaleSuccess = GetUserDefaultLocaleName(localeName, LOCALE_NAME_MAX_LENGTH); // If the default locale is returned, find that locale name, otherwise use "en-us". if (defaultLocaleSuccess) { hr = pFamilyNames->FindLocaleName(localeName, &index, &exists); } if (SUCCEEDED(hr) && !exists) // if the above find did not find a match, retry with US English { hr = pFamilyNames->FindLocaleName(L"en-us", &index, &exists); } } // If the specified locale doesn't exist, select the first on the list. if (!exists) index = 0; UINT32 length = 0; // Get the string length. if (SUCCEEDED(hr)) { hr = pFamilyNames->GetStringLength(index, &length); } // Allocate a string big enough to hold the name. wchar_t* name = new (std::nothrow) wchar_t[length+1]; if (name == null ) { hr = E_OUTOFMEMORY; } // Get the family name. 
if (SUCCEEDED(hr)) { hr = pFamilyNames->GetString(index, name, length+1); } if (SUCCEEDED(hr)) { // Print out the family name. wprintf(L"%s\n", name); } SafeRelease(&pFontFamily); SafeRelease(&pFamilyNames); delete [] name; } SafeRelease(&pFontCollection); SafeRelease(&pDWriteFactory); + }
Gets the number of font families in the collection.
+The number of font families in the collection.
Creates a font family object given a zero-based font family index.
+Zero-based index of the font family.
When this method returns, contains the address of a reference to the newly created font family object.
Finds the font family with the specified family name.
+An array of characters, which is null-terminated, containing the name of the font family. The name is not case-sensitive but must otherwise exactly match a family name in the collection.
When this method returns, contains the zero-based index of the matching font family if the family name was found; otherwise, UINT_MAX.
When this method returns, TRUE if the family name exists; otherwise,
Gets the font object that corresponds to the same physical font as the specified font face object. The specified physical font must belong to the font collection.
+A font face object that specifies the physical font.
When this method returns, contains the address of a reference to the newly created font object if successful; otherwise,
Gets the number of font families in the collection.
+Used to construct a collection of fonts given a particular type of key.
+The font collection loader interface is recommended to be implemented by a singleton object. Note that font collection loader implementations must not register themselves with the DirectWrite factory inside their constructors and must not unregister themselves in their destructors, because registration and unregistration operations increment and decrement the object reference count respectively. Instead, registration and unregistration of font collection loaders with the DirectWrite factory should be performed outside of the font collection loader implementation as a separate step.
+Represents an absolute reference to a font face which contains font face type, appropriate file references, face identification data and various font data such as metrics, names and glyph outlines.
+Obtains the file format type of a font face.
+A value that indicates the type of format for the font face (such as Type 1, TrueType, vector, or bitmap).
Obtains the font files representing a font face.
+If fontFiles is
When this method returns, contains a reference to a user-provided array that stores references to font files representing the font face. This parameter can be
If this method succeeds, it returns
The
Then, call the method a second time, passing the numberOfFiles value that was output the first call, and a non-null buffer of the correct size to store the
Obtains the index of a font face in the context of its font files.
+The zero-based index of a font face in cases when the font files contain a collection of font faces. If the font files contain a single face, this value is zero.
Obtains the algorithmic style simulation flags of a font face.
+Font face simulation flags for algorithmic means of making text bold or italic.
Determines whether the font is a symbol font.
+Returns TRUE if the font is a symbol font, otherwise
Obtains design units and common metrics for the font face. These metrics are applicable to all the glyphs within a font face and are used by applications for layout calculations.
+When this method returns, contains a structure that holds metrics (such as ascent, descent, or cap height) for the current font face. The metrics returned by this function are in font design units.
Obtains the number of glyphs in the font face.
+The number of glyphs in the font face.
Obtains ideal (resolution-independent) glyph metrics in font design units.
+An array of glyph indices for which to compute metrics. The array must contain at least as many elements as specified by glyphCount.
The number of elements in the glyphIndices array.
When this method returns, contains an array of
Indicates whether the font is being used in a sideways run. This can affect the glyph metrics if the font has oblique simulation because sideways oblique simulation differs from non-sideways oblique simulation
If this method succeeds, it returns
Design glyph metrics are used for glyph positioning.
+Returns the nominal mapping of UCS4 Unicode code points to glyph indices as defined by the font 'CMAP' table.
+An array of UCS4 code points from which to obtain nominal glyph indices. The array must be allocated and be able to contain the number of elements specified by codePointCount.
The number of elements in the codePoints array.
When this method returns, contains a reference to an array of nominal glyph indices filled by this function.
If this method succeeds, it returns
Note that this mapping is primarily provided for line layout engines built on top of the physical font API. Because of OpenType glyph substitution and line layout character substitution, the nominal conversion does not always correspond to how a Unicode string will map to glyph indices when rendering using a particular font face. Also, note that Unicode variant selectors provide for alternate mappings for character to glyph. This call will always return the default variant.
When characters are not present in the font this method returns the index 0, which is the undefined glyph or ".notdef" glyph. If a character isn't in a font,
Finds the specified OpenType font table if it exists and returns a reference to it. The function accesses the underlying font data through the
If this method succeeds, it returns
The context for the same tag may be different for each call, so each one must be held and released separately.
+Releases the table obtained earlier from TryGetFontTable.
+Computes the outline of a run of glyphs by calling back to the outline sink interface.
+The logical size of the font in DIP units. A DIP ("device-independent pixel") equals 1/96 inch.
An array of glyph indices. The glyphs are in logical order and the advance direction depends on the isRightToLeft parameter. The array must be allocated and be able to contain the number of elements specified by glyphCount.
An optional array of glyph advances in DIPs. The advance of a glyph is the amount to advance the position (in the direction of the baseline) after drawing the glyph. glyphAdvances contains the number of elements specified by glyphCount.
An optional array of glyph offsets, each of which specifies the offset along the baseline and offset perpendicular to the baseline of a glyph relative to the current pen position. glyphOffsets contains the number of elements specified by glyphCount.
The number of glyphs in the run.
If TRUE, the ascender of the glyph runs alongside the baseline. If
A client can render a vertical run by setting isSideways to TRUE and rotating the resulting geometry 90 degrees to the right using a transform. The isSideways and isRightToLeft parameters cannot both be true.
The visual order of the glyphs. If this parameter is
A reference to the interface that is called back to perform outline drawing operations.
If this method succeeds, it returns
Determines the recommended rendering mode for the font, using the specified size and rendering parameters.
+The logical size of the font in DIP units. A DIP ("device-independent pixel") equals 1/96 inch.
The number of physical pixels per DIP. For example, if the DPI of the rendering surface is 96, this value is 1.0f. If the DPI is 120, this value is 120.0f/96.
The measuring method that will be used for glyphs in the font. Renderer implementations may choose different rendering modes for different measuring methods, for example:
A reference to an object that contains rendering settings such as gamma level, enhanced contrast, and ClearType level. This parameter is necessary in case the rendering parameters object overrides the rendering mode.
When this method returns, contains a value that indicates the recommended rendering mode to use.
Obtains design units and common metrics for the font face. These metrics are applicable to all the glyphs within a font face and are used by applications for layout calculations.
+The logical size of the font in DIP units.
The number of physical pixels per DIP.
An optional transform applied to the glyphs and their positions. This transform is applied after the scaling specified by the font size and pixelsPerDip.
A reference to a DWRITE_FONT_METRICS structure to fill in. The metrics returned by this function are in font design units.
Obtains glyph metrics in font design units with the return values compatible with what GDI would produce.
+The logical size of the font in DIP units.
The number of physical pixels per DIP.
An optional transform applied to the glyphs and their positions. This transform is applied after the scaling specified by the font size and pixelsPerDip.
When set to
An array of glyph indices for which to compute the metrics.
The number of elements in the glyphIndices array.
An array of
A
Standard
Obtains the file format type of a font face.
+Obtains the index of a font face in the context of its font files.
+Obtains the algorithmic style simulation flags of a font face.
+Determines whether the font is a symbol font.
+Obtains design units and common metrics for the font face. These metrics are applicable to all the glyphs within a font face and are used by applications for layout calculations.
+Obtains the number of glyphs in the font face.
+Specifies properties used to identify and execute typographic features in the current font face.
+A non-zero value generally enables the feature execution, while the zero value disables it. A feature requiring a selector uses this value to indicate the selector index.
The OpenType standard provides access to typographic features available in the font by means of a feature tag with the associated parameters. The OpenType feature tag is a 4-byte identifier of the registered name of a feature. For example, the 'kern' feature name tag is used to identify the 'Kerning' feature in OpenType font. Similarly, the OpenType feature tag for 'Standard Ligatures' and 'Fractions' is 'liga' and 'frac' respectively. Since a single run can be associated with more than one typographic feature, the Text String API accepts typographic settings for a run as a list of features, which are executed in the order they are specified.
The value of the tag member represents the OpenType name tag of the feature, while the param value represents additional parameter for the execution of the feature referred by the tag member. Both nameTag and parameter are stored as little endian, the same convention followed by GDI. Most features treat the Param value as a binary value that indicates whether to turn the execution of the feature on or off, with it being off by default in the majority of cases. Some features, however, treat this value as an integral value representing the integer index to the list of alternate results it may produce during the execution; for instance, the feature 'Stylistic Alternates' or 'salt' uses the parameter value as an index to the list of alternate substituting glyphs it could produce for a specified glyph.
+The feature OpenType name identifier.
The execution parameter of the feature.
Represents a font file. Applications such as font managers or font viewers can call
Obtains the reference to the reference key of a font file. The returned reference is valid until the font file object is released.
+When this method returns, contains an address of a reference to the font file reference key. Note that the reference value is only valid until the font file object it is obtained from is released. This parameter is passed uninitialized.
When this method returns, contains the size of the font file reference key in bytes. This parameter is passed uninitialized.
If this method succeeds, it returns
Obtains the file loader associated with a font file object.
+When this method returns, contains the address of a reference to the font file loader associated with the font file object.
If this method succeeds, it returns
Analyzes a file and returns whether it represents a font, and whether the font type is supported by the font system.
+TRUE if the font type is supported by the font system; otherwise,
When this method returns, contains a value that indicates the type of the font file. Note that even if isSupportedFontType is
When this method returns, contains a value that indicates the type of the font face. If fontFileType is not equal to
When this method returns, contains the number of font faces contained in the font file.
If this method succeeds, it returns
Important: Certain font file types are recognized, but not supported by the font system. For example, the font system will recognize a file as a Type 1 font file but will not be able to construct a font face object from it. In such situations, Analyze will set the isSupportedFontType output parameter to FALSE.
Encapsulates a collection of font files. The font system uses this interface to enumerate font files when building a font collection.
+Loads font file data from a custom font file loader.
+Loads font file data from a custom font file loader.
+Reads a fragment from a font file.
+When this method returns, contains an address of a reference to the start of the font file fragment. This parameter is passed uninitialized.
The offset of the fragment, in bytes, from the beginning of the font file.
The size of the file fragment, in bytes.
When this method returns, contains the address of a reference to a reference to the client-defined context to be passed to ReleaseFileFragment.
If this method succeeds, it returns
Note that ReadFileFragment implementations must check whether the requested font file fragment is within the file bounds. Otherwise, an error should be returned from ReadFileFragment.
DirectWrite may invoke
Releases a fragment from a file.
+A reference to the client-defined context of a font fragment returned from ReadFileFragment.
Obtains the total size of a file.
+When this method returns, contains the total size of the file.
If this method succeeds, it returns
Implementing GetFileSize() for asynchronously loaded font files may require downloading the complete file contents. Therefore, this method should be used only for operations that either require a complete font file to be loaded (for example, copying a font file) or that need to make decisions based on the value of the file size (for example, validation against a persisted file size).
+Obtains the last modified time of the file.
+When this method returns, contains the last modified time of the file in the format that represents the number of 100-nanosecond intervals since January 1, 1601 (UTC).
If this method succeeds, it returns
The "last modified time" is used by DirectWrite font selection algorithms to determine whether one font resource is more up to date than another one.
+Provides interoperability with GDI, such as methods to convert a font face to a
Creates a font object that matches the properties specified by the
A structure containing a GDI-compatible font description.
When this method returns, contains an address of a reference to a newly created
If this method succeeds, it returns
Initializes a
An
When this method returns, contains a structure that receives a GDI-compatible font description.
When this method returns, contains TRUE if the specified font object is part of the system font collection; otherwise,
If this method succeeds, it returns
The conversion to a
Initializes a
An
When this method returns, contains a reference to a structure that receives a GDI-compatible font description.
If this method succeeds, it returns
The conversion to a
Creates an
A handle to a device context into which a font has been selected. It is assumed that the client has already performed font mapping and that the font selected into the device context is the actual font to be used for rendering glyphs.
Contains an address of a reference to the newly created font face object, or
This function is intended for scenarios in which an application wants to use GDI and Uniscribe 1.x for text layout and shaping, but DirectWrite for final rendering. This function assumes the client is performing text output using glyph indexes.
+Creates an object that encapsulates a bitmap and memory DC (device context) which can be used for rendering glyphs.
+A handle to the optional device context used to create a compatible memory DC (device context).
The width of the bitmap render target.
The height of the bitmap render target.
When this method returns, contains an address of a reference to the newly created
Contains the information needed by renderers to draw glyph runs. All coordinates are in device independent pixels (DIPs).
+The physical font face object to draw with.
The logical size of the font in DIPs (equals 1/96 inch), not points.
The number of glyphs in the glyph run.
A reference to an array of indices to render for the glyph run.
A reference to an array containing glyph advance widths for the glyph run.
A reference to an array containing glyph offsets for the glyph run.
If true, specifies that glyphs are rotated 90 degrees to the left and vertical metrics are used. Vertical writing is achieved by specifying isSideways = true and rotating the entire run 90 degrees to the right via a rotate transform.
The implicit resolved bidi level of the run. Odd levels indicate right-to-left languages like Hebrew and Arabic, while even levels indicate left-to-right languages like English and Japanese (when written horizontally). For right-to-left languages, the text origin is on the right, and text should be drawn to the left.
Contains low-level information used to render a glyph run.
+The alpha texture can be a bi-level alpha texture or a ClearType alpha texture.
A bi-level alpha texture contains one byte per pixel, therefore the size of the buffer for a bi-level texture will be the area of the texture bounds, in bytes. Each byte in a bi-level alpha texture created by CreateAlphaTexture is either set to DWRITE_ALPHA_MAX (that is, 255) or zero.
A ClearType alpha texture contains three bytes per pixel, therefore the size of the buffer for a ClearType alpha texture is three times the area of the texture bounds, in bytes.
+Gets the bounding rectangle of the physical pixels affected by the glyph run.
+Specifies the type of texture requested. If a bi-level texture is requested, the bounding rectangle includes only bi-level glyphs. Otherwise, the bounding rectangle includes only antialiased glyphs.
When this method returns, contains the bounding rectangle of the physical pixels affected by the glyph run, or an empty rectangle if there are no glyphs of the specified texture type.
Creates an alpha texture of the specified type for glyphs within a specified bounding rectangle.
+A value that specifies the type of texture requested. This can be DWRITE_TEXTURE_BILEVEL_1x1 or
The bounding rectangle of the texture, which can be different than the bounding rectangle returned by GetAlphaTextureBounds.
When this method returns, contains the array of alpha values from the texture. The buffer allocated for this array must be at least the size of bufferSize.
The size of the alphaValues array, in bytes. The minimum size depends on the dimensions of the rectangle and the type of texture requested.
If this method succeeds, it returns
Gets alpha blending properties required for ClearType blending.
+An object that specifies the ClearType level and enhanced contrast, gamma, pixel geometry, and rendering mode. In most cases, the values returned by the output parameters of this method are based on the properties of this object, unless a GDI-compatible rendering mode was specified.
When this method returns, contains the gamma value to use for gamma correction.
When this method returns, contains the enhanced contrast value to be used for blending.
When this method returns, contains the ClearType level used in the alpha blending.
If this method succeeds, it returns
Contains additional properties related to those in
Line breakpoint characteristics of a character.
+Indicates a breaking condition before the character.
Indicates a breaking condition after the character.
Indicates that the character is some form of whitespace, which may be meaningful for justification.
Indicates that the character is a soft hyphen, often used to indicate hyphenation points inside words.
Reserved for future use.
Represents a collection of strings indexed by locale name.
+The set of strings represented by an
A common use for the
+* pFamilyNames = null ; // Get a list of localized strings for the family name. + if (SUCCEEDED(hr)) + { hr = pFontFamily->GetFamilyNames(&pFamilyNames); + } UINT32 index = 0; +exists = false; wchar_t localeName[LOCALE_NAME_MAX_LENGTH]; if (SUCCEEDED(hr)) + { // Get the default locale for this user. int defaultLocaleSuccess = GetUserDefaultLocaleName(localeName, LOCALE_NAME_MAX_LENGTH); // If the default locale is returned, find that locale name, otherwise use "en-us". if (defaultLocaleSuccess) { hr = pFamilyNames->FindLocaleName(localeName, &index, &exists); } if (SUCCEEDED(hr) && !exists) // if the above find did not find a match, retry with US English { hr = pFamilyNames->FindLocaleName(L"en-us", &index, &exists); } + } // If the specified locale doesn't exist, select the first on the list. + if (!exists) index = 0; UINT32 length = 0; // Get the string length. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetStringLength(index, &length); + } // Allocate a string big enough to hold the name. + wchar_t* name = new (std::nothrow) wchar_t[length+1]; + if (name == null ) + { hr = E_OUTOFMEMORY; + } // Get the family name. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetString(index, name, length+1); + } +
Gets the number of language/string pairs.
+The number of language/string pairs.
Gets the zero-based index of the locale name/string pair with the specified locale name.
+A null-terminated array of characters containing the locale name to look for.
The zero-based index of the locale name/string pair. This method initializes index to UINT_MAX.
When this method returns, contains TRUE if the locale name exists; otherwise,
Note that if the locale name does not exist, the return value is a success and the exists parameter is
UINT32 index = 0;
BOOL exists = false;
wchar_t localeName[LOCALE_NAME_MAX_LENGTH];

if (SUCCEEDED(hr))
{
    // Get the default locale for this user.
    int defaultLocaleSuccess = GetUserDefaultLocaleName(localeName, LOCALE_NAME_MAX_LENGTH);

    // If the default locale is returned, find that locale name, otherwise use "en-us".
    if (defaultLocaleSuccess)
    {
        hr = pFamilyNames->FindLocaleName(localeName, &index, &exists);
    }
    if (SUCCEEDED(hr) && !exists) // if the above find did not find a match, retry with US English
    {
        hr = pFamilyNames->FindLocaleName(L"en-us", &index, &exists);
    }
}

// If the specified locale doesn't exist, select the first on the list.
if (!exists)
    index = 0;
Gets the length in characters (not including the null terminator) of the locale name with the specified index.
+Zero-based index of the locale name to be retrieved.
When this method returns, contains the length in characters of the locale name, not including the null terminator.
If this method succeeds, it returns
Copies the locale name with the specified index to the specified array.
+Zero-based index of the locale name to be retrieved.
When this method returns, contains a character array, which is null-terminated, that receives the locale name from the language/string pair. The buffer allocated for this array must be at least the size of size, in element count.
The size of the array in characters. The size must include space for the terminating null character.
If this method succeeds, it returns
Gets the length in characters (not including the null terminator) of the string with the specified index.
+A zero-based index of the language/string pair.
The length in characters of the string, not including the null terminator, from the language/string pair.
If this method succeeds, it returns
Use GetStringLength to get the string length before calling the
UINT32 length = 0;

// Get the string length.
if (SUCCEEDED(hr))
{
    hr = pFamilyNames->GetStringLength(index, &length);
}

// Allocate a string big enough to hold the name.
wchar_t* name = new (std::nothrow) wchar_t[length+1];
if (name == NULL)
{
    hr = E_OUTOFMEMORY;
}

// Get the family name.
if (SUCCEEDED(hr))
{
    hr = pFamilyNames->GetString(index, name, length+1);
}
Copies the string with the specified index to the specified array.
+The zero-based index of the language/string pair to be examined.
The null terminated array of characters that receives the string from the language/string pair. The buffer allocated for this array should be at least the size of size. GetStringLength can be used to get the size of the array before using this method.
The size of the array in characters. The size must include space for the terminating null character. GetStringLength can be used to get the size of the array before using this method.
If this method succeeds, it returns
The string returned must be allocated by the caller. You can get the size of the string by using the GetStringLength method prior to calling GetString, as shown in the following example.
UINT32 length = 0;

// Get the string length.
if (SUCCEEDED(hr))
{
    hr = pFamilyNames->GetStringLength(index, &length);
}

// Allocate a string big enough to hold the name.
wchar_t* name = new (std::nothrow) wchar_t[length+1];
if (name == NULL)
{
    hr = E_OUTOFMEMORY;
}

// Get the family name.
if (SUCCEEDED(hr))
{
    hr = pFamilyNames->GetString(index, name, length+1);
}
Gets the number of language/string pairs.
+Holds the appropriate digits and numeric punctuation for a specified locale.
+Defines the pixel snapping properties such as pixels per DIP(device-independent pixel) and the current transform matrix of a text renderer.
+Represents text rendering settings such as ClearType level, enhanced contrast, and gamma correction for glyph rasterization and filtering.
An application typically obtains a rendering parameters object by calling the
Gets the gamma value used for gamma correction. Valid values must be greater than zero and cannot exceed 256.
+Returns the gamma value used for gamma correction. Valid values must be greater than zero and cannot exceed 256.
The gamma value is used for gamma correction, which compensates for the non-linear luminosity response of most monitors.
+Gets the enhanced contrast property of the rendering parameters object. Valid values are greater than or equal to zero.
+Returns the amount of contrast enhancement. Valid values are greater than or equal to zero.
Enhanced contrast is the amount to increase the darkness of text, and typically ranges from 0 to 1. Zero means no contrast enhancement.
+Gets the ClearType level of the rendering parameters object.
+The ClearType level of the rendering parameters object.
The ClearType level represents the amount of ClearType — that is, the degree to which the red, green, and blue subpixels of each pixel are treated differently. Valid values range from zero (meaning no ClearType, which is equivalent to grayscale anti-aliasing) to one (meaning full ClearType).
+Gets the pixel geometry of the rendering parameters object.
+A value that indicates the type of pixel geometry used in the rendering parameters object.
Gets the rendering mode of the rendering parameters object.
+A value that indicates the rendering mode of the rendering parameters object.
By default, the rendering mode is initialized to
Gets the gamma value used for gamma correction. Valid values must be greater than zero and cannot exceed 256.
+The gamma value is used for gamma correction, which compensates for the non-linear luminosity response of most monitors.
+Gets the enhanced contrast property of the rendering parameters object. Valid values are greater than or equal to zero.
+Enhanced contrast is the amount to increase the darkness of text, and typically ranges from 0 to 1. Zero means no contrast enhancement.
+Gets the ClearType level of the rendering parameters object.
+The ClearType level represents the amount of ClearType — that is, the degree to which the red, green, and blue subpixels of each pixel are treated differently. Valid values range from zero (meaning no ClearType, which is equivalent to grayscale anti-aliasing) to one (meaning full ClearType).
+Gets the pixel geometry of the rendering parameters object.
+Gets the rendering mode of the rendering parameters object.
+By default, the rendering mode is initialized to
Contains shaping output properties for an output glyph.
+Indicates that the glyph has justification applied.
Indicates that the glyph is the start of a cluster.
Indicates that the glyph is a diacritic mark.
Indicates that the glyph is a word boundary with no visible space.
Reserved for future use.
This interface is implemented by the text analyzer's client to receive the output of a given text analysis.
+The text analyzer disregards any current state of the analysis sink, therefore, a Set method call on a range overwrites the previously set analysis result of the same range.
+Implemented by the text analyzer's client to provide text to the analyzer. It allows the separation between the logical view of text as a continuous stream of characters identifiable by unique text positions, and the actual memory layout of potentially discrete blocks of text in the client's backing store.
+If any of these callbacks returns an error, then the analysis functions will stop prematurely and return a callback error. Note that rather than return E_NOTIMPL, an application should stub the method and return a constant/null and
Analyzes various text properties for complex script processing such as bidirectional (bidi) support for languages like Arabic, determination of line break opportunities, glyph placement, and number substitution.
+Analyzes a text range for script boundaries, reading text attributes from the source and reporting the Unicode script ID to the sink callback SetScript.
+If this method succeeds, it returns
Analyzes a text range for script directionality, reading attributes from the source and reporting levels to the sink callback SetBidiLevel.
+If this method succeeds, it returns
While the function can handle multiple paragraphs, the text range should not arbitrarily split the middle of paragraphs. Otherwise, the returned levels may be wrong, because the Bidi algorithm is meant to apply to the paragraph as a whole.
+Analyzes a text range for spans where number substitution is applicable, reading attributes from the source and reporting substitutable ranges to the sink callback SetNumberSubstitution.
+If this method succeeds, it returns
Although the function can handle multiple ranges of differing number substitutions, the text ranges should not arbitrarily split the middle of numbers. Otherwise, it will treat the numbers separately and will not translate any intervening punctuation.
+Analyzes a text range for potential breakpoint opportunities, reading attributes from the source and reporting breakpoint opportunities to the sink callback SetLineBreakpoints.
+If this method succeeds, it returns
Although the function can handle multiple paragraphs, the text range should not arbitrarily split the middle of paragraphs, unless the specified text span is considered a whole unit. Otherwise, the returned properties for the first and last characters will inappropriately allow breaks.
+Parses the input text string and maps it to the set of glyphs and associated glyph data according to the font and the writing system's rendering rules.
+An array of characters to convert to glyphs.
The length of textString.
The font face that is the source of the output glyphs.
A Boolean flag set to TRUE if the text is intended to be drawn vertically.
A Boolean flag set to TRUE for right-to-left text.
A reference to a Script analysis result from an AnalyzeScript call.
The locale to use when selecting glyphs. For example the same character may map to different glyphs for ja-jp versus zh-chs. If this is
A reference to an optional number substitution which selects the appropriate glyphs for digits and related numeric characters, depending on the results obtained from AnalyzeNumberSubstitution. Passing
An array of references to the sets of typographic features to use in each feature range.
The length of each feature range, in characters. The sum of all lengths should be equal to textLength.
The number of feature ranges.
The maximum number of glyphs that can be returned.
When this method returns, contains the mapping from character ranges to glyph ranges.
When this method returns, contains a reference to an array of structures that contains shaping properties for each character.
The output glyph indices.
When this method returns, contains a reference to an array of structures that contain shaping properties for each output glyph.
When this method returns, contains the actual number of glyphs returned if the call succeeds.
If this method succeeds, it returns
Note that the mapping from characters to glyphs is, in general, many-to-many. The recommended estimate for the per-glyph output buffers is (3 * textLength / 2 + 16). This is not guaranteed to be sufficient. The value of the actualGlyphCount parameter is only valid if the call succeeds. In the event that maxGlyphCount is not big enough, HRESULT_FROM_WIN32(
Places glyphs output from the GetGlyphs method according to the font and the writing system's rendering rules.
+If this method succeeds, it returns
Place glyphs output from the GetGlyphs method according to the font and the writing system's rendering rules.
+If this method succeeds, it returns
The
To get a reference to the
if (SUCCEEDED(hr))
{
    hr = pDWriteFactory_->CreateTextFormat(
        L"Gabriola",                  // Font family name.
        NULL,                         // Font collection (NULL sets it to use the system font collection).
        DWRITE_FONT_WEIGHT_REGULAR,
        DWRITE_FONT_STYLE_NORMAL,
        DWRITE_FONT_STRETCH_NORMAL,
        72.0f,
        L"en-us",
        &pTextFormat_
        );
}
When creating an
These properties cannot be changed after the
The
To draw text with multiple formats, or to use a custom text renderer, use the
This object may not be thread-safe, and it may carry the state of text format change.
+Sets trimming options for text overflowing the layout width.
+Text trimming options.
Application-defined omission sign. This parameter may be
If this method succeeds, it returns
Sets the alignment of text in a paragraph, relative to the leading and trailing edge of a layout box for a
This method can return one of these values.
Return code | Description |
---|---|
| The method succeeded. |
| The textAlignment argument is invalid. |
?
The text can be aligned to the leading or trailing edge of the layout box, or it can be centered. The following illustration shows text with the alignment set to
Note: The alignment is dependent on reading direction; the above is for left-to-right reading direction. For right-to-left reading direction it would be the opposite.
See
Sets the alignment option of a paragraph relative to the layout box's top and bottom edge.
+The paragraph alignment option being set for a paragraph; see
If this method succeeds, it returns
Sets the word wrapping option.
+The word wrapping option being set for a paragraph; see
If this method succeeds, it returns
Sets the paragraph reading direction.
+The text reading direction (for example,
If this method succeeds, it returns
Sets the paragraph flow direction.
+The paragraph flow direction; see
If this method succeeds, it returns
Sets a fixed distance between two adjacent tab stops.
+The fixed distance between two adjacent tab stops.
If this method succeeds, it returns
Sets trimming options for text overflowing the layout width.
+Text trimming options.
Application-defined omission sign. This parameter may be
If this method succeeds, it returns
Sets the line spacing.
+Specifies how line height is being determined; see
The line height, or distance between one baseline to another.
The distance from top of line to baseline. A reasonable ratio to lineSpacing is 80 percent.
If this method succeeds, it returns
For the default method, spacing depends solely on the content. For uniform spacing, the specified line height overrides the content.
+Gets the alignment option of text relative to the layout box's leading and trailing edge.
+Returns the text alignment option of the current paragraph.
Gets the alignment option of a paragraph which is relative to the top and bottom edges of a layout box.
+A value that indicates the current paragraph alignment option.
Gets the word wrapping option.
+Returns the word wrapping option; see
Gets the current reading direction for text in a paragraph.
+A value that indicates the current reading direction for text in a paragraph.
Gets the direction that text lines flow.
+The direction that text lines flow within their parent container. For example,
Gets the incremental tab stop position.
+The incremental tab stop value.
Gets the trimming options for text that overflows the layout box.
+When this method returns, it contains a reference to a
When this method returns, contains an address of a reference to a trimming omission sign. This parameter may be
If this method succeeds, it returns
Gets the line spacing adjustment set for a multiline text paragraph.
+A value that indicates how line height is determined.
When this method returns, contains the line height, or distance between one baseline to another.
When this method returns, contains the distance from top of line to baseline. A reasonable ratio to lineSpacing is 80 percent.
If this method succeeds, it returns
Gets the current font collection.
+When this method returns, contains an address of a reference to the font collection being used for the current text.
If this method succeeds, it returns
Gets the length of the font family name.
+The size of the character array, in character count, not including the terminated
Gets a copy of the font family name.
+When this method returns, contains a reference to a character array, which is null-terminated, that receives the current font family name. The buffer allocated for this array should be at least the size, in elements, of nameSize.
The size of the fontFamilyName character array, in character count, including the terminated
If this method succeeds, it returns
Gets the font weight of the text.
+A value that indicates the type of weight (such as normal, bold, or black).
Gets the font style of the text.
+A value which indicates the type of font style (such as slope or incline).
Gets the font stretch of the text.
+A value which indicates the type of font stretch (such as normal or condensed).
Gets the font size in DIP units.
+The current font size in DIP units.
Gets the length of the locale name.
+The size of the character array in character count, not including the terminated
Gets a copy of the locale name.
+Contains a character array that receives the current locale name.
The size of the character array, in character count, including the terminated
If this method succeeds, it returns
Gets or sets the alignment option of text relative to the layout box's leading and trailing edge.
+Gets or sets the alignment option of a paragraph which is relative to the top and bottom edges of a layout box.
+Gets or sets the word wrapping option.
+Gets or sets the current reading direction for text in a paragraph.
+Gets or sets the direction that text lines flow.
+Gets or sets the incremental tab stop position.
+Gets the current font collection.
+Gets the font weight of the text.
+Gets the font style of the text.
+Gets the font stretch of the text.
+Gets the font size in DIP units.
+The
To get a reference to the
// Create a text layout using the text format.
if (SUCCEEDED(hr))
{
    RECT rect;
    GetClientRect(hwnd_, &rect);
    float width  = rect.right / dpiScaleX_;
    float height = rect.bottom / dpiScaleY_;

    hr = pDWriteFactory_->CreateTextLayout(
        wszText_,      // The string to be laid out and formatted.
        cTextLength_,  // The length of the string.
        pTextFormat_,  // The text format to apply to the string (contains font information, etc).
        width,         // The width of the layout box.
        height,        // The height of the layout box.
        &pTextLayout_  // The interface reference.
        );
}
The
// Set the font weight to bold for the first 5 letters.
DWRITE_TEXT_RANGE textRange = {0, 4};
if (SUCCEEDED(hr))
{
    hr = pTextLayout_->SetFontWeight(DWRITE_FONT_WEIGHT_BOLD, textRange);
}
To draw the block of text represented by an
Sets the layout maximum width.
+A value that indicates the maximum width of the layout box.
If this method succeeds, it returns
Sets the layout maximum height.
+A value that indicates the maximum height of the layout box.
If this method succeeds, it returns
Sets the font collection.
+The font collection to set.
Text range to which this change applies.
If this method succeeds, it returns
Sets null-terminated font family name for text within a specified text range.
+The font family name that applies to the entire text string within the range specified by textRange.
Text range to which this change applies.
If this method succeeds, it returns
Sets the font weight for text within a text range specified by a
If this method succeeds, it returns
The font weight can be set to one of the predefined font weight values provided in the
The following illustration shows an example of Normal and UltraBold weights for the Palatino Linotype typeface.
+ Sets the font style for text within a text range specified by a
If this method succeeds, it returns
The font style can be set to Normal, Italic or Oblique. The following illustration shows three styles for the Palatino font. For more information, see
Sets the font stretch for text within a specified text range.
+A value which indicates the type of font stretch for text within the range specified by textRange.
Text range to which this change applies.
If this method succeeds, it returns
Sets the font size in DIP units for text within a specified text range.
+The font size in DIP units to be set for text in the range specified by textRange.
Text range to which this change applies.
If this method succeeds, it returns
Sets underlining for text within a specified text range.
+A Boolean flag that indicates whether underline takes place within a specified text range.
Text range to which this change applies.
If this method succeeds, it returns
Sets strikethrough for text within a specified text range.
+A Boolean flag that indicates whether strikethrough takes place in the range specified by textRange.
Text range to which this change applies.
If this method succeeds, it returns
Sets the application-defined drawing effect.
+Application-defined drawing effects that apply to the range. This data object will be passed back to the application's drawing callbacks for final rendering.
The text range to which this change applies.
If this method succeeds, it returns
An
This drawing effect is associated with the specified range and will be passed back to the application by way of the callback when the range is drawn at drawing time.
+Sets the inline object.
+An application-defined inline object.
Text range to which this change applies.
If this method succeeds, it returns
The application may call this function to specify the set of properties describing an application-defined inline object for specific range.
This inline object applies to the specified range and will be passed back to the application by way of the DrawInlineObject callback when the range is drawn. Any text in that range will be suppressed.
+Sets font typography features for text within a specified text range.
+Pointer to font typography settings.
Text range to which this change applies.
If this method succeeds, it returns
Sets the locale name for text within a specified text range.
+A null-terminated locale name string.
Text range to which this change applies.
If this method succeeds, it returns
Gets the layout maximum width.
+Returns the layout maximum width.
Gets the layout maximum height.
+The layout maximum height.
Gets the font collection associated with the text at the specified position.
+The position of the text to inspect.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the underline.
Contains an address of a reference to the current font collection.
Get the length of the font family name at the current position.
+The current text position.
When this method returns, contains the size of the character array containing the font family name, in character count, not including the terminated
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the font family.
If this method succeeds, it returns
Copies the font family name of the text at the specified position.
+The position of the text to examine.
When this method returns, contains an array of characters that receives the current font family name. You must allocate storage for this parameter.
The size of the character array in character count including the terminated
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the font family name.
If this method succeeds, it returns
Gets the font weight of the text at the specified position.
+The position of the text to inspect.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the font weight.
When this method returns, contains a value which indicates the type of font weight being applied at the specified position.
Gets the font style (also known as slope) of the text at the specified position.
+The position of the text to inspect.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the font style.
When this method returns, contains a value which indicates the type of font style (also known as slope or incline) being applied at the specified position.
Gets the font stretch of the text at the specified position.
+The position of the text to inspect.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the font stretch.
When this method returns, contains a value which indicates the type of font stretch (also known as width) being applied at the specified position.
Gets the font em height of the text at the specified position.
+The position of the text to inspect.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the font size.
When this method returns, contains the size of the font in ems of the text at the specified position.
Gets the underline presence of the text at the specified position.
+The current text position.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the underline.
A Boolean flag that indicates whether underline is present at the position indicated by currentPosition.
Get the strikethrough presence of the text at the specified position.
+The position of the text to inspect.
Contains the range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to strikethrough.
A Boolean flag that indicates whether strikethrough is present at the position indicated by currentPosition.
Gets the application-defined drawing effect at the specified text position.
+The position of the text whose drawing effect is to be retrieved.
Contains the range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the drawing effect.
When this method returns, contains an address of a reference to the current application-defined drawing effect. Usually this effect is a foreground brush that is used in glyph drawing.
Gets the inline object at the specified position.
+The specified text position.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the inline object.
Contains the application-defined inline object.
Gets the typography setting of the text at the specified position.
+The position of the text to inspect.
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the typography.
When this method returns, contains an address of a reference to the current typography setting.
Gets the length of the locale name of the text at the specified position.
+The position of the text to inspect.
Size of the character array, in character count, not including the terminated
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the locale name.
If this method succeeds, it returns
Gets the locale name of the text at the specified position.
+The position of the text to inspect.
When this method returns, contains the character array receiving the current locale name.
Size of the character array, in character count, including the terminated
The range of text that has the same formatting as the text at the position specified by currentPosition. This means the run has the exact formatting as the position specified, including but not limited to the locale name.
If this method succeeds, it returns
Draws text using the specified client drawing context.
+An application-defined drawing context.
Pointer to the set of callback functions used to draw parts of a text string.
The x-coordinate of the layout's left side.
The y-coordinate of the layout's top side.
If this method succeeds, it returns
To draw text with this method, a textLayout object needs to be created by the application using
After the textLayout object is obtained, the application calls the
Retrieves the information about each individual text line of the text string.
+When this method returns, contains a reference to an array of structures containing various calculated length values of individual text lines.
The maximum size of the lineMetrics array.
When this method returns, contains the actual size of the lineMetrics array that is needed.
If this method succeeds, it returns
If maxLineCount is not large enough, the method returns E_NOT_SUFFICIENT_BUFFER, which is equivalent to HRESULT_FROM_WIN32(
Retrieves overall metrics for the formatted string.
+When this method returns, contains the measured distances of text and associated content after being formatted.
If this method succeeds, it returns
Returns the overhangs (in DIPs) of the layout and all objects contained in it, including text glyphs and inline objects.
+Overshoots of visible extents (in DIPs) outside the layout.
If this method succeeds, it returns
Underlines and strikethroughs do not contribute to the black box determination, since these are actually drawn by the renderer, which is allowed to draw them in any variety of styles.
+Retrieves logical properties and measurements of each glyph cluster.
+When this method returns, contains metrics, such as line-break or total advance width, for a glyph cluster.
The maximum size of the clusterMetrics array.
When this method returns, contains the actual size of the clusterMetrics array that is needed.
If this method succeeds, it returns
If maxClusterCount is not large enough, then E_NOT_SUFFICIENT_BUFFER, which is equivalent to HRESULT_FROM_WIN32(
Determines the minimum possible width the layout can be set to without emergency breaking between the characters of whole words occurring.
+Minimum width.
The application calls this function passing in a specific pixel location relative to the top-left location of the layout box and obtains the information about the correspondent hit-test metrics of the text string where the hit-test has occurred. When the specified pixel location is outside the text string, the function sets the output value *isInside to
The pixel location X to hit-test, relative to the top-left location of the layout box.
The pixel location Y to hit-test, relative to the top-left location of the layout box.
An output flag that indicates whether the hit-test location is at the leading or the trailing side of the character. When the output *isInside value is set to
An output flag that indicates whether the hit-test location is inside the text string. When
The output geometry fully enclosing the hit-test location. When the output *isInside value is set to
The application calls this function to get the pixel location relative to the top-left of the layout box given the text position and the logical side of the position. This function is normally used as part of caret positioning of text where the caret is drawn at the location corresponding to the current text editing position. It may also be used as a way to programmatically obtain the geometry of a particular text position in UI automation.
+The text position used to get the pixel location.
A Boolean flag that indicates whether the pixel location is of the leading or the trailing side of the specified text position.
When this method returns, contains the output pixel location X, relative to the top-left location of the layout box.
When this method returns, contains the output pixel location Y, relative to the top-left location of the layout box.
When this method returns, contains the output geometry fully enclosing the specified text position.
The application calls this function to get a set of hit-test metrics corresponding to a range of text positions. One of the main usages is to implement highlight selection of the text string. The function returns E_NOT_SUFFICIENT_BUFFER, which is equivalent to HRESULT_FROM_WIN32(
If this method succeeds, it returns
Gets or sets the layout maximum width.
+Gets or sets the layout maximum height.
+Retrieves overall metrics for the formatted string.
+Returns the overhangs (in DIPs) of the layout and all objects contained in it, including text glyphs and inline objects.
+Underlines and strikethroughs do not contribute to the black box determination, since these are actually drawn by the renderer, which is allowed to draw them in any variety of styles.
+Specifies a range of text positions where format is applied in the text represented by an
Represents a set of application-defined callbacks that perform rendering of text, inline objects, and decorations such as underlines.
+Represents a bitmap that has been bound to an
Returns the size, in device-independent pixels (DIPs), of the bitmap.
+The size, in DIPs, of the bitmap.
A DIP is 1/96 of an inch. To retrieve the size in device pixels, use the
Returns the size, in device-dependent units (pixels), of the bitmap.
+The size, in pixels, of the bitmap.
Retrieves the pixel format and alpha mode of the bitmap.
+The pixel format and alpha mode of the bitmap.
Return the dots per inch (DPI) of the bitmap.
+The horizontal DPI of the image. You must allocate storage for this parameter.
The vertical DPI of the image. You must allocate storage for this parameter.
Copies the specified region from the specified bitmap into the current bitmap.
+In the current bitmap, the upper-left corner of the area to which the region specified by srcRect is copied.
The bitmap to copy from.
The area of bitmap to copy.
If this method succeeds, it returns
This method does not update the size of the current bitmap. If the contents of the source bitmap do not fit in the current bitmap, this method fails. Also, note that this method does not perform format conversion, and will fail if the bitmap formats do not match.
Calling this method may cause the current batch to flush if the bitmap is active in the batch. If the batch that was flushed does not complete successfully, this method fails. However, this method does not clear the error state of the render target on which the batch was flushed. The failing
Starting with Windows 8.1, this method supports block compressed bitmaps. If you are using a block compressed format, the end coordinates of the srcRect parameter must be multiples of 4 or the method returns E_INVALIDARG.
+Copies the specified region from the specified render target into the current bitmap.
+In the current bitmap, the upper-left corner of the area to which the region specified by srcRect is copied.
The render target that contains the region to copy.
The area of renderTarget to copy.
If this method succeeds, it returns
This method does not update the size of the current bitmap. If the contents of the source bitmap do not fit in the current bitmap, this method fails. Also, note that this method does not perform format conversion, and will fail if the bitmap formats do not match.
Calling this method may cause the current batch to flush if the bitmap is active in the batch. If the batch that was flushed does not complete successfully, this method fails. However, this method does not clear the error state of the render target on which the batch was flushed. The failing
All clips and layers must be popped off of the render target before calling this method. The method returns
Copies the specified region from memory into the current bitmap.
+In the current bitmap, the upper-left corner of the area to which the region specified by srcRect is copied.
The data to copy.
The stride, or pitch, of the source bitmap stored in srcData. The stride is the byte count of a scanline (one row of pixels in memory). The stride can be computed from the following formula: pixel width * bytes per pixel + memory padding.
If this method succeeds, it returns
This method does not update the size of the current bitmap. If the contents of the source bitmap do not fit in the current bitmap, this method fails. Also, note that this method does not perform format conversion; the two bitmap formats should match.
If this method is passed invalid input (such as an invalid destination rectangle), can produce unpredictable results, such as a distorted image or device failure.
Calling this method may cause the current batch to flush if the bitmap is active in the batch. If the batch that was flushed does not complete successfully, this method fails. However, this method does not clear the error state of the render target on which the batch was flushed. The failing
Starting with Windows 8.1, this method supports block compressed bitmaps. If you are using a block compressed format, the end coordinates of the srcRect parameter must be multiples of 4 or the method returns E_INVALIDARG.
+Returns the size, in device-independent pixels (DIPs), of the bitmap.
+A DIP is 1/96 of an inch. To retrieve the size in device pixels, use the
Returns the size, in device-dependent units (pixels), of the bitmap.
+Retrieves the pixel format and alpha mode of the bitmap.
+Paints an area with a bitmap.
+A bitmap brush is used to fill a geometry with a bitmap. Like all brushes, it defines an infinite plane of content. Because bitmaps are finite, the brush relies on an "extend mode" to determine how the plane is filled horizontally and vertically.
+Defines an object that paints an area. Interfaces that derive from
An
Brush space in Direct2D is specified differently than in XPS and Windows Presentation Foundation (WPF). In Direct2D, brush space is not relative to the object being drawn, but rather is the current coordinate system of the render target, transformed by the brush transform, if present. To paint an object as it would be painted by a WPF brush, you must translate the brush space origin to the upper-left corner of the object's bounding box, and then scale the brush space so that the base tile fills the bounding box of the object.
For more information about brushes, see the Brushes Overview.
+Sets the degree of opacity of this brush.
+A value between zero and 1 that indicates the opacity of the brush. This value is a constant multiplier that linearly scales the alpha value of all pixels filled by the brush. The opacity values are clamped in the range 0 to 1 before they are multiplied together.
When you paint with a brush, it paints in the coordinate space of the render target. Brushes do not automatically position themselves to align with the object being painted; by default, they begin painting at the origin (0, 0) of the render target.
You can "move" the gradient defined by an
To align the content of an
The following illustrations show the effect of using an
The illustration on the right shows the result of transforming the
Gets the degree of opacity of this brush.
+A value between zero and 1 that indicates the opacity of the brush. This value is a constant multiplier that linearly scales the alpha value of all pixels filled by the brush. The opacity values are clamped in the range 0 to 1 before they are multiplied together.
Gets the transform applied to this brush.
+The transform applied to this brush.
When the brush transform is the identity matrix, the brush appears in the same coordinate space as the render target in which it is drawn.
+Gets or sets the degree of opacity of this brush.
+Gets or sets the transform applied to this brush.
+When the brush transform is the identity matrix, the brush appears in the same coordinate space as the render target in which it is drawn.
+Specifies how the brush horizontally tiles those areas that extend past its bitmap.
+A value that specifies how the brush horizontally tiles those areas that extend past its bitmap.
Sometimes, the bitmap for a bitmap brush doesn't completely fill the area being painted. When this happens, Direct2D uses the brush's horizontal (SetExtendModeX) and vertical (SetExtendModeY) extend mode settings to determine how to fill the remaining area.
The following illustration shows the results from every possible combination of the extend modes for an
Specifies how the brush vertically tiles those areas that extend past its bitmap.
+A value that specifies how the brush vertically tiles those areas that extend past its bitmap.
Sometimes, the bitmap for a bitmap brush doesn't completely fill the area being painted. When this happens, Direct2D uses the brush's horizontal (SetExtendModeX) and vertical (SetExtendModeY) extend mode settings to determine how to fill the remaining area.
The following illustration shows the results from every possible combination of the extend modes for an
Specifies the interpolation mode used when the brush bitmap is scaled or rotated.
+The interpolation mode used when the brush bitmap is scaled or rotated.
This method sets the interpolation mode for a bitmap, which is an enum value that is specified in the
The interpolation mode of a bitmap also affects subpixel translations. In a subpixel translation, bilinear interpolation positions the bitmap more precisely to match the application's request, but blurs the bitmap in the process.
+Specifies the bitmap source that this brush uses to paint.
+The bitmap source used by the brush.
This method specifies the bitmap source that this brush uses to paint. The bitmap is not resized or rescaled automatically to fit the geometry that it fills. The bitmap stays at its native size. To resize or translate the bitmap, use the SetTransform method to apply a transform to the brush.
The native size of a bitmap is the width and height in bitmap pixels, divided by the bitmap DPI. This native size forms the base tile of the brush. To tile a subregion of the bitmap, you must generate a new bitmap containing this subregion and use SetBitmap to apply it to the brush. +
+Gets the method by which the brush horizontally tiles those areas that extend past its bitmap.
+A value that specifies how the brush horizontally tiles those areas that extend past its bitmap.
Like all brushes,
Gets the method by which the brush vertically tiles those areas that extend past its bitmap.
+A value that specifies how the brush vertically tiles those areas that extend past its bitmap.
Like all brushes,
Gets the interpolation method used when the brush bitmap is scaled or rotated.
+The interpolation method used when the brush bitmap is scaled or rotated.
This method gets the interpolation mode of a bitmap, which is specified by the
The interpolation mode of a bitmap also affects subpixel translations. In a subpixel translation, linear interpolation positions the bitmap more precisely to match the application's request, but blurs the bitmap in the process.
+Gets the bitmap source that this brush uses to paint.
+When this method returns, contains the address to a reference to the bitmap with which this brush paints.
Gets or sets the method by which the brush horizontally tiles those areas that extend past its bitmap.
+Like all brushes,
Gets or sets the method by which the brush vertically tiles those areas that extend past its bitmap.
+Like all brushes,
Gets or sets the interpolation method used when the brush bitmap is scaled or rotated.
+This method gets the interpolation mode of a bitmap, which is specified by the
The interpolation mode of a bitmap also affects subpixel translations. In a subpixel translation, linear interpolation positions the bitmap more precisely to match the application's request, but blurs the bitmap in the process.
+Gets or sets the bitmap source that this brush uses to paint.
+Describes the pixel format and dpi of a bitmap.
+The bitmap's pixel format and alpha mode.
The horizontal dpi of the bitmap.
The vertical dpi of the bitmap.
Renders to an intermediate texture created by the CreateCompatibleRenderTarget method.
+An
To write directly to a WIC bitmap instead, use the
Retrieves the bitmap for this render target. The returned bitmap can be used for drawing operations.
+When this method returns, contains the address of a reference to the bitmap for this render target. This bitmap can be used for drawing operations.
If this method succeeds, it returns
The DPI for the
Retrieves the bitmap for this render target. The returned bitmap can be used for drawing operations.
+The DPI for the
Represents a font typography setting.
+Adds an OpenType font feature.
+A structure that contains the OpenType name identifier and the execution parameter for the font feature being added.
If this method succeeds, it returns
Gets the number of OpenType font features for the current font.
+The number of font features for the current text format.
A single run of text can be associated with more than one typographic feature. The
Gets the font feature at the specified index.
+The zero-based index of the font feature to retrieve.
When this method returns, contains the font feature which is at the specified index.
A single run of text can be associated with more than one typographic feature. The
Gets the number of OpenType font features for the current font.
+A single run of text can be associated with more than one typographic feature. The
Contains the center point, x-radius, and y-radius of an ellipse.
+The center point of the ellipse.
The X-radius of the ellipse.
The Y-radius of the ellipse.
Provides access to an device context that can accept GDI drawing commands.
+You don't create an
Not all render targets support the
Note that the QueryInterface method always succeeds; if the render target doesn't support the
To test whether a given render target supports the
Retrieves the device context associated with this render target.
+A value that specifies whether the device context should be cleared.
When this method returns, contains the device context associated with this render target. You must allocate storage for this parameter.
Calling this method flushes the render target.
This command can be called only after BeginDraw and before EndDraw.
Note: In Windows 7 and earlier, you should not call GetDC between PushAxisAlignedClip/PopAxisAlignedClip commands or between PushLayer/PopLayer. However, this restriction does not apply to Windows 8 and later.
ReleaseDC must be called once for each call to GetDC.
+Indicates that drawing with the device context retrieved using the GetDC method is finished.
+If this method succeeds, it returns
ReleaseDC must be called once for each call to GetDC.
+Indicates the condition at the edges of inline object or text used to determine line-breaking behavior.
+Indicates whether a break is allowed by determining the condition of the neighboring text span or inline object.
Indicates that a line break is allowed, unless overruled by the condition of the neighboring text span or inline object, either prohibited by a "may not break" condition or forced by a "must break" condition.
Indicates that there should be no line break, unless overruled by a "must break" condition from the neighboring text span or inline object.
Indicates that the line break must happen, regardless of the condition of the adjacent text span or inline object.
Specifies the type of DirectWrite factory object.
+A DirectWrite factory object contains information about its internal state, such as font loader registration and cached font data. In most cases you should use the shared factory object, because it allows multiple components that use DirectWrite to share internal DirectWrite state information, thereby reducing memory usage. However, there are cases when it is desirable to reduce the impact of a component on the rest of the process, such as a plug-in from an untrusted source, by sandboxing and isolating it from the rest of the process components. In such cases, you should use an isolated factory for the sandboxed component.
+Indicates that the DirectWrite factory is a shared factory and that it allows for the reuse of cached font data across multiple in-process components. Such factories also take advantage of cross process font caching components for better performance.
Indicates that the DirectWrite factory object is isolated. Objects created from the isolated factory do not interact with internal DirectWrite state from other components.
Indicates the direction of flow for placing lines of text in a paragraph.
+Note: Windows 8.1 and later only.
Specifies that text lines stack in increasing order.
Indicates the file format of a complete font face.
+Font formats that consist of multiple files, such as Type 1 .PFM and .PFB, have a single enum entry.
+OpenType font face with CFF outlines.
OpenType font face with TrueType outlines.
OpenType font face that is a part of a TrueType collection.
A Type 1 font face.
A vector .FON format font face.
A bitmap .FON format font face.
Font face type is not recognized by the DirectWrite font system.
A value that indicates the typographic feature of text supplied by the font.
+Replaces figures separated by a slash with an alternative form.
Equivalent OpenType tag: 'afrc'
Turns capital characters into petite capitals. It is generally used for words which would otherwise be set in all caps, such as acronyms, but which are desired in petite-cap form to avoid disrupting the flow of text. See the pcap feature description for notes on the relationship of caps, smallcaps and petite caps.
Equivalent OpenType tag: 'c2pc'
Turns capital characters into small capitals. It is generally used for words which would otherwise be set in all caps, such as acronyms, but which are desired in small-cap form to avoid disrupting the flow of text.
Equivalent OpenType tag: 'c2sc'
In specified situations, replaces default glyphs with alternate forms which provide better joining behavior. Used in script typefaces which are designed to have some or all of their glyphs join.
Equivalent OpenType tag: 'calt'
Shifts various punctuation marks up to a position that works better with all-capital sequences or sets of lining figures; also changes oldstyle figures to lining figures. By default, glyphs in a text face are designed to work with lowercase characters. Some characters should be shifted vertically to fit the higher visual center of all-capital or lining text. Also, lining figures are the same height (or close to it) as capitals, and fit much better with all-capital text.
Equivalent OpenType tag: 'case'
To minimize the number of glyph alternates, it is sometimes desired to decompose a character into two glyphs. Additionally, it may be preferable to compose two characters into a single glyph for better glyph processing. This feature permits such composition/decomposition. The feature should be processed as the first feature processed, and should be processed only when it is called.
Equivalent OpenType tag: 'ccmp'
Replaces a sequence of glyphs with a single glyph which is preferred for typographic purposes. Unlike other ligature features, clig specifies the context in which the ligature is recommended. This capability is important in some script designs and for swash ligatures.
Equivalent OpenType tag: 'clig'
Globally adjusts inter-glyph spacing for all-capital text. Most typefaces contain capitals and lowercase characters, and the capitals are positioned to work with the lowercase. When capitals are used for words, they need more space between them for legibility and esthetics. This feature would not apply to monospaced designs. Of course the user may want to override this behavior in order to do more pronounced letterspacing for esthetic reasons.
Equivalent OpenType tag: 'cpsp'
Replaces default character glyphs with corresponding swash glyphs in a specified context. Note that there may be more than one swash alternate for a given character.
Equivalent OpenType tag: 'cswh'
In cursive scripts like Arabic, this feature cursively positions adjacent glyphs.
Equivalent OpenType tag: 'curs'
Globally adjusts inter-glyph spacing for all-capital text. Most typefaces contain capitals and lowercase characters, and the capitals are positioned to work with the lowercase. When capitals are used for words, they need more space between them for legibility and esthetics. This feature would not apply to monospaced designs. Of course the user may want to override this behavior in order to do more pronounced letterspacing for esthetic reasons.
Equivalent OpenType tag: 'cpsp'
Replaces a sequence of glyphs with a single glyph which is preferred for typographic purposes. This feature covers those ligatures which may be used for special effect, at the user's preference.
Equivalent OpenType tag: 'dlig'
Replaces standard forms in Japanese fonts with corresponding forms preferred by typographers. For example, a user would invoke this feature to replace kanji character U+5516 with U+555E. +
Equivalent OpenType tag: 'expt'
Replaces figures separated by a slash with 'common' (diagonal) fractions.
Equivalent OpenType tag: 'frac'
Replaces glyphs set on other widths with glyphs set on full (usually em) widths. In a CJKV font, this may include "lower ASCII" Latin characters and various symbols. In a European font, this feature replaces proportionally-spaced glyphs with monospaced glyphs, which are generally set on widths of 0.6 em. For example, a user may invoke this feature in a Japanese font to get full monospaced Latin glyphs instead of the corresponding proportionally-spaced versions.
Equivalent OpenType tag: 'fwid'
Produces the half forms of consonants in Indic scripts. For example, in Hindi (Devanagari script), the conjunct KKa, obtained by doubling the Ka, is denoted with a half form of Ka followed by the full form.
Equivalent OpenType tag: 'half'
Produces the halant forms of consonants in Indic scripts. For example, in Sanskrit (Devanagari script), syllable final consonants are frequently required in their halant form.
Equivalent OpenType tag: 'haln'
Respaces glyphs designed to be set on full-em widths, fitting them onto half-em widths. This differs from hwid in that it does not substitute new glyphs.
Equivalent OpenType tag: 'halt'
Replaces the default (current) forms with the historical alternates. While some ligatures are also used for historical effect, this feature deals only with single characters. Some fonts include the historical forms as alternates, so they can be used for a 'period' effect.
Equivalent OpenType tag: 'hist'
Replaces standard kana with forms that have been specially designed for only horizontal writing. This is a typographic optimization for improved fit and more even color.
Equivalent OpenType tag: 'hkna'
Replaces the default (current) forms with the historical alternates. Some ligatures were in common use in the past, but appear anachronistic today. Some fonts include the historical forms as alternates, so they can be used for a 'period' effect.
Equivalent OpenType tag: 'hlig'
Replaces glyphs on proportional widths, or fixed widths other than half an em, with glyphs on half-em (en) widths. Many CJKV fonts have glyphs which are set on multiple widths; this feature selects the half-em version. There are various contexts in which this is the preferred behavior, including compatibility with older desktop documents.
Equivalent OpenType tag: 'hwid'
Used to access the JIS X 0212-1990 glyphs for the cases when the JIS X 0213:2004 form is encoded. The JIS X 0212-1990 (aka, "Hojo Kanji") and JIS X 0213:2004 character sets overlap significantly. In some cases their prototypical glyphs differ. When building fonts that support both JIS X 0212-1990 and JIS X 0213:2004 (such as those supporting the Adobe-Japan 1-6 character collection), it is recommended that JIS X 0213:2004 forms be the preferred encoded form.
Equivalent OpenType tag: 'hojo'
The National Language Council (NLC) of Japan has defined new glyph shapes for a number of JIS characters, which were incorporated into JIS X 0213:2004 as new prototypical forms. The 'jp04' feature is a subset of the 'nlck' feature, and is used to access these prototypical glyphs in a manner that maintains the integrity of JIS X 0213:2004.
Equivalent OpenType tag: 'jp04'
Replaces default (JIS90) Japanese glyphs with the corresponding forms from the JIS C 6226-1978 (JIS78) specification.
Equivalent OpenType tag: 'jp78'
Replaces default (JIS90) Japanese glyphs with the corresponding forms from the JIS X 0208-1983 (JIS83) specification.
Equivalent OpenType tag: 'jp83'
Replaces Japanese glyphs from the JIS78 or JIS83 specifications with the corresponding forms from the JIS X 0208-1990 (JIS90) specification.
Equivalent OpenType tag: 'jp90'
Adjusts amount of space between glyphs, generally to provide optically consistent spacing between glyphs. Although a well-designed typeface has consistent inter-glyph spacing overall, some glyph combinations require adjustment for improved legibility. Besides standard adjustment in the horizontal direction, this feature can supply size-dependent kerning data via device tables, "cross-stream" kerning in the Y text direction, and adjustment of glyph placement independent of the advance adjustment. Note that this feature may apply to runs of more than two glyphs, and would not be used in monospaced fonts. Also note that this feature does not apply to text set vertically.
Equivalent OpenType tag: 'kern'
Replaces a sequence of glyphs with a single glyph which is preferred for typographic purposes. This feature covers the ligatures which the designer/manufacturer judges should be used in normal conditions.
Equivalent OpenType tag: 'liga'
Changes selected figures from oldstyle to the default lining form. For example, a user may invoke this feature in order to get lining figures, which fit better with all-capital text. This feature overrides results of the Oldstyle Figures feature (onum).
Equivalent OpenType tag: 'lnum'
Enables localized forms of glyphs to be substituted for default forms. Many scripts used to write multiple languages over wide geographical areas have developed localized variant forms of specific letters, which are used by individual literary communities. For example, a number of letters in the Bulgarian and Serbian alphabets have forms distinct from their Russian counterparts and from each other. In some cases the localized form differs only subtly from the script 'norm', in others the forms are radically distinct.
Equivalent OpenType tag: 'locl'
Positions mark glyphs with respect to base glyphs. For example, in Arabic script positioning the Hamza above the Yeh.
Equivalent OpenType tag: 'mark'
Replaces standard typographic forms of Greek glyphs with corresponding forms commonly used in mathematical notation (which are a subset of the Greek alphabet).
Equivalent OpenType tag: 'mgrk'
Positions marks with respect to other marks. Required in various non-Latin scripts like Arabic. For example, in Arabic, the ligaturised mark Ha with Hamza above it can also be obtained by positioning these marks relative to one another.
Equivalent OpenType tag: 'mkmk'
Replaces default glyphs with various notational forms (such as glyphs placed in open or solid circles, squares, parentheses, diamonds or rounded boxes). In some cases an annotation form may already be present, but the user may want a different one.
Equivalent OpenType tag: 'nalt'
Used to access glyphs made from glyph shapes defined by the National Language Council (NLC) of Japan for a number of JIS characters in 2000.
Equivalent OpenType tag: 'nlck'
Changes selected figures from the default lining style to oldstyle form. For example, a user may invoke this feature to get oldstyle figures, which fit better into the flow of normal upper- and lowercase text. This feature overrides results of the Lining Figures feature (lnum).
Equivalent OpenType tag: 'onum'
Replaces default alphabetic glyphs with the corresponding ordinal forms for use after figures. One exception to the follows-a-figure rule is the numero character (U+2116), which is actually a ligature substitution, but is best accessed through this feature.
Equivalent OpenType tag: 'ordn'
Respaces glyphs designed to be set on full-em widths, fitting them onto individual (more or less proportional) horizontal widths. This differs from pwid in that it does not substitute new glyphs (GPOS, not GSUB feature). The user may prefer the monospaced form, or may simply want to ensure that the glyph is well-fit and not rotated in vertical setting (Latin forms designed for proportional spacing would be rotated).
Equivalent OpenType tag: 'palt'
Turns lowercase characters into petite capitals. Forms related to petite capitals, such as specially designed figures, may be included. Some fonts contain an additional size of capital letters, shorter than the regular smallcaps and it is referred to as petite caps. Such forms are most likely to be found in designs with a small lowercase x-height, where they better harmonise with lowercase text than the taller smallcaps (for examples of petite caps, see the Emigre type families Mrs Eaves and Filosofia).
Equivalent OpenType tag: 'pcap'
Replaces figure glyphs set on uniform (tabular) widths with corresponding glyphs set on glyph-specific (proportional) widths. Tabular widths will generally be the default, but this cannot be safely assumed. Of course this feature would not be present in monospaced designs.
Equivalent OpenType tag: 'pnum'
Replaces glyphs set on uniform widths (typically full or half-em) with proportionally spaced glyphs. The proportional variants are often used for the Latin characters in CJKV fonts, but may also be used for Kana in Japanese fonts.
Equivalent OpenType tag: 'pwid'
Replaces glyphs on other widths with glyphs set on widths of one quarter of an em (half an en). The characters involved are normally figures and some forms of punctuation.
Equivalent OpenType tag: 'qwid'
Replaces a sequence of glyphs with a single glyph which is preferred for typographic purposes. This feature covers those ligatures, which the script determines as required to be used in normal conditions. This feature is important for some scripts to ensure correct glyph formation.
Equivalent OpenType tag: 'rlig'
Identifies glyphs in the font which have been designed for "ruby", from the old typesetting term for four-point-sized type. Japanese typesetting often uses smaller kana glyphs, generally in superscripted form, to clarify the meaning of kanji which may be unfamiliar to the reader.
Equivalent OpenType tag: 'ruby'
Replaces the default forms with the stylistic alternates. Many fonts contain alternate glyph designs for a purely esthetic effect; these don't always fit into a clear category like swash or historical. As in the case of swash glyphs, there may be more than one alternate form.
Equivalent OpenType tag: 'salt'
Replaces lining or oldstyle figures with inferior figures (smaller glyphs which sit lower than the standard baseline, primarily for chemical or mathematical notation). May also replace lowercase characters with alphabetic inferiors.
Equivalent OpenType tag: 'sinf'
Turns lowercase characters into small capitals. This corresponds to the common SC font layout. It is generally used for display lines set in Large & small caps, such as titles. Forms related to small capitals, such as oldstyle figures, may be included.
Equivalent OpenType tag: 'smcp'
Replaces 'traditional' Chinese or Japanese forms with the corresponding 'simplified' forms.
Equivalent OpenType tag: 'smpl'
In addition to, or instead of, stylistic alternatives of individual glyphs (see 'salt' feature), some fonts may contain sets of stylistic variant glyphs corresponding to portions of the character set, such as multiple variants for lowercase letters in a Latin font. Glyphs in stylistic sets may be designed to harmonise visually, interact in particular ways, or otherwise work together. Examples of fonts including stylistic sets are Zapfino Linotype and Adobe's Poetica. Individual features numbered sequentially with the tag name convention 'ss01', 'ss02', 'ss03' through 'ss20' provide a mechanism for glyphs in these sets to be associated via GSUB lookup indexes to default forms and to each other, and for users to select from available stylistic sets.
Equivalent OpenType tag: 'ss01'
See the description for
Equivalent OpenType tag: 'ss02'
See the description for
Equivalent OpenType tag: 'ss03'
See the description for
Equivalent OpenType tag: 'ss04'
See the description for
Equivalent OpenType tag: 'ss05'
See the description for
Equivalent OpenType tag: 'ss06'
See the description for
Equivalent OpenType tag: 'ss07'
See the description for
Equivalent OpenType tag: 'ss08'
See the description for
Equivalent OpenType tag: 'ss09'
See the description for
Equivalent OpenType tag: 'ss10'
See the description for
Equivalent OpenType tag: 'ss11'
See the description for
Equivalent OpenType tag: 'ss12'
See the description for
Equivalent OpenType tag: 'ss13'
See the description for
Equivalent OpenType tag: 'ss14'
See the description for
Equivalent OpenType tag: 'ss15'
See the description for
Equivalent OpenType tag: 'ss16'
See the description for
Equivalent OpenType tag: 'ss17'
See the description for
Equivalent OpenType tag: 'ss18'
See the description for
Equivalent OpenType tag: 'ss19'
See the description for
Equivalent OpenType tag: 'ss20'
May replace a default glyph with a subscript glyph, or it may combine a glyph substitution with positioning adjustments for proper placement.
Equivalent OpenType tag: 'subs'
Replaces lining or oldstyle figures with superior figures (primarily for footnote indication), and replaces lowercase letters with superior letters (primarily for abbreviated French titles).
Equivalent OpenType tag: 'sups'
Replaces default character glyphs with corresponding swash glyphs. Note that there may be more than one swash alternate for a given character.
Equivalent OpenType tag: 'swsh'
Replaces the default glyphs with corresponding forms designed specifically for titling. These may be all-capital and/or larger on the body, and adjusted for viewing at larger sizes.
Equivalent OpenType tag: 'titl'
Replaces 'simplified' Japanese kanji forms with the corresponding 'traditional' forms. This is equivalent to the Traditional Forms feature, but explicitly limited to the traditional forms considered proper for use in personal names (as many as 205 glyphs in some fonts).
Equivalent OpenType tag: 'tnam'
Replaces figure glyphs set on proportional widths with corresponding glyphs set on uniform (tabular) widths. Tabular widths will generally be the default, but this cannot be safely assumed. Of course this feature would not be present in monospaced designs.
Equivalent OpenType tag: 'tnum'
Replaces 'simplified' Chinese hanzi or Japanese kanji forms with the corresponding 'traditional' forms.
Equivalent OpenType tag: 'trad'
Replaces glyphs on other widths with glyphs set on widths of one third of an em. The characters involved are normally figures and some forms of punctuation.
Equivalent OpenType tag: 'twid'
Maps upper- and lowercase letters to a mixed set of lowercase and small capital forms, resulting in a single case alphabet (for an example of unicase, see the Emigre type family Filosofia). The letters substituted may vary from font to font, as appropriate to the design. If aligning to the x-height, smallcap glyphs may be substituted, or specially designed unicase forms might be used. Substitutions might also include specially designed figures. +
Equivalent OpenType tag: 'unic'
Allows the user to change from the default 0 to a slashed form. Some fonts contain both a default form of zero, and an alternative form which uses a diagonal slash through the counter. Especially in condensed designs, it can be difficult to distinguish between 0 and O (zero and capital O) in any situation where capitals and lining figures may be arbitrarily mixed.
Equivalent OpenType tag: 'zero'
The type of a font represented by a single font file. Font formats that consist of multiple files, for example Type 1 .PFM and .PFB, have separate enum values for each of the file types.
+Font type is not recognized by the DirectWrite font system.
OpenType font with CFF outlines.
OpenType font with TrueType outlines.
OpenType font that contains a TrueType collection.
Type 1 PFM font.
Type 1 PFB font.
Vector .FON font.
Bitmap .FON font.
Specifies algorithmic style simulations to be applied to the font face. Bold and oblique simulations can be combined via bitwise OR operation.
+Style simulations are not recommended for good typographic quality.
+Indicates that no simulations are applied to the font face.
Indicates that algorithmic emboldening is applied to the font face.
Indicates that algorithmic italicization is applied to the font face.
Represents the degree to which a font has been stretched compared to a font's normal aspect ratio. The enumerated values correspond to the usWidthClass definition in the OpenType specification. The usWidthClass represents an integer value between 1 and 9; lower values indicate narrower widths, higher values indicate wider widths.
+A font stretch describes the degree to which a font form is stretched from its normal aspect ratio, which is the original width to height ratio specified for the glyphs in the font. + The following illustration shows an example of Normal and Condensed stretches for the Rockwell Bold typeface.
Note: Values other than the ones defined in the enumeration are considered to be invalid, and are rejected by font API functions.
+Predefined font stretch : Not known (0).
Predefined font stretch : Ultra-condensed (1).
Predefined font stretch : Extra-condensed (2).
Predefined font stretch : Condensed (3).
Predefined font stretch : Semi-condensed (4).
Predefined font stretch : Normal (5).
Predefined font stretch : Medium (5).
Predefined font stretch : Semi-expanded (6).
Predefined font stretch : Expanded (7).
Predefined font stretch : Extra-expanded (8).
Predefined font stretch : Ultra-expanded (9).
Represents the style of a font face as normal, italic, or oblique.
+Three terms categorize the slant of a font: normal, italic, and oblique.
Font style | Description |
---|---|
Normal | The characters in a normal, or roman, font are upright. + |
Italic + | The characters in an italic font are truly slanted and appear as they were designed. + |
Oblique | The characters in an oblique font are artificially slanted. |
?
For Oblique, the slant is achieved by performing a shear transformation on the characters from a normal font. When a true italic font is not available on a computer or printer, an oblique style can be generated from the normal font and used to simulate an italic font. The following illustration shows the normal, italic, and oblique font styles for the Palatino Linotype font. Notice how the italic font style has a more flowing and visually appealing appearance than the oblique font style, which is simply created by skewing the normal font style version of the text.
Note: Values other than the ones defined in the enumeration are considered to be invalid, and they are rejected by font API functions.
+Font style : Normal.
Font style : Oblique.
Font style : Italic.
Represents the density of a typeface, in terms of the lightness or heaviness of the strokes. The enumerated values correspond to the usWeightClass definition in the OpenType specification. The usWeightClass represents an integer value between 1 and 999. Lower values indicate lighter weights; higher values indicate heavier weights.
+Weight differences are generally differentiated by an increased stroke or thickness that is associated with a given character in a typeface, as compared to a "normal" character from that same typeface. + The following illustration shows an example of Normal and UltraBold weights for the Palatino Linotype typeface.
Note: Not all weights are available for all typefaces. When a weight is not available for a typeface, the closest matching weight is returned.
Font weight values less than 1 or greater than 999 are considered invalid, and they are rejected by font API functions.
+Predefined font weight : Thin (100).
Predefined font weight : Extra-light (200).
Predefined font weight : Ultra-light (200).
Predefined font weight : Light (300).
Predefined font weight : Normal (400).
Predefined font weight : Regular (400).
Predefined font weight : Medium (500).
Predefined font weight : Demi-bold (600).
Predefined font weight : Semi-bold (600).
Predefined font weight : Bold (700).
Predefined font weight : Extra-bold (800).
Predefined font weight : Ultra-bold (800).
Predefined font weight : Black (900).
Predefined font weight : Heavy (900).
Predefined font weight : Extra-black (950).
Predefined font weight : Ultra-black (950).
The informational string enumeration which identifies a string embedded in a font file.
+Indicates the string containing the unspecified name ID.
Indicates the string containing the copyright notice provided by the font.
Indicates the string containing a version number.
Indicates the string containing the trademark information provided by the font.
Indicates the string containing the name of the font manufacturer.
Indicates the string containing the name of the font designer.
Indicates the string containing the URL of the font designer (with protocol, e.g., http://, ftp://).
Indicates the string containing the description of the font. This may also contain revision information, usage recommendations, history, features, and so on.
Indicates the string containing the URL of the font vendor (with protocol, e.g., http://, ftp://). If a unique serial number is embedded in the URL, it can be used to register the font.
Indicates the string containing the description of how the font may be legally used, or different example scenarios for licensed use.
Indicates the string containing the URL where additional licensing information can be found.
Indicates the string containing the GDI-compatible family name. Since GDI allows a maximum of four fonts per family, fonts in the same family may have different GDI-compatible family names (e.g., "Arial", "Arial Narrow", "Arial Black").
Indicates the string containing a GDI-compatible subfamily name.
Indicates the string containing the family name preferred by the designer. This enables font designers to group more than four fonts in a single family without losing compatibility with GDI. This name is typically only present if it differs from the GDI-compatible family name.
Indicates the string containing the subfamily name preferred by the designer. This name is typically only present if it differs from the GDI-compatible subfamily name.
Contains sample text for display in font lists. This can be the font name or any other text that the designer thinks is the best example to display the font in.
The method used for line spacing in a text layout.
+The line spacing method is set by using the SetLineSpacing method of the
Line spacing depends solely on the content, adjusting to accommodate the size of fonts and inline objects.
Lines are explicitly set to uniform spacing, regardless of the size of fonts and inline objects. This can be useful to avoid the uneven appearance that can occur from font fallback.
Specifies how to apply number substitution on digits and related punctuation.
+Specifies that the substitution method should be determined based on the LOCALE_IDIGITSUBSTITUTION value of the specified text culture.
If the culture is Arabic or Persian, specifies that the number shapes depend on the context. Either traditional or nominal number shapes are used, depending on the nearest preceding strong character or (if there is none) the reading direction of the paragraph.
Specifies that code points 0x30-0x39 are always rendered as nominal numeral shapes (ones of the European number), that is, no substitution is performed.
Specifies that numbers are rendered using the national number shapes as specified by the LOCALE_SNATIVEDIGITS value of the specified text culture.
Specifies that numbers are rendered using the traditional shapes for the specified culture. For most cultures, this is the same as NativeNational. However, NativeNational results in Latin numbers for some Arabic cultures, whereas DWRITE_NUMBER_SUBSTITUTION_METHOD_TRADITIONAL results in Arabic numbers for all Arabic cultures.
Specifies the alignment of paragraph text along the flow direction axis, relative to the top and bottom of the flow's layout box.
+The top of the text flow is aligned to the top edge of the layout box.
The bottom of the text flow is aligned to the bottom edge of the layout box.
The center of the flow is aligned to the center of the layout box.
Represents the internal structure of a device pixel (that is, the physical arrangement of red, green, and blue color components) that is assumed for purposes of rendering text. +
+The red, green, and blue color components of each pixel are assumed to occupy the same point.
Each pixel is composed of three vertical stripes, with red on the left, green in the center, and blue on the right. This is the most common pixel geometry for LCD monitors.
Each pixel is composed of three vertical stripes, with blue on the left, green in the center, and red on the right.
Specifies the direction in which reading progresses.
Note: DWRITE_READING_DIRECTION_TOP_TO_BOTTOM and DWRITE_READING_DIRECTION_BOTTOM_TO_TOP are available in Windows 8.1 and later only.
+Indicates that reading progresses from left to right.
Indicates that reading progresses from right to left.
Represents a method of rendering glyphs.
Note: This topic is about
Indicates additional shaping requirements for text.
+Indicates that there are no additional shaping requirements for text. Text is shaped with the writing system default behavior.
Indicates that text should leave no visible control or format control characters.
Specifies the alignment of paragraph text along the reading direction axis, relative to the leading and trailing edge of the layout box.
+The leading edge of the paragraph text is aligned to the leading edge of the layout box.
The trailing edge of the paragraph text is aligned to the trailing edge of the layout box.
The center of the paragraph text is aligned to the center of the layout box.
Identifies a type of alpha texture.
+An alpha texture is a bitmap of alpha values, each representing opacity of a pixel or subpixel.
+Specifies an alpha texture for aliased text rendering (that is, each pixel is either fully opaque or fully transparent), with one byte per pixel.
Specifies an alpha texture for ClearType text rendering, with three bytes per pixel in the horizontal dimension and one byte per pixel in the vertical dimension.
Specifies the text granularity used to trim text overflowing the layout box.
+No trimming occurs. Text flows beyond the layout width.
Trimming occurs at a character cluster boundary.
Trimming occurs at a word boundary.
Specifies the word wrapping to be used in a particular multiline paragraph.
Note: DWRITE_WORD_WRAPPING_EMERGENCY_BREAK, DWRITE_WORD_WRAPPING_WHOLE_WORD, and DWRITE_WORD_WRAPPING_CHARACTER are available in Windows 8.1 and later only.
+Indicates that words are broken across lines to avoid text overflowing the layout box.
Indicates that words are kept within the same line even when it overflows the layout box. This option is often used with scrolling to reveal overflow text.
Creates a DirectWrite factory object that is used for subsequent creation of individual DirectWrite objects.
+A value that specifies whether the factory object will be shared or isolated.
A
An address of a reference to the newly created DirectWrite factory object.
If this function succeeds, it returns
This function creates a DirectWrite factory object that is used for subsequent creation of individual DirectWrite objects. DirectWrite factory contains internal state data such as font loader registration and cached font data. In most cases it is recommended you use the shared factory object, because it allows multiple components that use DirectWrite to share internal DirectWrite state data, and thereby reduce memory usage. However, there are cases when it is desirable to reduce the impact of a component, such as a plug-in from an untrusted source, on the rest of the process, by sandboxing and isolating it from the rest of the process components. In such cases, it is recommended you use an isolated factory for the sandboxed component.
The following example shows how to create a shared DirectWrite factory.
if (SUCCEEDED(hr)) + { hr =( , __uuidof( ), reinterpret_cast< **>(&pDWriteFactory_) ); + }
Windows Phone 8.1: This API is supported.
+Represents a physical font in a font collection. This interface is used to create font faces from physical fonts, or to retrieve information such as font face metrics or face names from existing font faces.
+Gets the font family to which the specified font belongs.
+When this method returns, contains an address of a reference to the font family object to which the specified font belongs.
If this method succeeds, it returns
Gets the weight, or stroke thickness, of the specified font.
+A value that indicates the weight for the specified font.
Gets the stretch, or width, of the specified font.
+A value that indicates the type of stretch, or width, applied to the specified font.
Gets the style, or slope, of the specified font.
+A value that indicates the type of style, or slope, of the specified font.
Determines whether the font is a symbol font.
+TRUE if the font is a symbol font; otherwise,
Gets a localized strings collection containing the face names for the font (such as Regular or Bold), indexed by locale name.
+When this method returns, contains an address to a reference to the newly created localized strings object.
If this method succeeds, it returns
Gets a localized strings collection containing the specified informational strings, indexed by locale name.
+A value that identifies the informational string to get. For example,
When this method returns, contains an address of a reference to the newly created localized strings object.
When this method returns, TRUE if the font contains the specified string ID; otherwise,
If the font does not contain the string specified by informationalStringID, the return value is
Gets a value that indicates what simulations are applied to the specified font.
+A value that indicates one or more of the types of simulations (none, bold, or oblique) applied to the specified font.
Obtains design units and common metrics for the font face. These metrics are applicable to all the glyphs within a font face and are used by applications for layout calculations.
+When this method returns, contains a structure that has font metrics for the current font face. The metrics returned by this function are in font design units.
Determines whether the font supports a specified character.
+A Unicode (UCS-4) character value for the method to inspect.
When this method returns, TRUE if the font supports the specified character; otherwise,
Creates a font face object for the font.
+When this method returns, contains an address of a reference to the newly created font face object.
If this method succeeds, it returns
Gets the font family to which the specified font belongs.
+Gets the weight, or stroke thickness, of the specified font.
+Gets the stretch, or width, of the specified font.
+Gets the style, or slope, of the specified font.
+Determines whether the font is a symbol font.
+Gets a localized strings collection containing the face names for the font (such as Regular or Bold), indexed by locale name.
+Gets a value that indicates what simulations are applied to the specified font.
+Obtains design units and common metrics for the font face. These metrics are applicable to all the glyphs within a font face and are used by applications for layout calculations.
+Creates a localized strings object that contains the family names for the font family, indexed by locale name.
+ The following code example shows how to get the font family name from a
+* pFamilyNames = null ; // Get a list of localized strings for the family name. + if (SUCCEEDED(hr)) + { hr = pFontFamily->GetFamilyNames(&pFamilyNames); + } UINT32 index = 0; +exists = false; wchar_t localeName[LOCALE_NAME_MAX_LENGTH]; if (SUCCEEDED(hr)) + { // Get the default locale for this user. int defaultLocaleSuccess = GetUserDefaultLocaleName(localeName, LOCALE_NAME_MAX_LENGTH); // If the default locale is returned, find that locale name, otherwise use "en-us". if (defaultLocaleSuccess) { hr = pFamilyNames->FindLocaleName(localeName, &index, &exists); } if (SUCCEEDED(hr) && !exists) // if the above find did not find a match, retry with US English { hr = pFamilyNames->FindLocaleName(L"en-us", &index, &exists); } + } // If the specified locale doesn't exist, select the first on the list. + if (!exists) index = 0; UINT32 length = 0; // Get the string length. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetStringLength(index, &length); + } // Allocate a string big enough to hold the name. + wchar_t* name = new (std::nothrow) wchar_t[length+1]; + if (name == null ) + { hr = E_OUTOFMEMORY; + } // Get the family name. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetString(index, name, length+1); + } +
Represents a list of fonts.
+Gets the font collection that contains the fonts in the font list.
+When this method returns, contains the address of a reference to the current
If this method succeeds, it returns
Gets the number of fonts in the font list.
+The number of fonts in the font list.
Gets a font given its zero-based index.
+Zero-based index of the font in the font list.
When this method returns, contains the address of a reference to the newly created
Gets the font collection that contains the fonts in the font list.
+Gets the number of fonts in the font list.
+Creates a localized strings object that contains the family names for the font family, indexed by locale name.
+The address of a reference to the newly created
If this method succeeds, it returns
The following code example shows how to get the font family name from a
+* pFamilyNames = null ; // Get a list of localized strings for the family name. + if (SUCCEEDED(hr)) + { hr = pFontFamily->GetFamilyNames(&pFamilyNames); + } UINT32 index = 0; +exists = false; wchar_t localeName[LOCALE_NAME_MAX_LENGTH]; if (SUCCEEDED(hr)) + { // Get the default locale for this user. int defaultLocaleSuccess = GetUserDefaultLocaleName(localeName, LOCALE_NAME_MAX_LENGTH); // If the default locale is returned, find that locale name, otherwise use "en-us". if (defaultLocaleSuccess) { hr = pFamilyNames->FindLocaleName(localeName, &index, &exists); } if (SUCCEEDED(hr) && !exists) // if the above find did not find a match, retry with US English { hr = pFamilyNames->FindLocaleName(L"en-us", &index, &exists); } + } // If the specified locale doesn't exist, select the first on the list. + if (!exists) index = 0; UINT32 length = 0; // Get the string length. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetStringLength(index, &length); + } // Allocate a string big enough to hold the name. + wchar_t* name = new (std::nothrow) wchar_t[length+1]; + if (name == null ) + { hr = E_OUTOFMEMORY; + } // Get the family name. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetString(index, name, length+1); + } +
Gets the font that best matches the specified properties.
+A value that is used to match a requested font weight.
A value that is used to match a requested font stretch.
A value that is used to match a requested font style.
When this method returns, contains the address of a reference to the newly created
Gets a list of fonts in the font family ranked in order of how well they match the specified properties.
+A value that is used to match a requested font weight.
A value that is used to match a requested font stretch.
A value that is used to match a requested font style.
An address of a reference to the newly created
Creates a localized strings object that contains the family names for the font family, indexed by locale name.
+ The following code example shows how to get the font family name from a
+* pFamilyNames = null ; // Get a list of localized strings for the family name. + if (SUCCEEDED(hr)) + { hr = pFontFamily->GetFamilyNames(&pFamilyNames); + } UINT32 index = 0; +exists = false; wchar_t localeName[LOCALE_NAME_MAX_LENGTH]; if (SUCCEEDED(hr)) + { // Get the default locale for this user. int defaultLocaleSuccess = GetUserDefaultLocaleName(localeName, LOCALE_NAME_MAX_LENGTH); // If the default locale is returned, find that locale name, otherwise use "en-us". if (defaultLocaleSuccess) { hr = pFamilyNames->FindLocaleName(localeName, &index, &exists); } if (SUCCEEDED(hr) && !exists) // if the above find did not find a match, retry with US English { hr = pFamilyNames->FindLocaleName(L"en-us", &index, &exists); } + } // If the specified locale doesn't exist, select the first on the list. + if (!exists) index = 0; UINT32 length = 0; // Get the string length. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetStringLength(index, &length); + } // Allocate a string big enough to hold the name. + wchar_t* name = new (std::nothrow) wchar_t[length+1]; + if (name == null ) + { hr = E_OUTOFMEMORY; + } // Get the family name. + if (SUCCEEDED(hr)) + { hr = pFamilyNames->GetString(index, name, length+1); + } +
Contains information about a glyph cluster.
+The total advance width of all glyphs in the cluster.
The number of text positions in the cluster.
Indicates whether a line can be broken right after the cluster.
Indicates whether the cluster corresponds to a whitespace character.
Indicates whether the cluster corresponds to a newline character.
Indicates whether the cluster corresponds to a soft hyphen character.
Indicates whether the cluster is read from right to left.
Reserved for future use.
The
The number of font design units per em unit. Font files use their own coordinate system of font design units. A font design unit is the smallest measurable unit in the em square, an imaginary square that is used to size and align glyphs. The concept of em square is used as a reference scale factor when defining font size and device transformation semantics. The size of one em square is also commonly used to compute the paragraph indentation value.
The ascent value of the font face in font design units. Ascent is the distance from the top of font character alignment box to the English baseline.
The descent value of the font face in font design units. Descent is the distance from the bottom of font character alignment box to the English baseline.
The line gap in font design units. Recommended additional white space to add between lines to improve legibility. The recommended line spacing (baseline-to-baseline distance) is the sum of ascent, descent, and lineGap. The line gap is usually positive or zero but can be negative, in which case the recommended line spacing is less than the height of the character alignment box.
The cap height value of the font face in font design units. Cap height is the distance from the English baseline to the top of a typical English capital. Capital "H" is often used as a reference character for the purpose of calculating the cap height value.
The x-height value of the font face in font design units. x-height is the distance from the English baseline to the top of lowercase letter "x", or a similar lowercase character.
The underline position value of the font face in font design units. Underline position is the position of underline relative to the English baseline. The value is usually made negative in order to place the underline below the baseline.
The suggested underline thickness value of the font face in font design units.
The strikethrough position value of the font face in font design units. Strikethrough position is the position of strikethrough relative to the English baseline. The value is usually made positive in order to place the strikethrough above the baseline.
The suggested strikethrough thickness value of the font face in font design units.
Specifies the metrics of an individual glyph. The units depend on how the metrics are obtained.
+Specifies the X offset from the glyph origin to the left edge of the black box. The glyph origin is the current horizontal writing position. A negative value means the black box extends to the left of the origin (often true for lowercase italic 'f').
Specifies the X offset from the origin of the current glyph to the origin of the next glyph when writing horizontally.
Specifies the X offset from the right edge of the black box to the origin of the next glyph when writing horizontally. The value is negative when the right edge of the black box overhangs the layout box.
Specifies the vertical offset from the vertical origin to the top of the black box. Thus, a positive value adds whitespace whereas a negative value means the glyph overhangs the top of the layout box.
Specifies the Y offset from the vertical origin of the current glyph to the vertical origin of the next glyph when writing vertically. Note that the term "origin" by itself denotes the horizontal origin. The vertical origin is different. Its Y coordinate is specified by verticalOriginY value, and its X coordinate is half the advanceWidth to the right of the horizontal origin.
Specifies the vertical distance from the bottom edge of the black box to the advance height. This is positive when the bottom edge of the black box is within the layout box, or negative when the bottom edge of black box overhangs the layout box.
Specifies the Y coordinate of a glyph's vertical origin, in the font's design coordinate system. The y coordinate of a glyph's vertical origin is the sum of the glyph's top side bearing and the top (that is, yMax) of the glyph's bounding box.
The optional adjustment to a glyph's position.
+A glyph offset changes the position of a glyph without affecting the pen position. Offsets are in logical, pre-transform units.
+The offset in the advance direction of the run. A positive advance offset moves the glyph to the right (in pre-transform coordinates) if the run is left-to-right or to the left if the run is right-to-left.
The offset in the ascent direction, that is, the direction ascenders point. A positive ascender offset moves the glyph up (in pre-transform coordinates). A negative ascender offset moves the glyph down.
Describes the region obtained by a hit test.
+The first text position within the hit region.
The number of text positions within the hit region.
The x-coordinate of the upper-left corner of the hit region.
The y-coordinate of the upper-left corner of the hit region.
The width of the hit region.
The height of the hit region.
The BIDI level of the text positions within the hit region.
true if the hit region contains text; otherwise, false.
true if the text range is trimmed; otherwise, false.
Contains properties describing the geometric measurement of an + application-defined inline object.
+The width of the inline object.
The height of the inline object.
The distance from the top of the object to the point where it is lined up with the adjacent text. If the baseline is at the bottom, then baseline simply equals height.
A Boolean flag that indicates whether the object is to be placed upright or alongside the text baseline for vertical text.
Contains information about a formatted line of text.
+The number of text positions in the text line. This includes any trailing whitespace and newline characters.
The number of whitespace positions at the end of the text line. Newline sequences are considered whitespace.
The number of characters in the newline sequence at the end of the text line. If the count is zero, then the text line was either wrapped or it is the end of the text.
The height of the text line.
The distance from the top of the text line to its baseline.
The line is trimmed.
Indicates how much any visible DIPs (device independent pixels) overshoot each side of the layout or inline objects.
Positive overhangs indicate that the visible area extends outside the layout box or inline object, while negative values mean there is whitespace inside. The returned values are unaffected by rendering transforms or pixel snapping. Additionally, they may not exactly match the final target's pixel bounds after applying grid fitting and hinting.
+The distance from the left-most visible DIP to its left-alignment edge.
The distance from the top-most visible DIP to its top alignment edge.
The distance from the right-most visible DIP to its right-alignment edge.
The distance from the bottom-most visible DIP to its lower-alignment edge.
Stores the association of text and its writing system script, as well as some display attributes.
+The zero-based index representation of writing system script.
A value that indicates additional shaping requirement of text.
Shaping output properties for an output glyph.
+Indicates that the glyph is shaped alone.
Reserved for future use.
Contains information regarding the size and placement of strikethroughs. All coordinates are in device independent pixels (DIPs).
+A value that indicates the width of the strikethrough, measured parallel to the baseline.
A value that indicates the thickness of the strikethrough, measured perpendicular to the baseline.
A value that indicates the offset of the strikethrough from the baseline. A positive offset represents a position below the baseline and a negative offset is above. Typically, the offset will be negative.
Reading direction of the text associated with the strikethrough. This value is used to interpret whether the width value runs horizontally or vertically.
Flow direction of the text associated with the strikethrough. This value is used to interpret whether the thickness value advances top to bottom, left to right, or right to left.
An array of characters containing the locale of the text that the strikethrough is being drawn over.
The measuring mode can be useful to the renderer to determine how underlines are rendered, such as rounding the thickness to a whole pixel in GDI-compatible modes.
Contains the metrics associated with text after layout. All coordinates are in device independent pixels (DIPs).
+A value that indicates the left-most point of formatted text relative to the layout box, while excluding any glyph overhang.
A value that indicates the top-most point of formatted text relative to the layout box, while excluding any glyph overhang.
A value that indicates the width of the formatted text, while ignoring trailing whitespace at the end of each line.
The width of the formatted text, taking into account the trailing whitespace at the end of each line.
The height of the formatted text. The height of an empty string is set to the same value as that of the default font.
The initial width given to the layout. It can be either larger or smaller than the text content width, depending on whether the text was wrapped.
Initial height given to the layout. Depending on the length of the text, it may be larger or smaller than the text content height.
The maximum reordering count of any line of text, used to calculate the most number of hit-testing boxes needed. If the layout has no bidirectional text, or no text at all, the minimum level is 1.
Total number of lines.
Specifies the trimming option for text overflowing the layout box.
+A value that specifies the text granularity used to trim text overflowing the layout box.
A character code used as the delimiter that signals the beginning of the portion of text to be preserved. Most useful for path ellipsis, where the delimiter would be a slash.
A value that indicates how many occurrences of the delimiter to step back.
Contains a set of typographic features to be applied during text shaping.
+A reference to a structure that specifies properties used to identify and execute typographic features in the font.
A value that indicates the number of features being applied to a font face.
Contains information about the width, thickness, offset, run height, reading direction, and flow direction of an underline.
+All coordinates are in device independent pixels (DIPs).
+A value that indicates the width of the underline, measured parallel to the baseline.
A value that indicates the thickness of the underline, measured perpendicular to the baseline.
A value that indicates the offset of the underline from the baseline. A positive offset represents a position below the baseline (away from the text) and a negative offset is above (toward the text).
A value that indicates the height of the tallest run where the underline is applied.
A value that indicates the reading direction of the text associated with the underline. This value is used to interpret whether the width value runs horizontally or vertically.
A value that indicates the flow direction of the text associated with the underline. This value is used to interpret whether the thickness value advances top to bottom, left to right, or right to left.
An array of characters which contains the locale of the text that the underline is being drawn under. For example, in vertical text, the underline belongs on the left for Chinese but on the right for Japanese.
The measuring mode can be useful to the renderer to determine how underlines are rendered, such as rounding the thickness to a whole pixel in GDI-compatible modes.
Specifies how the alpha value of a bitmap or render target should be treated.
+The
The alpha value might not be meaningful.
The alpha value has been premultiplied. Each color is first scaled by the alpha value. The alpha value itself is the same in both straight and premultiplied alpha. Typically, no color channel value is greater than the alpha channel value. If a color channel value in a premultiplied format is greater than the alpha channel, the standard source-over blending math results in an additive blend.
The alpha value has not been premultiplied. The alpha channel indicates the transparency of the color.
The alpha value is ignored.
Specifies how the edges of nontext primitives are rendered.
+Edges are antialiased using the Direct2D per-primitive method of high-quality antialiasing.
Objects are aliased in most cases. Objects are antialiased only when they are drawn to a render target created by the CreateDxgiSurfaceRenderTarget method and Direct3D multisampling has been enabled on the backing DirectX Graphics Infrastructure (DXGI) surface.
Specifies whether an arc should be greater than 180 degrees.
+An arc's sweep should be 180 degrees or less.
An arc's sweep should be 180 degrees or greater.
Specifies the algorithm that is used when images are scaled or rotated.
Note: Starting in Windows 8, more interpolation modes are available. See D2D1_INTERPOLATION_MODE for more info.
+ To stretch an image, each pixel in the original image must be mapped to a group of pixels in the larger image. To shrink an image, groups of pixels in the original image must be mapped to single pixels in the smaller image. The effectiveness of the algorithms that perform these mappings determines the quality of a scaled image. Algorithms that produce higher-quality scaled images tend to require more processing time.
Describes the shape at the end of a line or segment.
+The following illustration shows the available cap styles for lines or segments. The red portion of the line shows the extra area added by the line cap setting.
+A cap that does not extend past the last point of the line. Comparable to cap used for objects other than lines.
Half of a square that has a length equal to the line thickness.
A semicircle that has a diameter equal to the line thickness.
An isosceles right triangle whose hypotenuse is equal in length to the thickness of the line.
Specifies the different methods by which two geometries can be combined.
+The following illustration shows the different geometry combine modes.
+The two regions are combined by taking the union of both. Given two geometries, A and B, the resulting geometry is geometry A + geometry B.
The two regions are combined by taking their intersection. The new area consists of the overlapping region between the two geometries.
The two regions are combined by taking the area that exists in the first region but not the second and the area that exists in the second region but not the first. Given two geometries, A and B, the new region consists of (A-B) + (B-A).
The second region is excluded from the first. Given two geometries, A and B, the area of geometry B is removed from the area of geometry A, producing a region that is A-B.
Specifies additional features supportable by a compatible render target when it is created. This enumeration allows a bitwise combination of its member values.
+Use this enumeration when creating a compatible render target with the CreateCompatibleRenderTarget method. For more information about compatible render targets, see the Render Targets Overview.
The
The render target supports no additional features.
The render target supports interoperability with the Windows Graphics Device Interface (GDI).
Describes the sequence of dashes and gaps in a stroke.
+The following illustration shows several available dash styles. For more information, see the Stroke Style Example.
+A solid line with no breaks.
A dash followed by a gap of equal length. The dash and the gap are each twice as long as the stroke thickness.
The equivalent dash array for
A dot followed by a longer gap.
The equivalent dash array for
A dash, followed by a gap, followed by a dot, followed by another gap.
The equivalent dash array for
A dash, followed by a gap, followed by a dot, followed by another gap, followed by another dot, followed by another gap.
The equivalent dash array for
The dash pattern is specified by an array of floating-point values.
Indicates the type of information provided by the Direct2D Debug Layer.
+To receive debugging messages, you must install the Direct2D Debug Layer.
+Specifies how a device context is initialized for GDI rendering when it is retrieved from the render target.
+Use this enumeration with the
The current contents of the render target are copied to the device context when it is initialized.
The device context is cleared to transparent black when it is initialized.
Specifies whether text snapping is suppressed or clipping to the layout rectangle is enabled. This enumeration allows a bitwise combination of its member values.
+Text is not vertically snapped to pixel boundaries. This setting is recommended for text that is being animated.
Text is clipped to the layout rectangle.
Text is vertically snapped to pixel boundaries and is not clipped to the layout rectangle.
Specifies how a brush paints areas outside of its normal content area.
+For an
For an example, see the Draw Extend Mode Example.
+Repeat the edge pixels of the brush's content for all regions outside the normal content area.
Repeat the brush's content.
The same as
Specifies whether Direct2D provides synchronization for an
When you create a factory, you can specify whether it is multithreaded or singlethreaded. A singlethreaded factory provides no serialization against any other single threaded instance within Direct2D, so this mechanism provides a very large degree of scaling on the CPU.
You can also create a multithreaded factory instance. In this case, the factory and all derived objects can be used from any thread, and each render target can be rendered to independently. Direct2D serializes calls to these objects, so a single multithreaded Direct2D instance won't scale as well on the CPU as many single threaded instances. However, the resources can be shared within the multithreaded instance.
Note the qualifier "On the CPU": GPUs generally take advantage of fine-grained parallelism more so than CPUs. For example, multithreaded calls from the CPU might still end up being serialized when being sent to the GPU; however, a whole bank of pixel and vertex shaders will run in parallel to perform the rendering.
+Describes the minimum DirectX support required for hardware rendering by a render target.
+Direct2D determines whether the video card provides adequate hardware rendering support.
The video card must support DirectX 9.
The video card must support DirectX 10.
Indicates whether a specific
Note: A hollow figure is not filled and consists only of an outline.
+Indicates whether a specific
Specifies how the intersecting areas of geometries or figures are combined to form the area of the composite geometry.
+Use the
Direct2D fills the interior of a path by using one of the two fill modes specified by this enumeration:
To see the difference between the winding and alternate fill modes, assume that you have four circles with the same center and a different radius, as shown in the following illustration. The first one has the radius of 25, the second 50, the third 75, and the fourth 100.
The following illustration shows the shape filled by using the alternate fill mode. Notice that the center and third ring are not filled. This is because a ray drawn from any point in either of those two rings passes through an even number of segments.
The following illustration explains this process.
The following illustration shows how the same shape is filled when the winding fill mode is specified.
Notice that all the rings are filled. This is because all the segments run in the same direction, so a ray drawn from any point will cross one or more segments, and the sum of the crossings will not equal zero.
The following illustration explains this process. The red arrows represent the direction in which the segments are drawn and the black arrow represents an arbitrary ray that runs from a point in the innermost ring. Starting with a value of zero, for each segment that the ray crosses, a value of one is added for every clockwise intersection. All points lie in the fill region in this illustration, because the count does not equal zero.
+Determines whether a point is in the fill region by drawing a ray from that point to infinity in any direction, and then counting the number of path segments within the given shape that the ray crosses. If this number is odd, the point is in the fill region; if even, the point is outside the fill region.
Determines whether a point is in the fill region of the path by drawing a ray from that point to infinity in any direction, and then examining the places where a segment of the shape crosses the ray. Starting with a count of zero, add one each time a segment crosses the ray from left to right and subtract one each time a path segment crosses the ray from right to left, as long as left and right are seen from the perspective of the ray. After counting the crossings, if the result is zero, then the point is outside the path. Otherwise, it is inside the path.
Specifies which gamma is used for interpolation.
+Interpolating in a linear gamma space (
The first gradient is interpolated linearly in the space of the render target (sRGB in this case), and one can see the dark bands between each color. The second gradient uses a gamma-correct linear interpolation, and thus does not exhibit the same variations in brightness.
+Interpolation is performed in the standard RGB (sRGB) gamma.
Interpolation is performed in the linear-gamma color space.
Describes how one geometry object is spatially related to another geometry object.
+The relationship between the two geometries cannot be determined. This value is never returned by any D2D method.
The two geometries do not intersect at all.
The instance geometry is entirely contained by the passed-in geometry.
The instance geometry entirely contains the passed-in geometry.
The two geometries overlap but neither completely contains the other.
Specifies how a geometry is simplified to an
Specifies options that can be applied when a layer resource is applied to create a layer.
Note: Starting in Windows 8, the
ClearType antialiasing must use the current contents of the render target to blend properly. When a pushed layer requests initializing for ClearType, Direct2D copies the current contents of the render target into the layer so that ClearType antialiasing can be performed. Rendering ClearType text into a transparent layer does not produce the desired results.
A small performance hit from re-copying content occurs when
Describes the shape that joins two lines or segments.
+ A miter limit affects how sharp miter joins are allowed to be. If the line join style is
The following illustration shows different line join settings for the same stroked path geometry. For more information, see Stroke Style Example.
+Regular angular vertices.
Beveled vertices.
Rounded vertices.
Regular angular vertices unless the join would extend beyond the miter limit; otherwise, beveled vertices.
Indicates the measuring method used for text layout.
+Specifies that text is measured using glyph ideal metrics whose values are independent of the current display resolution.
Specifies that text is measured using glyph display-compatible metrics whose values are tuned for the current display resolution.
Specifies that text is measured using the same glyph display metrics as text measured by GDI using a font created with CLEARTYPE_NATURAL_QUALITY.
Describes whether an opacity mask contains graphics or text. Direct2D uses this information to determine which gamma space to use when blending the opacity mask.
+The opacity mask contains graphics. The opacity mask is blended in the gamma 2.2 color space.
The opacity mask contains non-GDI text. The gamma space used for blending is obtained from the render target's text rendering parameters. (
The opacity mask contains text rendered using the GDI-compatible rendering mode. The opacity mask is blended using the gamma for GDI rendering.
Indicates whether a segment should be stroked and whether the join between this segment and the previous one should be smooth. This enumeration allows a bitwise combination of its member values.
+The segment is joined as specified by the
The segment is not stroked.
The segment is always joined with the one preceding it using a round line join, regardless of which
Describes how a render target behaves when it presents its content. This enumeration allows a bitwise combination of its member values.
+The render target waits until the display refreshes to present and discards the frame upon presenting.
The render target does not discard the frame upon presenting.
The render target does not wait until the display refreshes to present.
Describes whether a render target uses hardware or software rendering, or if Direct2D should select the rendering mode.
+Not every render target supports hardware rendering. For more information, see the Render Targets Overview.
+The render target uses hardware rendering, if available; otherwise, it uses software rendering.
The render target uses software rendering only.
The render target uses hardware rendering only.
Describes how a render target is remoted and whether it should be GDI-compatible. This enumeration allows a bitwise combination of its member values.
+The render target attempts to use Direct3D command-stream remoting and uses bitmap remoting if stream remoting fails. The render target is not GDI-compatible.
The render target renders content locally and sends it to the terminal services client as a bitmap.
The render target can be used efficiently with GDI.
Defines the direction that an elliptical arc is drawn.
+Arcs are drawn in a counterclockwise (negative-angle) direction.
Arcs are drawn in a clockwise (positive-angle) direction.
Describes the antialiasing mode used for drawing text.
+This enumeration is used with the SetTextAntialiasMode of an
By default, Direct2D renders text in ClearType mode. Factors that can downgrade the default quality to grayscale or aliased:
Use the system default. See Remarks.
Use ClearType antialiasing.
Use grayscale antialiasing.
Do not use antialiasing.
Describes whether a window is occluded.
+If the window was occluded the last time EndDraw was called, the next time the render target calls CheckWindowState, it returns
The window is not occluded.
The window is occluded.
Issues drawing commands to a GDI device context.
+Binds the render target to the device context to which it issues drawing commands.
+The device context to which the render target issues drawing commands.
The dimensions of the handle to a device context (
If this method succeeds, it returns
Before you can render with the DC render target, you must use its BindDC method to associate it with a GDI DC. You do this each time you use a different DC, or the size of the area you want to draw to changes.
+Represents the drawing state of a render target: the antialiasing mode, transform, tags, and text-rendering options.
+Retrieves the antialiasing mode, transform, and tags portion of the drawing state.
+When this method returns, contains the antialiasing mode, transform, and tags portion of the drawing state. You must allocate storage for this parameter.
Specifies the antialiasing mode, transform, and tags portion of the drawing state.
+The antialiasing mode, transform, and tags portion of the drawing state.
Specifies the text-rendering configuration of the drawing state.
+The text-rendering configuration of the drawing state, or
Retrieves the text-rendering configuration of the drawing state.
+When this method returns, contains the address of a reference to an
Retrieves or sets the antialiasing mode, transform, and tags portion of the drawing state.
+Retrieves or sets the text-rendering configuration of the drawing state.
+Represents an ellipse.
+Represents a geometry resource and defines a set of helper methods for manipulating and measuring geometric shapes. Interfaces that inherit from
There are several types of Direct2D geometry objects: a simple geometry (
Direct2D geometries enable you to describe two-dimensional figures and also offer many uses, such as defining hit-test regions, clip regions, and even animation paths.
Direct2D geometries are immutable and device-independent resources created by
Retrieves the bounds of the geometry.
+The transform to apply to this geometry before calculating its bounds, or
When this method returns, contains the bounds of this geometry. If the bounds are empty, this parameter will be a rect where bounds.left > bounds.right. You must allocate storage for this parameter.
Gets the bounds of the geometry after it has been widened by the specified stroke width and style and transformed by the specified matrix.
+The amount by which to widen the geometry by stroking its outline.
The style of the stroke that widens the geometry.
A transform to apply to the geometry after the geometry is transformed and after the geometry has been stroked.
When this method returns, contains the bounds of the widened geometry. You must allocate storage for this parameter.
When this method returns, contains the bounds of the widened geometry. You must allocate storage for this parameter.
Determines whether the geometry's stroke contains the specified point given the specified stroke thickness, style, and transform.
+The point to test for containment.
The thickness of the stroke to apply.
The style of stroke to apply.
The transform to apply to the stroked geometry.
When this method returns, contains a boolean value set to true if the geometry's stroke contains the specified point; otherwise, false. You must allocate storage for this parameter.
When this method returns, contains a boolean value set to true if the geometry's stroke contains the specified point; otherwise, false. You must allocate storage for this parameter.
Indicates whether the area filled by the geometry would contain the specified point given the specified flattening tolerance.
+The point to test.
The transform to apply to the geometry prior to testing for containment, or
The numeric accuracy with which the precise geometric path and path intersection is calculated. Points missing the fill by less than the tolerance are still considered inside. Smaller values produce more accurate results but cause slower execution.
When this method returns, contains a
Describes the intersection between this geometry and the specified geometry. The comparison is performed by using the specified flattening tolerance.
+The geometry to test.
The transform to apply to inputGeometry, or
The maximum error allowed when constructing a polygonal approximation of the geometry. No point in the polygonal representation will diverge from the original geometry by more than the flattening tolerance. Smaller values produce more accurate results but cause slower execution.
When this method returns, contains a reference to a value that describes how this geometry is related to inputGeometry. You must allocate storage for this parameter.
When interpreting the returned relation value, it is important to remember that the member
For more information about how to interpret other possible return values, see
Creates a simplified version of the geometry that contains only lines and (optionally) cubic Bezier curves and writes the result to an
If this method succeeds, it returns
Creates a set of clockwise-wound triangles that cover the geometry after it has been transformed using the specified matrix and flattened using the default tolerance.
+The transform to apply to this geometry.
The
The
If this method succeeds, it returns
Combines this geometry with the specified geometry and stores the result in an
If this method succeeds, it returns
Computes the outline of the geometry and writes the result to an
If this method succeeds, it returns
The Outline method allows the caller to produce a geometry with an equivalent fill to the input geometry, with the following additional properties:
Additionally, the Outline method can be useful in removing redundant portions of said geometries to simplify complex geometries. It can also be useful in combination with
Computes the area of the geometry after it has been transformed by the specified matrix and flattened using the specified tolerance.
+The transform to apply to this geometry before computing its area, or
The maximum error allowed when constructing a polygonal approximation of the geometry. No point in the polygonal representation will diverge from the original geometry by more than the flattening tolerance. Smaller values produce more accurate results but cause slower execution.
When this method returns, contains a reference to the area of the transformed, flattened version of this geometry. You must allocate storage for this parameter.
Calculates the point and tangent vector at the specified distance along the geometry after it has been transformed by the specified matrix and flattened using the specified tolerance.
+The distance along the geometry of the point and tangent to find. If this distance is less than 0, this method calculates the first point in the geometry. If this distance is greater than the length of the geometry, this method calculates the last point in the geometry.
The transform to apply to the geometry before calculating the specified point and tangent, or
The maximum error allowed when constructing a polygonal approximation of the geometry. No point in the polygonal representation will diverge from the original geometry by more than the flattening tolerance. Smaller values produce more accurate results but cause slower execution.
When this method returns, contains a reference to the tangent vector at the specified distance along the geometry. If the geometry is empty, this vector contains NaN as its x and y values. You must allocate storage for this parameter.
The location at the specified distance along the geometry. If the geometry is empty, this point contains NaN as its x and y values.
Widens the geometry by the specified stroke and writes the result to an
If this method succeeds, it returns
Gets the
Gets the
Creates Direct2D resources.
+The
A factory defines a set of CreateResource methods that can produce the following drawing resources:
To create an
Forces the factory to refresh any system defaults that it might have changed since factory creation.
+If this method succeeds, it returns
You should call this method before calling the GetDesktopDpi method, to ensure that the system DPI is current.
+Retrieves the current desktop dots per inch (DPI). To refresh this value, call ReloadSystemMetrics.
+Use this method to obtain the system DPI when setting physical pixel values, such as when you specify the size of a window.
+ Creates an
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Geometry groups are a convenient way to group several geometries simultaneously so all figures of several distinct geometries are concatenated into one. To create a
Creates an
If this method succeeds, it returns
Geometry groups are a convenient way to group several geometries simultaneously so all figures of several distinct geometries are concatenated into one. To create a
Transforms the specified geometry and stores the result as an
If this method succeeds, it returns
Like other resources, a transformed geometry inherits the resource space and threading policy of the factory that created it. This object is immutable.
When stroking a transformed geometry with the DrawGeometry method, the stroke width is not affected by the transform applied to the geometry. The stroke width is only affected by the world transform.
+Creates an empty
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates an
If this method succeeds, it returns
Creates a render target that renders to a Microsoft Windows Imaging Component (WIC) bitmap.
+The bitmap that receives the rendering output of the render target.
The rendering mode, pixel format, remoting options, DPI information, and the minimum DirectX support required for hardware rendering. For information about supported pixel formats, see Supported Pixel Formats and Alpha Modes.
When this method returns, contains the address of the reference to the
If this method succeeds, it returns
You must use
Your application should create render targets once and hold onto them for the life of the application or until the
Note: This method isn't supported on Windows Phone and will fail when called on a device with error code 0x8899000b ("There is no hardware rendering device available for this operation"). Because the Windows Phone Emulator supports WARP rendering, this method will fail when called on the emulator with a different error code, 0x88982f80 (wincodec_err_unsupportedpixelformat).
+Creates an
If this method succeeds, it returns
When you create a render target and hardware acceleration is available, you allocate resources on the computer's GPU. By creating a render target once and retaining it as long as possible, you gain performance benefits. Your application should create render targets once and hold onto them for the life of the application or until the
Creates a render target that draws to a DirectX Graphics Infrastructure (DXGI) surface.
+The
The rendering mode, pixel format, remoting options, DPI information, and the minimum DirectX support required for hardware rendering. For information about supported pixel formats, see Supported Pixel Formats and Alpha Modes.
When this method returns, contains the address of the reference to the
If this method succeeds, it returns
To write to a Direct3D surface, you obtain an
A DXGI surface render target is a type of
The DXGI surface render target and the DXGI surface must use the same DXGI format. If you specify the DXGI_FORMAT_UNKNOWN format when you create the render target, it will automatically use the surface's format.
The DXGI surface render target does not perform DXGI surface synchronization.
For more information about creating and using DXGI surface render targets, see the Direct2D and Direct3D Interoperability Overview.
To work with Direct2D, the Direct3D device that provides the
When you create a render target and hardware acceleration is available, you allocate resources on the computer's GPU. By creating a render target once and retaining it as long as possible, you gain performance benefits. Your application should create render targets once and hold onto them for the life of the application or until the render target's EndDraw method returns the
Creates a render target that draws to a Windows Graphics Device Interface (GDI) device context.
+The rendering mode, pixel format, remoting options, DPI information, and the minimum DirectX support required for hardware rendering. To enable the device context (DC) render target to work with GDI, set the DXGI format to
When this method returns, dcRenderTarget contains the address of the reference to the
If this method succeeds, it returns
Before you can render with a DC render target, you must use the render target's BindDC method to associate it with a GDI DC. Do this for each different DC and whenever there is a change in the size of the area you want to draw to.
To enable the DC render target to work with GDI, set the render target's DXGI format to
Your application should create render targets once and hold on to them for the life of the application or until the render target's EndDraw method returns the
Represents a composite geometry, composed of other
Geometry groups are a convenient way to group several geometries simultaneously so all figures of several distinct geometries are concatenated into one.
+Indicates how the intersecting areas of the geometries contained in this geometry group are combined.
+A value that indicates how the intersecting areas of the geometries contained in this geometry group are combined.
Indicates the number of geometry objects in the geometry group.
+The number of geometries in the
Retrieves the geometries in the geometry group.
+When this method returns, contains the address of a reference to an array of geometries to be filled by this method. The length of the array is specified by the geometryCount parameter. If the array is
A value indicating the number of geometries to return in the geometries array. If this value is less than the number of geometries in the geometry group, the remaining geometries are omitted. If this value is larger than the number of geometries in the geometry group, the extra geometries are set to
The returned geometries are referenced and counted, and the caller must release them.
+Indicates how the intersecting areas of the geometries contained in this geometry group are combined.
+Indicates the number of geometry objects in the geometry group.
+Describes a geometric path that can contain lines, arcs, cubic Bezier curves, and quadratic Bezier curves.
+The
A geometry sink consists of one or more figures. Each figure is made up of one or more line, curve, or arc segments. To create a figure, call the BeginFigure method, specify the figure's start point, and then use its Add methods (such as AddLine and AddBezier) to add segments. When you are finished adding segments, call the EndFigure method. You can repeat this sequence to create additional figures. When you are finished creating figures, call the Close method.
+Describes a geometric path that does not contain quadratic bezier curves or arcs.
+A geometry sink consists of one or more figures. Each figure is made up of one or more line or Bezier curve segments. To create a figure, call the BeginFigure method and specify the figure's start point, then use AddLines and AddBeziers to add line and Bezier segments. When you are finished adding segments, call the EndFigure method. You can repeat this sequence to create additional figures. When you are finished creating figures, call the Close method.
To create geometry paths that can contain arcs and quadratic Bezier curves, use an
Describes a geometric path that can contain lines, arcs, cubic Bezier curves, and quadratic Bezier curves.
+The
A geometry sink consists of one or more figures. Each figure is made up of one or more line, curve, or arc segments. To create a figure, call the BeginFigure method, specify the figure's start point, and then use its Add methods (such as AddLine and AddBezier) to add segments. When you are finished adding segments, call the EndFigure method. You can repeat this sequence to create additional figures. When you are finished creating figures, call the Close method.
+Describes a geometric path that does not contain quadratic bezier curves or arcs.
+A geometry sink consists of one or more figures. Each figure is made up of one or more line or Bezier curve segments. To create a figure, call the BeginFigure method and specify the figure's start point, then use AddLines and AddBeziers to add line and Bezier segments. When you are finished adding segments, call the EndFigure method. You can repeat this sequence to create additional figures. When you are finished creating figures, call the Close method.
To create geometry paths that can contain arcs and quadratic Bezier curves, use an
Specifies the method used to determine which points are inside the geometry described by this geometry sink and which points are outside.
+The method used to determine whether a given point is part of the geometry.
The fill mode defaults to
Specifies stroke and join options to be applied to new segments added to the geometry sink.
+Stroke and join options to be applied to new segments added to the geometry sink.
After this method is called, the specified segment flags are applied to each segment subsequently added to the sink. The segment flags are applied to every additional segment until this method is called again and a different set of segment flags is specified.
+Starts a new figure at the specified point.
+The point at which to begin the new figure.
Whether the new figure should be hollow or filled.
If this method is called while a figure is currently in progress, the interface is invalidated and all future methods will fail.
+Creates a sequence of lines using the specified points and adds them to the geometry sink.
+A reference to an array of one or more points that describe the lines to draw. A line is drawn from the geometry sink's current point (the end point of the last segment drawn or the location specified by BeginFigure) to the first point in the array. If the array contains additional points, a line is drawn from the first point to the second point in the array, from the second point to the third point, and so on.
The number of points in the points array.
Creates a sequence of cubic Bezier curves and adds them to the geometry sink.
+A reference to an array of Bezier segments that describes the Bezier curves to create. A curve is drawn from the geometry sink's current point (the end point of the last segment drawn or the location specified by BeginFigure) to the end point of the first Bezier segment in the array. If the array contains additional Bezier segments, each subsequent Bezier segment uses the end point of the preceding Bezier segment as its start point.
The number of Bezier segments in the beziers array.
Ends the current figure; optionally, closes it.
+A value that indicates whether the current figure is closed. If the figure is closed, a line is drawn between the current point and the start point specified by BeginFigure.
Calling this method without a matching call to BeginFigure places the geometry sink in an error state; subsequent calls are ignored, and the overall failure will be returned when the Close method is called.
+Closes the geometry sink, indicates whether it is in an error state, and resets the sink's error state.
+If this method succeeds, it returns
Do not close the geometry sink while a figure is still in progress; doing so puts the geometry sink in an error state. For the close operation to be successful, there must be one EndFigure call for each call to BeginFigure.
After calling this method, the geometry sink might not be usable. Direct2D implementations of this interface do not allow the geometry sink to be modified after it is closed, but other implementations might not impose this restriction.
+Creates a line segment between the current point and the specified end point and adds it to the geometry sink.
+The end point of the line to draw.
Creates a cubic Bezier curve between the current point and the specified end point.
+A structure that describes the control points and end point of the Bezier curve to add.
Creates a quadratic Bezier curve between the current point and the specified endpoint.
+A structure that describes the control point and the endpoint of the quadratic Bezier curve to add.
Adds a sequence of quadratic Bezier segments as an array in a single call.
+An array of a sequence of quadratic Bezier segments.
A value indicating the number of quadratic Bezier segments in beziers.
Adds a single arc to the path geometry.
+The arc segment to add to the figure.
Represents a collection of
Retrieves the number of gradient stops in the collection.
+The number of gradient stops in the collection.
Copies the gradient stops from the collection into an array of
Gradient stops are copied in order of position, starting with the gradient stop with the smallest position value and progressing to the gradient stop with the largest position value.
+Indicates the gamma space in which the gradient stops are interpolated.
+The gamma space in which the gradient stops are interpolated.
Indicates the behavior of the gradient outside the normalized gradient range.
+The behavior of the gradient outside the [0,1] normalized gradient range.
Retrieves the number of gradient stops in the collection.
+Indicates the gamma space in which the gradient stops are interpolated.
+Indicates the behavior of the gradient outside the normalized gradient range.
+Represents the backing store required to render a layer.
+To create a layer, call the CreateLayer method of the render target where the layer will be used. To draw to a layer, push the layer to the render target stack by calling the PushLayer method. After you have finished drawing to the layer, call the PopLayer method.
Between PushLayer and PopLayer calls, the layer is in use and cannot be used by another render target.
If the size of the layer is not specified, the corresponding PushLayer call determines the minimum layer size, based on the layer content bounds and the geometric mask. The layer resource can be larger than the size required by PushLayer without any rendering artifacts.
If the size of a layer is specified, or if the layer has been used and the required backing store size as calculated during PushLayer is larger than the layer, then the layer resource is expanded on each axis monotonically to ensure that it is large enough. The layer resource never shrinks in size.
+Gets the size of the layer in device-independent pixels.
+The size of the layer in device-independent pixels.
Gets the size of the layer in device-independent pixels.
+Paints an area with a linear gradient.
+An
The start point and end point are described in the brush space and are mapped to the render target when the brush is used. Note the starting and ending coordinates are absolute, not relative to the render target size. A value of (0, 0) maps to the upper-left corner of the render target, while a value of (1, 1) maps one pixel diagonally away from (0, 0). If there is a nonidentity brush transform or render target transform, the brush start point and end point are also transformed.
It is possible to specify a gradient axis that does not completely fill the area that is being painted. When this occurs, the
Sets the starting coordinates of the linear gradient in the brush's coordinate space.
+The starting two-dimensional coordinates of the linear gradient, in the brush's coordinate space.
The start point and end point are described in the brush's space and are mapped to the render target when the brush is used. If there is a non-identity brush transform or render target transform, the brush's start point and end point are also transformed.
+Sets the ending coordinates of the linear gradient in the brush's coordinate space.
+The ending two-dimensional coordinates of the linear gradient, in the brush's coordinate space.
The start point and end point are described in the brush's space and are mapped to the render target when the brush is used. If there is a non-identity brush transform or render target transform, the brush's start point and end point are also transformed.
+Retrieves the starting coordinates of the linear gradient.
+The starting two-dimensional coordinates of the linear gradient, in the brush's coordinate space.
The start point and end point are described in the brush's space and are mapped to the render target when the brush is used. If there is a non-identity brush transform or render target transform, the brush's start point and end point are also transformed.
+Retrieves the ending coordinates of the linear gradient.
+The ending two-dimensional coordinates of the linear gradient, in the brush's coordinate space.
The start point and end point are described in the brush's space and are mapped to the render target when the brush is used. If there is a non-identity brush transform or render target transform, the brush's start point and end point are also transformed.
+ Retrieves the
Retrieves or sets the starting coordinates of the linear gradient.
+The start point and end point are described in the brush's space and are mapped to the render target when the brush is used. If there is a non-identity brush transform or render target transform, the brush's start point and end point are also transformed.
+Retrieves or sets the ending coordinates of the linear gradient.
+The start point and end point are described in the brush's space and are mapped to the render target when the brush is used. If there is a non-identity brush transform or render target transform, the brush's start point and end point are also transformed.
+ Retrieves the
Represents a set of vertices that form a list of triangles.
+Opens the mesh for population.
+When this method returns, contains a reference to a reference to an
If this method succeeds, it returns
Represents a complex shape that may be composed of arcs, curves, and lines.
+An
Retrieves the geometry sink that is used to populate the path geometry with figures and segments.
+When this method returns, geometrySink contains the address of a reference to the geometry sink that is used to populate the path geometry with figures and segments. This parameter is passed uninitialized.
Because path geometries are immutable and can only be populated once, it is an error to call Open on a path geometry more than once.
Note that the fill mode defaults to
Copies the contents of the path geometry to the specified
If this method succeeds, it returns
Retrieves the number of segments in the path geometry.
+A reference that receives the number of segments in the path geometry when this method returns. You must allocate storage for this parameter.
If this method succeeds, it returns
Retrieves the number of figures in the path geometry.
+A reference that receives the number of figures in the path geometry when this method returns. You must allocate storage for this parameter.
If this method succeeds, it returns
Retrieves the number of segments in the path geometry.
+Retrieves the number of figures in the path geometry.
+Paints an area with a radial gradient.
+The
The brush maps the gradient stop position 0.0f of the gradient origin, and the position 1.0f is mapped to the ellipse boundary. When the gradient origin is within the ellipse, the contents of the ellipse enclose the entire [0, 1] range of the brush gradient stops. If the gradient origin is outside the bounds of the ellipse, the brush still works, but its gradient is not well-defined.
The start point and end point are described in the brush space and are mapped to the render target when the brush is used. Note the starting and ending coordinates are absolute, not relative to the render target size. A value of (0, 0) maps to the upper-left corner of the render target, while a value of (1, 1) maps just one pixel diagonally away from (0, 0). If there is a nonidentity brush transform or render target transform, the brush ellipse and gradient origin are also transformed.
It is possible to specify an ellipse that does not completely fill the area being painted. When this occurs, the
Specifies the center of the gradient ellipse in the brush's coordinate space.
+The center of the gradient ellipse, in the brush's coordinate space.
Specifies the offset of the gradient origin relative to the gradient ellipse's center.
+The offset of the gradient origin from the center of the gradient ellipse.
Specifies the x-radius of the gradient ellipse, in the brush's coordinate space.
+The x-radius of the gradient ellipse. This value is in the brush's coordinate space.
Specifies the y-radius of the gradient ellipse, in the brush's coordinate space.
+The y-radius of the gradient ellipse. This value is in the brush's coordinate space.
Retrieves the center of the gradient ellipse.
+The center of the gradient ellipse. This value is expressed in the brush's coordinate space.
Retrieves the offset of the gradient origin relative to the gradient ellipse's center.
+The offset of the gradient origin from the center of the gradient ellipse. This value is expressed in the brush's coordinate space.
Retrieves the x-radius of the gradient ellipse.
+The x-radius of the gradient ellipse. This value is expressed in the brush's coordinate space.
Retrieves the y-radius of the gradient ellipse.
+The y-radius of the gradient ellipse. This value is expressed in the brush's coordinate space.
Retrieves the
Retrieves or sets the center of the gradient ellipse.
+Retrieves or sets the offset of the gradient origin relative to the gradient ellipse's center.
+Retrieves or sets the x-radius of the gradient ellipse.
+Retrieves or sets the y-radius of the gradient ellipse.
+Retrieves the
Describes a two-dimensional rectangle.
+Retrieves the rectangle that describes the rectangle geometry's dimensions.
+Contains a reference to a rectangle that describes the rectangle geometry's dimensions when this method returns. You must allocate storage for this parameter.
Retrieves the rectangle that describes the rectangle geometry's dimensions.
+Describes a rounded rectangle.
+Retrieves a rounded rectangle that describes this rounded rectangle geometry.
+A reference that receives a rounded rectangle that describes this rounded rectangle geometry. You must allocate storage for this parameter.
Retrieves a rounded rectangle that describes this rounded rectangle geometry.
+Paints an area with a solid color.
+Specifies the color of this solid-color brush.
+The color of this solid-color brush.
To help create colors, Direct2D provides the ColorF class. It offers several helper methods for creating colors and provides a set of predefined colors.
+Retrieves the color of the solid color brush.
+The color of this solid color brush.
Retrieves or sets the color of the solid color brush.
+Describes the caps, miter limit, line join, and dash information for a stroke.
+Retrieves the type of shape used at the beginning of a stroke.
+The type of shape used at the beginning of a stroke.
Retrieves the type of shape used at the end of a stroke.
+The type of shape used at the end of a stroke.
Gets a value that specifies how the ends of each dash are drawn.
+A value that specifies how the ends of each dash are drawn.
Retrieves the limit on the ratio of the miter length to half the stroke's thickness.
+A positive number greater than or equal to 1.0f that describes the limit on the ratio of the miter length to half the stroke's thickness.
Retrieves the type of joint used at the vertices of a shape's outline.
+A value that specifies the type of joint used at the vertices of a shape's outline.
Retrieves a value that specifies how far in the dash sequence the stroke will start.
+A value that specifies how far in the dash sequence the stroke will start.
Gets a value that describes the stroke's dash pattern.
+A value that describes the predefined dash pattern used, or
If a custom dash style is specified, the dash pattern is described by the dashes array, which can be retrieved by calling the GetDashes method.
+Retrieves the number of entries in the dashes array.
+The number of entries in the dashes array if the stroke is dashed; otherwise, 0.
Copies the dash pattern to the specified array.
+A reference to an array that will receive the dash pattern. The array must be able to contain at least as many elements as specified by dashesCount. You must allocate storage for this array.
The number of dashes to copy. If this value is less than the number of dashes in the stroke style's dashes array, the returned dashes are truncated to dashesCount. If this value is greater than the number of dashes in the stroke style's dashes array, the extra dashes are set to 0.0f. To obtain the actual number of dashes in the stroke style's dashes array, use the GetDashesCount method.
The dashes are specified in units that are a multiple of the stroke width, with subsequent members of the array indicating the dashes and gaps between dashes: the first entry indicates a filled dash, the second a gap, and so on.
+Retrieves the type of shape used at the beginning of a stroke.
+Retrieves the type of shape used at the end of a stroke.
+Gets a value that specifies how the ends of each dash are drawn.
+Retrieves the limit on the ratio of the miter length to half the stroke's thickness.
+Retrieves the type of joint used at the vertices of a shape's outline.
+Retrieves a value that specifies how far in the dash sequence the stroke will start.
+Gets a value that describes the stroke's dash pattern.
+If a custom dash style is specified, the dash pattern is described by the dashes array, which can be retrieved by calling the GetDashes method.
+Retrieves the number of entries in the dashes array.
+Populates an
Populates an
Copies the specified triangles to the sink.
+An array of
The number of triangles to copy from the triangles array.
Closes the sink and returns its error status.
+If this method succeeds, it returns
Represents a geometry that has been transformed.
+Using an
Retrieves the source geometry of this transformed geometry object.
+When this method returns, contains a reference to a reference to the source geometry for this transformed geometry object. This parameter is passed uninitialized.
Retrieves the matrix used to transform the
Retrieves the source geometry of this transformed geometry object.
+Retrieves the matrix used to transform the
Renders drawing instructions to a window.
+As is the case with other render targets, you must call BeginDraw before issuing drawing commands. After you've finished drawing, call EndDraw to indicate that drawing is finished and to release access to the buffer backing the render target. For
A hardware render target's back-buffer is the size specified by GetPixelSize. If EndDraw presents the buffer, this bitmap is stretched to cover the surface where it is presented: the entire client area of the window. This stretch is performed using bilinear filtering if the render target is rendering in hardware and using nearest-neighbor filtering if the rendering target is using software. (Typically, an application will call Resize to ensure the pixel size of the render target and the pixel size of the destination match, and no scaling is necessary, though this is not a requirement.)
In the case where a window straddles adapters, Direct2D ensures that the portion of the off-screen render target is copied from the adapter where rendering is occurring to the adapter that needs to display the contents. If the adapter a render target is on has been removed or the driver upgraded while the application is running, this is returned as an error in the EndDraw call. In this case, the application should create a new render target and resources as necessary.
+Indicates whether the
A value that indicates whether the
Note: If the window was occluded the last time that EndDraw was called, the next time that the render target calls CheckWindowState, it will return
Changes the size of the render target to the specified pixel size.
+The new size of the render target in device pixels.
If this method succeeds, it returns
After this method is called, the contents of the render target's back-buffer are not defined, even if the
Returns the
The
Returns the
Describes an elliptical arc between two points.
+The end point of the arc.
The x-radius and y-radius of the arc.
A value that specifies how many degrees in the clockwise direction the ellipse is rotated relative to the current coordinate system.
A value that specifies whether the arc sweep is clockwise or counterclockwise.
A value that specifies whether the given arc is larger than 180 degrees.
Represents a cubic bezier segment drawn between two points.
+A cubic Bezier curve is defined by four points: a start point, an end point (point3), and two control points (point1 and point2). A Bezier segment does not contain a property for the starting point of the curve; it defines only the end point. The beginning point of the curve is the current point of the path to which the Bezier curve is added.
The two control points of a cubic Bezier curve behave like magnets, attracting portions of what would otherwise be a straight line toward themselves and producing a curve. The first control point, point1, affects the beginning portion of the curve; the second control point, point2, affects the ending portion of the curve.
Note: The curve doesn't necessarily pass through either of the control points; each control point moves its portion of the line toward itself, but not through itself.
+The first control point for the Bezier segment.
The second control point for the Bezier segment.
The end point for the Bezier segment.
Describes the extend modes and the interpolation mode of an
Describes the opacity and transformation of a brush.
+This structure is used when creating a brush. For convenience, Direct2D provides the D2D1::BrushProperties function for creating
After creating a brush, you can change its opacity or transform by calling the SetOpacity or SetTransform methods.
+A value between 0.0f and 1.0f, inclusive, that specifies the degree of opacity of the brush.
The transformation that is applied to the brush.
Describes the drawing state of a render target.
+The antialiasing mode for subsequent nontext drawing operations.
The antialiasing mode for subsequent text and glyph drawing operations.
A label for subsequent drawing operations.
A label for subsequent drawing operations.
The transformation to apply to subsequent drawing operations.
Contains the debugging level of an
To enable debugging, you must install the Direct2D Debug Layer.
+Contains the position and color of a gradient stop.
+Gradient stops can be specified in any order if they are at different positions. Two stops may share a position. In this case, the first stop specified is treated as the "low" stop (nearer 0.0f) and subsequent stops are treated as "higher" (nearer 1.0f). This behavior is useful if a caller wants an instant transition in the middle of a stop.
Typically, there are at least two points in a collection, although creation with only one stop is permitted. For example, one point is at position 0.0f, another point is at position 1.0f, and additional points are distributed in the [0, 1] range. Where the gradient progression is beyond the range of [0, 1], the stops are stored, but may affect the gradient.
When drawn, the [0, 1] range of positions is mapped to the brush, in a brush-dependent way. For details, see
Gradient stops with a position outside the [0, 1] range cannot be seen explicitly, but they can still affect the colors produced in the [0, 1] range. For example, a two-stop gradient {0.0f, Black}, {2.0f, White} is indistinguishable visually from {0.0f, Black}, {1.0f, Mid-level gray}. Also, the colors are clamped before interpolation.
+A value that indicates the relative position of the gradient stop in the brush. This value must be in the [0.0f, 1.0f] range if the gradient stop is to be seen explicitly.
The color of the gradient stop.
Contains the
Use this structure when you call the CreateHwndRenderTarget method to create a new
For convenience, Direct2D provides the D2D1::HwndRenderTargetProperties function for creating new
Contains the starting point and endpoint of the gradient axis for an
Use this method when creating new
The following illustration shows how a linear gradient changes as you change its start and end points. For the first gradient, the start point is set to (0,0) and the end point to (150, 50); this creates a diagonal gradient that starts at the upper-left corner and extends to the lower-right corner of the area being painted. When you set the start point to (0, 25) and the end point to (150, 25), a horizontal gradient is created. Similarly, setting the start point to (75, 0) and the end point to (75, 50) creates a vertical gradient. Setting the start point to (0, 50) and the end point to (150, 0) creates a diagonal gradient that starts at the lower-left corner and extends to the upper-right corner of the area being painted.
+Contains the data format and alpha mode for a bitmap or render target.
+For more information about the pixel formats and alpha modes supported by each render target, see Supported Pixel Formats and Alpha Modes.
+A value that specifies the size and arrangement of channels in each pixel.
A value that specifies whether the alpha channel is using pre-multiplied alpha, straight alpha, whether it should be ignored and considered opaque, or whether it is unknown.
Contains the control point and end point for a quadratic Bezier segment.
+The control point of the quadratic Bezier segment.
The end point of the quadratic Bezier segment.
Contains the gradient origin offset and the size and position of the gradient ellipse for an
Different values for center, gradientOriginOffset, radiusX and/or radiusY produce different gradients. The following illustration shows several radial gradients that have different gradient origin offsets, creating the appearance of the light illuminating the circles from different angles.
For convenience, Direct2D provides the D2D1::RadialGradientBrushProperties function for creating new D2D1_RADIAL_GRADIENT_BRUSH structures.
+Contains rendering options (hardware or software), pixel format, DPI information, remoting options, and Direct3D support requirements for a render target.
+Use this structure when creating a render target, or use it with the
As a convenience, Direct2D provides the D2D1::RenderTargetProperties helper function for creating
Not all render targets support hardware rendering. For a list, see the Render Targets Overview.
+A value that specifies whether the render target should force hardware or software rendering. A value of
The pixel format and alpha mode of the render target. You can use the D2D1::PixelFormat function to create a pixel format that specifies that Direct2D should select the pixel format and alpha mode for you. For a list of pixel formats and alpha modes supported by each render target, see Supported Pixel Formats and Alpha Modes.
The horizontal DPI of the render target. To use the default DPI, set dpiX and dpiY to 0. For more information, see the Remarks section.
The vertical DPI of the render target. To use the default DPI, set dpiX and dpiY to 0. For more information, see the Remarks section.
A value that specifies how the render target is remoted and whether it should be GDI-compatible. Set to
A value that specifies the minimum Direct3D feature level required for hardware rendering. If the specified minimum level is not available, the render target uses software rendering if the type member is set to
Contains the dimensions and corner radii of a rounded rectangle.
+Each corner of the rectangle specified by the rect is replaced with a quarter ellipse, with a radius in each direction specified by radiusX and radiusY.
If the radiusX is greater than or equal to half the width of the rectangle, and the radiusY is greater than or equal to one-half the height, the rounded rectangle is an ellipse with the same width and height of the rect.
Even when both radiusX and radiusY are zero, the rounded rectangle is different from a rectangle. When stroked, the corners of the rounded rectangle are roundly joined, not mitered (square).
+The coordinates of the rectangle.
The x-radius for the quarter ellipse that is drawn to replace every corner of the rectangle.
The y-radius for the quarter ellipse that is drawn to replace every corner of the rectangle.
Describes the stroke that outlines a shape.
+The following illustration shows different dashOffset values for the same custom dash style.
+The cap applied to the start of all the open figures in a stroked geometry.
The cap applied to the end of all the open figures in a stroked geometry.
The shape at either end of each dash segment.
A value that describes how segments are joined. This value is ignored for a vertex if the segment flags specify that the segment should have a smooth join.
The limit of the thickness of the join on a mitered corner. This value is always treated as though it is greater than or equal to 1.0f.
A value that specifies whether the stroke has a dash pattern and, if so, the dash style.
A value that specifies an offset in the dash sequence. A positive dash offset value shifts the dash pattern, in units of stroke width, toward the start of the stroked geometry. A negative dash offset value shifts the dash pattern, in units of stroke width, toward the end of the stroked geometry.
Contains the three vertices that describe a triangle.
+The first vertex of a triangle.
The second vertex of a triangle.
The third vertex of a triangle.
The blend-state interface holds a description for blending state that you can bind to the output-merger stage.
+Blending applies a simple function to combine output values from a pixel shader with data in a render target. You have control over how the pixels are blended by using a predefined set of blending operations and preblending operations.
To create a blend-state object, call
Windows Phone 8: This API is supported.
+A device-child interface accesses data used by a device.
+There are several types of device child interfaces, all of which inherit this interface. They include shaders, state objects, and input layouts.
Windows Phone 8: This API is supported.
+Get a reference to the device that created this interface.
+Address of a reference to a device (see
Any returned interfaces will have their reference count incremented by one, so be sure to call IUnknown::Release() on the returned reference(s) before they are freed or else you will have a memory leak.
Windows Phone 8: This API is supported.
+Get application-defined data from a device child.
+Guid associated with the data.
A reference to a variable that on input contains the size, in bytes, of the buffer that pData points to, and on output contains the size, in bytes, of the amount of data that GetPrivateData retrieved.
A reference to a buffer that GetPrivateData fills with data from the device child if pDataSize points to a value that specifies a buffer large enough to hold the data.
This method returns one of the codes described in the topic Direct3D 11 Return Codes.
The data stored in the device child is set by calling
Windows Phone 8: This API is supported.
+Set application-defined data to a device child and associate that data with an application-defined guid.
+Guid associated with the data.
Size of the data.
Pointer to the data to be stored with this device child. If pData is
This method returns one of the following Direct3D 11 Return Codes.
The data stored in the device child with this method can be retrieved with
The debug layer reports memory leaks by outputting a list of object interface references along with their friendly names. The default friendly name is "<unnamed>". You can set the friendly name so that you can determine if the corresponding object interface reference caused the leak. To set the friendly name, use the SetPrivateData method and the
static const char c_szName[] = "My name"; + hr = pContext->SetPrivateData( WKPDID_D3DDebugObjectName, sizeof( c_szName ) - 1, c_szName ); +
Windows Phone 8: This API is supported.
+Associate an
Guid associated with the interface.
Pointer to an
This method returns one of the following Direct3D 11 Return Codes.
When this method is called ::addref() will be called on the
Windows Phone 8: This API is supported.
+Get a reference to the device that created this interface.
+Any returned interfaces will have their reference count incremented by one, so be sure to call ::release() on the returned reference(s) before they are freed or else you will have a memory leak.
Windows?Phone?8: This API is supported.
+Gets the description for blending state that you used to create the blend-state object.
+A reference to a
You use the description for blending state in a call to the
Windows?Phone?8: This API is supported.
+Gets the description for blending state that you used to create the blend-state object.
+You use the description for blending state in a call to the
Windows?Phone?8: This API is supported.
+Describes the blend state that you use in a call to
Here are the default values for blend state.
State | Default Value |
---|---|
AlphaToCoverageEnable | |
IndependentBlendEnable | |
RenderTarget[0].BlendEnable | |
RenderTarget[0].SrcBlend | |
RenderTarget[0].DestBlend | |
RenderTarget[0].BlendOp | |
RenderTarget[0].SrcBlendAlpha | |
RenderTarget[0].DestBlendAlpha | |
RenderTarget[0].BlendOpAlpha | |
RenderTarget[0].RenderTargetWriteMask |
?
Note:
If the driver type is set to
A buffer interface accesses a buffer resource, which is unstructured memory. Buffers typically store vertex or index data.
+There are three types of buffers: vertex, index, or a shader-constant buffer. Create a buffer resource by calling
A buffer must be bound to the pipeline before it can be accessed. Buffers can be bound to the input-assembler stage by calls to
Buffers can be bound to multiple pipeline stages simultaneously for reading. A buffer can also be bound to a single pipeline stage for writing; however, the same buffer cannot be bound for reading and writing simultaneously.
Windows?Phone?8: This API is supported.
+A resource interface provides common actions on all resources.
+You don't directly create a resource interface; instead, you create buffers and textures that inherit from a resource interface. For more info, see How to: Create a Vertex Buffer, How to: Create an Index Buffer, How to: Create a Constant Buffer, and How to: Create a Texture.
Windows?Phone?8: This API is supported.
+Get the type of the resource.
+Pointer to the resource type (see
Windows?Phone?8: This API is supported.
+Set the eviction priority of a resource.
+Eviction priority for the resource, which is one of the following values:
Resource priorities determine which resource to evict from video memory when the system has run out of video memory. The resource will not be lost; it will be removed from video memory and placed into system memory, or possibly placed onto the hard drive. The resource will be loaded back into video memory when it is required.
A resource that is set to the maximum priority,
Changing the priorities of resources should be done carefully. The wrong eviction priorities could be a detriment to performance rather than an improvement.
Windows?Phone?8: This API is supported.
+Get the eviction priority of a resource.
+One of the following values, which specifies the eviction priority for the resource:
Windows?Phone?8: This API is supported.
+Get the type of the resource.
+Windows?Phone?8: This API is supported.
+Get or sets the eviction priority of a resource.
+Windows?Phone?8: This API is supported.
+Get the properties of a buffer resource.
+Pointer to a resource description (see
Windows?Phone?8: This API is supported.
+Get the properties of a buffer resource.
+Windows?Phone?8: This API is supported.
+Describes a buffer resource.
+This structure is used by
In addition to this structure, you can also use the CD3D11_BUFFER_DESC derived structure, which is defined in D3D11.h and behaves like an inherited class, to help create a buffer description.
If the bind flag is
Size of the buffer in bytes.
Identify how the buffer is expected to be read from and written to. Frequency of update is a key factor. The most common value is typically
Identify how the buffer will be bound to the pipeline. Flags (see
CPU access flags (see
Miscellaneous flags (see
The size of each element in the buffer structure (in bytes) when the buffer represents a structured buffer. For more info about structured buffers, see Structured Buffer.
The size value in StructureByteStride must match the size of the format that you use for views of the buffer. For example, if you use a shader resource view (SRV) to read a buffer in a pixel shader, the SRV format size must match the size value in StructureByteStride.
This interface encapsulates an HLSL class.
+This interface is created by calling
Windows?Phone?8: This API is supported.
+Gets the
For more information about using the
Windows?Phone?8: This API is supported.
+Gets a description of the current HLSL class.
+A reference to a
For more information about using the
An instance is not restricted to being used for a single type in a single shader. An instance is flexible and can be used for any shader that used the same type name or instance name when the instance was generated.
An instance does not replace the importance of reflection for a particular shader since a gotten instance will not know its slot location and a created instance only specifies a type name.
Windows?Phone?8: This API is supported.
+Gets the instance name of the current HLSL class.
+The instance name of the current HLSL class.
The length of the pInstanceName parameter.
GetInstanceName will return a valid name only for instances acquired using
For more information about using the
Windows?Phone?8: This API is supported.
+Gets the type of the current HLSL class.
+Type of the current HLSL class.
The length of the pTypeName parameter.
GetTypeName will return a valid name only for instances acquired using
For more information about using the
Windows?Phone?8: This API is supported.
+Gets the
For more information about using the
Windows?Phone?8: This API is supported.
+Gets a description of the current HLSL class.
+For more information about using the
An instance is not restricted to being used for a single type in a single shader. An instance is flexible and can be used for any shader that used the same type name or instance name when the instance was generated.
An instance does not replace the importance of reflection for a particular shader since a gotten instance will not know its slot location and a created instance only specifies a type name.
Windows?Phone?8: This API is supported.
+This interface encapsulates an HLSL dynamic linkage.
+A class linkage object can hold up to 64K gotten instances. A gotten instance is a handle that references a variable name in any shader that is created with that linkage object. When you create a shader with a class linkage object, the runtime gathers these instances and stores them in the class linkage object. For more information about how a class linkage object is used, see Storing Variables and Types for Shaders to Share.
An
Windows?Phone?8: This API is supported.
+Gets the class-instance object that represents the specified HLSL class.
+The name of a class for which to get the class instance.
The index of the class instance.
The address of a reference to an
For more information about using the
A class instance must have at least 1 data member in order to be available for the runtime to use with
Windows?Phone?8: This API is supported.
+Initializes a class-instance object that represents an HLSL class instance.
+The type name of a class to initialize.
Identifies the constant buffer that contains the class data.
The four-component vector offset from the start of the constant buffer where the class data will begin. Consequently, this is not a byte offset.
The texture slot for the first texture; there may be multiple textures following the offset.
The sampler slot for the first sampler; there may be multiple samplers following the offset.
The address of a reference to an
Returns
Instances can be created (or gotten) before or after a shader is created. Use the same shader linkage object to acquire a class instance and create the shader the instance is going to be used in.
For more information about using the
Windows?Phone?8: This API is supported.
+A compute-shader interface manages an executable program (a compute shader) that controls the compute-shader stage.
+The compute-shader interface has no methods; use HLSL to implement your shader functionality. All shaders are implemented from a common set of features referred to as the common-shader core.
To create a compute-shader interface, call
This interface is defined in D3D11.h.
+Describes depth-stencil state.
+Pass a reference to
Depth-stencil state controls how depth-stencil testing is performed by the output-merger stage.
The following table shows the default values of depth-stencil states.
State | Default Value |
---|---|
DepthEnable | TRUE |
DepthWriteMask | |
DepthFunc | |
StencilEnable | |
StencilReadMask | D3D11_DEFAULT_STENCIL_READ_MASK |
StencilWriteMask | D3D11_DEFAULT_STENCIL_WRITE_MASK |
FrontFace.StencilFunc and BackFace.StencilFunc | |
FrontFace.StencilDepthFailOp and BackFace.StencilDepthFailOp | |
FrontFace.StencilPassOp and BackFace.StencilPassOp | |
FrontFace.StencilFailOp and BackFace.StencilFailOp |
?
The formats that support stenciling are
Enable depth testing.
Identify a portion of the depth-stencil buffer that can be modified by depth data (see
A function that compares depth data against existing depth data. The function options are listed in
Enable stencil testing.
Identify a portion of the depth-stencil buffer for reading stencil data.
Identify a portion of the depth-stencil buffer for writing stencil data.
Identify how to use the results of the depth test and the stencil test for pixels whose surface normal is facing towards the camera (see
Identify how to use the results of the depth test and the stencil test for pixels whose surface normal is facing away from the camera (see
Describes rasterizer state.
+Rasterizer state defines the behavior of the rasterizer stage. To create a rasterizer-state object, call
If you do not specify some rasterizer state, the Direct3D runtime uses the following default values for rasterizer state.
State | Default Value |
---|---|
FillMode | Solid |
CullMode | Back |
FrontCounterClockwise | |
DepthBias | 0 |
SlopeScaledDepthBias | 0.0f |
DepthBiasClamp | 0.0f |
DepthClipEnable | TRUE |
ScissorEnable | |
MultisampleEnable | |
AntialiasedLineEnable |
?
Note: For feature levels 9.1, 9.2, 9.3, and 10.0, if you set MultisampleEnable to
Line-rendering algorithm | MultisampleEnable | AntialiasedLineEnable |
---|---|---|
Aliased | ||
Alpha antialiased | TRUE | |
Quadrilateral | TRUE | |
Quadrilateral | TRUE | TRUE |
?
The settings of the MultisampleEnable and AntialiasedLineEnable members apply only to multisample antialiasing (MSAA) render targets (that is, render targets with sample counts greater than 1). Because of the differences in feature-level behavior and as long as you aren't performing any line drawing or don't mind that lines render as quadrilaterals, we recommend that you always set MultisampleEnable to TRUE whenever you render on MSAA render targets.
+Determines the fill mode to use when rendering (see
Indicates triangles facing the specified direction are not drawn (see
Determines if a triangle is front- or back-facing. If this parameter is TRUE, a triangle will be considered front-facing if its vertices are counter-clockwise on the render target and considered back-facing if they are clockwise. If this parameter is
Depth value added to a given pixel. For info about depth bias, see Depth Bias.
Maximum depth bias of a pixel. For info about depth bias, see Depth Bias.
Scalar on a given pixel's slope. For info about depth bias, see Depth Bias.
Enable clipping based on distance.
The hardware always performs x and y clipping of rasterized coordinates. When DepthClipEnable is set to the default (TRUE), the hardware also clips the z value (that is, the hardware performs the last step of the following algorithm). +
0 < w
+ -w <= x <= w (or arbitrarily wider range if implementation uses a guard band to reduce clipping burden)
+ -w <= y <= w (or arbitrarily wider range if implementation uses a guard band to reduce clipping burden)
+ 0 <= z <= w
+
When you set DepthClipEnable to
Enable scissor-rectangle culling. All pixels outside an active scissor rectangle are culled.
Specifies whether to use the quadrilateral or alpha line anti-aliasing algorithm on multisample antialiasing (MSAA) render targets. Set to TRUE to use the quadrilateral line anti-aliasing algorithm and to
Specifies whether to enable line antialiasing; only applies if doing line drawing and MultisampleEnable is
This interface encapsulates methods for measuring GPU performance.
+A counter can be created with
This is a derived class of
Counter data is gathered by issuing an
Counters are best suited for profiling.
For a list of the types of performance counters, see
Windows?Phone?8: This API is supported.
+This interface encapsulates methods for retrieving data from the GPU asynchronously.
+There are three types of asynchronous interfaces, all of which inherit this interface:
Windows?Phone?8: This API is supported.
+Get the size of the data (in bytes) that is output when calling
Size of the data (in bytes) that is output when calling GetData.
Windows?Phone?8: This API is supported.
+Get the size of the data (in bytes) that is output when calling
Windows?Phone?8: This API is supported.
+Get a counter description.
+Pointer to a counter description (see
Windows?Phone?8: This API is supported.
+Get a counter description.
+Windows?Phone?8: This API is supported.
+A debug interface controls debug settings, validates pipeline state and can only be used if the debug layer is turned on.
+This interface is obtained by querying it from the
For more information about the debug layer, see Debug Layer.
Windows?Phone?8: This API is supported.
+Set a bit field of flags that will turn debug features on and off.
+A combination of feature-mask flags that are combined by using a bitwise OR operation. If a flag is present, that feature will be set to on, otherwise the feature will be set to off. For descriptions of the feature-mask flags, see Remarks.
This method returns one of the Direct3D 11 Return Codes.
Note: If you call this API in a Session 0 process, it returns
Setting one of the following feature-mask flags will cause a rendering-operation method (listed below) to do some extra task when called.
Application will wait for the GPU to finish processing the rendering operation before continuing. | |
Runtime will additionally call | |
Runtime will call |
?
These feature-mask flags apply to the following rendering-operation methods:
By setting one of the following feature-mask flags, you can control the behavior of the IDXGIDevice2::OfferResources and IDXGIDevice2::ReclaimResources methods to aid in testing and debugging.
Note: These flags are supported by the Direct3D 11.1 runtime, which is available starting with Windows 8.
D3D11_DEBUG_FEATURE_ALWAYS_DISCARD_OFFERED_RESOURCE (0x8) | When you call IDXGIDevice2::OfferResources to offer resources while this flag is enabled, their content is always discarded. Use this flag to test code paths that regenerate resource content on reclaim. You cannot use this flag in combination with D3D11_DEBUG_FEATURE_NEVER_DISCARD_OFFERED_RESOURCE. |
D3D11_DEBUG_FEATURE_NEVER_DISCARD_OFFERED_RESOURCE (0x10) | When you call IDXGIDevice2::OfferResources to offer resources while this flag is enabled, their content is never discarded. Use this flag to test code paths that do not need to regenerate resource content on reclaim. You cannot use this flag in combination with D3D11_DEBUG_FEATURE_ALWAYS_DISCARD_OFFERED_RESOURCE. |
?
The behavior of the IDXGIDevice2::OfferResources and IDXGIDevice2::ReclaimResources methods depends on system-wide memory pressure. Therefore, the scenario where content is lost and must be regenerated is uncommon for most applications. The preceding new options in the Direct3D debug layer let you simulate that scenario consistently and test code paths.
The following flag is supported by the Direct3D 11.1 runtime.
D3D11_DEBUG_FEATURE_AVOID_BEHAVIOR_CHANGING_DEBUG_AIDS (0x40) | Disables the following default debugging behavior. |
?
When the debug layer is enabled, it performs certain actions to reveal application problems. By setting the D3D11_DEBUG_FEATURE_AVOID_BEHAVIOR_CHANGING_DEBUG_AIDS feature-mask flag, you can enable the debug layer without getting the following default debugging behavior:
The following flag is supported by the Direct3D 11.2 runtime.
D3D11_DEBUG_FEATURE_DISABLE_TILED_RESOURCE_MAPPING_TRACKING_AND_VALIDATION (0x80) | Disables the following default debugging behavior. |
?
By default (that is, without D3D11_DEBUG_FEATURE_DISABLE_TILED_RESOURCE_MAPPING_TRACKING_AND_VALIDATION set), the debug layer validates the proper usage of all tile mappings for tiled resources for bound resources for every operation performed on the device context (for example, draw, copy, and so on). Depending on the size of the tiled resources used (if any), this validation can be processor intensive and slow. Apps might want to initially run with tiled resource tile mapping validation on; then, when they determine that the calling pattern is safe, they can disable the validation by setting D3D11_DEBUG_FEATURE_DISABLE_TILED_RESOURCE_MAPPING_TRACKING_AND_VALIDATION.
If D3D11_DEBUG_FEATURE_DISABLE_TILED_RESOURCE_MAPPING_TRACKING_AND_VALIDATION is set when a tiled resource is created, the debug layer never performs the tracking of tile mapping for that resource for its entire lifetime. Alternatively, if D3D11_DEBUG_FEATURE_DISABLE_TILED_RESOURCE_MAPPING_TRACKING_AND_VALIDATION is set for any given device context method call (like draw or copy calls) involving tiled resources, the debug layer skips all tile mapping validation for the call.
Windows?Phone?8: This API is supported.
+Get a bitfield of flags that indicates which debug features are on or off.
+Mask of feature-mask flags bitwise ORed together. If a flag is present, then that feature will be set to on, otherwise the feature will be set to off. See
Windows?Phone?8: This API is supported.
+Set the number of milliseconds to sleep after
This method returns one of the following Direct3D 11 Return Codes.
Note: If you call this API in a Session 0 process, it returns
The application will only sleep if
Windows?Phone?8: This API is supported.
+Get the number of milliseconds to sleep after
Number of milliseconds to sleep after Present is called.
Value is set with
Windows?Phone?8: This API is supported.
+Sets a swap chain that the runtime will use for automatically calling
This method returns one of the following Direct3D 11 Return Codes.
Note: If you call this API in a Session 0 process, it returns
The swap chain set by this method will only be used if
Windows?Phone?8: This API is supported.
+Get the swap chain that the runtime will use for automatically calling
This method returns one of the following Direct3D 11 Return Codes.
The swap chain retrieved by this method will only be used if
Windows?Phone?8: This API is supported.
+Check to see if the draw pipeline state is valid.
+A reference to the
This method returns one of the following Direct3D 11 Return Codes.
Use validate prior to calling a draw method (for example,
Windows?Phone?8: This API is supported.
+Report information about a device object's lifetime.
+A value from the
This method returns one of the following Direct3D 11 Return Codes.
ReportLiveDeviceObjects uses the value in Flags to determine the amount of information to report about a device object's lifetime.
Windows?Phone?8: This API is supported.
+Verifies whether the dispatch pipeline state is valid.
+A reference to the
This method returns one of the return codes described in the topic Direct3D 11 Return Codes.
Use this method before you call a dispatch method (for example,
Windows?Phone?8: This API is supported.
+Get or sets the number of milliseconds to sleep after
Value is set with
Windows?Phone?8: This API is supported.
+Get or sets the swap chain that the runtime will use for automatically calling
The swap chain retrieved by this method will only be used if
Windows?Phone?8: This API is supported.
+The depth-stencil-state interface holds a description for depth-stencil state that you can bind to the output-merger stage.
+To create a depth-stencil-state object, call
Windows?Phone?8: This API is supported.
+Gets the description for depth-stencil state that you used to create the depth-stencil-state object.
+A reference to a
You use the description for depth-stencil state in a call to the
Windows?Phone?8: This API is supported.
+Gets the description for depth-stencil state that you used to create the depth-stencil-state object.
+You use the description for depth-stencil state in a call to the
Windows?Phone?8: This API is supported.
+A depth-stencil-view interface accesses a texture resource during depth-stencil testing.
+To create a depth-stencil view, call
To bind a depth-stencil view to the pipeline, call
Windows?Phone?8: This API is supported.
+A view interface specifies the parts of a resource the pipeline can access during rendering.
+A view interface is the base interface for all views. There are four types of views; a depth-stencil view, a render-target view, a shader-resource view, and an unordered-access view.
All resources must be bound to the pipeline before they can be accessed.
Windows?Phone?8: This API is supported.
+Get the resource that is accessed through this view.
+Address of a reference to the resource that is accessed through this view. (See
This function increments the reference count of the resource by one, so it is necessary to call Release on the returned reference when the application is done with it. Destroying (or losing) the returned reference before Release is called will result in a memory leak.
Windows?Phone?8: This API is supported.
+Get the resource that is accessed through this view.
+This function increments the reference count of the resource by one, so it is necessary to call Dispose on the returned reference when the application is done with it. Destroying (or losing) the returned reference before Release is called will result in a memory leak.
+Get the resource that is accessed through this view.
+This function increments the reference count of the resource by one, so it is necessary to call Release on the returned reference when the application is done with it. Destroying (or losing) the returned reference before Release is called will result in a memory leak.
+Get the depth-stencil view.
+Pointer to a depth-stencil-view description (see
Windows?Phone?8: This API is supported.
+Get the depth-stencil view.
+Windows?Phone?8: This API is supported.
+The device interface represents a virtual adapter; it is used to create resources.
+A device is created using
Windows?Phone?8: This API is supported.
+ IDXGIResource* pOtherResource(NULL);
+ hr = pOtherDeviceResource->QueryInterface( __uuidof(IDXGIResource), (void**)&pOtherResource );
+ HANDLE sharedHandle;
+ pOtherResource->GetSharedHandle(&sharedHandle);
+ The only resources that can be shared are 2D non-mipmapped textures. To share a resource between a Direct3D 9 device and a Direct3D 10 device the texture must have been created using the pSharedHandle argument of {{CreateTexture}}. The shared Direct3D 9 handle is then passed to OpenSharedResource in the hResource argument. The following code illustrates the method calls involved.
+ sharedHandle = NULL; // must be set to NULL to create, can use a valid handle here to open in D3D9
+ pDevice9->CreateTexture(..., pTex2D_9, &sharedHandle);
+ ...
+ pDevice10->OpenSharedResource(sharedHandle, __uuidof(ID3D10Resource), (void**)(&tempResource10));
+ tempResource10->QueryInterface(__uuidof(ID3D10Texture2D), (void**)(&pTex2D_10));
+ tempResource10->Release();
+ // now use pTex2D_10 with pDevice10
+ Textures being shared from D3D9 to D3D10 have the following restrictions. Textures must be 2D Only 1 mip level is allowed Texture must have default usage Texture must be write only MSAA textures are not allowed Bind flags must have SHADER_RESOURCE and RENDER_TARGET set Only R10G10B10A2_UNORM, R16G16B16A16_FLOAT and R8G8B8A8_UNORM formats are allowed If a shared texture is updated on one device Creates a buffer (vertex buffer, index buffer, or shader-constant buffer).
+A reference to a
A reference to a
If you don't pass anything to pInitialData, the initial content of the memory for the buffer is undefined. In this case, you need to write the buffer content some other way before the resource is read.
Address of a reference to the
This method returns E_OUTOFMEMORY if there is insufficient memory to create the buffer. See Direct3D 11 Return Codes for other possible return values.
For example code, see How to: Create a Vertex Buffer, How to: Create an Index Buffer or How to: Create a Constant Buffer.
For a constant buffer (BindFlags of
The Direct3D 11.1 runtime, which is available on Windows 8 and later operating systems, provides the following new functionality for CreateBuffer.
You can create a constant buffer that is larger than the maximum constant buffer size that a shader can access (4096 32-bit*4-component constants, approximately 64 KB). When you bind the constant buffer to the pipeline (for example, via PSSetConstantBuffers or PSSetConstantBuffers1), you can define a range of the buffer that the shader can access that fits within the 4096 constant limit.
The runtime will emulate this feature for feature level 9.1, 9.2, and 9.3; therefore, this feature is supported for feature level 9.1, 9.2, and 9.3. This feature is always available on new drivers for feature level 10 and higher. On existing drivers that are implemented to feature level 10 and higher, a call to CreateBuffer to request a constant buffer that is larger than 4096 fails.
Windows?Phone?8: This API is supported.
+Create an array of 2D textures.
+If the method succeeds, the return code is
CreateTexture2D creates a 2D texture resource, which can contain a number of 2D subresources. The number of textures is specified in the texture description. All textures in a resource must have the same format, size, and number of mipmap levels.
All resources are made up of one or more subresources. To load data into the texture, applications can supply the data initially as an array of
For a 32 x 32 texture with a full mipmap chain, the pInitialData array has the following 6 elements: +
Windows?Phone?8: This API is supported.
+Create an array of 2D textures.
+If the method succeeds, the return code is
CreateTexture2D creates a 2D texture resource, which can contain a number of 2D subresources. The number of textures is specified in the texture description. All textures in a resource must have the same format, size, and number of mipmap levels.
All resources are made up of one or more subresources. To load data into the texture, applications can supply the data initially as an array of
For a 32 x 32 texture with a full mipmap chain, the pInitialData array has the following 6 elements: +
Windows?Phone?8: This API is supported.
+Create a single 3D texture.
+If the method succeeds, the return code is
CreateTexture3D creates a 3D texture resource, which can contain a number of 3D subresources. The number of textures is specified in the texture description. All textures in a resource must have the same format, size, and number of mipmap levels.
All resources are made up of one or more subresources. To load data into the texture, applications can supply the data initially as an array of
Each element of pInitialData provides all of the slices that are defined for a given miplevel. For example, for a 32 x 32 x 4 volume texture with a full mipmap chain, the array has the following 6 elements:
Windows?Phone?8: This API is supported.
+Create a shader-resource view for accessing data in a resource.
+Pointer to the resource that will serve as input to a shader. This resource must have been created with the
Pointer to a shader-resource view description (see
Address of a reference to an
This method returns one of the following Direct3D 11 Return Codes.
A resource is made up of one or more subresources; a view identifies which subresources to allow the pipeline to access. In addition, each resource is bound to the pipeline using a view. A shader-resource view is designed to bind any buffer or texture resource to the shader stages using the following API methods:
Because a view is fully typed, this means that typeless resources become fully typed when bound to the pipeline.
Note: To successfully create a shader-resource view from a typeless buffer (for example,
The Direct3D 11.1 runtime, which is available starting with Windows 8, allows you to use CreateShaderResourceView for the following new purpose.
You can create shader-resource views of video resources so that Direct3D shaders can process those shader-resource views. These video resources are either Texture2D or Texture2DArray. The value in the ViewDimension member of the
The runtime read+write conflict prevention logic (which stops a resource from being bound as an SRV and RTV or UAV at the same time) treats views of different parts of the same video surface as conflicting for simplicity. Therefore, the runtime does not allow an application to read from luma while the application simultaneously renders to chroma in the same surface even though the hardware might allow these simultaneous operations.
Windows?Phone?8: This API is supported.
+Creates a view for accessing an unordered access resource.
+This method returns one of the Direct3D 11 Return Codes.
The Direct3D 11.1 runtime, which is available starting with Windows 8, allows you to use CreateUnorderedAccessView for the following new purpose.
You can create unordered-access views of video resources so that Direct3D shaders can process those unordered-access views. These video resources are either Texture2D or Texture2DArray. The value in the ViewDimension member of the
The runtime read+write conflict prevention logic (which stops a resource from being bound as an SRV and RTV or UAV at the same time) treats views of different parts of the same video surface as conflicting for simplicity. Therefore, the runtime does not allow an application to read from luma while the application simultaneously renders to chroma in the same surface even though the hardware might allow these simultaneous operations.
Windows?Phone?8: This API is supported.
+Creates a render-target view for accessing resource data.
+Pointer to a
Pointer to a
Address of a reference to an
This method returns one of the Direct3D 11 Return Codes.
A render-target view can be bound to the output-merger stage by calling
The Direct3D 11.1 runtime, which is available starting with Windows 8, allows you to use CreateRenderTargetView for the following new purpose.
You can create render-target views of video resources so that Direct3D shaders can process those render-target views. These video resources are either Texture2D or Texture2DArray. The value in the ViewDimension member of the
The runtime read+write conflict prevention logic (which stops a resource from being bound as an SRV and RTV or UAV at the same time) treats views of different parts of the same video surface as conflicting for simplicity. Therefore, the runtime does not allow an application to read from luma while the application simultaneously renders to chroma in the same surface even though the hardware might allow these simultaneous operations.
Windows Phone 8: This API is supported.
+Create a depth-stencil view for accessing resource data.
+Pointer to the resource that will serve as the depth-stencil surface. This resource must have been created with the
Pointer to a depth-stencil-view description (see
Address of a reference to an
This method returns one of the following Direct3D 11 Return Codes.
A depth-stencil view can be bound to the output-merger stage by calling
Windows?Phone?8: This API is supported.
+Create an input-layout object to describe the input-buffer data for the input-assembler stage.
+An array of the input-assembler stage input data types; each type is described by an element description (see
The number of input-data types in the array of input-elements.
A reference to the compiled shader. The compiled shader code contains an input signature which is validated against the array of elements. See remarks.
Size of the compiled shader.
A reference to the input-layout object created (see
If the method succeeds, the return code is
After creating an input layout object, it must be bound to the input-assembler stage before calling a draw API.
Once an input-layout object is created from a shader signature, the input-layout object can be reused with any other shader that has an identical input signature (semantics included). This can simplify the creation of input-layout objects when you are working with many shaders with identical inputs.
If a data type in the input-layout declaration does not match the data type in a shader-input signature, CreateInputLayout will generate a warning during compilation. The warning is simply to call attention to the fact that the data may be reinterpreted when read from a register. You may either disregard this warning (if reinterpretation is intentional) or make the data types match in both declarations to eliminate the warning.
Windows?Phone?8: This API is supported.
+Create a vertex-shader object from a compiled shader.
+A reference to the compiled shader.
Size of the compiled vertex shader.
A reference to a class linkage interface (see
Address of a reference to a
This method returns one of the Direct3D 11 Return Codes.
The Direct3D 11.1 runtime, which is available starting with Windows 8, provides the following new functionality for CreateVertexShader.
The following shader model 5.0 instructions are available to just pixel shaders and compute shaders in the Direct3D 11.0 runtime. For the Direct3D 11.1 runtime, because unordered access views (UAV) are available at all shader stages, you can use these instructions in all shader stages.
Therefore, if you use the following shader model 5.0 instructions in a vertex shader, you can successfully pass the compiled vertex shader to pShaderBytecode. That is, the call to CreateVertexShader succeeds.
If you pass a compiled shader to pShaderBytecode that uses any of the following instructions on a device that doesn't support UAVs at every shader stage (including existing drivers that are not implemented to support UAVs at every shader stage), CreateVertexShader fails. CreateVertexShader also fails if the shader tries to use a UAV slot beyond the set of UAV slots that the hardware supports.
Windows Phone 8: This API is supported.
+Create a geometry shader.
+A reference to the compiled shader.
Size of the compiled geometry shader.
A reference to a class linkage interface (see
Address of a reference to a
This method returns one of the following Direct3D 11 Return Codes.
After it is created, the shader can be set to the device by calling
The Direct3D 11.1 runtime, which is available starting with Windows 8, provides the following new functionality for CreateGeometryShader.
The following shader model 5.0 instructions are available to just pixel shaders and compute shaders in the Direct3D 11.0 runtime. For the Direct3D 11.1 runtime, because unordered access views (UAV) are available at all shader stages, you can use these instructions in all shader stages.
Therefore, if you use the following shader model 5.0 instructions in a geometry shader, you can successfully pass the compiled geometry shader to pShaderBytecode. That is, the call to CreateGeometryShader succeeds.
If you pass a compiled shader to pShaderBytecode that uses any of the following instructions on a device that doesn't support UAVs at every shader stage (including existing drivers that are not implemented to support UAVs at every shader stage), CreateGeometryShader fails. CreateGeometryShader also fails if the shader tries to use a UAV slot beyond the set of UAV slots that the hardware supports.
Windows?Phone?8: This API is supported.
+Creates a geometry shader that can write to streaming output buffers.
+A reference to the compiled geometry shader for a standard geometry shader plus stream output. For info on how to get this reference, see Getting a Pointer to a Compiled Shader.
To create the stream output without using a geometry shader, pass a reference to the output signature for the prior stage. To obtain this output signature, call the
Size of the compiled geometry shader.
Pointer to a
The number of entries in the stream output declaration (ranges from 0 to
An array of buffer strides; each stride is the size of an element for that buffer.
The number of strides (or buffers) in pBufferStrides (ranges from 0 to
The index number of the stream to be sent to the rasterizer stage (ranges from 0 to
A reference to a class linkage interface (see
Address of a reference to an
This method returns one of the Direct3D 11 Return Codes.
For more info about using CreateGeometryShaderWithStreamOutput, see Create a Geometry-Shader Object with Stream Output.
The Direct3D 11.1 runtime, which is available starting with Windows 8, provides the following new functionality for CreateGeometryShaderWithStreamOutput.
The following shader model 5.0 instructions are available to just pixel shaders and compute shaders in the Direct3D 11.0 runtime. For the Direct3D 11.1 runtime, because unordered access views (UAV) are available at all shader stages, you can use these instructions in all shader stages.
Therefore, if you use the following shader model 5.0 instructions in a geometry shader, you can successfully pass the compiled geometry shader to pShaderBytecode. That is, the call to CreateGeometryShaderWithStreamOutput succeeds.
If you pass a compiled shader to pShaderBytecode that uses any of the following instructions on a device that doesn't support UAVs at every shader stage (including existing drivers that are not implemented to support UAVs at every shader stage), CreateGeometryShaderWithStreamOutput fails. CreateGeometryShaderWithStreamOutput also fails if the shader tries to use a UAV slot beyond the set of UAV slots that the hardware supports.
Windows?Phone?8: This API is supported.
+Create a pixel shader.
+A reference to the compiled shader.
Size of the compiled pixel shader.
A reference to a class linkage interface (see
Address of a reference to a
This method returns one of the following Direct3D 11 Return Codes.
After creating the pixel shader, you can set it to the device using
Windows?Phone?8: This API is supported.
+Create a hull shader.
+This method returns one of the Direct3D 11 Return Codes.
The Direct3D 11.1 runtime, which is available starting with Windows 8, provides the following new functionality for CreateHullShader.
The following shader model 5.0 instructions are available to just pixel shaders and compute shaders in the Direct3D 11.0 runtime. For the Direct3D 11.1 runtime, because unordered access views (UAV) are available at all shader stages, you can use these instructions in all shader stages.
Therefore, if you use the following shader model 5.0 instructions in a hull shader, you can successfully pass the compiled hull shader to pShaderBytecode. That is, the call to CreateHullShader succeeds.
If you pass a compiled shader to pShaderBytecode that uses any of the following instructions on a device that doesn't support UAVs at every shader stage (including existing drivers that are not implemented to support UAVs at every shader stage), CreateHullShader fails. CreateHullShader also fails if the shader tries to use a UAV slot beyond the set of UAV slots that the hardware supports.
Windows?Phone?8: This API is supported.
+Create a domain shader.
+This method returns one of the following Direct3D 11 Return Codes.
The Direct3D 11.1 runtime, which is available starting with Windows 8, provides the following new functionality for CreateDomainShader.
The following shader model 5.0 instructions are available to just pixel shaders and compute shaders in the Direct3D 11.0 runtime. For the Direct3D 11.1 runtime, because unordered access views (UAV) are available at all shader stages, you can use these instructions in all shader stages.
Therefore, if you use the following shader model 5.0 instructions in a domain shader, you can successfully pass the compiled domain shader to pShaderBytecode. That is, the call to CreateDomainShader succeeds.
If you pass a compiled shader to pShaderBytecode that uses any of the following instructions on a device that doesn't support UAVs at every shader stage (including existing drivers that are not implemented to support UAVs at every shader stage), CreateDomainShader fails. CreateDomainShader also fails if the shader tries to use a UAV slot beyond the set of UAV slots that the hardware supports.
Windows?Phone?8: This API is supported.
+Create a compute shader.
+This method returns E_OUTOFMEMORY if there is insufficient memory to create the compute shader. See Direct3D 11 Return Codes for other possible return values.
For an example, see How To: Create a Compute Shader and HDRToneMappingCS11 Sample.
Windows?Phone?8: This API is supported.
+Creates class linkage libraries to enable dynamic shader linkage.
+A reference to a class-linkage interface reference (see
This method returns one of the following Direct3D 11 Return Codes.
The
Windows?Phone?8: This API is supported.
+Create a blend-state object that encapsulates blend state for the output-merger stage.
+Pointer to a blend-state description (see
Address of a reference to the blend-state object created (see
This method returns E_OUTOFMEMORY if there is insufficient memory to create the blend-state object. See Direct3D 11 Return Codes for other possible return values.
An application can create up to 4096 unique blend-state objects. For each object created, the runtime checks to see if a previous object has the same state. If such a previous object exists, the runtime will return a reference to previous instance instead of creating a duplicate object.
Windows?Phone?8: This API is supported.
+Create a depth-stencil state object that encapsulates depth-stencil test information for the output-merger stage.
+Pointer to a depth-stencil state description (see
Address of a reference to the depth-stencil state object created (see
This method returns one of the following Direct3D 11 Return Codes.
4096 unique depth-stencil state objects can be created on a device at a time.
If an application attempts to create a depth-stencil-state interface with the same state as an existing interface, the same interface will be returned and the total number of unique depth-stencil state objects will stay the same.
Windows?Phone?8: This API is supported.
+Create a rasterizer state object that tells the rasterizer stage how to behave.
+Pointer to a rasterizer state description (see
Address of a reference to the rasterizer state object created (see
This method returns E_OUTOFMEMORY if there is insufficient memory to create the rasterizer state object. See Direct3D 11 Return Codes for other possible return values.
4096 unique rasterizer state objects can be created on a device at a time.
If an application attempts to create a rasterizer-state interface with the same state as an existing interface, the same interface will be returned and the total number of unique rasterizer state objects will stay the same.
Windows?Phone?8: This API is supported.
+Create a sampler-state object that encapsulates sampling information for a texture.
+Pointer to a sampler state description (see
Address of a reference to the sampler state object created (see
This method returns one of the following Direct3D 11 Return Codes.
4096 unique sampler state objects can be created on a device at a time.
If an application attempts to create a sampler-state interface with the same state as an existing interface, the same interface will be returned and the total number of unique sampler state objects will stay the same.
Windows?Phone?8: This API is supported.
+This interface encapsulates methods for querying information from the GPU.
+Pointer to a query description (see
Address of a reference to the query object created (see
This method returns E_OUTOFMEMORY if there is insufficient memory to create the query object. See Direct3D 11 Return Codes for other possible return values.
Windows?Phone?8: This API is supported.
+Creates a predicate.
+Pointer to a query description where the type of query must be a
Address of a reference to a predicate (see
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Create a counter object for measuring GPU performance.
+Pointer to a counter description (see
Address of a reference to a counter (see
If this function succeeds, it will return
E_INVALIDARG is returned whenever an out-of-range well-known or device-dependent counter is requested, or when the simultaneously active counters have been exhausted.
Windows?Phone?8: This API is supported.
+Creates a deferred context, which can record command lists.
+Reserved for future use. Pass 0.
Upon completion of the method, the passed reference to an
Returns
A deferred context is a thread-safe context that you can use to record graphics commands on a thread other than the main rendering thread. Using a deferred context, you can record graphics commands into a command list that is encapsulated by the
You can create multiple deferred contexts.
Note??If you use the
For more information about deferred contexts, see Immediate and Deferred Rendering.
Windows?Phone?8: This API is supported.
+Give a device access to a shared resource created on a different device.
+A resource handle. See remarks.
The globally unique identifier (
Address of a reference to the resource we are gaining access to.
This method returns one of the following Direct3D 11 Return Codes.
The REFIID, or
The unique handle of the resource is obtained differently depending on the type of device that originally created the resource.
To share a resource between two Direct3D 11 devices the resource must have been created with the
The REFIID, or
When sharing a resource between two Direct3D 10/11 devices the unique handle of the resource can be obtained by querying the resource for the
* pOtherResource( null ); + hr = pOtherDeviceResource->QueryInterface( __uuidof(), (void**)&pOtherResource ); + HANDLE sharedHandle; + pOtherResource->GetSharedHandle(&sharedHandle);
The only resources that can be shared are 2D non-mipmapped textures.
To share a resource between a Direct3D 9 device and a Direct3D 11 device the texture must have been created using the pSharedHandle argument of CreateTexture. The shared Direct3D 9 handle is then passed to OpenSharedResource in the hResource argument.
The following code illustrates the method calls involved.
sharedHandle = null; // must be set to null to create, can use a valid handle here to open in D3D9 + pDevice9->CreateTexture(..., pTex2D_9, &sharedHandle); + ... + pDevice11->OpenSharedResource(sharedHandle, __uuidof(), (void**)(&tempResource11)); + tempResource11->QueryInterface(__uuidof( ), (void**)(&pTex2D_11)); + tempResource11->Release(); + // now use pTex2D_11 with pDevice11
Textures being shared from D3D9 to D3D11 have the following restrictions.
If a shared texture is updated on one device
Windows?Phone?8: This API is supported.
+Get the support of a given format on the installed video device.
+A
A bitfield of
Windows?Phone?8: This API is supported.
+Get the number of quality levels available during multisampling.
+The texture format. See
The number of samples during multisampling.
Number of quality levels supported by the adapter. See remarks.
When multisampling a texture, the number of quality levels available for an adapter is dependent on the texture format used and the number of samples requested. The maximum number of quality levels is defined by
Furthermore, the definition of a quality level is up to each hardware vendor to define, however no facility is provided by Direct3D to help discover this information.
Note that FEATURE_LEVEL_10_1 devices are required to support 4x MSAA for all render targets except R32G32B32A32 and R32G32B32. FEATURE_LEVEL_11_0 devices are required to support 4x MSAA for all render target formats, and 8x MSAA for all render target formats except R32G32B32A32 formats.
Windows?Phone?8: This API is supported.
+Get a counter's information.
+Windows?Phone?8: This API is supported.
+Get the type, name, units of measure, and a description of an existing counter.
+Pointer to a counter description (see
Pointer to the data type of a counter (see
Pointer to the number of hardware counters that are needed for this counter type to be created. All instances of the same counter type use the same hardware counters.
String to be filled with a brief name for the counter. May be
Length of the string returned to szName. Can be
Name of the units a counter measures, provided the memory the reference points to has enough room to hold the string. Can be
Length of the string returned to szUnits. Can be
A description of the counter, provided the memory the reference points to has enough room to hold the string. Can be
Length of the string returned to szDescription. Can be
This method returns one of the following Direct3D 11 Return Codes.
Length parameters can be
Windows?Phone?8: This API is supported.
+Gets information about the features that are supported by the current graphics driver.
+A member of the
Upon completion of the method, the passed structure is filled with data that describes the feature support.
The size of the structure passed to the pFeatureSupportData parameter.
Returns
To query for multi-threading support, pass the
Calling CheckFeatureSupport with Feature set to
Windows?Phone?8: This API is supported.
+Get application-defined data from a device.
+Guid associated with the data.
A reference to a variable that on input contains the size, in bytes, of the buffer that pData points to, and on output contains the size, in bytes, of the amount of data that GetPrivateData retrieved.
A reference to a buffer that GetPrivateData fills with data from the device if pDataSize points to a value that specifies a buffer large enough to hold the data.
This method returns one of the codes described in the topic Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Set data to a device and associate that data with a guid.
+Guid associated with the data.
Size of the data.
Pointer to the data to be stored with this device. If pData is
This method returns one of the following Direct3D 11 Return Codes.
The data stored in the device with this method can be retrieved with
The data and guid set with this method will typically be application-defined.
The debug layer reports memory leaks by outputting a list of object interface references along with their friendly names. The default friendly name is "<unnamed>". You can set the friendly name so that you can determine if the corresponding object interface reference caused the leak. To set the friendly name, use the SetPrivateData method and the
static const char c_szName[] = "My name"; + hr = pContext->SetPrivateData(, sizeof( c_szName ) - 1, c_szName ); +
Windows?Phone?8: This API is supported.
+Associate an
Guid associated with the interface.
Pointer to an
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Gets the feature level of the hardware device.
+A member of the
Feature levels determine the capabilities of your device.
Windows?Phone?8: This API is supported.
+Get the flags used during the call to create the device with
A bitfield containing the flags used to create the device. See
Windows?Phone?8: This API is supported.
+Get the reason why the device was removed.
+Possible return values include:
For more detail on these return codes, see DXGI_ERROR.
Windows?Phone?8: This API is supported.
+Gets an immediate context, which can play back command lists.
+Upon completion of the method, the passed reference to an
The GetImmediateContext method returns an
The GetImmediateContext method increments the reference count of the immediate context by one. Therefore, you must call Release on the returned interface reference when you are done with it to avoid a memory leak.
Windows?Phone?8: This API is supported.
+Get the exception-mode flags.
+A value that contains one or more exception flags; each flag specifies a condition which will cause an exception to be raised. The flags are listed in D3D11_RAISE_FLAG. A default value of 0 means there are no flags.
This method returns one of the following Direct3D 11 Return Codes.
Set an exception-mode flag to elevate an error condition to a non-continuable exception.
Whenever an error occurs, a Direct3D device enters the DEVICEREMOVED state and if the appropriate exception flag has been set, an exception is raised. A raised exception is designed to terminate an application. Before termination, the last chance an application has to persist data is by using an UnhandledExceptionFilter (see Structured Exception Handling). In general, UnhandledExceptionFilters are leveraged to try to persist data when an application is crashing (to disk, for example). Any code that executes during an UnhandledExceptionFilter is not guaranteed to reliably execute (due to possible process corruption). Any data that the UnhandledExceptionFilter manages to persist, before the UnhandledExceptionFilter crashes again, should be treated as suspect, and therefore inspected by a new, non-corrupted process to see if it is usable.
Windows?Phone?8: This API is supported.
+Get the exception-mode flags.
+A value that contains one or more exception flags; each flag specifies a condition which will cause an exception to be raised. The flags are listed in D3D11_RAISE_FLAG. A default value of 0 means there are no flags.
An exception-mode flag is used to elevate an error condition to a non-continuable exception.
Windows?Phone?8: This API is supported.
+Gets the feature level of the hardware device.
+Feature levels determine the capabilities of your device.
Windows?Phone?8: This API is supported.
+Get the flags used during the call to create the device with
Windows?Phone?8: This API is supported.
+Get the reason why the device was removed.
+Windows?Phone?8: This API is supported.
+Gets an immediate context, which can play back command lists.
+The GetImmediateContext method returns an
The GetImmediateContext method increments the reference count of the immediate context by one. Therefore, you must call Release on the returned interface reference when you are done with it to avoid a memory leak.
Windows?Phone?8: This API is supported.
+Gets or sets the exception-mode flags.
+An exception-mode flag is used to elevate an error condition to a non-continuable exception.
Windows?Phone?8: This API is supported.
+The
Windows?Phone?8: This API is supported.
+Set the target output buffers for the stream-output stage of the pipeline.
+The number of buffers to bind to the device. A maximum of four output buffers can be set. If less than four are defined by the call, the remaining buffer slots are set to
The array of output buffers (see
Array of offsets to the output buffers from ppSOTargets, one offset for each buffer. The offset values must be in bytes.
An offset of -1 will cause the stream output buffer to be appended, continuing after the last location written to the buffer in a previous stream output pass.
Calling this method using a buffer that is currently bound for writing will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
+Set the target output buffers for the stream-output stage of the pipeline.
+The number of buffers to bind to the device. A maximum of four output buffers can be set. If less than four are defined by the call, the remaining buffer slots are set to
The array of output buffers (see
Array of offsets to the output buffers from ppSOTargets, one offset for each buffer. The offset values must be in bytes.
An offset of -1 will cause the stream output buffer to be appended, continuing after the last location written to the buffer in a previous stream output pass.
Calling this method using a buffer that is currently bound for writing will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows?Phone?8: This API is supported.
+Get the target output buffers for the stream-output stage of the pipeline.
+Number of buffers to get.
An array of output buffers (see
A maximum of four output buffers can be retrieved.
The offsets to the output buffers pointed to in the returned ppSOTargets array may be assumed to be -1 (append), as defined for use in
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+The
Windows?Phone?8: This API is supported.
+Bind an array of shader resources to the compute-shader stage.
+Index into the device's zero-based array to begin setting shader resources to (ranges from 0 to
Number of shader resources to set. Up to a maximum of 128 slots are available for shader resources(ranges from 0 to
Array of shader resource view interfaces to set to the device.
If an overlapping resource view is already bound to an output slot, such as a render target, then the method will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10. +
Windows?Phone?8: This API is supported.
+Sets an array of views for an unordered resource.
+Index of the first element in the zero-based array to begin setting (ranges from 0 to D3D11_1_UAV_SLOT_COUNT - 1). D3D11_1_UAV_SLOT_COUNT is defined as 64.
Number of views to set (ranges from 0 to D3D11_1_UAV_SLOT_COUNT - StartSlot).
A reference to an array of
An array of append and consume buffer offsets. A value of -1 indicates to keep the current offset. Any other values set the hidden counter for that appendable and consumable UAV. pUAVInitialCounts is only relevant for UAVs that were created with either
Windows?Phone?8: This API is supported.
+Set a compute shader to the device.
+Pointer to a compute shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows?Phone?8: This API is supported.
+Set a compute shader to the device.
+Pointer to a compute shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows?Phone?8: This API is supported.
+Set an array of sampler states to the compute-shader stage.
+Index into the device's zero-based array to begin setting samplers to (ranges from 0 to
Number of samplers in the array. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any sampler may be set to
//Default sampler state: +SamplerDesc; + SamplerDesc.Filter = ; + SamplerDesc.AddressU = ; + SamplerDesc.AddressV = ; + SamplerDesc.AddressW = ; + SamplerDesc.MipLODBias = 0; + SamplerDesc.MaxAnisotropy = 1; + SamplerDesc.ComparisonFunc = ; + SamplerDesc.BorderColor[0] = 1.0f; + SamplerDesc.BorderColor[1] = 1.0f; + SamplerDesc.BorderColor[2] = 1.0f; + SamplerDesc.BorderColor[3] = 1.0f; + SamplerDesc.MinLOD = -FLT_MAX; + SamplerDesc.MaxLOD = FLT_MAX;
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Sets the constant buffers used by the compute-shader stage.
+Index into the zero-based array to begin setting constant buffers to (ranges from 0 to
Number of buffers to set (ranges from 0 to
Array of constant buffers (see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The Direct3D 11.1 runtime, which is available starting with Windows 8, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the CSSetConstantBuffers1 method instead.
Windows Phone 8: This API is supported.
+Get the compute-shader resources.
+Index into the device's zero-based array to begin getting shader resources from (ranges from 0 to
The number of resources to get from the device. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to be returned by the device.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Gets an array of views for an unordered resource.
+Index of the first element in the zero-based array to return (ranges from 0 to
Number of views to get (ranges from 0 to
A reference to an array of interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the compute shader currently set on the device.
+Address of a reference to a Compute shader (see
Pointer to an array of class instance interfaces (see
The number of class-instance elements in the array.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get an array of sampler state interfaces from the compute-shader stage.
+Index into a zero-based array to begin getting samplers from (ranges from 0 to
Number of samplers to get from a device context. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the constant buffers used by the compute-shader stage.
+Index into the device's zero-based array to begin retrieving constant buffers from (ranges from 0 to
Number of buffers to retrieve (ranges from 0 to
Array of constant buffer interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+The
Windows Phone 8: This API is supported.
+ D3D11_BOX sourceRegion;
+ sourceRegion.left = 120;
+ sourceRegion.right = 200;
+ sourceRegion.top = 100;
+ sourceRegion.bottom = 220;
+ sourceRegion.front = 0;
+ sourceRegion.back = 1; pd3dDeviceContext->CopySubresourceRegion( pDestTexture, 0, 10, 20, 0, pSourceTexture, 0, &sourceRegion );
+
+ Notice that for a 2D texture, front and back are set to 0 and 1 respectively.
+ Gets a reference to the data contained in a subresource, and denies the GPU access to that subresource.
+A reference to a
Index number of the subresource.
Specifies the CPU's read and write permissions for a resource. For possible values, see
Flag that specifies what the CPU should do when the GPU is busy. This flag is optional.
A reference to the mapped subresource (see
This method also throws an exception with the code
For more information about these error codes, see DXGI_ERROR.
If you call Map on a deferred context, you can only pass
The Direct3D 11.1 runtime, which is available starting with Windows Developer Preview, can map shader resource views (SRVs) of dynamic buffers with
Draw indexed, non-instanced primitives.
+Number of indices to draw.
The location of the first index read by the GPU from the index buffer.
A value added to each index before reading a vertex from the vertex buffer.
A draw API submits work to the rendering pipeline.
If the sum of both indices is negative, the result of the function call is undefined.
Windows Phone 8: This API is supported.
+Draw non-indexed, non-instanced primitives.
+Number of vertices to draw.
Index of the first vertex, which is usually an offset in a vertex buffer.
Draw submits work to the rendering pipeline.
The vertex data for a draw call normally comes from a vertex buffer that is bound to the pipeline.
Even without any vertex buffer bound to the pipeline, you can generate your own vertex data in your vertex shader by using the SV_VertexID system-value semantic to determine the current vertex that the runtime is processing.
Windows Phone 8: This API is supported.
+Gets a reference to the data contained in a subresource, and denies the GPU access to that subresource.
+This method returns one of the Direct3D 11 Return Codes.
This method also returns
This method also returns
For more information about these error codes, see DXGI_ERROR.
If you call Map on a deferred context, you can only pass
Note: The Direct3D 11.1 runtime, which is available starting with Windows 8, enables mapping dynamic constant buffers and shader resource views (SRVs) of dynamic buffers with
For info about how to use Map, see How to: Use dynamic resources.
+Invalidate the reference to a resource and reenable the GPU's access to that resource.
+A reference to a
A subresource to be unmapped.
For info about how to use Unmap, see How to: Use dynamic resources.
Windows Phone 8: This API is supported.
+Draw indexed, instanced primitives.
+Number of indices read from the index buffer for each instance.
Number of instances to draw.
The location of the first index read by the GPU from the index buffer.
A value added to each index before reading a vertex from the vertex buffer.
A value added to each index before reading per-instance data from a vertex buffer.
A draw API submits work to the rendering pipeline.
Instancing may extend performance by reusing the same geometry to draw multiple objects in a scene. One example of instancing could be to draw the same object with different positions and colors. Instancing requires multiple vertex buffers: at least one for per-vertex data and a second buffer for per-instance data.
Windows Phone 8: This API is supported.
+Draw non-indexed, instanced primitives.
+Number of vertices to draw.
Number of instances to draw.
Index of the first vertex.
A value added to each index before reading per-instance data from a vertex buffer.
A draw API submits work to the rendering pipeline.
Instancing may extend performance by reusing the same geometry to draw multiple objects in a scene. One example of instancing could be to draw the same object with different positions and colors.
The vertex data for an instanced draw call normally comes from a vertex buffer that is bound to the pipeline. However, you could also provide the vertex data from a shader that has instanced data identified with a system-value semantic (SV_InstanceID).
Windows Phone 8: This API is supported.
+Mark the beginning of a series of commands.
+A reference to an
Use
Windows Phone 8: This API is supported.
+Mark the end of a series of commands.
+A reference to an
Use
Windows Phone 8: This API is supported.
+Get data from the graphics processing unit (GPU) asynchronously.
+A reference to an
Address of memory that will receive the data. If
Size of the data to retrieve or 0. Must be 0 when pData is
Optional flags. Can be 0 or any combination of the flags enumerated by
This method returns one of the Direct3D 11 Return Codes. A return value of
Queries in a deferred context are limited to predicated drawing. That is, you cannot call
GetData retrieves the data that the runtime collected between calls to
If DataSize is 0, GetData is only used to check status.
An application gathers counter data by calling
Windows Phone 8: This API is supported.
+Set a rendering predicate.
+A reference to the
If TRUE, rendering will be affected by when the predicate's conditions are met. If
The predicate must be in the "issued" or "signaled" state to be used for predication. While the predicate is set for predication, calls to
Use this method to denote that subsequent rendering and resource manipulation commands are not actually performed if the resulting predicate data of the predicate is equal to the PredicateValue. However, some predicates are only hints, so they may not actually prevent operations from being performed.
The primary usefulness of predication is to allow an application to issue rendering and resource manipulation commands without taking the performance hit of spinning, waiting for
Rendering and resource manipulation commands for Direct3D 11 include these Draw, Dispatch, Copy, Update, Clear, Generate, and Resolve operations.
You can set a rendering predicate on an immediate or a deferred context. For info about immediate and deferred contexts, see Immediate and Deferred Rendering.
Windows Phone 8: This API is supported.
+Draw geometry of an unknown size.
+A draw API submits work to the rendering pipeline. This API submits work of an unknown size that was processed by the input assembler, vertex shader, and stream-output stages; the work may or may not have gone through the geometry-shader stage.
After data has been streamed out to stream-output stage buffers, those buffers can be again bound to the Input Assembler stage at input slot 0 and DrawAuto will draw them without the application needing to know the amount of data that was written to the buffers. A measurement of the amount of data written to the SO stage buffers is maintained internally when the data is streamed out. This means that the CPU does not need to fetch the measurement before re-binding the data that was streamed as input data. Although this amount is tracked internally, it is still the responsibility of applications to use input layouts to describe the format of the data in the SO stage buffers so that the layouts are available when the buffers are again bound to the input assembler.
The following diagram shows the DrawAuto process.
Calling DrawAuto does not change the state of the streaming-output buffers that were bound again as inputs.
DrawAuto only works when drawing with one input buffer bound as an input to the IA stage at slot 0. Applications must create the SO buffer resource with both binding flags,
This API does not support indexing or instancing.
If an application needs to retrieve the size of the streaming-output buffer, it can query for statistics on streaming output by using
Windows Phone 8: This API is supported.
+Draw indexed, instanced, GPU-generated primitives.
+A reference to an
Offset in pBufferForArgs to the start of the GPU generated primitives.
When an application creates a buffer that is associated with the
Windows Phone 8: This API is supported.
+Draw instanced, GPU-generated primitives.
+A reference to an
Offset in pBufferForArgs to the start of the GPU generated primitives.
When an application creates a buffer that is associated with the
Windows Phone 8: This API is supported.
+Execute a command list from a thread group.
+The number of groups dispatched in the x direction. ThreadGroupCountX must be less than or equal to
The number of groups dispatched in the y direction. ThreadGroupCountY must be less than or equal to
The number of groups dispatched in the z direction. ThreadGroupCountZ must be less than or equal to
You call the Dispatch method to execute commands in a compute shader. A compute shader can be run on many threads in parallel, within a thread group. Index a particular thread, within a thread group using a 3D vector given by (x,y,z).
In the following illustration, assume a thread group with 50 threads where the size of the group is given by (5,5,2). A single thread is identified from a thread group with 50 threads in it, using the vector (4,1,1).
The following illustration shows the relationship between the parameters passed to
Windows Phone 8: This API is supported.
+Execute a command list over one or more thread groups.
+A reference to an
A byte-aligned offset between the start of the buffer and the arguments.
You call the DispatchIndirect method to execute commands in a compute shader.
When an application creates a buffer that is associated with the
Windows Phone 8: This API is supported.
+Copy a region from a source resource to a destination resource.
+A reference to the destination resource (see
Destination subresource index.
The x-coordinate of the upper left corner of the destination region.
The y-coordinate of the upper left corner of the destination region. For a 1D subresource, this must be zero.
The z-coordinate of the upper left corner of the destination region. For a 1D or 2D subresource, this must be zero.
A reference to the source resource (see
Source subresource index.
A reference to a 3D box (see
An empty box results in a no-op. A box is empty if the top value is greater than or equal to the bottom value, or the left value is greater than or equal to the right value, or the front value is greater than or equal to the back value. When the box is empty, CopySubresourceRegion doesn't perform a copy operation.
The source box must be within the size of the source resource. The destination offsets, (x, y, and z), allow the source box to be offset when writing into the destination resource; however, the dimensions of the source box and the offsets must be within the size of the resource. If you try and copy outside the destination resource or specify a source box that is larger than the source resource, the behavior of CopySubresourceRegion is undefined. If you created a device that supports the debug layer, the debug output reports an error on this invalid CopySubresourceRegion call. Invalid parameters to CopySubresourceRegion cause undefined behavior and might result in incorrect rendering, clipping, no copy, or even the removal of the rendering device.
If the resources are buffers, all coordinates are in bytes; if the resources are textures, all coordinates are in texels. D3D11CalcSubresource is a helper function for calculating subresource indexes.
CopySubresourceRegion performs the copy on the GPU (similar to a memcpy by the CPU). As a consequence, the source and destination resources:
CopySubresourceRegion only supports copy; it does not support any stretch, color key, or blend. CopySubresourceRegion can reinterpret the resource data between a few format types. For more info, see Format Conversion using Direct3D 10.1.
If your app needs to copy an entire resource, we recommend to use
CopySubresourceRegion is an asynchronous call, which may be added to the command-buffer queue, this attempts to remove pipeline stalls that may occur when copying data. For more information about pipeline stalls, see performance considerations.
Note: Applies only to feature level 9_x hardware. If you use
Note: Applies only to feature level 9_x hardware. You can't use CopySubresourceRegion to copy mipmapped volume textures.
Note: Applies only to feature levels 9_x. Subresources created with the
Note: If you use CopySubresourceRegion with a depth-stencil buffer or a multisampled resource, you must copy the whole subresource. In this situation, you must pass 0 to the DstX, DstY, and DstZ parameters and
Copy the entire contents of the source resource to the destination resource using the GPU.
+A reference to the
A reference to the
This method is unusual in that it causes the GPU to perform the copy operation (similar to a memcpy by the CPU). As a result, it has a few restrictions designed for improving performance. For instance, the source and destination resources:
CopyResource only supports copy; it doesn't support any stretch, color key, or blend. CopyResource can reinterpret the resource data between a few format types. For more info, see Format Conversion using Direct3D 10.1.
You can't use an Immutable resource as a destination. You can use a depth-stencil resource as either a source or a destination provided that the feature level is
The method is an asynchronous call, which may be added to the command-buffer queue. This attempts to remove pipeline stalls that may occur when copying data. For more info, see performance considerations.
We recommend to use
Windows Phone 8: This API is supported.
+The CPU copies data from memory to a subresource created in non-mappable memory.
+A reference to the destination resource (see
A zero-based index, that identifies the destination subresource. See D3D11CalcSubresource for more details.
A reference to a box that defines the portion of the destination subresource to copy the resource data into. Coordinates are in bytes for buffers and in texels for textures. If
An empty box results in a no-op. A box is empty if the top value is greater than or equal to the bottom value, or the left value is greater than or equal to the right value, or the front value is greater than or equal to the back value. When the box is empty, UpdateSubresource doesn't perform an update operation.
A reference to the source data in memory.
The size of one row of the source data.
The size of one depth slice of source data.
For a shader-constant buffer; set pDstBox to
A resource cannot be used as a destination if:
When UpdateSubresource returns, the application is free to change or even free the data pointed to by pSrcData because the method has already copied/snapped away the original contents.
The performance of UpdateSubresource depends on whether or not there is contention for the destination resource. For example, contention for a vertex buffer resource occurs when the application executes a Draw call and later calls UpdateSubresource on the same vertex buffer before the Draw call is actually executed by the GPU.
Note: Applies only to feature level 9_x hardware. If you use UpdateSubresource or
To better understand the source row pitch and source depth pitch parameters, the following illustration shows a 3D volume texture.
Each block in this visual represents an element of data, and the size of each element is dependent on the resource's format. For example, if the resource format is
To calculate the source row pitch and source depth pitch for a given resource, use the following formulas:
In the case of this example 3D volume texture where the size of each element is 16 bytes, the formulas are as follows:
The following illustration shows the resource as it is laid out in memory.
For example, the following code snippet shows how to specify a destination region in a 2D texture. Assume the destination texture is 512x512 and the operation will copy the data pointed to by pData to [(120,100)..(200,220)] in the destination texture. Also assume that rowPitch has been initialized with the proper value (as explained above). front and back are set to 0 and 1 respectively, because by having front equal to back, the box is technically empty.
destRegion; + destRegion.left = 120; + destRegion.right = 200; + destRegion.top = 100; + destRegion.bottom = 220; + destRegion.front = 0; + destRegion.back = 1; pd3dDeviceContext->UpdateSubresource( pDestTexture, 0, &destRegion, pData, rowPitch, 0 ); +
The 1D case is similar. The following snippet shows how to specify a destination region in a 1D texture. Use the same assumptions as above, except that the texture is 512 in length.
destRegion; + destRegion.left = 120; + destRegion.right = 200; + destRegion.top = 0; + destRegion.bottom = 1; + destRegion.front = 0; + destRegion.back = 1; pd3dDeviceContext->UpdateSubresource( pDestTexture, 0, &destRegion, pData, rowPitch, 0 ); +
For info about various resource types and how UpdateSubresource might work with each resource type, see Introduction to a Resource in Direct3D 11.
+Copies data from a buffer holding variable length data.
+Pointer to
Offset from the start of pDstBuffer to write 32-bit UINT structure (vertex) count from pSrcView.
Pointer to an
Windows Phone 8: This API is supported.
+Set all the elements in a render target to one value.
+Pointer to the render target.
A 4-component array that represents the color to fill the render target with.
Applications that wish to clear a render target to a specific integer value bit pattern should render a screen-aligned quad instead of using this method. The reason for this is because this method accepts as input a floating point value, which may not have the same bit pattern as the original integer.
Differences between Direct3D 9 and Direct3D 11/10: Unlike Direct3D 9, the full extent of the resource view is always cleared. Viewport and scissor settings are not applied. |
?
When using D3D_FEATURE_LEVEL_9_x, ClearRenderTargetView only clears the first array slice in the render target view. This can impact (for example) cube map rendering scenarios. Applications should create a render target view for each face or array slice, then clear each view individually.
Windows Phone 8: This API is supported.
+Clears an unordered access resource with bit-precise values.
+This API copies the lower ni bits from each array element i to the corresponding channel, where ni is the number of bits in the ith channel of the resource format (for example, R8G8B8_FLOAT has 8 bits for the first 3 channels). This works on any UAV with no format conversion. For a raw or structured buffer view, only the first array element value is used.
Windows Phone 8: This API is supported.
+Clears an unordered access resource with a float value.
+This API works on FLOAT, UNORM, and SNORM unordered access views (UAVs), with format conversion from FLOAT to *NORM where appropriate. On other UAVs, the operation is invalid and the call will not reach the driver.
Windows Phone 8: This API is supported.
+Clears the depth-stencil resource.
+Pointer to the depth stencil to be cleared.
Identify the type of data to clear (see
Clear the depth buffer with this value. This value will be clamped between 0 and 1.
Clear the stencil buffer with this value.
Differences between Direct3D 9 and Direct3D 11/10: Unlike Direct3D 9, the full extent of the resource view is always cleared. Viewport and scissor settings are not applied. |
?
Windows Phone 8: This API is supported.
+Generates mipmaps for the given shader resource.
+A reference to an
You can call GenerateMips on any shader-resource view to generate the lower mipmap levels for the shader resource. GenerateMips uses the largest mipmap level of the view to recursively generate the lower levels of the mip and stops with the smallest level that is specified by the view. If the base resource wasn't created with
Feature levels 9.1, 9.2, and 9.3 can't support automatic generation of mipmaps for 3D (volume) textures.
Video adapters that support feature level 9.1 and higher support generating mipmaps if you use any of these formats:
+ + + + + + +
Video adapters that support feature level 9.2 and higher support generating mipmaps if you use any of these formats in addition to any of the formats for feature level 9.1:
+ + + + +
Video adapters that support feature level 9.3 and higher support generating mipmaps if you use any of these formats in addition to any of the formats for feature levels 9.1 and 9.2:
+ DXGI_FORMAT_B4G4R4A4 (optional) +
Video adapters that support feature level 10 and higher support generating mipmaps if you use any of these formats in addition to any of the formats for feature levels 9.1, 9.2, and 9.3:
(optional) + + + + + + + + + + + + + + + (optional) +
For all other unsupported formats, GenerateMips will silently fail.
Windows Phone 8: This API is supported.
+Sets the minimum level-of-detail (LOD) for a resource.
+A reference to an
The level-of-detail, which ranges between 0 and the maximum number of mipmap levels of the resource. For example, the maximum number of mipmap levels of a 1D texture is specified in the MipLevels member of the
To use a resource with SetResourceMinLOD, you must set the
For Direct3D 10 and Direct3D 10.1, when sampling from a texture resource in a shader, the sampler can define a minimum LOD clamp to force sampling from less detailed mip levels. For Direct3D 11, this functionality is extended from the sampler to the entire resource. Therefore, the application can specify the highest-resolution mip level of a resource that is available for access. This restricts the set of mip levels that are required to be resident in GPU memory, thereby saving memory.
The set of mip levels resident per-resource in GPU memory can be specified by the user.
Minimum LOD affects all of the resident mip levels. Therefore, only the resident mip levels can be updated and read from.
All methods that access texture resources must adhere to minimum LOD clamps.
Empty-set accesses are handled as out-of-bounds cases.
Windows Phone 8: This API is supported.
+Gets the minimum level-of-detail (LOD).
+A reference to an
Returns the minimum LOD.
Windows Phone 8: This API is supported.
+Copy a multisampled resource into a non-multisampled resource.
+Destination resource. Must be a created with the
A zero-based index, that identifies the destination subresource. Use D3D11CalcSubresource to calculate the index.
Source resource. Must be multisampled.
The source subresource of the source resource.
A
This API is most useful when re-using the resulting rendertarget of one render pass as an input to a second render pass.
The source and destination resources must be the same resource type and have the same dimensions. In addition, they must have compatible formats. There are three scenarios for this:
Scenario | Requirements |
---|---|
Source and destination are prestructured and typed | Both the source and destination must have identical formats and that format must be specified in the Format parameter. |
One resource is prestructured and typed and the other is prestructured and typeless | The typed resource must have a format that is compatible with the typeless resource (i.e. the typed resource is |
Source and destination are prestructured and typeless | Both the source and destination must have the same typeless format (i.e. both must have For example, given the
|
?
Windows Phone 8: This API is supported.
+Queues commands from a command list onto a device.
+A reference to an
A Boolean flag that determines whether the target context state is saved prior to and restored after the execution of a command list. Use TRUE to indicate that the runtime needs to save and restore the state. Use
Use this method to play back a command list that was recorded by a deferred context on any thread.
A call to ExecuteCommandList of a command list from a deferred context onto the immediate context is required for the recorded commands to be executed on the graphics processing unit (GPU). A call to ExecuteCommandList of a command list from a deferred context onto another deferred context can be used to merge recorded lists. But to run the commands from the merged deferred command list on the GPU, you need to execute them on the immediate context.
This method performs some runtime validation related to queries. Queries that are begun in a device context cannot be manipulated indirectly by executing a command list (that is, Begin or End was invoked against the same query by the deferred context which generated the command list). If such a condition occurs, the ExecuteCommandList method does not execute the command list. However, the state of the device context is still maintained, as would be expected (
Windows Phone 8: This API is supported.
+Get the rendering predicate state.
+Address of a boolean to fill with the predicate comparison value.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Restore all default settings.
+This method resets any device context to the default settings. This sets all input/output resource slots, shaders, input layouts, predications, scissor rectangles, depth-stencil state, rasterizer state, blend state, sampler state, and viewports to
For a scenario where you would like to clear a list of commands recorded so far, call
Windows Phone 8: This API is supported.
+Sends queued-up commands in the command buffer to the graphics processing unit (GPU).
+Most applications don't need to call this method. If an application calls this method when not necessary, it incurs a performance penalty. Each call to Flush incurs a significant amount of overhead.
When Microsoft Direct3D state-setting, present, or draw commands are called by an application, those commands are queued into an internal command buffer. Flush sends those commands to the GPU for processing. Typically, the Direct3D runtime sends these commands to the GPU automatically whenever the runtime determines that they need to be sent, such as when the command buffer is full or when an application maps a resource. Flush sends the commands manually.
We recommend that you use Flush when the CPU waits for an arbitrary amount of time (such as when you call the Sleep function).
Because Flush operates asynchronously, it can return either before or after the GPU finishes executing the queued graphics commands. However, the graphics commands eventually always complete. You can call the
Microsoft Direct3D 11 defers the destruction of objects. Therefore, an application can't rely upon objects immediately being destroyed. By calling Flush, you destroy any objects whose destruction was deferred. If an application requires synchronous destruction of an object, we recommend that the application release all its references, call
Gets the type of device context.
+A member of
Windows Phone 8: This API is supported.
+Gets the initialization flags associated with the current deferred context.
+The GetContextFlags method gets the flags that were supplied to the ContextFlags parameter of
Windows Phone 8: This API is supported.
+Create a command list and record graphics commands into it.
+A Boolean flag that determines whether the runtime saves deferred context state before it executes FinishCommandList and restores it afterwards. Use TRUE to indicate that the runtime needs to save and restore the state. Use
Note: This parameter does not affect the command list that the current call to FinishCommandList returns. However, this parameter affects the command list of the next call to FinishCommandList on the same deferred context.
Upon completion of the method, the passed reference to an
Returns
Create a command list from a deferred context and record commands into it by calling FinishCommandList. Play back a command list with an immediate context by calling
Immediate context state is cleared before and after a command list is executed. A command list has no concept of inheritance. Each call to FinishCommandList will record only the state set since any previous call to FinishCommandList.
For example, the state of a device context is its render state or pipeline state. To retrieve device context state, an application can call
For more information about how to use FinishCommandList, see How to: Record a Command List.
Windows Phone 8: This API is supported.
+Gets the type of device context.
+Windows Phone 8: This API is supported.
+Gets the initialization flags associated with the current deferred context.
+The GetContextFlags method gets the flags that were supplied to the ContextFlags parameter of
Windows Phone 8: This API is supported.
+The
Windows Phone 8: This API is supported.
+Bind a single vertex buffer to the input-assembler stage.
+The first input slot for binding. The first vertex buffer is explicitly bound to the start slot; this causes each additional vertex buffer in the array to be implicitly bound to each subsequent input slot. The maximum of 16 or 32 input slots (ranges from 0 to
A
For information about creating vertex buffers, see Create a Vertex Buffer.
Calling this method using a buffer that is currently bound for writing (i.e. bound to the stream output pipeline stage) will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
+Bind an array of vertex buffers to the input-assembler stage.
+The first input slot for binding. The first vertex buffer is explicitly bound to the start slot; this causes each additional vertex buffer in the array to be implicitly bound to each subsequent input slot. The maximum of 16 or 32 input slots (ranges from 0 to
A reference to an array of
For information about creating vertex buffers, see Create a Vertex Buffer.
Calling this method using a buffer that is currently bound for writing (i.e. bound to the stream output pipeline stage) will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
+Bind an array of vertex buffers to the input-assembler stage.
+The first input slot for binding. The first vertex buffer is explicitly bound to the start slot; this causes each additional vertex buffer in the array to be implicitly bound to each subsequent input slot. The maximum of 16 or 32 input slots (ranges from 0 to
A reference to an array of vertex buffers (see
Pointer to an array of stride values; one stride value for each buffer in the vertex-buffer array. Each stride is the size (in bytes) of the elements that are to be used from that vertex buffer.
Pointer to an array of offset values; one offset value for each buffer in the vertex-buffer array. Each offset is the number of bytes between the first element of a vertex buffer and the first element that will be used.
For information about creating vertex buffers, see Create a Vertex Buffer.
Calling this method using a buffer that is currently bound for writing (i.e. bound to the stream output pipeline stage) will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
+Bind an input-layout object to the input-assembler stage.
+A reference to the input-layout object (see
Input-layout objects describe how vertex buffer data is streamed into the IA pipeline stage. To create an input-layout object, call
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Bind an array of vertex buffers to the input-assembler stage.
+For info about creating vertex buffers, see How to: Create a Vertex Buffer.
Calling this method using a buffer that is currently bound for writing (that is, bound to the stream output pipeline stage) will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Bind an index buffer to the input-assembler stage.
+A reference to an
A
Offset (in bytes) from the start of the index buffer to the first index to use.
For information about creating index buffers, see How to: Create an Index Buffer.
Calling this method using a buffer that is currently bound for writing (i.e. bound to the stream output pipeline stage) will effectively bind
The debug layer will generate a warning whenever a resource is prevented from being bound simultaneously as an input and an output, but this will not prevent invalid data from being used by the runtime.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Bind information about the primitive type, and data order that describes input data for the input assembler stage.
+The type of primitive and ordering of the primitive data (see D3D11_PRIMITIVE_TOPOLOGY).
Windows Phone 8: This API is supported.
+Get a reference to the input-layout object that is bound to the input-assembler stage.
+A reference to the input-layout object (see
For information about creating an input-layout object, see Creating the Input-Layout Object.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the vertex buffers bound to the input-assembler stage.
+The input slot of the first vertex buffer to get. The first vertex buffer is explicitly bound to the start slot; this causes each additional vertex buffer in the array to be implicitly bound to each subsequent input slot. The maximum of 16 or 32 input slots (ranges from 0 to
The number of vertex buffers to get starting at the offset. The number of buffers (plus the starting slot) cannot exceed the total number of IA-stage input slots.
A reference to an array of vertex buffers returned by the method (see
Pointer to an array of stride values returned by the method; one stride value for each buffer in the vertex-buffer array. Each stride value is the size (in bytes) of the elements that are to be used from that vertex buffer.
Pointer to an array of offset values returned by the method; one offset value for each buffer in the vertex-buffer array. Each offset is the number of bytes between the first element of a vertex buffer and the first element that will be used.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get a reference to the index buffer that is bound to the input-assembler stage.
+A reference to an index buffer returned by the method (see
Specifies format of the data in the index buffer (see
Offset (in bytes) from the start of the index buffer, to the first index to use.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get information about the primitive type, and data order that describes input data for the input assembler stage.
+A reference to the type of primitive, and ordering of the primitive data (see D3D11_PRIMITIVE_TOPOLOGY).
Windows Phone 8: This API is supported.
+Get or sets a reference to the input-layout object that is bound to the input-assembler stage.
+For information about creating an input-layout object, see Creating the Input-Layout Object.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get or sets information about the primitive type, and data order that describes input data for the input assembler stage.
+Windows Phone 8: This API is supported.
+The
Windows Phone 8: This API is supported.
+Bind one or more render targets atomically and the depth-stencil buffer to the output-merger stage.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called D3D11_SIMULTANEOUS_RENDER_TARGET_COUNT. It is invalid to try to set the same subresource to multiple render target slots. Any render targets not defined by this call are set to
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, then all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
+Binds resources to the output-merger stage.
+Number of render-target views (ppRenderTargetViews) and depth-stencil view (ppDepthStencilView) to bind. If you set NumViews to D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL (0xffffffff), this method does not modify the currently bound render-target views (RTVs) and also does not modify depth-stencil view (DSV).
Pointer to an array of
Pointer to a
Index into a zero-based array to begin setting unordered-access views (ranges from 0 to
For the Direct3D 11.1 runtime, which is available starting with Windows Developer Preview, this value can range from 0 to D3D11_1_UAV_SLOT_COUNT - 1. D3D11_1_UAV_SLOT_COUNT is defined as 64.
For pixel shaders, UAVStartSlot should be equal to the number of render-target views being bound.
Number of unordered-access views (UAVs) in ppUnorderedAccessView. If you set NumUAVs to D3D11_KEEP_UNORDERED_ACCESS_VIEWS (0xffffffff), this method does not modify the currently bound unordered-access views.
For the Direct3D 11.1 runtime, which is available starting with Windows Developer Preview, this value can range from 0 to D3D11_1_UAV_SLOT_COUNT - UAVStartSlot.
Pointer to an array of
An array of append and consume buffer offsets. A value of -1 indicates to keep the current offset. Any other values set the hidden counter for that appendable and consumable UAV. pUAVInitialCounts is relevant only for UAVs that were created with either
For pixel shaders, the render targets and unordered-access views share the same resource slots when being written out. This means that UAVs must be given an offset so that they are placed in the slots after the render target views that are being bound.
Note: RTVs, DSV, and UAVs cannot be set independently; they all need to be set at the same time.
Two RTVs conflict if they share a subresource (and therefore share the same resource).
Two UAVs conflict if they share a subresource (and therefore share the same resource).
An RTV conflicts with a UAV if they share a subresource or share a bind point.
OMSetRenderTargetsAndUnorderedAccessViews operates properly in the following situations:
NumViews != D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL and NumUAVs != D3D11_KEEP_UNORDERED_ACCESS_VIEWS
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews performs the following tasks:
NumViews == D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL
In this situation, OMSetRenderTargetsAndUnorderedAccessViews binds only UAVs.
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews unbinds the following items:
OMSetRenderTargetsAndUnorderedAccessViews binds ppUnorderedAccessView.
OMSetRenderTargetsAndUnorderedAccessViews ignores ppDepthStencilView, and the current depth-stencil view remains bound.
NumUAVs == D3D11_KEEP_UNORDERED_ACCESS_VIEWS
In this situation, OMSetRenderTargetsAndUnorderedAccessViews binds only RTVs and DSV.
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews unbinds the following items:
OMSetRenderTargetsAndUnorderedAccessViews binds ppRenderTargetViews and ppDepthStencilView.
OMSetRenderTargetsAndUnorderedAccessViews ignores UAVStartSlot.
Binds resources to the output-merger stage.
+Number of render-target views (ppRenderTargetViews) and depth-stencil view (ppDepthStencilView) to bind. If you set NumViews to D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL (0xffffffff), this method does not modify the currently bound render-target views (RTVs) and also does not modify depth-stencil view (DSV).
Pointer to an array of
Pointer to a
Index into a zero-based array to begin setting unordered-access views (ranges from 0 to
For the Direct3D 11.1 runtime, which is available starting with Windows Developer Preview, this value can range from 0 to D3D11_1_UAV_SLOT_COUNT - 1. D3D11_1_UAV_SLOT_COUNT is defined as 64.
For pixel shaders, UAVStartSlot should be equal to the number of render-target views being bound.
Number of unordered-access views (UAVs) in ppUnorderedAccessView. If you set NumUAVs to D3D11_KEEP_UNORDERED_ACCESS_VIEWS (0xffffffff), this method does not modify the currently bound unordered-access views.
For the Direct3D 11.1 runtime, which is available starting with Windows Developer Preview, this value can range from 0 to D3D11_1_UAV_SLOT_COUNT - UAVStartSlot.
Pointer to an array of
An array of append and consume buffer offsets. A value of -1 indicates to keep the current offset. Any other values set the hidden counter for that appendable and consumable UAV. pUAVInitialCounts is relevant only for UAVs that were created with either
For pixel shaders, the render targets and unordered-access views share the same resource slots when being written out. This means that UAVs must be given an offset so that they are placed in the slots after the render target views that are being bound.
Note: RTVs, DSV, and UAVs cannot be set independently; they all need to be set at the same time.
Two RTVs conflict if they share a subresource (and therefore share the same resource).
Two UAVs conflict if they share a subresource (and therefore share the same resource).
An RTV conflicts with a UAV if they share a subresource or share a bind point.
OMSetRenderTargetsAndUnorderedAccessViews operates properly in the following situations:
NumViews != D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL and NumUAVs != D3D11_KEEP_UNORDERED_ACCESS_VIEWS
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews performs the following tasks:
NumViews == D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL
In this situation, OMSetRenderTargetsAndUnorderedAccessViews binds only UAVs.
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews unbinds the following items:
OMSetRenderTargetsAndUnorderedAccessViews binds ppUnorderedAccessView.
OMSetRenderTargetsAndUnorderedAccessViews ignores ppDepthStencilView, and the current depth-stencil view remains bound.
NumUAVs == D3D11_KEEP_UNORDERED_ACCESS_VIEWS
In this situation, OMSetRenderTargetsAndUnorderedAccessViews binds only RTVs and DSV.
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews unbinds the following items:
OMSetRenderTargetsAndUnorderedAccessViews binds ppRenderTargetViews and ppDepthStencilView.
OMSetRenderTargetsAndUnorderedAccessViews ignores UAVStartSlot.
Bind one or more render targets atomically and the depth-stencil buffer to the output-merger stage.
+The maximum number of active render targets a device can have active at any given time is set by a #define in D3D11.h called
If any subresources are also currently bound for reading in a different stage or writing (perhaps in a different part of the pipeline), those bind points will be set to
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
If the render-target views were created from an array resource type, all of the render-target views must have the same array size. This restriction also applies to the depth-stencil view, its array size must match that of the render-target views being bound.
The pixel shader must be able to simultaneously render to at least eight separate render targets. All of these render targets must access the same type of resource: Buffer, Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D, or TextureCube. All render targets must have the same size in all dimensions (width and height, and depth for 3D or array size for *Array types). If render targets use multisample anti-aliasing, all bound render targets and depth buffer must be the same form of multisample resource (that is, the sample counts must be the same). Each render target can have a different data format. These render target formats are not required to have identical bit-per-element counts.
Any combination of the eight slots for render targets can have a render target set or not set.
The same resource view cannot be bound to multiple render target slots simultaneously. However, you can set multiple non-overlapping resource views of a single resource as simultaneous multiple render targets.
Windows Phone 8: This API is supported.
+Binds resources to the output-merger stage.
+Number of render targets to bind (ranges between 0 and
Pointer to an array of
Pointer to a
Index into a zero-based array to begin setting unordered-access views (ranges from 0 to
For the Direct3D 11.1 runtime, which is available starting with Windows 8, this value can range from 0 to D3D11_1_UAV_SLOT_COUNT - 1. D3D11_1_UAV_SLOT_COUNT is defined as 64.
For pixel shaders, UAVStartSlot should be equal to the number of render-target views being bound.
Number of unordered-access views (UAVs) in ppUnorderedAccessView. If you set NumUAVs to D3D11_KEEP_UNORDERED_ACCESS_VIEWS (0xffffffff), this method does not modify the currently bound unordered-access views.
For the Direct3D 11.1 runtime, which is available starting with Windows 8, this value can range from 0 to D3D11_1_UAV_SLOT_COUNT - UAVStartSlot.
Pointer to an array of
An array of append and consume buffer offsets. A value of -1 indicates to keep the current offset. Any other values set the hidden counter for that appendable and consumable UAV. pUAVInitialCounts is relevant only for UAVs that were created with either
For pixel shaders, the render targets and unordered-access views share the same resource slots when being written out. This means that UAVs must be given an offset so that they are placed in the slots after the render target views that are being bound.
Note: RTVs, DSV, and UAVs cannot be set independently; they all need to be set at the same time.
Two RTVs conflict if they share a subresource (and therefore share the same resource).
Two UAVs conflict if they share a subresource (and therefore share the same resource).
An RTV conflicts with a UAV if they share a subresource or share a bind point.
OMSetRenderTargetsAndUnorderedAccessViews operates properly in the following situations:
NumViews != D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL and NumUAVs != D3D11_KEEP_UNORDERED_ACCESS_VIEWS
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews performs the following tasks:
NumViews == D3D11_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL
In this situation, OMSetRenderTargetsAndUnorderedAccessViews binds only UAVs.
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews unbinds the following items:
OMSetRenderTargetsAndUnorderedAccessViews binds ppUnorderedAccessView.
OMSetRenderTargetsAndUnorderedAccessViews ignores ppDepthStencilView, and the current depth-stencil view remains bound.
NumUAVs == D3D11_KEEP_UNORDERED_ACCESS_VIEWS
In this situation, OMSetRenderTargetsAndUnorderedAccessViews binds only RTVs and DSV.
The following conditions must be true for OMSetRenderTargetsAndUnorderedAccessViews to succeed and for the runtime to pass the bind information to the driver:
OMSetRenderTargetsAndUnorderedAccessViews unbinds the following items:
OMSetRenderTargetsAndUnorderedAccessViews binds ppRenderTargetViews and ppDepthStencilView.
OMSetRenderTargetsAndUnorderedAccessViews ignores UAVStartSlot.
Windows Phone 8: This API is supported.
+Set the blend state of the output-merger stage.
+Pointer to a blend-state interface (see
Array of blend factors, one for each RGBA component. The blend factors modulate values for the pixel shader, render target, or both. If you created the blend-state object with
32-bit sample coverage. The default value is 0xffffffff. See remarks.
Blend state is used by the output-merger stage to determine how to blend together two RGB pixel values and two alpha values. The two RGB pixel values and two alpha values are the RGB pixel value and alpha value that the pixel shader outputs and the RGB pixel value and alpha value already in the output render target. The blend option controls the data source that the blending stage uses to modulate values for the pixel shader, render target, or both. The blend operation controls how the blending stage mathematically combines these modulated values.
To create a blend-state interface, call
Passing in
State | Default Value |
---|---|
AlphaToCoverageEnable | |
IndependentBlendEnable | |
RenderTarget[0].BlendEnable | |
RenderTarget[0].SrcBlend | |
RenderTarget[0].DestBlend | |
RenderTarget[0].BlendOp | |
RenderTarget[0].SrcBlendAlpha | |
RenderTarget[0].DestBlendAlpha | |
RenderTarget[0].BlendOpAlpha | |
RenderTarget[0].RenderTargetWriteMask |
?
A sample mask determines which samples get updated in all the active render targets. The mapping of bits in a sample mask to samples in a multisample render target is the responsibility of an individual application. A sample mask is always applied; it is independent of whether multisampling is enabled, and does not depend on whether an application uses multisample render targets.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Sets the depth-stencil state of the output-merger stage.
+Pointer to a depth-stencil state interface (see
Reference value to perform against when doing a depth-stencil test. See remarks.
To create a depth-stencil state interface, call
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Get references to the resources bound to the output-merger stage.
+Number of render targets to retrieve.
Pointer to an array of
Pointer to a
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get references to the resources bound to the output-merger stage.
+The number of render-target views to retrieve.
Pointer to an array of
Pointer to a
Index into a zero-based array to begin retrieving unordered-access views (ranges from 0 to
Number of unordered-access views to return in ppUnorderedAccessViews. This number ranges from 0 to
Pointer to an array of
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the blend state of the output-merger stage.
+Address of a reference to a blend-state interface (see
Array of blend factors, one for each RGBA component.
Pointer to a sample mask.
The reference count of the returned interface will be incremented by one when the blend state is retrieved. Applications must release returned reference(s) when they are no longer needed, or else there will be a memory leak.
Windows Phone 8: This API is supported.
+Gets the depth-stencil state of the output-merger stage.
+Address of a reference to a depth-stencil state interface (see
Pointer to the stencil reference value used in the depth-stencil test.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+The
Windows Phone 8: This API is supported.
+All scissor rects must be set atomically as one operation. Any scissor rects not defined by the call are disabled.
The scissor rectangles will only be used if ScissorEnable is set to true in the rasterizer state (see
Which scissor rectangle to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader (see shader semantic syntax). If a geometry shader does not make use of the SV_ViewportArrayIndex semantic then Direct3D will use the first scissor rectangle in the array.
Each scissor rectangle in the array corresponds to a viewport in an array of viewports (see
All scissor rects must be set atomically as one operation. Any scissor rects not defined by the call are disabled.
The scissor rectangles will only be used if ScissorEnable is set to true in the rasterizer state (see
Which scissor rectangle to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader (see shader semantic syntax). If a geometry shader does not make use of the SV_ViewportArrayIndex semantic then Direct3D will use the first scissor rectangle in the array.
Each scissor rectangle in the array corresponds to a viewport in an array of viewports (see
All viewports must be set atomically as one operation. Any viewports not defined by the call are disabled.
Which viewport to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader; if a geometry shader does not specify the semantic, Direct3D will use the first viewport in the array.
+All viewports must be set atomically as one operation. Any viewports not defined by the call are disabled.
Which viewport to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader; if a geometry shader does not specify the semantic, Direct3D will use the first viewport in the array.
+All viewports must be set atomically as one operation. Any viewports not defined by the call are disabled.
Which viewport to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader; if a geometry shader does not specify the semantic, Direct3D will use the first viewport in the array.
Set the rasterizer state for the rasterizer stage of the pipeline.
+To create a rasterizer state interface, call
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Bind an array of viewports to the rasterizer stage of the pipeline.
+Number of viewports to bind.
An array of
All viewports must be set atomically as one operation. Any viewports not defined by the call are disabled.
Which viewport to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader; if a geometry shader does not specify the semantic, Direct3D will use the first viewport in the array.
Note: Even though you specify float values to the members of the
Windows Phone 8: This API is supported.
+Bind an array of scissor rectangles to the rasterizer stage.
+Number of scissor rectangles to bind.
An array of scissor rectangles (see D3D11_RECT).
All scissor rects must be set atomically as one operation. Any scissor rects not defined by the call are disabled.
The scissor rectangles will only be used if ScissorEnable is set to true in the rasterizer state (see
Which scissor rectangle to use is determined by the SV_ViewportArrayIndex semantic output by a geometry shader (see shader semantic syntax). If a geometry shader does not make use of the SV_ViewportArrayIndex semantic then Direct3D will use the first scissor rectangle in the array.
Each scissor rectangle in the array corresponds to a viewport in an array of viewports (see
Windows Phone 8: This API is supported.
+Get the rasterizer state from the rasterizer stage of the pipeline.
+Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Gets the array of viewports bound to the rasterizer stage.
+A reference to a variable that, on input, specifies the number of viewports (ranges from 0 to D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE) in the pViewports array; on output, the variable contains the actual number of viewports that are bound to the rasterizer stage. If pViewports is
Note: In some versions of the Windows SDK, a debug device will raise an exception if the input value in the variable to which pNumViewports points is greater than D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE even if pViewports is
An array of
Windows Phone 8: This API is supported.
+Get the array of scissor rectangles bound to the rasterizer stage.
+The number of scissor rectangles (ranges between 0 and D3D11_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE) bound; set pRects to
An array of scissor rectangles (see D3D11_RECT). If NumRects is greater than the number of scissor rects currently bound, then unused members of the array will contain 0.
Windows Phone 8: This API is supported.
+Get or sets the rasterizer state from the rasterizer stage of the pipeline.
+Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+A domain-shader interface manages an executable program (a domain shader) that controls the domain-shader stage.
+The domain-shader interface has no methods; use HLSL to implement your shader functionality. All shaders are implemented from a common set of features referred to as the common-shader core.
To create a domain-shader interface, call
This interface is defined in D3D11.h.
+Encapsulates forward and inverse FFTs.
+Sets the scale used for forward transforms.
+The scale to use for forward transforms. Setting ForwardScale to 0 causes the default values of 1 to be used.
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
SetForwardScale sets the scale used by
Gets the scale for forward transforms.
+Scale for forward transforms.
Sets the scale used for inverse transforms.
+Scale used for inverse transforms. Setting InverseScale to 0 causes the default value of 1/N to be used, where N is the product of the transformed dimension lengths.
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
SetInverseScale sets the scale used by
Get the scale for inverse transforms.
+Scale for inverse transforms.
Attaches buffers to an FFT context and performs any required precomputations.
+Number of buffers in ppTempBuffers.
A reference to an array of
Number of buffers in ppPrecomputeBuffers.
A reference to an array of
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
The
Use the info in
Although you can share temporary buffers between multiple device contexts, we recommend not to concurrently execute multiple FFT objects that share temporary buffers.
Some FFT algorithms benefit from precomputing sin and cos. The FFT object might store precomputed data in the user-supplied precompute buffers.
+Attaches buffers to an FFT context and performs any required precomputations.
+Number of buffers in ppTempBuffers.
A reference to an array of
Number of buffers in ppPrecomputeBuffers.
A reference to an array of
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
The
Use the info in
Although you can share temporary buffers between multiple device contexts, we recommend not to concurrently execute multiple FFT objects that share temporary buffers.
Some FFT algorithms benefit from precomputing sin and cos. The FFT object might store precomputed data in the user-supplied precompute buffers.
+Performs a forward FFT.
+Pointer to
Pointer to a
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
ForwardTransform can be called after buffers have been attached to the context using
The format of complex data is interleaved components (for example, (Real0, Imag0), (Real1, Imag1) ... , and so on). Data is stored in row major order.
+Performs an inverse FFT.
+Pointer to
Pointer to a
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
Gets or sets the scale for forward transforms.
+Get or sets the scale for inverse transforms.
+Optional flags that control the behavior of
Identifies how to bind a resource to the pipeline.
+In general, binding flags can be combined using a logical OR (except the constant-buffer flag); however, you should use a single flag to allow the device to optimize the resource usage.
This enumeration is used by a:
A shader-resource buffer is NOT a constant buffer; rather, it is a texture or buffer resource that is bound to a shader, that contains texture or buffer data (it is not limited to a single element type in the buffer). A shader-resource buffer is created with the
Note: The Direct3D 11.1 runtime, which is available starting with Windows 8, enables mapping dynamic constant buffers and shader resource views (SRVs) of dynamic buffers with
Bind a buffer as a vertex buffer to the input-assembler stage.
Bind a buffer as an index buffer to the input-assembler stage.
Bind a buffer as a constant buffer to a shader stage; this flag may NOT be combined with any other bind flag.
Bind a buffer or texture to a shader stage; this flag cannot be used with the
Note: The Direct3D 11.1 runtime, which is available starting with Windows 8, enables mapping dynamic constant buffers and shader resource views (SRVs) of dynamic buffers with
Bind an output buffer for the stream-output stage.
Bind a texture as a render target for the output-merger stage.
Bind a texture as a depth-stencil target for the output-merger stage.
Bind an unordered access resource.
RGB or alpha blending operation.
+The runtime implements RGB blending and alpha blending separately. Therefore, blend state requires separate blend operations for RGB data and alpha data. These blend operations are specified in a blend description. The two sources — source 1 and source 2 — are shown in the blending block diagram.
Blend state is used by the output-merger stage to determine how to blend together two RGB pixel values and two alpha values. The two RGB pixel values and two alpha values are the RGB pixel value and alpha value that the pixel shader outputs and the RGB pixel value and alpha value already in the output render target. The blend option controls the data source that the blending stage uses to modulate values for the pixel shader, render target, or both. The blend operation controls how the blending stage mathematically combines these modulated values.
+Add source 1 and source 2.
Subtract source 1 from source 2.
Subtract source 2 from source 1.
Find the minimum of source 1 and source 2.
Find the maximum of source 1 and source 2.
Describes the blend state that you use in a call to
Here are the default values for blend state.
State | Default Value |
---|---|
AlphaToCoverageEnable | |
IndependentBlendEnable | |
RenderTarget[0].BlendEnable | |
RenderTarget[0].SrcBlend | |
RenderTarget[0].DestBlend | |
RenderTarget[0].BlendOp | |
RenderTarget[0].SrcBlendAlpha | |
RenderTarget[0].DestBlendAlpha | |
RenderTarget[0].BlendOpAlpha | |
RenderTarget[0].RenderTargetWriteMask |
?
Note:
If the driver type is set to
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
These flags are used by functions which operate on one or more channels in a texture.
+Indicates the red channel should be used.
Indicates the blue channel should be used.
Indicates the green channel should be used.
Indicates the alpha channel should be used.
Indicates the luminances of the red, green, and blue channels should be used.
Identify which components of each pixel of a render target are writable during blending.
+These flags can be combined with a bitwise OR.
+Allow data to be stored in the red component.
Allow data to be stored in the green component.
Allow data to be stored in the blue component.
Allow data to be stored in the alpha component.
Allow data to be stored in all components.
Comparison options.
+A comparison option determines how the runtime compares source (new) data against destination (existing) data before storing the new data. The comparison option is declared in a description before an object is created. The API allows you to set a comparison option for a depth-stencil buffer (see
Never pass the comparison.
If the source data is less than the destination data, the comparison passes.
If the source data is equal to the destination data, the comparison passes.
If the source data is less than or equal to the destination data, the comparison passes.
If the source data is greater than the destination data, the comparison passes.
If the source data is not equal to the destination data, the comparison passes.
If the source data is greater than or equal to the destination data, the comparison passes.
Always pass the comparison.
Unordered resource support options for a compute shader resource (see
Options for performance counters.
+Independent hardware vendors may define their own set of performance counters for their devices, by giving the enumeration value a number that is greater than the value for
This enumeration is used by
Define a performance counter that is dependent on the hardware device.
Data type of a performance counter.
+These flags are an output parameter in
32-bit floating point.
16-bit unsigned integer.
32-bit unsigned integer.
64-bit unsigned integer.
Specifies the types of CPU access allowed for a resource.
+This enumeration is used in
Applications may combine one or more of these flags with a logical OR. When possible, create resources with no CPU access flags, as this enables better resource optimization.
The
The resource is to be mappable so that the CPU can change its contents. Resources created with this flag cannot be set as outputs of the pipeline and must be created with either dynamic or staging usage (see
The resource is to be mappable so that the CPU can read its contents. Resources created with this flag cannot be set as either inputs or outputs to the pipeline and must be created with staging usage (see
Indicates triangles facing a particular direction are not drawn.
+This enumeration is part of a rasterizer-state object description (see
Always draw all triangles.
Do not draw triangles that are front-facing.
Do not draw triangles that are back-facing.
Specifies the parts of the depth stencil to clear.
+These flags are used when calling
Clear the depth buffer.
Clear the stencil buffer.
Specifies how to access a resource used in a depth-stencil view.
+This enumeration is used in
The resource will be accessed as a 1D texture.
The resource will be accessed as an array of 1D textures.
The resource will be accessed as a 2D texture.
The resource will be accessed as an array of 2D textures.
The resource will be accessed as a 2D texture with multisampling.
The resource will be accessed as an array of 2D textures with multisampling.
Depth-stencil view options.
+This enumeration is used by
Limiting a depth-stencil buffer to read-only access allows more than one depth-stencil view to be bound to the pipeline simultaneously, since it is not possible to have a read/write conflicts between separate views.
+Indicates that depth values are read only.
Indicates that stencil values are read only.
Identify the portion of a depth-stencil buffer for writing depth data.
+Turn off writes to the depth-stencil buffer.
Turn on writes to the depth-stencil buffer.
Device context options.
+This enumeration is used by
The device context is an immediate context.
The device context is a deferred context.
Describes parameters that are used to create a device.
+Device creation flags are used by
An application might dynamically create (and destroy) threads to improve performance especially on a machine with multiple CPU cores. There may be cases, however, when an application needs to prevent extra threads from being created. This can happen when you want to simplify debugging, profile code or develop a tool for instance. For these cases, use
Use this flag if your application will only call methods of Direct3D 11 interfaces from a single thread. By default, the
Creates a device that supports the debug layer.
To use this flag, you must have D3D11*SDKLayers.dll installed; otherwise, device creation fails. To get D3D11_1SDKLayers.dll, install the SDK for Windows 8.
Note: This flag is not supported in Direct3D 11.
Prevents multiple threads from being created. When this flag is used with a Windows Advanced Rasterization Platform (WARP) device, no additional threads will be created by WARP and all rasterization will occur on the calling thread. This flag is not recommended for general use. See remarks.
Required for Direct2D interoperability with Direct3D resources.
FFT creation flags.
+Do not AddRef or Release temp and precompute buffers, caller is responsible for holding references to these buffers.
FFT data types.
+Real numbers.
Complex numbers.
Number of dimensions for FFT data.
+One dimension.
Two dimensions.
Three dimensions.
Direct3D 11 feature options.
+This enumeration is used when querying a driver about support for these features by calling
The following table shows the structures associated with each enumeration value.
Enumerant | Associated Structure |
---|---|
| |
| |
| |
| |
| |
D3D11_FEATURE_D3D11_OPTIONS | D3D11_FEATURE_DATA_D3D11_OPTIONS |
D3D11_FEATURE_ARCHITECTURE_INFO | D3D11_FEATURE_DATA_ARCHITECTURE_INFO |
D3D11_FEATURE_D3D9_OPTIONS | D3D11_FEATURE_DATA_D3D9_OPTIONS |
D3D11_FEATURE_SHADER_MIN_PRECISION_SUPPORT | D3D11_FEATURE_DATA_SHADER_MIN_PRECISION_SUPPORT |
D3D11_FEATURE_D3D9_SHADOW_SUPPORT | D3D11_FEATURE_DATA_D3D9_SHADOW_SUPPORT |
D3D11_FEATURE_D3D11_OPTIONS1 | D3D11_FEATURE_DATA_D3D11_OPTIONS1 |
D3D11_FEATURE_D3D9_SIMPLE_INSTANCING_SUPPORT | D3D11_FEATURE_DATA_D3D9_SIMPLE_INSTANCING_SUPPORT |
D3D11_FEATURE_MARKER_SUPPORT + | D3D11_FEATURE_DATA_MARKER_SUPPORT |
D3D11_FEATURE_D3D9_OPTIONS1 | D3D11_FEATURE_DATA_D3D9_OPTIONS1 |
?
+The driver supports multithreading. To see an example of testing a driver for multithread support, see How To: Check for Driver Support.
Supports the use of the double-precision shaders in HLSL.
Supports the formats in
Supports the formats in
Supports compute shaders and raw and structured buffers.
Determines the fill mode to use when rendering triangles.
+This enumeration is part of a rasterizer-state object description (see
Draw lines connecting the vertices. Adjacent vertices are not drawn.
Fill the triangles formed by the vertices. Adjacent vertices are not drawn.
Filtering options during texture sampling.
+Note: If you use different filter types for min versus mag filter, undefined behavior occurs in certain cases where the choice between whether magnification or minification happens is ambiguous. To prevent this undefined behavior, use filter modes that use similar filter operations for both min and mag (or use anisotropic filtering, which avoids the issue as well).
During texture sampling, one or more texels are read and combined (this is called filtering) to produce a single value. Point sampling reads a single texel while linear sampling reads two texels (endpoints) and linearly interpolates a third value between the endpoints.
HLSL texture-sampling functions also support comparison filtering during texture sampling. Comparison filtering compares each sampled texel against a comparison value. The boolean result is blended the same way that normal texture filtering is blended.
You can use HLSL intrinsic texture-sampling functions that implement texture filtering only or companion functions that use texture filtering with comparison filtering.
Texture Sampling Function | Texture Sampling Function with Comparison Filtering |
---|---|
sample | samplecmp or samplecmplevelzero |
?
Comparison filters only work with textures that have the following DXGI formats: R32_FLOAT_X8X24_TYPELESS, R32_FLOAT, R24_UNORM_X8_TYPELESS, R16_UNORM.
+Use point sampling for minification, magnification, and mip-level sampling.
Use point sampling for minification and magnification; use linear interpolation for mip-level sampling.
Use point sampling for minification; use linear interpolation for magnification; use point sampling for mip-level sampling.
Use point sampling for minification; use linear interpolation for magnification and mip-level sampling.
Use linear interpolation for minification; use point sampling for magnification and mip-level sampling.
Use linear interpolation for minification; use point sampling for magnification; use linear interpolation for mip-level sampling.
Use linear interpolation for minification and magnification; use point sampling for mip-level sampling.
Use linear interpolation for minification, magnification, and mip-level sampling.
Use anisotropic interpolation for minification, magnification, and mip-level sampling.
Use point sampling for minification, magnification, and mip-level sampling. Compare the result to the comparison value.
Use point sampling for minification and magnification; use linear interpolation for mip-level sampling. Compare the result to the comparison value.
Use point sampling for minification; use linear interpolation for magnification; use point sampling for mip-level sampling. Compare the result to the comparison value.
Use point sampling for minification; use linear interpolation for magnification and mip-level sampling. Compare the result to the comparison value.
Use linear interpolation for minification; use point sampling for magnification and mip-level sampling. Compare the result to the comparison value.
Use linear interpolation for minification; use point sampling for magnification; use linear interpolation for mip-level sampling. Compare the result to the comparison value.
Use linear interpolation for minification and magnification; use point sampling for mip-level sampling. Compare the result to the comparison value.
Use linear interpolation for minification, magnification, and mip-level sampling. Compare the result to the comparison value.
Use anisotropic interpolation for minification, magnification, and mip-level sampling. Compare the result to the comparison value.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Texture filtering flags.
+D3DX11 automatically performs gamma correction (to convert color data from RGB space to standard RGB space) when loading texture data. This is automatically done for instance when RGB data is loaded from a .png file into an sRGB texture. Use the SRGB filter flags to indicate if the data does not need to be converted into sRGB space.
+No scaling or filtering will take place. Pixels outside the bounds of the source image are assumed to be transparent black.
Each destination pixel is computed by sampling the nearest pixel from the source image.
Each destination pixel is computed by sampling the four nearest pixels from the source image. This filter works best when the scale on both axes is less than two.
Every pixel in the source image contributes equally to the destination image. This is the slowest of the filters.
Each pixel is computed by averaging a 2x2(x2) box of pixels from the source image. This filter works only when the dimensions of the destination are half those of the source, as is the case with mipmaps.
Pixels off the edge of the texture on the u-axis should be mirrored, not wrapped.
Pixels off the edge of the texture on the v-axis should be mirrored, not wrapped.
Pixels off the edge of the texture on the w-axis should be mirrored, not wrapped.
Specifying this flag is the same as specifying the
The resulting image must be dithered using a 4x4 ordered dither algorithm. This happens when converting from one format to another.
Do diffuse dithering on the image when changing from one format to another.
Input data is in standard RGB (sRGB) color space. See remarks.
Output data is in standard RGB (sRGB) color space. See remarks.
Same as specifying
Types of magnification or minification sampler filters.
+Point filtering used as a texture magnification or minification filter. The texel with coordinates nearest to the desired pixel value is used. The texture filter to be used between mipmap levels is nearest-point mipmap filtering. The rasterizer uses the color from the texel of the nearest mipmap texture.
Bilinear interpolation filtering used as a texture magnification or minification filter. A weighted average of a 2 x 2 area of texels surrounding the desired pixel is used. The texture filter to use between mipmap levels is trilinear mipmap interpolation. The rasterizer linearly interpolates pixel color, using the texels of the two nearest mipmap textures.
Which resources are supported for a given format and given device (see
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Image file formats supported by D3DX11Createxxx and D3DX11Savexxx functions.
+See Types of Bitmaps (GDI+) for more information on some of these formats.
+Windows bitmap (BMP) file format. Contains a header that describes the resolution of the device on which the rectangle of pixels was created, the dimensions of the rectangle, the size of the array of bits, a logical palette, and an array of bits that defines the relationship between pixels in the bitmapped image and entries in the logical palette. The file extension for this format is .bmp.
Joint Photographic Experts Group (JPEG) compressed file format. Specifies variable compression of 24-bit RGB color and 8-bit gray-scale Tagged Image File Format (TIFF) image document files. The file extension for this format is .jpg.
Portable Network Graphics (PNG) file format. A non-proprietary bitmap format using lossless compression. The file extension for this format is .png.
DirectDraw surface (DDS) file format. Stores textures, volume textures, and cubic environment maps, with or without mipmap levels, and with or without pixel compression. The file extension for this format is .dds.
Tagged Image File Format (TIFF). The file extensions for this format are .tif and .tiff.
Graphics Interchange Format (GIF). The file extension for this format is .gif.
Windows Media Photo format (WMP). This format is also known as HD Photo and JPEG XR. The file extensions for this format are .hdp, .jxr, and .wdp.
To work properly,
Type of data contained in an input slot.
+Use these values to specify the type of data for a particular input element (see
Input data is per-vertex data.
Input data is per-instance data.
Specifies how the CPU should respond when an application calls the
This enumeration is used by
Identifies a resource to be accessed for reading and writing by the CPU. Applications may combine one or more of these flags.
+This enumeration is used in
These remarks are divided into the following topics:
Resource is mapped for reading. The resource must have been created with read access (see
Resource is mapped for writing. The resource must have been created with write access (see
Resource is mapped for reading and writing. The resource must have been created with read and write access (see
Resource is mapped for writing; the previous contents of the resource will be undefined. The resource must have been created with write access and dynamic usage (See
Resource is mapped for writing; the existing contents of the resource cannot be overwritten (see Remarks). This flag is only valid on vertex and index buffers. The resource must have been created with write access (see
Note: The Direct3D 11.1 runtime, which is available starting with Windows 8, enables mapping dynamic constant buffers and shader resource views (SRVs) of dynamic buffers with
Categories of debug messages. This will identify the category of a message when retrieving a message with
This is part of the Information Queue feature. See
A debug message in the Information Queue.
+This structure is returned from
The category of the message. See
The severity of the message. See
The ID of the message. See
The message string.
The length of pDescription in bytes.
Debug message severity levels for an information queue.
+Use these values to allow or deny message categories to pass through the storage and retrieval filters for an information queue (see
Defines some type of corruption which has occurred.
Defines an error message.
Defines a warning message.
Defines an information message.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Normal map options. You can combine any number of these flags by using a bitwise OR operation.
+These flags are used by
Indicates that pixels off the edge of the texture on the U-axis should be mirrored, not wrapped.
Indicates that pixels off the edge of the texture on the V-axis should be mirrored, not wrapped.
Same as
Inverts the direction of each normal.
Computes the per pixel occlusion term and encodes it into the alpha. An alpha of 1 means that the pixel is not obscured in any way, and an alpha of 0 would mean that the pixel is completely obscured.
Flags that describe miscellaneous query behavior.
+This flag is part of a query description (see
Tell the hardware that if it is not yet sure if something is hidden or not to draw it anyway. This is only used with an occlusion predicate. Predication data cannot be returned to your application via
Query types.
+Create a query with
Determines whether or not the GPU is finished processing commands. When the GPU is finished processing commands
Get the number of samples that passed the depth and stencil tests in between
Get a timestamp value where
Determines whether or not a
Get pipeline statistics, such as the number of pixel shader invocations in between
Similar to
Get streaming output statistics, such as the number of primitives streamed out in between
Determines whether or not any of the streaming output buffers overflowed in between
Get streaming output statistics for stream 0, such as the number of primitives streamed out in between
Determines whether or not the stream 0 output buffers overflowed in between
Get streaming output statistics for stream 1, such as the number of primitives streamed out in between
Determines whether or not the stream 1 output buffers overflowed in between
Get streaming output statistics for stream 2, such as the number of primitives streamed out in between
Determines whether or not the stream 2 output buffers overflowed in between
Get streaming output statistics for stream 3, such as the number of primitives streamed out in between
Determines whether or not the stream 3 output buffers overflowed in between
These flags identify the type of resource that will be viewed as a render target.
+This enumeration is used in
Do not use this value, as it will cause
The resource will be accessed as a buffer.
The resource will be accessed as a 1D texture.
The resource will be accessed as an array of 1D textures.
The resource will be accessed as a 2D texture.
The resource will be accessed as an array of 2D textures.
The resource will be accessed as a 2D texture with multisampling.
The resource will be accessed as an array of 2D textures with multisampling.
The resource will be accessed as a 3D texture.
Options for the amount of information to report about a device object's lifetime.
+This enumeration is used by
Several inline functions exist to combine the options using operators, see the D3D11SDKLayers.h header file for details.
+Specifies to obtain a summary about a device object's lifetime.
Specifies to obtain detailed information about a device object's lifetime.
Identifies the type of resource being used.
+This enumeration is used in
Resource is of unknown type.
Resource is a buffer.
Resource is a 1D texture.
Resource is a 2D texture.
Resource is a 3D texture.
Identifies options for resources.
+This enumeration is used in
These flags can be combined by bitwise OR.
The
Enables MIP map generation by using
Enables resource data sharing between two or more Direct3D devices. The only resources that can be shared are 2D non-mipmapped textures.
WARP and REF devices do not support shared resources. If you try to create a resource with this flag on either a WARP or REF device, the create method will return an E_OUTOFMEMORY error code.
Note: Starting with Windows 8, WARP devices fully support shared resources.
Note: Starting with Windows 8, we recommend that you enable resource data sharing between two or more Direct3D devices by using a combination of the D3D11_RESOURCE_MISC_SHARED_NTHANDLE and
Sets a resource to be a cube texture created from a Texture2DArray that contains 6 textures.
Enables instancing of GPU-generated content.
Enables a resource as a byte address buffer.
Enables a resource as a structured buffer.
Enables a resource with MIP map clamping for use with
Enables the resource to be synchronized by using the
If you call any of these methods with the
WARP and REF devices do not support shared resources. If you try to create a resource with this flag on either a WARP or REF device, the create method will return an E_OUTOFMEMORY error code.
Note: Starting with Windows 8, WARP devices fully support shared resources.
Enables a resource compatible with GDI. You must set the
Consider the following programming tips for using
You must set the texture format to one of the following types.
Identifies expected resource use during rendering. The usage directly reflects whether a resource is accessible by the CPU and/or the graphics processing unit (GPU).
+An application identifies the way a resource is intended to be used (its usage) in a resource description. There are several structures for creating resources including:
Differences between Direct3D 9 and Direct3D 10/11: In Direct3D 9, you specify the type of memory a resource should be created in at resource creation time (using D3DPOOL). In Direct3D 10/11, an application no longer specifies what type of memory (the pool) to create a resource in. Instead, you specify the intended usage of the resource, and let the runtime (in concert with the driver and a memory manager) choose the type of memory that will achieve the best performance.
?
+A resource that requires read and write access by the GPU. This is likely to be the most common usage choice.
A resource that can only be read by the GPU. It cannot be written by the GPU, and cannot be accessed at all by the CPU. This type of resource must be initialized when it is created, since it cannot be changed after creation.
A resource that is accessible by both the GPU (read only) and the CPU (write only). A dynamic resource is a good choice for a resource that will be updated by the CPU at least once per frame. To update a dynamic resource, use a Map method.
For info about how to use dynamic resources, see How to: Use dynamic resources.
A resource that supports data transfer (copy) from the GPU to the CPU.
Type for scan data.
+FLOAT data.
INT data.
UINT data.
Direction to perform scan in.
+Scan forward.
Scan backward.
Scan opcodes.
+Add values.
Take the minimum value.
Take the maximum value.
Multiply the values.
Perform a logical AND on the values.
Perform a logical OR on the values.
Perform a logical XOR on the values.
Identifies how to view a buffer resource.
+This enumeration is used by
View the buffer as raw. For more info about raw viewing of buffers, see Raw Views of Buffers.
Specifies a multi-sample pattern type.
+An app calls
The runtime defines the following standard sample patterns for 1 (trivial), 2, 4, 8, and 16 sample counts. Hardware must support 1, 4, and 8 sample counts. Hardware vendors can expose more sample counts beyond these. However, if vendors support 2, 4 (required), 8 (required), or 16, they must also support the corresponding standard pattern or center pattern for each of those sample counts.
+Pre-defined multi-sample patterns required for Direct3D 11 and Direct3D 10.1 hardware.
Pattern where all of the samples are located at the pixel center.
The stencil operations that can be performed during depth-stencil testing.
+Keep the existing stencil data.
Set the stencil data to 0.
Set the stencil data to the reference value set by calling
Increment the stencil value by 1, and clamp the result.
Decrement the stencil value by 1, and clamp the result.
Invert the stencil data.
Increment the stencil value by 1, and wrap the result if necessary.
Decrement the stencil value by 1, and wrap the result if necessary.
Identify a technique for resolving texture coordinates that are outside of the boundaries of a texture.
+Tile the texture at every (u,v) integer junction. For example, for u values between 0 and 3, the texture is repeated three times.
Flip the texture at every (u,v) integer junction. For u values between 0 and 1, for example, the texture is addressed normally; between 1 and 2, the texture is flipped (mirrored); between 2 and 3, the texture is normal again; and so on.
Texture coordinates outside the range [0.0, 1.0] are set to the texture color at 0.0 or 1.0, respectively.
Texture coordinates outside the range [0.0, 1.0] are set to the border color specified in
Similar to
The different faces of a cube texture.
+Positive X face.
Negative X face.
Positive Y face.
Negative Y face.
Positive Z face.
Negative Z face.
Identifies unordered-access view options for a buffer resource.
+Resource contains raw, unstructured data. Requires the UAV format to be
Allow data to be appended to the end of the buffer.
Adds a counter to the unordered-access-view buffer.
Unordered-access view options.
+This enumeration is used by an unordered-access-view description (see
The view type is unknown.
View the resource as a buffer.
View the resource as a 1D texture.
View the resource as a 1D texture array.
View the resource as a 2D texture.
View the resource as a 2D texture array.
View the resource as a 3D texture array.
Creates a device that represents the display adapter.
+A reference to the video adapter to use when creating a device. Pass
Note: Do not mix the use of DXGI 1.0 (
The
A handle to a DLL that implements a software rasterizer. If DriverType is
The runtime layers to enable (see
A reference to an array of
{ D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0, D3D_FEATURE_LEVEL_9_3, D3D_FEATURE_LEVEL_9_2, D3D_FEATURE_LEVEL_9_1 };
Note: If the Direct3D 11.1 runtime is present on the computer and pFeatureLevels is set to
The number of elements in pFeatureLevels.
The SDK version; use
Returns the address of a reference to an
If successful, returns the first
Returns the address of a reference to an
This method can return one of the Direct3D 11 Return Codes.
This method returns E_INVALIDARG if you set the pAdapter parameter to a non-
This entry-point is supported by the Direct3D 11 runtime, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista (KB971644).
To create a Direct3D 11.1 device (ID3D11Device1), which is available on Windows 8, Windows Server 2012, and Windows 7 and Windows Server 2008 R2 with the Platform Update for Windows 7 installed, you first create a
To create a Direct3D 11.2 device (ID3D11Device2), which is available on Windows 8.1 and Windows Server 2012 R2, you first create a
Set ppDevice and ppImmediateContext to
For an example, see How To: Create a Device and Immediate Context; to create a device and a swap chain at the same time, use
If you set the pAdapter parameter to a non-
Differences between Direct3D 10 and Direct3D 11: In Direct3D 10, the presence of pAdapter dictated which adapter to use and the DriverType could mismatch what the adapter was. In Direct3D 11, if you are trying to create a hardware or a software device, set pAdapter !=
On the other hand, if pAdapter ==
|
?
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Creates a device that represents the display adapter and a swap chain used for rendering.
+A reference to the video adapter to use when creating a device. Pass
Note: Do not mix the use of DXGI 1.0 (
The
A handle to a DLL that implements a software rasterizer. If DriverType is
The runtime layers to enable (see
A reference to an array of
{ D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0, D3D_FEATURE_LEVEL_9_3, D3D_FEATURE_LEVEL_9_2, D3D_FEATURE_LEVEL_9_1 };
Note: If the Direct3D 11.1 runtime is present on the computer and pFeatureLevels is set to
The number of elements in pFeatureLevels.
The SDK version; use
A reference to a swap chain description (see
Returns the address of a reference to the
Returns the address of a reference to an
Returns a reference to a
Returns the address of a reference to an
This method can return one of the Direct3D 11 Return Codes.
This method returns
This method returns E_INVALIDARG if you set the pAdapter parameter to a non-
Note: If you call this method in a Session 0 process, it returns
This entry-point is supported by the Direct3D 11 runtime, which is available on Windows 7, Windows Server 2008 R2, and as an update to Windows Vista (KB971644).
To create a Direct3D 11.1 device (ID3D11Device1), which is available on Windows 8, Windows Server 2012, and Windows 7 and Windows Server 2008 R2 with the Platform Update for Windows 7 installed, you first create a
To create a Direct3D 11.2 device (ID3D11Device2), which is available on Windows 8.1 and Windows Server 2012 R2, you first create a
Also, see the remarks section in
If you set the pAdapter parameter to a non-
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates a scan context.
+The
Maximum single scan size, in elements (FLOAT, UINT, or INT)
Maximum number of scans in multiscan.
Pointer to a
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates a segmented scan context.
+Pointer to an
Maximum single scan size, in elements (FLOAT, UINT, or INT).
Pointer to a
The return value is one of the values listed in Direct3D 11 Return Codes.
Creates an
The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use these:
Create a shader-resource view from a file in memory.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the Spherical Harmonics Math library, SHProjectCubeMap.
Projects a function represented in a cube map into spherical harmonics.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use resource functions, then these:
Create a texture from another resource.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, CaptureTexture then SaveToXXXMemory (where XXX is WIC, DDS, or TGA; WIC doesn't support DDS and TGA; D3DX 9 supported TGA as a common art source format for games).
Save a texture to memory.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use resource functions, then use DirectXTex library (tools), + LoadFromXXXMemory (where XXX is WIC, DDS, or TGA; WIC doesn't support DDS and TGA; D3DX 9 supported TGA as a common art source format for games).
Retrieves information about a given image in a resource.
+If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use these:
Create a shader-resource view from a file.
+The return value is one of the values listed in Direct3D 11 Return Codes.
For a list of supported image formats, see
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use these:
Create a texture resource from a file.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, GenerateMipMaps and GenerateMipMaps3D.
Generates mipmap chain using a particular texture filter.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, GetMetadataFromXXXFile (where XXX is WIC, DDS, or TGA; WIC doesn't support DDS and TGA; D3DX 9 supported TGA as a common art source format for games).
Retrieves information about a given image file.
+If the function succeeds, the return value is
This function supports both Unicode and ANSI strings.
+Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use resource functions, then these:
Create a shader-resource view from a resource.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, Resize, Convert, Compress, Decompress, and/or CopyRectangle.
Load a texture from a texture.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, GetMetadataFromXXXMemory (where XXX is WIC, DDS, or TGA; WIC doesn't support DDS and TGA; D3DX 9 supported TGA as a common art source format for games).
Get information about an image already loaded into memory.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use these:
Create a texture resource from a file residing in system memory.
+The return value is one of the values listed in Direct3D 11 Return Codes.
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, ComputeNormalMap.
Converts a height map into a normal map. The (x,y,z) components of each normal are mapped to the (r,g,b) channels of the output texture.
+If the function succeeds, the return value is
This method computes the normal by using the central difference with a kernel size of 3x3. RGB channels in the destination contain biased (x,y,z) components of the normal. The central differencing denominator is hardcoded to 2.0.
+Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Note??Instead of using this function, we recommend that you use the DirectXTex library, CaptureTexture then SaveToXXXFile (where XXX is WIC, DDS, or TGA; WIC doesn't support DDS and TGA; D3DX 9 supported TGA as a common art source format for games). For the simplified scenario of creating a screen shot from a render target texture, we recommend that you use the DirectXTK library, SaveDDSTextureToFile or SaveWICTextureToFile.
Save a texture to a file.
+The return value is one of the values listed in Direct3D 11 Return Codes; use the return value to see if the DestFormat is supported.
The
There is no explicit creation method, simply declare an
Gets the initialization flags associated with the deferred context that created the command list.
+The context flag is reserved for future use and is always 0.
The GetContextFlags method gets the flags that were supplied to the ContextFlags parameter of
Gets the initialization flags associated with the deferred context that created the command list.
+The GetContextFlags method gets the flags that were supplied to the ContextFlags parameter of
The
Windows Phone 8: This API is supported.
+Bind an array of shader resources to the domain-shader stage.
+Index into the device's zero-based array to begin setting shader resources to (ranges from 0 to
Number of shader resources to set. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to set to the device.
If an overlapping resource view is already bound to an output slot, such as a render target, then the method will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Set a domain shader to the device.
+Pointer to a domain shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows Phone 8: This API is supported.
+Set a domain shader to the device.
+Pointer to a domain shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows Phone 8: This API is supported.
+Set an array of sampler states to the domain-shader stage.
+Index into the device's zero-based array to begin setting samplers to (ranges from 0 to
Number of samplers in the array. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any sampler may be set to
// Default sampler state:
D3D11_SAMPLER_DESC SamplerDesc;
SamplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
SamplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.MipLODBias = 0;
SamplerDesc.MaxAnisotropy = 1;
SamplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
SamplerDesc.BorderColor[0] = 1.0f;
SamplerDesc.BorderColor[1] = 1.0f;
SamplerDesc.BorderColor[2] = 1.0f;
SamplerDesc.BorderColor[3] = 1.0f;
SamplerDesc.MinLOD = -FLT_MAX;
SamplerDesc.MaxLOD = FLT_MAX;
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Sets the constant buffers used by the domain-shader stage.
+Index into the zero-based array to begin setting constant buffers to (ranges from 0 to
Number of buffers to set (ranges from 0 to
Array of constant buffers (see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The Direct3D 11.1 runtime, which is available starting with Windows?8, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the DSSetConstantBuffers1 method instead.
Windows Phone 8: This API is supported.
+Get the domain-shader resources.
+Index into the device's zero-based array to begin getting shader resources from (ranges from 0 to
The number of resources to get from the device. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to be returned by the device.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the domain shader currently set on the device.
+Address of a reference to a domain shader (see
Pointer to an array of class instance interfaces (see
The number of class-instance elements in the array.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get an array of sampler state interfaces from the domain-shader stage.
+Index into a zero-based array to begin getting samplers from (ranges from 0 to
Number of samplers to get from a device context. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the constant buffers used by the domain-shader stage.
+Index into the device's zero-based array to begin retrieving constant buffers from (ranges from 0 to
Number of buffers to retrieve (ranges from 0 to
Array of constant buffer interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+A geometry-shader interface manages an executable program (a geometry shader) that controls the geometry-shader stage.
+The geometry-shader interface has no methods; use HLSL to implement your shader functionality. All shaders are implemented from a common set of features referred to as the common-shader core.
To create a geometry shader interface, call either
This interface is defined in D3D11.h.
+The
Windows Phone 8: This API is supported.
+Sets the constant buffers used by the geometry shader pipeline stage.
+Index into the device's zero-based array to begin setting constant buffers to (ranges from 0 to
Number of buffers to set (ranges from 0 to
Array of constant buffers (see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
You can't use the
The Direct3D 11.1 runtime, which is available starting with Windows?8, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the GSSetConstantBuffers1 method instead.
Windows Phone 8: This API is supported.
+Set a geometry shader to the device.
+Pointer to a geometry shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows Phone 8: This API is supported.
+Set a geometry shader to the device.
+Pointer to a geometry shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows Phone 8: This API is supported.
+Bind an array of shader resources to the geometry shader stage.
+Index into the device's zero-based array to begin setting shader resources to (ranges from 0 to
Number of shader resources to set. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to set to the device.
If an overlapping resource view is already bound to an output slot, such as a render target, then the method will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Set an array of sampler states to the geometry shader pipeline stage.
+Index into the device's zero-based array to begin setting samplers to (ranges from 0 to
Number of samplers in the array. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any sampler may be set to
// Default sampler state:
D3D11_SAMPLER_DESC SamplerDesc;
SamplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
SamplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.MipLODBias = 0;
SamplerDesc.MaxAnisotropy = 1;
SamplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
SamplerDesc.BorderColor[0] = 1.0f;
SamplerDesc.BorderColor[1] = 1.0f;
SamplerDesc.BorderColor[2] = 1.0f;
SamplerDesc.BorderColor[3] = 1.0f;
SamplerDesc.MinLOD = -FLT_MAX;
SamplerDesc.MaxLOD = FLT_MAX;
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Get the constant buffers used by the geometry shader pipeline stage.
+Index into the device's zero-based array to begin retrieving constant buffers from (ranges from 0 to
Number of buffers to retrieve (ranges from 0 to
Array of constant buffer interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get the geometry shader currently set on the device.
+Address of a reference to a geometry shader (see
Pointer to an array of class instance interfaces (see
The number of class-instance elements in the array.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get the geometry shader resources.
+Index into the device's zero-based array to begin getting shader resources from (ranges from 0 to
The number of resources to get from the device. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to be returned by the device.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get an array of sampler state interfaces from the geometry shader pipeline stage.
+Index into a zero-based array to begin getting samplers from (ranges from 0 to
Number of samplers to get from a device context. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+A hull-shader interface manages an executable program (a hull shader) that controls the hull-shader stage.
+The hull-shader interface has no methods; use HLSL to implement your shader functionality. All shaders are implemented from a common set of features referred to as the common-shader core.
To create a hull-shader interface, call
This interface is defined in D3D11.h.
+The
Windows Phone 8: This API is supported.
+Bind an array of shader resources to the hull-shader stage.
+If an overlapping resource view is already bound to an output slot, such as a render target, then the method will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows?Phone?8: This API is supported.
+Set a hull shader to the device.
+Pointer to a hull shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows?Phone?8: This API is supported.
+Set a hull shader to the device.
+Pointer to a hull shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows?Phone?8: This API is supported.
+Set an array of sampler states to the hull-shader stage.
+Any sampler may be set to
// Default sampler state:
D3D11_SAMPLER_DESC SamplerDesc;
SamplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
SamplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
SamplerDesc.MipLODBias = 0;
SamplerDesc.MaxAnisotropy = 1;
SamplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
SamplerDesc.BorderColor[0] = 1.0f;
SamplerDesc.BorderColor[1] = 1.0f;
SamplerDesc.BorderColor[2] = 1.0f;
SamplerDesc.BorderColor[3] = 1.0f;
SamplerDesc.MinLOD = -FLT_MAX;
SamplerDesc.MaxLOD = FLT_MAX;
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows?Phone?8: This API is supported.
+Set the constant buffers used by the hull-shader stage.
+The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The Direct3D 11.1 runtime, which is available starting with Windows 8, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the HSSetConstantBuffers1 method instead.
Windows?Phone?8: This API is supported.
+Get the hull-shader resources.
+Index into the device's zero-based array to begin getting shader resources from (ranges from 0 to
The number of resources to get from the device. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to be returned by the device.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get the hull shader currently set on the device.
+Address of a reference to a hull shader (see
Pointer to an array of class instance interfaces (see
The number of class-instance elements in the array.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get an array of sampler state interfaces from the hull-shader stage.
+Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get the constant buffers used by the hull-shader stage.
+Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+An information-queue interface stores, retrieves, and filters debug messages. The queue consists of a message queue, an optional storage filter stack, and an optional retrieval filter stack.
+To get this interface, turn on debug layer and use IUnknown::QueryInterface from the
Windows?Phone?8: This API is supported.
+Set the maximum number of messages that can be added to the message queue.
+Maximum number of messages that can be added to the message queue. -1 means no limit.
This method returns one of the following Direct3D 11 Return Codes.
When the number of messages in the message queue has reached the maximum limit, new messages coming in will push old messages out.
Windows?Phone?8: This API is supported.
+Clear all messages from the message queue.
+Windows?Phone?8: This API is supported.
+Get a message from the message queue.
+Index into message queue after an optional retrieval filter has been applied. This can be between 0 and the number of messages in the message queue that pass through the retrieval filter (which can be obtained with
Returned message (see
Size of pMessage in bytes, including the size of the message string that the pMessage points to.
This method returns one of the following Direct3D 11 Return Codes.
This method does not remove any messages from the message queue.
This method gets messages from the message queue after an optional retrieval filter has been applied.
Applications should call this method twice to retrieve a message - first to obtain the size of the message and second to get the message. Here is a typical example:
// Get the size of the message
SIZE_T messageLength = 0;
hr = pInfoQueue->GetMessage(0, NULL, &messageLength);
// Allocate space and get the message
D3D11_MESSAGE* pMessage = (D3D11_MESSAGE*)malloc(messageLength);
hr = pInfoQueue->GetMessage(0, pMessage, &messageLength);
For an overview see Information Queue Overview.
Windows?Phone?8: This API is supported.
+Get the number of messages that were allowed to pass through a storage filter.
+Number of messages allowed by a storage filter.
Windows?Phone?8: This API is supported.
+Get the number of messages that were denied passage through a storage filter.
+Number of messages denied by a storage filter.
Windows?Phone?8: This API is supported.
+Get the number of messages currently stored in the message queue.
+Number of messages currently stored in the message queue.
Windows?Phone?8: This API is supported.
+Get the number of messages that are able to pass through a retrieval filter.
+Number of messages allowed by a retrieval filter.
Windows?Phone?8: This API is supported.
+Get the number of messages that were discarded due to the message count limit.
+Number of messages discarded.
Get and set the message count limit with
Windows?Phone?8: This API is supported.
+Get the maximum number of messages that can be added to the message queue.
+Maximum number of messages that can be added to the queue. -1 means no limit.
When the number of messages in the message queue has reached the maximum limit, new messages coming in will push old messages out.
Windows?Phone?8: This API is supported.
+Add storage filters to the top of the storage-filter stack.
+Array of storage filters (see
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Get the storage filter at the top of the storage-filter stack.
+Storage filter at the top of the storage-filter stack.
Size of the storage filter in bytes. If pFilter is
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Remove a storage filter from the top of the storage-filter stack.
+Windows?Phone?8: This API is supported.
+Push an empty storage filter onto the storage-filter stack.
+This method returns one of the following Direct3D 11 Return Codes.
An empty storage filter allows all messages to pass through.
Windows?Phone?8: This API is supported.
+Push a copy of storage filter currently on the top of the storage-filter stack onto the storage-filter stack.
+This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Push a storage filter onto the storage-filter stack.
+Pointer to a storage filter (see
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Pop a storage filter from the top of the storage-filter stack.
+Windows?Phone?8: This API is supported.
+Get the size of the storage-filter stack in bytes.
+Size of the storage-filter stack in bytes.
Windows?Phone?8: This API is supported.
+Add storage filters to the top of the retrieval-filter stack.
+Array of retrieval filters (see
This method returns one of the following Direct3D 11 Return Codes.
The following code example shows how to use
D3D11_MESSAGE_CATEGORY cats[] = { ..., ..., ... };
D3D11_MESSAGE_SEVERITY sevs[] = { ..., ..., ... };
UINT ids[] = { ..., ..., ... };

D3D11_INFO_QUEUE_FILTER filter;
memset(&filter, 0, sizeof(filter));

// To set the type of messages to allow, set filter.AllowList as follows:
filter.AllowList.NumCategories = sizeof(cats) / sizeof(D3D11_MESSAGE_CATEGORY);
filter.AllowList.pCategoryList = cats;
filter.AllowList.NumSeverities = sizeof(sevs) / sizeof(D3D11_MESSAGE_SEVERITY);
filter.AllowList.pSeverityList = sevs;
filter.AllowList.NumIDs = sizeof(ids) / sizeof(UINT);
filter.AllowList.pIDList = ids;

// To set the type of messages to deny, set filter.DenyList
// similarly to the preceding filter.AllowList.

// The following single call sets all of the preceding information.
hr = infoQueue->AddRetrievalFilterEntries(&filter);
Windows?Phone?8: This API is supported.
+Get the retrieval filter at the top of the retrieval-filter stack.
+Retrieval filter at the top of the retrieval-filter stack.
Size of the retrieval filter in bytes. If pFilter is
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Remove a retrieval filter from the top of the retrieval-filter stack.
+Windows?Phone?8: This API is supported.
+Push an empty retrieval filter onto the retrieval-filter stack.
+This method returns one of the following Direct3D 11 Return Codes.
An empty retrieval filter allows all messages to pass through.
Windows?Phone?8: This API is supported.
+Push a copy of retrieval filter currently on the top of the retrieval-filter stack onto the retrieval-filter stack.
+This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Push a retrieval filter onto the retrieval-filter stack.
+Pointer to a retrieval filter (see
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Pop a retrieval filter from the top of the retrieval-filter stack.
+Windows?Phone?8: This API is supported.
+Get the size of the retrieval-filter stack in bytes.
+Size of the retrieval-filter stack in bytes.
Windows?Phone?8: This API is supported.
+Add a debug message to the message queue and send that message to debug output.
+Category of a message (see
Severity of a message (see
Unique identifier of a message (see
User-defined message.
This method returns one of the following Direct3D 11 Return Codes.
This method is used by the runtime's internal mechanisms to add debug messages to the message queue and send them to debug output. For applications to add their own custom messages to the message queue and send them to debug output, call
Windows Phone 8: This API is supported.
+Add a user-defined message to the message queue and send that message to debug output.
+Severity of a message (see
Message string.
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Set a message category to break on when a message with that category passes through the storage filter.
+Message category to break on (see
Turns this breaking condition on or off (true for on, false for off).
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Set a message severity level to break on when a message with that severity level passes through the storage filter.
+A
Turns this breaking condition on or off (true for on, false for off).
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Set a message identifier to break on when a message with that identifier passes through the storage filter.
+Message identifier to break on (see
Turns this breaking condition on or off (true for on, false for off).
This method returns one of the following Direct3D 11 Return Codes.
Windows?Phone?8: This API is supported.
+Get a message category to break on when a message with that category passes through the storage filter.
+Message category to break on (see
Whether this breaking condition is turned on or off (true for on, false for off).
Windows?Phone?8: This API is supported.
+Get a message severity level to break on when a message with that severity level passes through the storage filter.
+Message severity level to break on (see
Whether this breaking condition is turned on or off (true for on, false for off).
Windows?Phone?8: This API is supported.
+Get a message identifier to break on when a message with that identifier passes through the storage filter.
+Message identifier to break on (see
Whether this breaking condition is turned on or off (true for on, false for off).
Windows?Phone?8: This API is supported.
+Set a boolean that turns the debug output on or off.
+Disable/Enable the debug output (TRUE to disable or mute the output, FALSE to enable it).
This will stop messages that pass the storage filter from being printed out in the debug output, however those messages will still be added to the message queue.
Windows?Phone?8: This API is supported.
+Get a boolean that turns the debug output on or off.
+Whether the debug output is on or off (true for on, false for off).
Windows?Phone?8: This API is supported.
+Get a message from the message queue.
+Index into message queue after an optional retrieval filter has been applied. This can be between 0 and the number of messages in the message queue that pass through the retrieval filter (which can be obtained with
Get the storage filter at the top of the storage-filter stack.
+Get the retrieval filter at the top of the retrieval-filter stack.
+Get or sets the maximum number of messages that can be added to the message queue.
+When the number of messages in the message queue has reached the maximum limit, new messages coming in will push old messages out.
Windows?Phone?8: This API is supported.
+Get the number of messages that were allowed to pass through a storage filter.
+Windows?Phone?8: This API is supported.
+Get the number of messages that were denied passage through a storage filter.
+Windows?Phone?8: This API is supported.
+Get the number of messages currently stored in the message queue.
+Windows?Phone?8: This API is supported.
+Get the number of messages that are able to pass through a retrieval filter.
+Windows?Phone?8: This API is supported.
+Get the number of messages that were discarded due to the message count limit.
+Get and set the message count limit with
Windows?Phone?8: This API is supported.
+Get the size of the storage-filter stack in bytes.
+Windows?Phone?8: This API is supported.
+Get the size of the retrieval-filter stack in bytes.
+Windows?Phone?8: This API is supported.
+Get or sets a boolean that turns the debug output on or off.
+Windows?Phone?8: This API is supported.
+An input-layout interface holds a definition of how to feed vertex data that is laid out in memory into the input-assembler stage of the graphics pipeline.
+To create an input-layout object, call
Windows?Phone?8: This API is supported.
+A pixel-shader interface manages an executable program (a pixel shader) that controls the pixel-shader stage.
+The pixel-shader interface has no methods; use HLSL to implement your shader functionality. All shaders are implemented from a common set of features referred to as the common-shader core.
To create a pixel shader interface, call
This interface is defined in D3D11.h.
Windows?Phone?8: This API is supported.
+The
Windows?Phone?8: This API is supported.
+Bind an array of shader resources to the pixel shader stage.
+Index into the device's zero-based array to begin setting shader resources to (ranges from 0 to
Number of shader resources to set. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to set to the device.
If an overlapping resource view is already bound to an output slot, such as a render target, then this API will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows?Phone?8: This API is supported.
+Sets a pixel shader to the device.
+Pointer to a pixel shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Set ppClassInstances to
Windows?Phone?8: This API is supported.
+Sets a pixel shader to the device.
+Pointer to a pixel shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Set ppClassInstances to
Windows?Phone?8: This API is supported.
+Set an array of sampler states to the pixel shader pipeline stage.
+Index into the device's zero-based array to begin setting samplers to (ranges from 0 to
Number of samplers in the array. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any sampler may be set to
State | Default Value |
---|---|
Filter | |
AddressU | |
AddressV | |
AddressW | |
MipLODBias | 0 |
MaxAnisotropy | 1 |
ComparisonFunc | |
BorderColor[0] | 1.0f |
BorderColor[1] | 1.0f |
BorderColor[2] | 1.0f |
BorderColor[3] | 1.0f |
MinLOD | -FLT_MAX |
MaxLOD | FLT_MAX |
?
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows?Phone?8: This API is supported.
+Sets the constant buffers used by the pixel shader pipeline stage.
+Index into the device's zero-based array to begin setting constant buffers to (ranges from 0 to
Number of buffers to set (ranges from 0 to
Array of constant buffers (see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The Direct3D 11.1 runtime, which is available on Windows 8 and later operating systems, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the PSSetConstantBuffers1 method instead.
Windows?Phone?8: This API is supported.
+Bind an array of shader resources to the pixel shader stage.
+Index into the device's zero-based array to begin setting shader resources to (ranges from 0 to
Number of shader resources to set. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to set to the device.
If an overlapping resource view is already bound to an output slot, such as a render target, then this API will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows?Phone?8: This API is supported.
+Get the pixel shader currently set on the device.
+Address of a reference to a pixel shader (see
Pointer to an array of class instance interfaces (see
The number of class-instance elements in the array.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get an array of sampler states from the pixel shader pipeline stage.
+Index into a zero-based array to begin getting samplers from (ranges from 0 to
Number of samplers to get from a device context. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Array of sampler-state interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+Get the constant buffers used by the pixel shader pipeline stage.
+Index into the device's zero-based array to begin retrieving constant buffers from (ranges from 0 to
Number of buffers to retrieve (ranges from 0 to
Array of constant buffer interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows?Phone?8: This API is supported.
+A predicate interface determines whether geometry should be processed depending on the results of a previous draw call.
+To create a predicate object, call
There are two types of predicates: stream-output-overflow predicates and occlusion predicates. Stream-output-overflow predicates cause any geometry residing in stream-output buffers that were overflowed to not be processed. Occlusion predicates cause any geometry that did not have a single sample pass the depth/stencil tests to not be processed.
Windows?Phone?8: This API is supported.
+A query interface queries information from the GPU.
+A query can be created with
Query data is typically gathered by issuing an
There are, however, some queries that do not require calls to Begin. For a list of possible queries see
A query is typically executed as shown in the following code:
D3D11_QUERY_DESC queryDesc;
... // Fill out queryDesc structure
ID3D11Query* pQuery;
pDevice->CreateQuery(&queryDesc, &pQuery);
pDeviceContext->Begin(pQuery);
... // Issue graphics commands
pDeviceContext->End(pQuery);
UINT64 queryData; // This data type is different depending on the query type
while (S_OK != pDeviceContext->GetData(pQuery, &queryData, sizeof(UINT64), 0))
{
}
When using a query that does not require a call to Begin, it still requires a call to End. The call to End causes the data returned by GetData to be accurate up until the last call to End.
Windows?Phone?8: This API is supported.
+Get a query description.
+Pointer to a query description (see
Windows?Phone?8: This API is supported.
+Get a query description.
+Windows?Phone?8: This API is supported.
+The rasterizer-state interface holds a description for rasterizer state that you can bind to the rasterizer stage.
+To create a rasterizer-state object, call
Windows?Phone?8: This API is supported.
+Gets the description for rasterizer state that you used to create the rasterizer-state object.
+A reference to a
You use the description for rasterizer state in a call to the
Windows?Phone?8: This API is supported.
+Create a rasterizer state object that tells the rasterizer stage how to behave.
+4096 unique rasterizer state objects can be created on a device at a time.
If an application attempts to create a rasterizer-state interface with the same state as an existing interface, the same interface will be returned and the total number of unique rasterizer state objects will stay the same.
+Gets the description for rasterizer state that you used to create the rasterizer-state object.
+You use the description for rasterizer state in a call to the
Windows?Phone?8: This API is supported.
+A render-target-view interface identifies the render-target subresources that can be accessed during rendering.
+To create a render-target view, call
A render target is a resource that can be written by the output-merger stage at the end of a render pass. Each render target should also have a corresponding depth-stencil view.
Windows?Phone?8: This API is supported.
+Get the properties of a render target view.
+Pointer to the description of a render target view (see
Windows?Phone?8: This API is supported.
+Get the properties of a render target view.
+Windows?Phone?8: This API is supported.
+The sampler-state interface holds a description for sampler state that you can bind to any shader stage of the pipeline for reference by texture sample operations.
+To create a sampler-state object, call
To bind a sampler-state object to any pipeline shader stage, call the following methods:
You can bind the same sampler-state object to multiple shader stages simultaneously.
Windows?Phone?8: This API is supported.
+Gets the description for sampler state that you used to create the sampler-state object.
+A reference to a
You use the description for sampler state in a call to the
Windows?Phone?8: This API is supported.
+Gets the description for sampler state that you used to create the sampler-state object.
+You use the description for sampler state in a call to the
Windows?Phone?8: This API is supported.
+Sets which direction to perform scans in.
+SetScanDirection sets the direction
Sets which direction to perform scans in.
+Direction to perform scans in. See
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
SetScanDirection sets the direction
Performs an unsegmented scan of a sequence.
+The type of element in the sequence. See
The binary operation to perform. See
Size of scan in elements.
Input sequence on the device. Set pSrc and pDst to the same value for in-place scans.
Output sequence on the device.
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
You must point the parameters pSrc and pDst to typed buffers (and not to raw or structured buffers). For information about buffer types, see Types of Resources. The format of these typed buffers must be
Performs a multiscan of a sequence.
+The type of element in the sequence. See
The binary operation to perform. See
Size of scan in elements.
Pitch of the next scan in elements.
Number of scans in the multiscan.
Input sequence on the device. Set pSrc and pDst to the same value for in-place scans.
Output sequence on the device.
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
You must point the parameters pSrc and pDst to typed buffers (and not to raw or structured buffers). For information about buffer types, see Types of Resources. The format of these typed buffers must be
Performs a segmented scan of a sequence.
+You must point the parameters pSrc and pDst to typed buffers (and not to raw or structured buffers). For information about buffer types, see Types of Resources. The format of these typed buffers must be
The format of the resource view to which the pSrcElementFlags parameter points must be
Sets which direction to perform scans in.
+Direction to perform scans in. See
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
SetScanDirection sets the direction
Performs a segmented scan of a sequence.
+The type of element in the sequence. See
The binary operation to perform. See
Size of scan in elements.
Input sequence on the device. Set pSrc and pDst to the same value for in-place scans.
Compact array of bits with one bit per element of pSrc. A set value indicates the start of a new segment.
Output sequence on the device.
Returns one of the return codes described in the topic Direct3D 11 Return Codes.
You must point the parameters pSrc and pDst to typed buffers (and not to raw or structured buffers). For information about buffer types, see Types of Resources. The format of these typed buffers must be
The format of the resource view to which the pSrcElementFlags parameter points must be
A shader-resource-view interface specifies the subresources a shader can access during rendering. Examples of shader resources include a constant buffer, a texture buffer, and a texture.
+To create a shader-resource view, call
A shader-resource view is required when binding a resource to a shader stage; the binding occurs by calling
Windows Phone 8: This API is supported.
+Get the shader resource view's description.
+A reference to a
Windows?Phone?8: This API is supported.
+Get the shader resource view's description.
+Windows?Phone?8: This API is supported.
+Note: The
Note: The
Reserved.
Note: The
Reserved.
Note: The
A 1D texture interface accesses texel data, which is structured memory.
+To create an empty 1D texture, call
Textures cannot be bound directly to the pipeline; instead, a view must be created and bound. Using a view, texture data can be interpreted at run time within certain restrictions. To use the texture as a render target or depth-stencil resource, call
Windows Phone 8: This API is supported.
+Get the properties of the texture resource.
+Pointer to a resource description (see
Windows Phone 8: This API is supported.
+Get the properties of the texture resource.
+Windows Phone 8: This API is supported.
+A 2D texture interface manages texel data, which is structured memory.
+To create an empty Texture2D resource, call
Textures cannot be bound directly to the pipeline; instead, a view must be created and bound. Using a view, texture data can be interpreted at run time within certain restrictions. To use the texture as a render target or depth-stencil resource, call
Windows Phone 8: This API is supported.
+Get the properties of the texture resource.
+Pointer to a resource description (see
Windows Phone 8: This API is supported.
+Get the properties of the texture resource.
+Windows Phone 8: This API is supported.
+A 3D texture interface accesses texel data, which is structured memory.
+To create an empty Texture3D resource, call
Textures cannot be bound directly to the pipeline; instead, a view must be created and bound. Using a view, texture data can be interpreted at run time within certain restrictions. To use the texture as a render target or depth-stencil resource, call
Windows Phone 8: This API is supported.
+Get the properties of the texture resource.
+Pointer to a resource description (see
Windows Phone 8: This API is supported.
+Get the properties of the texture resource.
+Windows Phone 8: This API is supported.
+A view interface specifies the parts of a resource the pipeline can access during rendering.
+To create a view for an unordered access resource, call
All resources must be bound to the pipeline before they can be accessed. Call
Get a description of the resource.
+Pointer to a resource description (see
Get a description of the resource.
+A vertex-shader interface manages an executable program (a vertex shader) that controls the vertex-shader stage.
+The vertex-shader interface has no methods; use HLSL to implement your shader functionality. All shaders are implemented from a common set of features referred to as the common-shader core.
To create a vertex shader interface, call
This interface is defined in D3D11.h.
Windows Phone 8: This API is supported.
+The
Windows Phone 8: This API is supported.
+Sets the constant buffers used by the vertex shader pipeline stage.
+Index into the device's zero-based array to begin setting constant buffers to (ranges from 0 to
Number of buffers to set (ranges from 0 to
Array of constant buffers (see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The Direct3D 11.1 runtime, which is available starting with Windows 8, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the VSSetConstantBuffers1 method instead.
Windows Phone 8: This API is supported.
+Set a vertex shader to the device.
+Pointer to a vertex shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows Phone 8: This API is supported.
+Set a vertex shader to the device.
+Pointer to a vertex shader (see
A reference to an array of class-instance interfaces (see
The number of class-instance interfaces in the array.
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The maximum number of instances a shader can have is 256.
Windows Phone 8: This API is supported.
+Bind an array of shader resources to the vertex-shader stage.
+Index into the device's zero-based array to begin setting shader resources to (range is from 0 to
Number of shader resources to set. Up to a maximum of 128 slots are available for shader resources (range is from 0 to
Array of shader resource view interfaces to set to the device.
If an overlapping resource view is already bound to an output slot, such as a rendertarget, then this API will fill the destination shader resource slot with
For information about creating shader-resource views, see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Set an array of sampler states to the vertex shader pipeline stage.
+Index into the device's zero-based array to begin setting samplers to (ranges from 0 to
Number of samplers in the array. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Pointer to an array of sampler-state interfaces (see
Any sampler may be set to
//Default sampler state: +SamplerDesc; + SamplerDesc.Filter = ; + SamplerDesc.AddressU = ; + SamplerDesc.AddressV = ; + SamplerDesc.AddressW = ; + SamplerDesc.MipLODBias = 0; + SamplerDesc.MaxAnisotropy = 1; + SamplerDesc.ComparisonFunc = ; + SamplerDesc.BorderColor[0] = 1.0f; + SamplerDesc.BorderColor[1] = 1.0f; + SamplerDesc.BorderColor[2] = 1.0f; + SamplerDesc.BorderColor[3] = 1.0f; + SamplerDesc.MinLOD = -FLT_MAX; + SamplerDesc.MaxLOD = FLT_MAX;
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
Windows Phone 8: This API is supported.
+Sets the constant buffers used by the vertex shader pipeline stage.
+Index into the device's zero-based array to begin setting constant buffers to (ranges from 0 to
Number of buffers to set (ranges from 0 to
Array of constant buffers (see
The method will hold a reference to the interfaces passed in. This differs from the device state behavior in Direct3D 10.
The Direct3D 11.1 runtime, which is available starting with Windows 8, can bind a larger number of
If the application wants the shader to access other parts of the buffer, it must call the VSSetConstantBuffers1 method instead.
Windows Phone 8: This API is supported.
+Get the vertex shader currently set on the device.
+Address of a reference to a vertex shader (see
Pointer to an array of class instance interfaces (see
The number of class-instance elements in the array.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get the vertex shader resources.
+Index into the device's zero-based array to begin getting shader resources from (ranges from 0 to
The number of resources to get from the device. Up to a maximum of 128 slots are available for shader resources (ranges from 0 to
Array of shader resource view interfaces to be returned by the device.
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Get an array of sampler states from the vertex shader pipeline stage.
+Index into a zero-based array to begin getting samplers from (ranges from 0 to
Number of samplers to get from a device context. Each pipeline stage has a total of 16 sampler slots available (ranges from 0 to
Array of sampler-state interface references (see
Any returned interfaces will have their reference count incremented by one. Applications should call IUnknown::Release on the returned interfaces when they are no longer needed to avoid memory leaks.
Windows Phone 8: This API is supported.
+Describes an HLSL class instance.
+The
The members of this structure except InstanceIndex are valid (non-default values) if they describe a class instance acquired using
The instance ID of an HLSL class; the default value is 0.
The instance index of an HLSL class; the default value is 0.
The type ID of an HLSL class; the default value is 0.
Describes the constant buffer associated with an HLSL class; the default value is 0.
The base constant buffer offset associated with an HLSL class; the default value is 0.
The base texture associated with an HLSL class; the default value is 127.
The base sampler associated with an HLSL class; the default value is 15.
True if the class was created; the default value is false.
Information about the video card's performance counter capabilities.
+This structure is returned by
Largest device-dependent counter ID that the device supports. If none are supported, this value will be 0. Otherwise it will be greater than or equal to
Number of counters that can be simultaneously supported.
Number of detectable parallel units that the counter is able to discern. Values are 1 ~ 4. Use NumDetectableParallelUnits to interpret the values of the VERTEX_PROCESSING, GEOMETRY_PROCESSING, PIXEL_PROCESSING, and OTHER_GPU_PROCESSING counters.
Describes a counter.
+This structure is used by
Type of counter (see
Reserved.
Stencil operations that can be performed based on the results of stencil test.
+All stencil operations are specified as a
This structure is a member of a depth-stencil description.
+The stencil operation to perform when stencil testing fails.
The stencil operation to perform when stencil testing passes and depth testing fails.
The stencil operation to perform when stencil testing and depth testing both pass.
A function that compares stencil data against existing stencil data. The function options are listed in
Specifies the subresources of a texture that are accessible from a depth-stencil view.
+These are valid formats for a depth-stencil view:
A depth-stencil view cannot use a typeless format. If the format chosen is
A depth-stencil-view description is needed when calling
Resource data format (see
Type of resource (see
A value that describes whether the texture is read only. Pass 0 to specify that it is not read only; otherwise, pass one of the members of the
Specifies a 1D texture subresource (see
Specifies an array of 1D texture subresources (see
Specifies a 2D texture subresource (see
Specifies an array of 2D texture subresources (see
Specifies a multisampled 2D texture (see
Specifies an array of multisampled 2D textures (see
Specifies the subresource from a 2D texture that is accessible to a depth-stencil view.
+This structure is one member of a depth-stencil-view description (see
The index of the first mipmap level to use.
Specifies the subresources from an array of 1D textures to use in a depth-stencil view.
+This structure is one member of a depth-stencil-view description (see
The index of the first mipmap level to use.
The index of the first texture to use in an array of textures.
Number of textures to use.
Specifies the subresource from a 1D texture that is accessible to a depth-stencil view.
+This structure is one member of a depth-stencil-view description (see
The index of the first mipmap level to use.
Specifies the subresources from an array 2D textures that are accessible to a depth-stencil view.
+This structure is one member of a depth-stencil-view description (see
The index of the first mipmap level to use.
The index of the first texture to use in an array of textures.
Number of textures to use.
Specifies the subresource from a multisampled 2D texture that is accessible to a depth-stencil view.
+Because a multisampled 2D texture contains a single subtexture, there is nothing to specify; this unused member is included so that this structure will compile in C.
+Unused.
Specifies the subresources from an array of multisampled 2D textures for a depth-stencil view.
+This structure is one member of a depth-stencil-view description (see
The index of the first texture to use in an array of textures.
Number of textures to use.
Describes buffer requirements for an FFT.
+The
Use the info in
Some FFT algorithms benefit from precomputing sin and cos. The FFT object might store precomputed data in the user-supplied buffers.
+Number of temporary buffers needed. Allowed range is 0 to D3DX11_FFT_MAX_TEMP_BUFFERS.
Number of precompute buffers required. Allowed range is 0 to D3DX11_FFT_MAX_PRECOMPUTE_BUFFERS.
Minimum sizes (in FLOATs) of temporary buffers.
Minimum sizes (in FLOATs) for precompute buffers.
Describes an FFT.
+Number of dimensions in the FFT.
Combination of
Length of each dimension in the FFT.
Describes compute shader and raw and structured buffer support in the current graphics driver.
+Direct3D 11 devices (
TRUE if compute shaders and raw and structured buffers are supported; otherwise
Describes double data type support in the current graphics driver.
+If the runtime sets DoublePrecisionFloatShaderOps to TRUE, the hardware and driver support the following Shader Model 5 instructions:
Note: If DoublePrecisionFloatShaderOps is TRUE, the hardware and driver do not necessarily support double-precision division.
+Specifies whether double types are allowed. If TRUE, double types are allowed; otherwise
Describes which resources are supported by the current graphics driver for a given format.
+
Combination of
Describes which unordered resource options are supported by the current graphics driver for a given format.
+
Combination of
Describes the multi-threading features that are supported by the current graphics driver.
+Use the
TRUE means resources can be created concurrently on multiple threads while drawing;
TRUE means command lists are supported by the current driver;
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Optionally provide information to texture loader APIs to control how textures get loaded. A value of D3DX11_DEFAULT for any of these parameters will cause D3DX to automatically use the value from the source file.
+When initializing the structure, you may set any member to D3DX11_DEFAULT and D3DX will initialize it with a default value from the source texture when the texture is loaded.
This structure can be used by APIs that:
The default values are:
Width = D3DX11_DEFAULT; Height = D3DX11_DEFAULT; Depth = D3DX11_DEFAULT; FirstMipLevel = D3DX11_DEFAULT; MipLevels = D3DX11_DEFAULT; Usage = () D3DX11_DEFAULT; BindFlags = D3DX11_DEFAULT; CpuAccessFlags = D3DX11_DEFAULT; MiscFlags = D3DX11_DEFAULT; Format = DXGI_FORMAT_FROM_FILE; Filter = D3DX11_DEFAULT; MipFilter = D3DX11_DEFAULT; pSrcInfo = null ; +
Here is a brief example that uses this structure to supply the pixel format when loading a texture. For the complete code, see HDRFormats10.cpp in HDRToneMappingCS11 Sample.
+* pCubeRV = null ; + WCHAR strPath[MAX_PATH]; +LoadInfo; DXUTFindDXSDKMediaFileCch( strPath, MAX_PATH, L"Light Probes\\uffizi_cross.dds" ); LoadInfo.Format = ; hr = ( pd3dDevice, strPath, &LoadInfo, null , &pCubeRV,null ); +
The target width of the texture. If the actual width of the texture is larger or smaller than this value then the texture will be scaled up or down to fit this target width.
The target height of the texture. If the actual height of the texture is larger or smaller than this value then the texture will be scaled up or down to fit this target height.
The depth of the texture. This only applies to volume textures.
The highest resolution mipmap level of the texture. If this is greater than 0, then after the texture is loaded FirstMipLevel will be mapped to mipmap level 0.
The maximum number of mipmap levels in the texture. See the remarks in
Miscellaneous resource properties (see
A
The access permissions the CPU will have for the texture resource. See
Miscellaneous resource properties (see
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Optionally provide information to texture loader APIs to control how textures get loaded. A value of D3DX11_DEFAULT for any of these parameters will cause D3DX to automatically use the value from the source file.
+When initializing the structure, you may set any member to D3DX11_DEFAULT and D3DX will initialize it with a default value from the source texture when the texture is loaded.
This structure can be used by APIs that:
The default values are:
Width = D3DX11_DEFAULT; Height = D3DX11_DEFAULT; Depth = D3DX11_DEFAULT; FirstMipLevel = D3DX11_DEFAULT; MipLevels = D3DX11_DEFAULT; Usage = () D3DX11_DEFAULT; BindFlags = D3DX11_DEFAULT; CpuAccessFlags = D3DX11_DEFAULT; MiscFlags = D3DX11_DEFAULT; Format = DXGI_FORMAT_FROM_FILE; Filter = D3DX11_DEFAULT; MipFilter = D3DX11_DEFAULT; pSrcInfo = null ; +
Here is a brief example that uses this structure to supply the pixel format when loading a texture. For the complete code, see HDRFormats10.cpp in HDRToneMappingCS11 Sample.
+* pCubeRV = null ; + WCHAR strPath[MAX_PATH]; +LoadInfo; DXUTFindDXSDKMediaFileCch( strPath, MAX_PATH, L"Light Probes\\uffizi_cross.dds" ); LoadInfo.Format = ; hr = ( pd3dDevice, strPath, &LoadInfo, null , &pCubeRV,null ); +
The target width of the texture. If the actual width of the texture is larger or smaller than this value then the texture will be scaled up or down to fit this target width.
The target height of the texture. If the actual height of the texture is larger or smaller than this value then the texture will be scaled up or down to fit this target height.
The depth of the texture. This only applies to volume textures.
The highest resolution mipmap level of the texture. If this is greater than 0, then after the texture is loaded FirstMipLevel will be mapped to mipmap level 0.
The maximum number of mipmap levels in the texture. See the remarks in
The way the texture resource is intended to be used. See
The pipeline stages that the texture will be allowed to bind to. See
The access permissions the CPU will have for the texture resource. See
Miscellaneous resource properties (see
A
Filter the texture using the specified filter (only when resampling). See
Filter the texture mip levels using the specified filter (only if generating mipmaps). Valid values are
Information about the original image. See
Debug message filter; contains a lists of message types to allow or deny.
+For use with an
Types of messages that you want to allow. See
Types of messages that you want to deny.
Allow or deny certain types of messages to pass through a filter.
+Number of message categories to allow or deny.
Array of message categories to allow or deny. Array must have at least NumCategories members (see
Number of message severity levels to allow or deny.
Array of message severity levels to allow or deny. Array must have at least NumSeverities members (see
Number of message IDs to allow or deny.
Array of message IDs to allow or deny. Array must have at least NumIDs members (see
Type of data contained in an input slot.
+Use these values to specify the type of data for a particular input element (see
Input data is per-vertex data.
Input data is per-instance data.
A debug message in the Information Queue.
+This structure is returned from
The category of the message. See
The severity of the message. See
The ID of the message. See
The message string.
The length of pDescription in bytes.
Query information about graphics-pipeline activity in between calls to
Query information about the reliability of a timestamp query.
+For a list of query types see
How frequently the GPU counter increments in Hz.
If this is TRUE, something occurred in between the query's
Describes a query.
+Type of query (see
Miscellaneous flags (see
Describes the blend state for a render target.
+You specify an array of
For info about how blending is done, see the output-merger stage.
Here are the default values for blend state.
State | Default Value |
---|---|
BlendEnable | |
SrcBlend | |
DestBlend | |
BlendOp | |
SrcBlendAlpha | |
DestBlendAlpha | |
BlendOpAlpha | |
RenderTargetWriteMask |
?
+Enable (or disable) blending.
This blend option specifies the operation to perform on the RGB value that the pixel shader outputs. The BlendOp member defines how to combine the SrcBlend and DestBlend operations.
This blend option specifies the operation to perform on the current RGB value in the render target. The BlendOp member defines how to combine the SrcBlend and DestBlend operations.
This blend operation defines how to combine the SrcBlend and DestBlend operations.
This blend option specifies the operation to perform on the alpha value that the pixel shader outputs. Blend options that end in _COLOR are not allowed. The BlendOpAlpha member defines how to combine the SrcBlendAlpha and DestBlendAlpha operations.
This blend option specifies the operation to perform on the current alpha value in the render target. Blend options that end in _COLOR are not allowed. The BlendOpAlpha member defines how to combine the SrcBlendAlpha and DestBlendAlpha operations.
This blend operation defines how to combine the SrcBlendAlpha and DestBlendAlpha operations.
A write mask.
Specifies the subresources from a resource that are accessible using a render-target view.
+A render-target-view description is passed into
A render-target-view cannot use the following formats:
If the format is set to
The data format (see
The resource type (see
Specifies which buffer elements can be accessed (see
Specifies the subresources in a 1D texture that can be accessed (see
Specifies the subresources in a 1D texture array that can be accessed (see
Specifies the subresources in a 2D texture that can be accessed (see
Specifies the subresources in a 2D texture array that can be accessed (see
Specifies a single subresource because a multisampled 2D texture only contains one subresource (see
Specifies the subresources in a multisampled 2D texture array that can be accessed (see
Specifies subresources in a 3D texture that can be accessed (see
Specifies the subresource from a multisampled 2D texture to use in a render-target view.
+Since a multisampled 2D texture contains a single subresource, there is actually nothing to specify in
Integer of any value. See remarks.
Specifies the subresources from an array of multisampled 2D textures to use in a render-target view.
+This structure is one member of a render-target-view description (see
The index of the first texture to use in an array of textures.
Number of textures to use.
Specifies the subresource from a 1D texture to use in a render-target view.
+This structure is one member of a render-target-view description (see
The index of the mipmap level to use (the mip slice).
Specifies the subresource from a 2D texture to use in a render-target view.
+This structure is one member of a render-target-view description (see
The index of the mipmap level to use (the mip slice).
Specifies the subresources from an array of 1D textures to use in a render-target view.
+This structure is one member of a render-target-view description (see
The index of the mipmap level to use (the mip slice).
The index of the first texture to use in an array of textures.
Number of textures to use.
Specifies the elements in a buffer resource to use in a render-target view.
+A render-target view is a member of a render-target-view description (see
Number of bytes between the beginning of the buffer and the first element to access.
The offset of the first element in the view to access, relative to element 0.
The total number of elements in the view.
The width of each element (in bytes). This can be determined from the format stored in the render-target-view description.
Specifies the subresources from a 3D texture to use in a render-target view.
+This structure is one member of a render target view. See
The index of the mipmap level to use (the mip slice).
First depth level to use.
Number of depth levels to use in the render-target view, starting from FirstWSlice. A value of -1 indicates all of the slices along the w axis, starting from FirstWSlice.
Specifies the subresources from an array of 2D textures to use in a render-target view.
+This structure is one member of a render-target-view description (see
The index of the mipmap level to use (the mip slice).
The index of the first texture to use in an array of textures.
Number of textures in the array to use in the render target view, starting from FirstArraySlice.
Defines a 3D box.
+The following diagram shows a 3D box, where the origin is the left, front, top corner.
The values for right, bottom, and back are each one pixel past the end of the pixels that are included in the box region. That is, the values for left, top, and front are included in the box region while the values for right, bottom, and back are excluded from the box region. For example, for a box that is one pixel wide, (right - left) == 1; the box region includes the left pixel but not the right pixel.
Coordinates of a box are in bytes for buffers and in texels for textures.
+The x position of the left hand side of the box.
The y position of the top of the box.
The z position of the front of the box.
The x position of the right hand side of the box.
The y position of the bottom of the box.
The z position of the back of the box.
Describes a sampler state.
+These are the default values for sampler state.
State | Default Value |
---|---|
Filter | |
AddressU | |
AddressV | |
AddressW | |
MinLOD | -3.402823466e+38F (-FLT_MAX) |
MaxLOD | 3.402823466e+38F (FLT_MAX) |
MipMapLODBias | 0.0f |
MaxAnisotropy | 1 |
ComparisonFunc | |
BorderColor | float4(1.0f,1.0f,1.0f,1.0f) |
Texture | N/A |
?
+Filtering method to use when sampling a texture (see
Method to use for resolving a u texture coordinate that is outside the 0 to 1 range (see
Method to use for resolving a v texture coordinate that is outside the 0 to 1 range.
Method to use for resolving a w texture coordinate that is outside the 0 to 1 range.
Offset from the calculated mipmap level. For example, if Direct3D calculates that a texture should be sampled at mipmap level 3 and MipLODBias is 2, then the texture will be sampled at mipmap level 5.
Clamping value used if
A function that compares sampled data against existing sampled data. The function options are listed in
Border color to use if
Lower end of the mipmap range to clamp access to, where 0 is the largest and most detailed mipmap level and any level higher than that is less detailed.
Upper end of the mipmap range to clamp access to, where 0 is the largest and most detailed mipmap level and any level higher than that is less detailed. This value must be greater than or equal to MinLOD. To have no upper limit on LOD set this to a large value such as D3D11_FLOAT32_MAX.
Describes a shader-resource view.
+A view is a format-specific way to look at the data in a resource. The view determines what data to look at, and how it is cast when read.
When viewing a resource, the resource-view description must specify a typed format, that is compatible with the resource format. So that means that you cannot create a resource-view description using any format with _TYPELESS in the name. You can however view a typeless resource by specifying a typed format for the view. For example, a
Create a shader-resource-view description by calling
A
The resource type of the view. See D3D11_SRV_DIMENSION. This should be the same as the resource type of the underlying resource. This parameter also determines which _SRV to use in the union below.
View the resource as a buffer using information from a shader-resource view (see
View the resource as a 1D texture using information from a shader-resource view (see
View the resource as a 1D-texture array using information from a shader-resource view (see
View the resource as a 2D-texture using information from a shader-resource view (see
View the resource as a 2D-texture array using information from a shader-resource view (see
View the resource as a 2D-multisampled texture using information from a shader-resource view (see
View the resource as a 2D-multisampled-texture array using information from a shader-resource view (see
View the resource as a 3D texture using information from a shader-resource view (see
View the resource as a 3D-cube texture using information from a shader-resource view (see
View the resource as a 3D-cube-texture array using information from a shader-resource view (see
View the resource as a raw buffer using information from a shader-resource view (see
Specifies the elements in a buffer resource to use in a shader-resource view.
+The
Number of bytes between the beginning of the buffer and the first element to access.
The offset of the first element in the view to access, relative to element 0.
The total number of elements in the view.
The width of each element (in bytes). This can be determined from the format stored in the shader-resource-view description.
Specifies the subresource from a cube texture to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original TextureCube for which
The maximum number of mipmap levels for the view of the texture. See the remarks in
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
Specifies the subresource from a 1D texture to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
As an example, assuming MostDetailedMip = 6 and MipLevels = 2, the view will have access to 2 mipmap levels, 6 and 7, of the original texture for which
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original Texture1D for which
The maximum number of mipmap levels for the view of the texture. See the remarks.
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
Specifies the subresources from a multisampled 2D texture to use in a shader-resource view.
+Since a multisampled 2D texture contains a single subresource, there is actually nothing to specify in
Integer of any value. See remarks.
Specifies the subresources from an array of cube textures to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original TextureCube for which
The maximum number of mipmap levels for the view of the texture. See the remarks in
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
Index of the first 2D texture to use.
Number of cube textures in the array.
Specifies the subresources from a 3D texture to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original Texture3D for which
The maximum number of mipmap levels for the view of the texture. See the remarks in
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
Specifies the subresource from a 2D texture to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original Texture2D for which
The maximum number of mipmap levels for the view of the texture. See the remarks in
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
Specifies the subresources from an array of 1D textures to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original Texture1D for which
The maximum number of mipmap levels for the view of the texture. See the remarks in
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
The index of the first texture to use in an array of textures.
Number of textures in the array.
Specifies the subresources from an array of 2D textures to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
Index of the most detailed mipmap level to use; this number is between 0 and MipLevels (from the original Texture2D for which
The maximum number of mipmap levels for the view of the texture. See the remarks in
Set to -1 to indicate all the mipmap levels from MostDetailedMip on down to least detailed.
The index of the first texture to use in an array of textures.
Number of textures in the array.
Specifies the subresources from an array of multisampled 2D textures to use in a shader-resource view.
+This structure is one member of a shader-resource-view description (see
The index of the first texture to use in an array of textures.
Number of textures to use.
Describes the elements in a raw buffer resource to use in a shader-resource view.
+This structure is used by
The index of the first element to be accessed by the view.
The number of elements in the resource.
A
Description of a vertex element in a vertex buffer in an output slot.
+Zero-based, stream number.
Type of output element; possible values include: "POSITION", "NORMAL", or "TEXCOORD0". Note that if SemanticName is
Output element's zero-based index. Should be used if, for example, you have more than one texture coordinate stored in each vertex.
Which component of the entry to begin writing out to. Valid values are 0 to 3. For example, if you only wish to output to the y and z components of a position, then StartComponent should be 1 and ComponentCount should be 2.
The number of components of the entry to write out to. Valid values are 1 to 4. For example, if you only wish to output to the y and z components of a position, then StartComponent should be 1 and ComponentCount should be 2. Note that if SemanticName is
The associated stream output buffer that is bound to the pipeline (see
Query information about the amount of data streamed out to the stream-output buffers in between
Describes a 1D texture.
+This structure is used in a call to
In addition to this structure, you can also use the CD3D11_TEXTURE1D_DESC derived structure, which is defined in D3D11.h and behaves like an inherited class, to help create a texture description.
The texture size range is determined by the feature level at which you create the device and not the Microsoft Direct3D interface version. For example, if you use Microsoft Direct3D 10 hardware at feature level 10 (
Texture width (in texels). The range is from 1 to
The maximum number of mipmap levels in the texture. See the remarks in
Number of textures in the array. The range is from 1 to
Texture format (see
Value that identifies how the texture is to be read from and written to. The most common value is
Flags (see
Flags (see
Flags (see
Describes a 2D texture.
+This structure is used in a call to
In addition to this structure, you can also use the CD3D11_TEXTURE2D_DESC derived structure, which is defined in D3D11.h and behaves like an inherited class, to help create a texture description.
The device places some size restrictions (must be multiples of a minimum size) for a subsampled, block compressed, or bit-format resource.
The texture size range is determined by the feature level at which you create the device and not the Microsoft Direct3D interface version. For example, if you use Microsoft Direct3D 10 hardware at feature level 10 (
Texture width (in texels). The range is from 1 to
Texture height (in texels). The range is from 1 to
The maximum number of mipmap levels in the texture. See the remarks in
Number of textures in the texture array. The range is from 1 to
Texture format (see
Structure that specifies multisampling parameters for the texture. See
Value that identifies how the texture is to be read from and written to. The most common value is
Flags (see
Flags (see
Flags (see
Describes a 3D texture.
+This structure is used in a call to
In addition to this structure, you can also use the CD3D11_TEXTURE3D_DESC derived structure, which is defined in D3D11.h and behaves like an inherited class, to help create a texture description.
The device restricts the size of subsampled, block compressed, and bit format resources to be multiples of sizes specific to each format.
The texture size range is determined by the feature level at which you create the device and not the Microsoft Direct3D interface version. For example, if you use Microsoft Direct3D 10 hardware at feature level 10 (
Texture width (in texels). The range is from 1 to
Texture height (in texels). The range is from 1 to
Texture depth (in texels). The range is from 1 to
The maximum number of mipmap levels in the texture. See the remarks in
Texture format (see
Value that identifies how the texture is to be read from and written to. The most common value is
Flags (see
Flags (see
Flags (see
Note: The D3DX (D3DX 9, D3DX 10, and D3DX 11) utility library is deprecated for Windows 8 and is not supported for Windows Store apps.
Describes parameters used to load a texture from another texture.
+This structure is used in a call to
The default values are:
pSrcBox = null; pDstBox = null; SrcFirstMip = 0; DstFirstMip = 0; NumMips = D3DX11_DEFAULT; SrcFirstElement = 0; DstFirstElement = 0; NumElements = D3DX11_DEFAULT; Filter = D3DX11_DEFAULT; MipFilter = D3DX11_DEFAULT;
Source texture box (see
Destination texture box (see
Source texture mipmap level, see D3D11CalcSubresource for more detail.
Destination texture mipmap level, see D3D11CalcSubresource for more detail.
Number of mipmap levels in the source texture.
First element of the source texture.
First element of the destination texture.
Number of elements to load.
Filtering options during resampling (see
Filtering options when generating mip levels (see
Specifies the subresources from a resource that are accessible using an unordered-access view.
+An unordered-access-view description is passed into
The data format (see
The resource type (see
Specifies which buffer elements can be accessed (see
Specifies the subresources in a 1D texture that can be accessed (see
Specifies the subresources in a 1D texture array that can be accessed (see
Specifies the subresources in a 2D texture that can be accessed (see
Specifies the subresources in a 2D texture array that can be accessed (see
Specifies subresources in a 3D texture that can be accessed (see
Describes an array of unordered-access 1D texture resources.
+This structure is used by a
The mipmap slice index.
The zero-based index of the first array slice to be accessed.
The number of slices in the array.
Describes the elements in a buffer to use in an unordered-access view.
+This structure is used by a
The zero-based index of the first element to be accessed.
The number of elements in the resource. For structured buffers, this is the number of structures in the buffer.
View options for the resource (see
Describes an unordered-access 2D texture resource.
+This structure is used by a
The mipmap slice index.
Describes an unordered-access 1D texture resource.
+This structure is used by a
The mipmap slice index.
Describes an array of unordered-access 2D texture resources.
+This structure is used by a
The mipmap slice index.
The zero-based index of the first array slice to be accessed.
The number of slices in the array.
Describes an unordered-access 3D texture resource.
+This structure is used by a
The mipmap slice index.
The zero-based index of the first depth slice to be accessed.
The number of depth slices.
Contains information identifying the adapter.
+The following pseudocode example illustrates the version format encoded in the DriverVersion, DriverVersionLowPart, and DriverVersionHighPart members.
Product = HIWORD(DriverVersion.HighPart); Version = LOWORD(DriverVersion.HighPart); SubVersion = HIWORD(DriverVersion.LowPart); Build = LOWORD(DriverVersion.LowPart)
See the Platform SDK for more information about the HIWORD macro, the LOWORD macro, and the
MAX_DEVICE_IDENTIFIER_STRING is a constant with the following definition.
#define MAX_DEVICE_IDENTIFIER_STRING 512
The VendorId, DeviceId, SubSysId, and Revision members can be used in tandem to identify particular chip sets. However, use these members with caution.
+Used for presentation to the user. This should not be used to identify particular drivers, because many different strings might be associated with the same device and driver from different vendors.
Used for presentation to the user.
Device name for GDI.
Identify the version of the Direct3D driver. It is legal to do less than and greater than comparisons on the 64-bit signed integer value. However, exercise caution if you use this element to identify problematic drivers. Instead, you should use DeviceIdentifier. See Remarks.
Can be used to help identify a particular chip set. Query this member to identify the manufacturer. The value can be zero if unknown.
Can be used to help identify a particular chip set. Query this member to identify the type of chip set. The value can be zero if unknown.
Can be used to help identify a particular chip set. Query this member to identify the subsystem, typically the particular board. The value can be zero if unknown.
Can be used to help identify a particular chip set. Query this member to identify the revision level of the chip set. The value can be zero if unknown.
Can be queried to check changes in the driver and chip set. This
Used to determine the Windows Hardware Quality Labs (WHQL) validation level for this driver and device pair. The DWORD is a packed date structure defining the date of the release of the most recent WHQL test passed by the driver. It is legal to perform < and > operations on this value. The following illustrates the date format.
Bits | |
31-16 | The year, a decimal number from 1999 upwards. |
15-8 | The month, a decimal number from 1 to 12. |
7-0 | The day, a decimal number from 1 to 31. |
?
The following values are also used.
0 | Not certified. |
1 | WHQL validated, but no date information is available. |
?
Differences between Direct3D 9 and Direct3D 9Ex:
For Direct3D9Ex running on Windows Vista, Windows Server 2008, Windows 7, and Windows Server 2008 R2 (or more current operating system),
Gets the effect description.
+Returns a description of the effect. See
If the method succeeds, the return value is
Gets a parameter or annotation description.
+Parameter or annotation handle. See Handles (Direct3D 9).
Returns a description of the specified parameter or annotation. See
Gets a technique description.
+Technique handle. See Handles (Direct3D 9).
Returns a description of the technique. See
Gets a pass description.
+Pass handle. See Handles (Direct3D 9).
Returns a description of the specified pass. See
Note??If an effect is created with
Gets a function description.
+Function handle. See Handles (Direct3D 9).
Returns a description of the function. See
Gets the handle of a top-level parameter or a structure member parameter.
+Handle of the parameter, or
Parameter index.
Returns the handle of the specified parameter, or
Gets the handle of a top-level parameter or a structure member parameter by looking up its name.
+Handle of the parameter, or
String containing the parameter name.
Returns the handle of the specified parameter, or
Gets the handle of a top-level parameter or a structure member parameter by looking up its semantic with a case-insensitive search.
+Handle of the parameter, or
String containing the semantic name.
Returns the handle of the first parameter that matches the specified semantic, or
Get the handle of an array element parameter.
+Handle of the array. See Handles (Direct3D 9).
Array element index.
Returns the handle of the specified parameter, or
This method is used to get an element of a parameter that is an array.
+Gets the handle of a technique.
+Technique index.
Returns the handle of the specified technique, or
Gets the handle of a technique by looking up its name.
+String containing the technique name.
Returns the handle of the first technique that has the specified name, or
Gets the handle of a pass.
+Handle of the parent technique. See Handles (Direct3D 9).
Index for the pass.
Returns the handle of the specified pass inside the specified technique, or
Gets the handle of a pass by looking up its name.
+Handle of the parent technique. See Handles (Direct3D 9).
String containing the pass name.
Returns the handle of the first pass inside the specified technique that has the specified name, or
Gets the handle of a function.
+Function index.
Returns the handle of the specified function, or
Gets the handle of a function by looking up its name.
+String containing the function name.
Returns the handle of the specified function, or
Gets the handle of an annotation.
+Handle of a technique, pass, or top-level parameter. See Handles (Direct3D 9).
Annotation index.
Returns the handle of the specified annotation, or
Annotations are user-specific data that can be attached to any technique, pass, or parameter. See Handles (Direct3D 9).
+Gets the handle of an annotation by looking up its name.
+Handle of a technique, pass, or top-level parameter. See Handles (Direct3D 9).
String containing the annotation name.
Returns the handle of the specified annotation, or
Set the value of an arbitrary parameter or annotation, including simple types, structs, arrays, strings, shaders and textures.
+Unique identifier. See Handles (Direct3D 9).
Pointer to a buffer containing data.
[in] Number of bytes in the buffer. Pass in D3DX_DEFAULT if you know your buffer is large enough to contain the entire parameter, and you want to skip size validation.
If the method succeeds, the return value is
This method can be used in place of nearly all the effect set API calls.
+Get the value of an arbitrary parameter or annotation, including simple types, structs, arrays, strings, shaders and textures. This method can be used in place of nearly all the Getxxx calls in
If the method succeeds, the return value is
Sets a
Unique identifier. See Handles (Direct3D 9).
Boolean value.
If the method succeeds, the return value is
Gets a
Unique identifier. See Handles (Direct3D 9).
Returns a Boolean value.
If the method succeeds, the return value is
Sets an array of Boolean values.
+Unique identifier. See Handles (Direct3D 9).
Array of Boolean values.
Number of Boolean values in the array.
If the method succeeds, the return value is
Gets an array of
Unique identifier. See Handles (Direct3D 9).
Returns an array of Boolean values.
Number of Boolean values in the array.
If the method succeeds, the return value is
Sets an integer.
+Unique identifier. See Handles (Direct3D 9).
Integer value.
If the method succeeds, the return value is
Gets an integer.
+Unique identifier. See Handles (Direct3D 9).
Returns an integer.
If the method succeeds, the return value is
Sets an array of integers.
+Unique identifier. See Handles (Direct3D 9).
Array of integers.
Number of integers in the array.
If the method succeeds, the return value is
Gets an array of integers.
+Unique identifier. See Handles (Direct3D 9).
Returns an array of integers.
Number of integers in the array.
If the method succeeds, the return value is
Sets a floating point value.
+Unique identifier. See Handles (Direct3D 9).
Floating point value.
If the method succeeds, the return value is
Gets a floating point value.
+Unique identifier. See Handles (Direct3D 9).
Returns a floating point value.
If the method succeeds, the return value is
Sets an array of floating point values.
+Unique identifier. See Handles (Direct3D 9).
Array of floating point values.
Number of floating point values in the array.
If the method succeeds, the return value is
Gets an array of floating point values.
+Unique identifier. See Handles (Direct3D 9).
Returns an array of floating point values.
Number of floating point values in the array.
If the method succeeds, the return value is
Sets a vector.
+Unique identifier. See Handles (Direct3D 9).
Pointer to a 4D vector.
If the method succeeds, the return value is
If the destination vector is smaller than the source vector, the additional components of the source vector will be ignored.
+Gets a vector.
+Unique identifier. See Handles (Direct3D 9).
Returns a 4D vector.
If the method succeeds, the return value is
If the destination vector is larger than the source vector, only the initial components of the destination vector will be filled, and the remaining components will be set to zero.
+Sets an array of vectors.
+Unique identifier. See Handles (Direct3D 9).
Array of 4D floating point vectors.
Number of vectors in the array.
If the method succeeds, the return value is
If the destination vectors are smaller than the source vectors, the additional components of the source vectors will be ignored.
+Gets an array of vectors.
+Unique identifier. See Handles (Direct3D 9).
Returns an array of 4D floating point vectors.
Number of vectors in the array.
If the method succeeds, the return value is
If the destination vectors are larger than the source vectors, only the initial components of each destination vector will be filled, and the remaining destination vector components will be set to zero.
+Sets a non-transposed matrix.
+Unique identifier. See Handles (Direct3D 9).
Pointer to a nontransposed matrix. See
If the method succeeds, the return value is
A non-transposed matrix contains row-major data. In other words, each vector is contained in a row.
If the destination matrix is smaller than the source matrix, the additional components of the source matrix will be ignored.
+Gets a nontransposed matrix.
+Unique identifier. See Handles (Direct3D 9).
Returns a nontransposed matrix. See
If the method succeeds, the return value is
A nontransposed matrix contains row-major data; that is, each vector is contained in a row.
If the destination matrix is larger than the source matrix, only the upper-left components of the destination matrix will be filled, and the remaining components will be set to zero.
+Sets an array of nontransposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Array of nontransposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A nontransposed matrix contains row-major data; that is, each vector is contained in a row.
If the destination matrices are smaller than the source matrices, the additional components of the source matrices will be ignored.
+Gets an array of nontransposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Returns an array of nontransposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A nontransposed matrix contains row-major data; that is, each vector is contained in a row.
If the destination matrices are larger than the source matrices, only the upper-left components of each destination matrix will be filled, and the remaining destination matrix components will be set to zero.
+Sets an array of references to nontransposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Array of references to nontransposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A nontransposed matrix contains row-major data; that is, each vector is contained in a row.
If the destination matrices are smaller than the source matrices, the additional components of the source matrices will be ignored.
+Gets an array of references to nontransposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Array of references to nontransposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A nontransposed matrix contains row-major data; that is, each vector is contained in a row.
If the destination matrices are larger than the source matrices, only the upper-left components of each destination matrix will be filled, and the remaining destination matrix components will be set to zero.
+Sets a transposed matrix.
+Unique identifier. See Handles (Direct3D 9).
Pointer to a transposed matrix. See
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
If the destination matrix is smaller than the source matrix, the additional components of the source matrix will be ignored.
+Gets a transposed matrix.
+Unique identifier. See Handles (Direct3D 9).
Returns a transposed matrix. See
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
If the destination matrix is larger than the source matrix, only the upper-left elements of the destination matrix will be filled, and the remaining destination matrix components will be set to zero.
+Sets an array of transposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Array of transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
If the destination matrices are smaller than the source matrices, the additional components of the source matrices will be ignored.
+Gets an array of transposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Returns an array of transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
If the destination matrices are larger than the source matrices, only the upper-left components of each destination matrix will be filled, and the remaining destination matrix components will be set to zero.
+Sets an array of references to transposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Array of references to transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
If the destination matrices are smaller than the source matrices, the additional components of the source matrices will be ignored.
+Gets an array of references to transposed matrices.
+Unique identifier. See Handles (Direct3D 9).
Array of references to transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
If the destination matrices are larger than the source matrices, only the upper-left components of each destination matrix will be filled, and the remaining destination matrix components will be set to zero.
+Sets a string.
+Unique identifier. See Handles (Direct3D 9).
String to set.
If the method succeeds, the return value is
Gets a string.
+Unique identifier. See Handles (Direct3D 9).
Returns a string identified by hParameter.
Sets a texture.
+Unique identifier. See Handles (Direct3D 9).
Texture object. See
If the method succeeds, the return value is
Gets a texture.
+Unique identifier. See Handles (Direct3D 9).
Returns a texture object. See
Gets a pixel shader.
+Unique identifier. See Handles (Direct3D 9).
Returns a pixel shader object. See
Gets a vertex shader.
+Unique identifier. See Handles (Direct3D 9).
Returns a vertex shader object. See
Set the range of an array to pass to the device.
+Unique identifier. See Handles (Direct3D 9).
Start index.
Stop index.
If the method succeeds, the return value is
Gets the effect description.
+Applications use the methods of the
The
The LPDIRECT3DBASETEXTURE9 and PDIRECT3DBASETEXTURE9 types are defined as references to the
typedef struct IDirect3DBaseTexture9 *LPDIRECT3DBASETEXTURE9, *PDIRECT3DBASETEXTURE9;
Applications use the methods of the
To create a texture resource, you can call one of the following methods.
To create a geometry-oriented resource, you can call one of the following methods.
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DRESOURCE9 and PDIRECT3DRESOURCE9 types are defined as references to the
typedef struct IDirect3DResource9 *LPDIRECT3DRESOURCE9, *PDIRECT3DRESOURCE9;
Retrieves the device associated with a resource.
+Address of a reference to an
If the method succeeds, the return value is
This method allows navigation to the owning device object.
Calling this method will increase the internal reference count on the
Associates data with the resource that is intended for use by the application, not by Direct3D. Data is passed by value, and multiple sets of data can be associated with a single resource.
+Reference to the globally unique identifier that identifies the private data to set.
Pointer to a buffer that contains the data to be associated with the resource.
Size of the buffer at pData, in bytes.
Value that describes the type of data being passed, or indicates to the application that the data should be invalidated when the resource changes.
Item | Description |
---|---|
(none) | If no flags are specified, Direct3D allocates memory to hold the data within the buffer and copies the data into the new buffer. The buffer allocated by Direct3D is automatically freed, as appropriate. |
D3DSPD_IUNKNOWN | The data at pData is a reference to an |
?
If the method succeeds, the return value is
Direct3D does not manage the memory at pData. If this buffer was dynamically allocated, it is the calling application's responsibility to free the memory.
+Copies the private data associated with the resource to a provided buffer.
+The globally unique identifier that identifies the private data to retrieve.
Pointer to a previously allocated buffer to fill with the requested private data if the call succeeds. The application calling this method is responsible for allocating and releasing this buffer. If this parameter is
Pointer to the size of the buffer at pData, in bytes. If this value is less than the actual size of the private data (such as 0), the method sets this parameter to the required buffer size and the method returns
If the method succeeds, the return value is
This method is inherited by the following interfaces:
Frees the specified private data associated with this resource.
+Reference to the globally unique identifier that identifies the private data to free.
If the method succeeds, the return value is
Direct3D calls this method automatically when a resource is released.
+Assigns the priority of a resource for scheduling purposes.
+Priority to assign to a resource.
Differences between Direct3D 9 and Direct3D 9 for Windows Vista The priority can be any DWORD value; Direct3D 9 for Windows Vista also supports any of these pre-defined values D3D9_RESOURCE_PRIORITY. |
?
Returns the previous priority value for the resource.
This method is used to change the priority of managed resources (resources created with the
Priorities are used to determine when managed resources are to be removed from memory. A resource assigned a low priority is removed before a resource with a high priority. If two resources have the same priority, the resource that was used more recently is kept in memory; the other resource is removed. Managed resources have a default priority of 0.
Windows Vista only - When this method is called using an
Retrieves the priority for this resource.
+Returns a DWORD value, indicating the priority of the resource.
Priorities are used to determine when managed resources are to be removed from memory. A resource assigned a low priority is removed before a resource with a high priority. If two resources have the same priority, the resource that was used more recently is kept in memory; the other resource is removed. Managed resources have a default priority of 0.
+Preloads a managed resource.
+Calling this method indicates that the application will need this managed resource shortly. This method has no effect on nonmanaged resources.
Returns the type of the resource.
+Returns a member of the
Retrieves the device associated with a resource.
+This method allows navigation to the owning device object.
Calling this method will increase the internal reference count on the
Retrieves the priority for this resource.
+Priorities are used to determine when managed resources are to be removed from memory. A resource assigned a low priority is removed before a resource with a high priority. If two resources have the same priority, the resource that was used more recently is kept in memory; the other resource is removed. Managed resources have a default priority of 0.
+Returns the type of the resource.
+Sets the most detailed level-of-detail for a managed texture.
+Most detailed level-of-detail value to set for the mipmap chain.
A DWORD value, clamped to the maximum level-of-detail value (one less than the total number of levels). Subsequent calls to this method will return the clamped value, not the level-of-detail value that was previously set.
This method applies to the following interfaces, which inherit from
SetLOD is used for level-of-detail control of managed textures. This method returns 0 on nonmanaged textures.
SetLOD communicates to the Direct3D texture manager the most detailed mipmap in the chain that should be loaded into local video memory. For example, in a five-level mipmap chain, setting LODNew to 2 indicates that the texture manager should load only mipmap levels 2 through 4 into local video memory at any given time.
More specifically, if the texture was created with the dimensions of 256x256, setting the most detailed level to 0 indicates that 256 x 256 is the largest mipmap available, setting the most detailed level to 1 indicates that 128 x 128 is the largest mipmap available, and so on, up to the most detailed mip level (the smallest texture size) for the chain.
+Returns a value clamped to the maximum level-of-detail set for a managed texture (this method is not supported for an unmanaged texture).
+A DWORD value, clamped to the maximum level-of-detail value (one less than the total number of levels). Calling GetLOD on an unmanaged texture is not supported and will result in a D3DERR error code being returned.
Returns the number of texture levels in a multilevel texture.
+A DWORD value that indicates the number of texture levels in a multilevel texture.
Warning: If you create a texture with
This method applies to the following interfaces, which inherit from
Set the filter type that is used for automatically generated mipmap sublevels.
+Filter type. See
If the method succeeds, the return value is
Changing the filter type "dirties" the mipmap sublevels and causes them to be regenerated.
The (default) filter type set at texture creation time is
This method has no effect if the texture is not created with
Get the filter type that is used for automatically generated mipmap sublevels.
+Filter type. See
Changing the filter type "dirties" the mipmap sublevels and causes them to be regenerated.
The (default) filter type set at texture creation time is
This method has no effect if the texture is not created with
Generate mipmap sublevels.
+An application can generate mipmap sublevels at any time by calling GenerateMipSubLevels. To have mipmap sublevels generated automatically at texture creation time (see Automatic Generation of Mipmaps (Direct3D 9)), specify
Returns the number of texture levels in a multilevel texture.
+Warning: If you create a texture with
This method applies to the following interfaces, which inherit from
Gets or sets the filter type that is used for automatically generated mipmap sublevels.
+Changing the filter type "dirties" the mipmap sublevels and causes them to be regenerated.
The (default) filter type set at texture creation time is
This method has no effect if the texture is not created with
Represents the capabilities of the hardware exposed through the Direct3D object.
+The MaxTextureBlendStages and MaxSimultaneousTextures members might seem similar, but they contain different information. The MaxTextureBlendStages member contains the total number of texture-blending stages supported by the current device, and the MaxSimultaneousTextures member describes how many of those stages can have textures bound to them by using the SetTexture method.
When the driver fills this structure, it can set values for execute-buffer capabilities, even when the interface being used to retrieve the capabilities (such as
In general, performance problems may occur if you use a texture and then modify it during a scene. Ensure that no texture used in the current BeginScene and EndScene block is evicted unless absolutely necessary. In the case of extremely high texture usage within a scene, the results are undefined. This occurs when you modify a texture that you have used in the scene and there is no spare texture memory available. For such systems, the contents of the z-buffer become invalid at EndScene. Applications should not call UpdateSurface to or from the back buffer on this type of hardware inside a BeginScene/EndScene pair. In addition, applications should not try to access the z-buffer if the
The following flags concerning mipmapped textures are not supported in Direct3D 9.
Member of the
Adapter on which this Direct3D device was created. This ordinal is valid only to pass to methods of the
The following driver-specific capability.
Value | Meaning |
---|---|
Display hardware is capable of returning the current scan line. | |
The display driver supports an overlay DDI that allows for verification of overlay capabilities. For more information about the overlay DDI, see Overlay DDI. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? |
?
Driver-specific capabilities identified in
Driver-specific capabilities identified in
Bit mask of values representing what presentation swap intervals are available.
Value | Meaning |
---|---|
The driver supports an immediate presentation swap interval. | |
The driver supports a presentation swap interval of every screen refresh. | |
The driver supports a presentation swap interval of every second screen refresh. | |
The driver supports a presentation swap interval of every third screen refresh. | |
The driver supports a presentation swap interval of every fourth screen refresh. |
?
Bit mask indicating what hardware support is available for cursors. Direct3D 9 does not define alpha-blending cursor capabilities.
Value | Meaning |
---|---|
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports at least a hardware color cursor in high-resolution modes (with scan lines greater than or equal to 400). | |
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports a hardware color cursor in both high-resolution and low-resolution modes (with scan lines less than 400). |
?
Flags identifying the capabilities of the device.
Value | Meaning |
---|---|
Device supports blits from system-memory textures to nonlocal video-memory textures. | |
Device can queue rendering commands after a page flip. Applications do not change their behavior if this flag is set; this capability means that the device is relatively fast. | |
Device can support at least a DirectX 5-compliant driver. | |
Device can support at least a DirectX 7-compliant driver. | |
Device exports an | |
Device can use execute buffers from system memory. | |
Device can use execute buffers from video memory. | |
Device has hardware acceleration for scene rasterization. | |
Device can support transformation and lighting in hardware. | |
Device supports N patches. | |
Device can support rasterization, transform, lighting, and shading in hardware. | |
Device supports quintic Bézier curves and B-splines. | |
Device supports rectangular and triangular patches. | |
When this device capability is set, the hardware architecture does not require caching of any information, and uncached patches (handle zero) will be drawn as efficiently as cached ones. Note that setting | |
Device is texturing from separate memory pools. | |
Device can retrieve textures from non-local video memory. | |
Device can retrieve textures from system memory. | |
Device can retrieve textures from device memory. | |
Device can use buffers from system memory for transformed and lit vertices. | |
Device can use buffers from video memory for transformed and lit vertices. |
?
Miscellaneous driver primitive capabilities. See
Information on raster-drawing capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports anisotropic filtering. | |
Device iterates colors perspective correctly. | |
Device can dither to improve color resolution. | |
Device supports legacy depth bias. For true depth bias, see | |
Device supports range-based fog. In range-based fog, the distance of an object from the viewer is used to compute fog effects, not the depth of the object (that is, the z-coordinate) in the scene. | |
Device calculates the fog value by referring to a lookup table containing fog values that are indexed to the depth of a given pixel. | |
Device calculates the fog value during the lighting operation and interpolates the fog value during rasterization. | |
Device supports level-of-detail bias adjustments. These bias adjustments enable an application to make a mipmap appear crisper or less sharp than it normally would. For more information about level-of-detail bias in mipmaps, see | |
Device supports toggling multisampling on and off between | |
Device supports scissor test. See Scissor Test (Direct3D 9). | |
Device performs true slope-scale based depth bias. This is in contrast to the legacy style depth bias. | |
Device supports depth buffering using w. | |
Device supports w-based fog. W-based fog is used when a perspective projection matrix is specified, but affine projections still use z-based fog. The system considers a projection matrix that contains a nonzero value in the [3][4] element to be a perspective projection matrix. | |
Device can perform hidden-surface removal (HSR) without requiring the application to sort polygons and without requiring the allocation of a depth-buffer. This leaves more video memory for textures. The method used to perform HSR is hardware-dependent and is transparent to the application. Z-bufferless HSR is performed if no depth-buffer surface is associated with the rendering-target surface and the depth-buffer comparison test is enabled (that is, when the state value associated with the | |
Device supports z-based fog. | |
Device can perform z-test operations. This effectively renders a primitive and indicates whether any z pixels have been rendered. |
?
Z-buffer comparison capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Always pass the z-test. | |
Pass the z-test if the new z equals the current z. | |
Pass the z-test if the new z is greater than the current z. | |
Pass the z-test if the new z is greater than or equal to the current z. | |
Pass the z-test if the new z is less than the current z. | |
Pass the z-test if the new z is less than or equal to the current z. | |
Always fail the z-test. | |
Pass the z-test if the new z does not equal the current z. |
?
Source-blending capabilities. This member can be one or more of the following flags. (The RGBA values of the source and destination are indicated by the subscripts s and d.)
Value | Meaning |
---|---|
The driver supports both | |
Source blend factor is (1 - As, 1 - As, 1 - As, 1 - As) and destination blend factor is (As, As, As, As); the destination blend selection is overridden. | |
The driver supports the | |
Blend factor is (Ad, Ad, Ad, Ad). | |
Blend factor is (Rd, Gd, Bd, Ad). | |
Blend factor is (1 - Ad, 1 - Ad, 1 - Ad, 1 - Ad). | |
Blend factor is (1 - Rd, 1 - Gd, 1 - Bd, 1 - Ad). | |
Blend factor is (1 - As, 1 - As, 1 - As, 1 - As). | |
Blend factor is (1 - Rs, 1 - Gs, 1 - Bs, 1 - As). | |
Blend factor is (1 - PSOutColor[1]r, 1 - PSOutColor[1]g, 1 - PSOutColor[1]b, not used). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (1, 1, 1, 1). | |
Blend factor is (As, As, As, As). | |
Blend factor is (f, f, f, 1); f = min(As, 1 - Ad). | |
Blend factor is (Rs, Gs, Bs, As). | |
Blend factor is (PSOutColor[1]r, PSOutColor[1]g, PSOutColor[1]b, not used). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (0, 0, 0, 0). |
?
Destination-blending capabilities. This member can be the same capabilities that are defined for the SrcBlendCaps member.
Alpha-test comparison capabilities. This member can include the same capability flags defined for the ZCmpCaps member. If this member contains only the
Shading operations capabilities. It is assumed, in general, that if a device supports a given command at all, it supports the
The color, specular highlights, fog, and alpha interpolants of a triangle each have capability flags that an application can use to find out how they are implemented by the device driver.
This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device can support an alpha component for Gouraud-blended transparency (the | |
Device can support colored Gouraud shading. In this mode, the per-vertex color components (red, green, and blue) are interpolated across a triangle face. | |
Device can support fog in the Gouraud shading mode. | |
Device supports Gouraud shading of specular highlights. |
?
Miscellaneous texture-mapping capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Alpha in texture pixels is supported. | |
Device can draw alpha from texture palettes. | |
Supports cube textures. | |
Device requires that cube texture maps have dimensions specified as powers of two. | |
Device supports mipmapped cube textures. | |
Device supports mipmapped textures. | |
Device supports mipmapped volume textures. | |
If this flag is not set, and A texture that is not a power of two cannot be set at a stage that will be read based on a shader computation (such as the bem - ps and texm3x3 - ps instructions in pixel shaders versions 1_0 to 1_3). For example, these textures can be used to store bumps that will be fed into texture reads, but not the environment maps that are used in texbem - ps, texbeml - ps, and texm3x3spec - ps. This means that a texture with dimensions that are not powers of two cannot be addressed or sampled using texture coordinates computed within the shader. This type of operation is known as a dependent read and cannot be performed on these types of textures. | |
Device does not support a projected bump-environment lookup operation in programmable and fixed function shaders. | |
Perspective correction texturing is supported. | |
If If If this flag is not set, and | |
Supports the | |
All textures must be square. | |
Texture indices are not scaled by the texture size prior to interpolation. | |
Device supports volume textures. | |
Device requires that volume texture maps have dimensions specified as powers of two. |
?
Texture-filtering capabilities for a texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-filtering capabilities for a cube texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-filtering capabilities for a volume texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-addressing capabilities for texture objects. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports setting coordinates outside the range [0.0, 1.0] to the border color, as specified by the | |
Device can clamp textures to addresses. | |
Device can separate the texture-addressing modes of the u and v coordinates of the texture. This ability corresponds to the | |
Device can mirror textures to addresses. | |
Device can take the absolute value of the texture coordinate (thus, mirroring around 0) and then clamp to the maximum value. | |
Device can wrap textures to addresses. |
?
Texture-addressing capabilities for a volume texture. This member can be one or more of the flags defined for the TextureAddressCaps member.
Defines the capabilities for line-drawing primitives.
Value | Meaning |
---|---|
Supports alpha-test comparisons. | |
Antialiased lines are supported. | |
Supports source-blending. | |
Supports fog. | |
Supports texture-mapping. | |
Supports z-buffer comparisons. |
?
Maximum texture width for this device.
Maximum texture height for this device.
Maximum value for any of the three dimensions (width, height, and depth) of a volume texture.
This number represents the maximum range of the integer bits of the post-normalized texture coordinates. A texture coordinate is stored as a 32-bit signed integer using 27 bits to store the integer part and 5 bits for the floating point fraction. The maximum integer index, 2^27, is used to determine the maximum texture coordinate, depending on how the hardware does texture-coordinate scaling.
Some hardware reports the cap
Less desirably, on some hardware
For example, assume that MaxTextureRepeat is equal to 32k and the size of the texture is 4k. If the hardware sets
Maximum texture aspect ratio supported by the hardware, typically a power of 2.
Maximum valid value for the
Maximum W-based depth value that the device supports.
Screen-space coordinate of the guard-band clipping region. Coordinates inside this rectangle but outside the viewport rectangle are automatically clipped.
Screen-space coordinate of the guard-band clipping region. Coordinates inside this rectangle but outside the viewport rectangle are automatically clipped.
Screen-space coordinate of the guard-band clipping region. Coordinates inside this rectangle but outside the viewport rectangle are automatically clipped.
Screen-space coordinate of the guard-band clipping region. Coordinates inside this rectangle but outside the viewport rectangle are automatically clipped.
Number of pixels to adjust the extents rectangle outward to accommodate antialiasing kernels.
Flags specifying supported stencil-buffer operations. Stencil operations are assumed to be valid for all three stencil-buffer operation render states (
For more information, see
Flexible vertex format capabilities.
Value | Meaning |
---|---|
It is preferable that vertex elements not be stripped. That is, if the vertex format contains elements that are not used with the current render states, there is no need to regenerate the vertices. If this capability flag is not present, stripping extraneous elements from the vertex format provides better performance. | |
Point size is determined by either the render state or the vertex data. If an FVF is used, point size can come from point size data in the vertex declaration. Otherwise, point size is determined by the render state | |
Masks the low WORD of FVFCaps. These bits, cast to the WORD data type, describe the total number of texture coordinate sets that the device can simultaneously use for multiple texture blending. (You can use up to eight texture coordinate sets for any vertex, but the device can blend using only the specified number of texture coordinate sets.) |
?
Combination of flags describing the texture operations supported by this device. The following flags are defined.
Value | Meaning |
---|---|
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The | |
The |
?
Maximum number of texture-blending stages supported in the fixed function pipeline. This value is the number of blenders available. In the programmable pixel pipeline, this corresponds to the number of unique texture registers used by pixel shader instructions.
Maximum number of textures that can be simultaneously bound to the fixed-function pipeline sampler stages. If the same texture is bound to two sampler stages, it counts as two textures.
This value has no meaning in the programmable pipeline where the number of sampler stages is determined by each pixel shader version. Each pixel shader version also determines the number of texture declaration instructions. See Pixel Shaders.
Vertex processing capabilities. For a given physical device, this capability might vary across Direct3D devices depending on the parameters supplied to CreateDevice. See
Maximum number of lights that can be active simultaneously. For a given physical device, this capability might vary across Direct3D devices depending on the parameters supplied to CreateDevice.
Maximum number of user-defined clipping planes supported. This member can be 0. For a given physical device, this capability may vary across Direct3D devices depending on the parameters supplied to CreateDevice.
Maximum number of matrices that this device can apply when performing multimatrix vertex blending. For a given physical device, this capability may vary across Direct3D devices depending on the parameters supplied to CreateDevice.
DWORD value that specifies the maximum matrix index that can be indexed into using the per-vertex indices. The number of matrices is MaxVertexBlendMatrixIndex + 1, which is the size of the matrix palette. If normals are present in the vertex data that needs to be blended for lighting, then the number of matrices is half the number specified by this capability flag. If MaxVertexBlendMatrixIndex is set to zero, the driver does not support indexed vertex blending. If this value is not zero then the valid range of indices is zero through MaxVertexBlendMatrixIndex.
A zero value for MaxVertexBlendMatrixIndex indicates that the driver does not support indexed matrices.
When software vertex processing is used, 256 matrices could be used for indexed vertex blending, with or without normal blending.
For a given physical device, this capability may vary across Direct3D devices depending on the parameters supplied to CreateDevice.
Maximum size of a point primitive. If set to 1.0f then device does not support point size control. The range is greater than or equal to 1.0f.
Maximum number of primitives for each DrawPrimitive call. There are two cases: +
Maximum size of indices supported for hardware vertex processing. It is possible to create 32-bit index buffers; however, you will not be able to render with the index buffer unless this value is greater than 0x0000FFFF.
Maximum number of concurrent data streams for SetStreamSource. The valid range is 1 to 16. Note that if this value is 0, then the driver is not a Direct3D 9 driver.
Maximum stride for SetStreamSource.
Two numbers that represent the vertex shader main and sub versions. For more information about the instructions supported for each vertex shader version, see Version 1_x, Version 2_0, Version 2_0 Extended, or Version 3_0.
The number of vertex shader Vertex Shader Registers that are reserved for constants.
Two numbers that represent the pixel shader main and sub versions. For more information about the instructions supported for each pixel shader version, see Version 1_x, Version 2_0, Version 2_0 Extended, or Version 3_0.
Maximum value of pixel shader arithmetic component. This value indicates the internal range of values supported for pixel color blending operations. Within the range that they report to, implementations must allow data to pass through pixel processing unmodified (unclamped). Normally, the value of this member is an absolute value. For example, a 1.0 indicates that the range is -1.0 to 1.0, and an 8.0 indicates that the range is -8.0 to 8.0. The value must be >= 1.0 for any hardware that supports pixel shaders.
Device driver capabilities for adaptive tessellation. For more information, see
This number indicates which device is the master for this subordinate. This number is taken from the same space as the adapter values.
For multihead support, one head will be denoted the master head, and all other heads on the same card will be denoted subordinate heads. If more than one multihead adapter is present in a system, the master and its subordinates from one multihead adapter are called a group.
This number indicates the order in which heads are referenced by the API. The value for the master adapter is always 0. These values do not correspond to the adapter ordinals. They apply only to heads within a group.
This number indicates which device is the master for this subordinate. This number is taken from the same space as the adapter values.
For multihead support, one head will be denoted the master head, and all other heads on the same card will be denoted subordinate heads. If more than one multihead adapter is present in a system, the master and its subordinates from one multihead adapter are called a group.
This number indicates the order in which heads are referenced by the API. The value for the master adapter is always 0. These values do not correspond to the adapter ordinals. They apply only to heads within a group.
Number of adapters in this adapter group (only if master). This will be 1 for conventional adapters. The value will be greater than 1 for the master adapter of a multihead card. The value will be 0 for a subordinate adapter of a multihead card. Each card can have at most one master, but may have many subordinates.
A combination of one or more data types contained in a vertex declaration. See
Number of simultaneous render targets. This number must be at least one.
Combination of constants that describe the operations supported by StretchRect. The flags that may be set in this field are:
Constant | Description |
---|---|
Device supports point-sample filtering for minifying rectangles. This filter type is requested by calling StretchRect using | |
Device supports point-sample filtering for magnifying rectangles. This filter type is requested by calling StretchRect using | |
Device supports bilinear interpolation filtering for minifying rectangles. This filter type is requested by calling StretchRect using | |
Device supports bilinear interpolation filtering for magnifying rectangles. This filter type is requested by calling StretchRect using |
?
For more information, see
Device supports vertex shader version 2_0 extended capability. See
Device supports pixel shader version 2_0 extended capability. See
Device supports vertex shader texture filter capability. See
Maximum number of vertex shader instructions that can be run when using flow control. The maximum number of instructions that can be programmed is MaxVertexShader30InstructionSlots.
Maximum number of pixel shader instructions that can be run when using flow control. The maximum number of instructions that can be programmed is MaxPixelShader30InstructionSlots.
Maximum number of vertex shader instruction slots supported. The maximum value that can be set on this cap is 32768. Devices that support vs_3_0 are required to support at least 512 instruction slots.
Maximum number of pixel shader instruction slots supported. The maximum value that can be set on this cap is 32768. Devices that support ps_3_0 are required to support at least 512 instruction slots.
The
The LPD3DXCONSTANTTABLE type is defined as a reference to the
typedef interface ID3DXConstantTable ID3DXConstantTable; + typedef interface ID3DXConstantTable *LPD3DXCONSTANTTABLE; +
Gets a reference to the buffer that contains the constant table.
+Returns a reference the buffer.
Gets the buffer size of the constant table.
+Returns the size of the buffer, in bytes.
Gets a description of the constant table.
+Description of the constant table. See
If the method succeeds, the return value is
Gets a reference to an array of constant descriptions in the constant table.
+Unique identifier to a constant. See
Returns a reference to an array of descriptions. See
The input supplied must be the maximum size of the array. The output is the number of elements that are filled in the array when the function returns.
If the method succeeds, the return value is
Because a sampler can appear more than once in a constant table, this method can return an array of descriptions, each one with a different register index.
+Returns the sampler index.
+The sampler handle.
Returns the sampler index number from the constant table.
Gets a constant by looking up its index.
+Unique identifier to the parent data structure. If the constant is a top-level parameter (there is no parent data structure), use
Zero-based index of the constant.
Returns a unique identifier to the constant.
To get a constant from an array of constants, use
Gets a constant by looking up its name.
+Unique identifier to the parent data structure. If the constant is a top-level parameter (there is no parent data structure), use
Name of the constant.
Returns a unique identifier to the constant.
Gets a constant from an array of constants. An array is made up of elements.
+Unique identifier to the array of constants. This value may not be
Zero-based index of the element in the array.
Returns a unique identifier to the element constant.
To get a constant that is not part of an array, use
Sets the constants to their default values. The default values are declared in the variable declarations in the shader.
+Pointer to an
If the method succeeds, the return value is
Sets the contents of the buffer to the constant table.
+Pointer to an
Unique identifier to a constant. See
Buffer containing data.
Size of the buffer, in bytes.
If the method succeeds, the return value is
Sets a Boolean value.
+Pointer to an
Unique identifier to the constant. See
Boolean value.
If the method succeeds, the return value is
Sets an array of Boolean values.
+Pointer to an
Unique identifier to the array of constants. See
Array of Boolean values.
Number of Boolean values in the array.
If the method succeeds, the return value is
Sets an integer value.
+Pointer to an
Unique identifier to the constant. See
Integer.
If the method succeeds, the return value is
Sets an array of integers.
+Pointer to an
Unique identifier to the array of constants. See
Array of integers.
Number of integers in the array.
If the method succeeds, the return value is
Sets a floating-point number.
+Pointer to an
Unique identifier to the constant. See
Floating-point number.
If the method succeeds, the return value is
Sets an array of floating-point numbers.
+Pointer to an
Unique identifier to the array of constants. See
Array of floating-point numbers.
Number of floating-point values in the array.
If the method succeeds, the return value is
Sets a 4D vector.
+Pointer to an
Unique identifier to the vector constant. See
Pointer to a 4D vector.
If the method succeeds, the return value is
Sets an array of 4D vectors.
+Pointer to an
Unique identifier to the array of vector constants. See
Array of 4D vectors.
Number of vectors in the array.
If the method succeeds, the return value is
Sets a nontransposed matrix.
+Pointer to an
Unique identifier to the matrix of constants. See
Pointer to a nontransposed matrix. See
If the method succeeds, the return value is
Sets an array of nontransposed matrices.
+Pointer to an
Unique identifier to the array of constant matrices. See
Array of nontransposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
Sets an array of references to nontransposed matrices.
+Pointer to an
Unique identifier to an array of constant matrices. See
Array of references to nontransposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A nontransposed matrix contains row-major data; that is, each vector is contained in a row.
+Sets a transposed matrix.
+Pointer to an
Unique identifier to the matrix of constants. See
Pointer to a transposed matrix. See
If the method succeeds, the return value is
Sets an array of transposed matrices.
+Pointer to an
Unique identifier to the array of matrix constants. See
Array of transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
Sets an array of references to transposed matrices.
+Pointer to an
Unique identifier to the array of matrix constants. See
Array of references to transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
+Gets a reference to the buffer that contains the constant table.
+Gets the buffer size of the constant table.
+Gets a description of the constant table.
+Information about the properties of a display mode.
+This structure is used in various methods to create and manage Direct3D 9Ex devices (
The size of this structure. This should always be set to sizeof(
Width of the display mode.
Height of the display mode.
Refresh rate of the display mode.
Format of the display mode. See
Indicates whether the scanline order is progressive or interlaced. See
Describes the display mode.
+Screen width, in pixels.
Screen height, in pixels.
Refresh rate. The value of 0 indicates an adapter default.
Member of the
Applications use the methods of the
The
This interface inherits additional functionality from the
This interface, like all COM interfaces, inherits additional functionality from the
The LPDIRECT3DCUBETEXTURE9 and PDIRECT3DCubeTexture9 types are defined as references to the
typedef struct IDirect3DCubeTexture9 *LPDIRECT3DCUBETEXTURE9, *PDIRECT3DCUBETEXTURE9; +
Retrieves a description of one face of the specified cube texture level.
+Specifies a level of a mipmapped cube texture.
Pointer to a
The
Retrieves a cube texture map surface.
+Member of the
Specifies a level of a mipmapped cube texture.
Address of a reference to an
Calling this method will increase the internal reference count on the
Locks a rectangle on a cube texture resource.
+Member of the
Specifies a level of a mipmapped cube texture.
Pointer to a
Pointer to a rectangle to lock. Specified by a reference to a
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
You may not specify a subrect when using
If the method succeeds, the return value is
For performance reasons, dirty regions are only recorded for level zero of a texture. Dirty regions are automatically recorded when
Cube textures created with
The only lockable format for a depth-stencil texture is
Unlocks a rectangle on a cube texture resource.
+Member of the
Specifies a level of a mipmapped cube texture.
If the method succeeds, the return value is
Adds a dirty region to a cube texture resource.
+Member of the
Pointer to a
If the method succeeds, the return value is
For performance reasons, dirty regions are only recorded for level zero of a texture. For sublevels, it is assumed that the corresponding (scaled) rectangle or box is also dirty. Dirty regions are automatically recorded when
Using
Applications use the methods of the
The
This interface, like all COM interfaces, inherits the
The LPDIRECT3DDEVICE9 and PDIRECT3DDEVICE9 types are defined as references to the
typedef struct IDirect3DDevice9 *LPDIRECT3DDEVICE9, *PDIRECT3DDEVICE9; +
Reports the current cooperative-level status of the Direct3D device for a windowed or full-screen application.
+If the method succeeds, the return value is
If the device is lost but cannot be restored at the current time,
A call to
Returns an estimate of the amount of available texture memory.
+The function returns an estimate of the available texture memory.
The returned value is rounded to the nearest MB. This is done to reflect the fact that video memory estimates are never precise due to alignment and other issues that affect consumption by certain resources. Applications can use this value to make gross estimates of memory availability to make large-scale resource decisions such as how many levels of a mipmap to attempt to allocate, but applications cannot use this value to make small-scale decisions such as if there is enough memory left to allocate another resource.
+Evicts all managed resources, including both Direct3D and driver-managed resources.
+If the method succeeds, the return value is
This function causes only the
Returns an interface to the instance of the Direct3D object that created the device.
+Address of a reference to an
If the method succeeds, the return value is
Calling
Retrieves the capabilities of the rendering device.
+Pointer to a
If the method succeeds, the return value is
Retrieves the display mode's spatial resolution, color resolution, and refresh frequency.
+An unsigned integer specifying the swap chain.
Pointer to a
Retrieves the creation parameters of the device.
+Pointer to a
If the method succeeds, the return value is
You can query the AdapterOrdinal member of the returned
Sets properties for the cursor.
+X-coordinate offset (in pixels) that marks the center of the cursor. The offset is relative to the upper-left corner of the cursor. When the cursor is given a new position, the image is drawn at an offset from this new position determined by subtracting the hot spot coordinates from the position.
Y-coordinate offset (in pixels) that marks the center of the cursor. The offset is relative to the upper-left corner of the cursor. When the cursor is given a new position, the image is drawn at an offset from this new position determined by subtracting the hot spot coordinates from the position.
Pointer to an
If the method succeeds, the return value is
An operating system cursor is created and used under either of these conditions:
Otherwise, DirectX uses an emulated cursor. An application uses
It is recommended for applications to always trap WM_MOUSEMOVE events and call DXSetCursorPosition.
Direct3D cursor functions use either GDI cursor or software emulation, depending on the hardware. Users typically want to respond to a WM_SETCURSOR message. For example, they might want to write the message handler as follows:
case WM_SETCURSOR:
    // Turn off window cursor.
    SetCursor( NULL );
    m_pd3dDevice->ShowCursor( TRUE );
    return TRUE; // Prevent Windows from setting cursor to window class cursor.
    break;
Or, users might want to call the
The application can determine what hardware support is available for cursors by examining appropriate members of the
The cursor does not survive when the device is lost. This method must be called after the device is reset.
+Sets the cursor position and update options.
+The new X-position of the cursor in virtual desktop coordinates. See Remarks.
The new Y-position of the cursor in virtual desktop coordinates. See Remarks.
Specifies the update options for the cursor. Currently, only one flag is defined.
Value | Meaning |
---|---|
| Update cursor at the refresh rate. If this flag is specified, the system guarantees that the cursor will be updated at a minimum of half the display refresh rate, but never more frequently than the display refresh rate. Otherwise, the method delays cursor updates until the next |
?
When running in full-screen mode, screen space coordinates are the back buffer coordinates appropriately scaled to the current display mode. When running in windowed mode, screen space coordinates are the desktop coordinates. The cursor image is drawn at the specified position minus the hotspot-offset specified by the SetCursorProperties method.
If the cursor has been hidden by ShowCursor, the cursor is not drawn.
+Displays or hides the cursor.
+If bShow is TRUE, the cursor is shown. If bShow is
Value indicating whether the cursor was previously visible. TRUE if the cursor was previously visible, or
Direct3D cursor functions use either GDI cursor or software emulation, depending on the hardware. Users usually want to respond to a WM_SETCURSOR message. For example, the users might want to write the message handler like this:
case WM_SETCURSOR:
    // Turn off window cursor
    SetCursor( NULL );
    m_pd3dDevice->ShowCursor( TRUE );
    return TRUE; // prevent Windows from setting cursor to window class cursor
    break;
Or users might want to call the
Creates an additional swap chain for rendering multiple views.
+Pointer to a
Calling this method changes the value of members of the
Address of a reference to an
If the method succeeds, the return value is
There is always at least one swap chain (the implicit swap chain) for each device because Direct3D 9 has one swap chain as a property of the device.
Note that any given device can support only one full-screen swap chain.
Gets a reference to a swap chain.
+The swap chain ordinal value. For more information, see NumberOfAdaptersInGroup in
Pointer to an
Gets the number of implicit swap chains.
+Number of implicit swap chains. See Remarks.
Implicit swap chains are created by the device during
An application may create additional swap chains using
Resets the type, size, and format of the swap chain.
+Pointer to a
When switching to full-screen mode, Direct3D will try to find a desktop format that matches the back buffer format, so that back buffer and front buffer formats will be identical (to eliminate the need for color conversion).
When this method returns:
Possible return values include:
If a call to
Calling
There are two different types of swap chains: full-screen or windowed. If the new swap chain is full-screen, the adapter will be placed in the display mode that matches the new size.
Direct3D 9 applications can expect messages to be sent to them during this call (for example, before this call is returned); applications should take precautions not to call into Direct3D at this time. In addition, when
A call to
Pixel shaders and vertex shaders survive
When trying to reset more than one display adapter in a group, set pPresentationParameters to point to an array of
If a multihead device was created with
Presents the contents of the next buffer in the sequence of back buffers owned by the device.
+Pointer to a value that must be
Pointer to a value that must be
Pointer to a destination window whose client area is taken as the target for this presentation. If this value is
Value must be
Possible return values include:
If necessary, a stretch operation is applied to transfer the pixels within the source rectangle to the destination rectangle in the client area of the target window.
Present will fail, returning
Retrieves a back buffer from the device's swap chain.
+An unsigned integer specifying the swap chain.
Index of the back buffer object to return. Back buffers are numbered from 0 to the total number of back buffers minus one. A value of 0 returns the first back buffer, not the front buffer. The front buffer is not accessible through this method. Use
Stereo view is not supported in Direct3D 9, so the only valid value for this parameter is
Address of a reference to an
Calling this method will increase the internal reference count on the
Returns information describing the raster of the monitor on which the swap chain is presented.
+An unsigned integer specifying the swap chain.
Pointer to a
This method allows the use of GDI dialog boxes in full-screen mode applications.
+TRUE to enable GDI dialog boxes, and
If the method succeeds, the return value is
The GDI dialog boxes must be created as child to the device window. They should also be created within the same thread that created the device because this enables the parent window to manage redrawing the child window.
The method has no effect for windowed mode applications, but this setting will be respected if the application resets the device into full-screen mode. If SetDialogBoxMode succeeds in a windowed mode application, any subsequent reset to full-screen mode will be checked against the restrictions listed above. Also, SetDialogBoxMode causes all back buffers on the swap chain to be discarded, so an application is expected to refresh its content for all back buffers after this call.
+Sets the gamma correction ramp for the implicit swap chain. This method will affect the entire screen (not just the active window if you are running in windowed mode).
+Unsigned integer specifying the swap chain.
Indicates whether correction should be applied. Gamma correction results in a more consistent display, but can incur processing overhead and should not be used frequently. Short-duration effects, such as flashing the whole screen red, should not be calibrated, but long-duration gamma changes should be calibrated. One of the following values can be set:
Item | Description |
---|---|
D3DSGR_CALIBRATE | If a gamma calibrator is installed, the ramp will be modified before being sent to the device to account for the system and monitor response curves. If a calibrator is not installed, the ramp will be passed directly to the device. |
D3DSGR_NO_CALIBRATION | No gamma correction is applied. The supplied gamma table is transferred directly to the device. |
?
Pointer to a
There is always at least one swap chain (the implicit swap chain) for each device, because Direct3D 9 has one swap chain as a property of the device. The gamma ramp takes effect immediately; there is no wait for a vertical sync.
If the device does not support gamma ramps in the swap chain's current presentation mode (full-screen or windowed), no error return is given. Applications can check the
For windowed gamma correction presentation, use
Retrieves the gamma correction ramp for the swap chain.
+An unsigned integer specifying the swap chain.
Creates a texture resource.
+Width of the top-level of the texture, in pixels. The pixel dimensions of subsequent levels will be the truncated value of half of the previous level's pixel dimension (independently). Each dimension clamps at a size of 1 pixel. Thus, if the division by 2 results in 0, 1 will be taken instead.
Height of the top-level of the texture, in pixels. The pixel dimensions of subsequent levels will be the truncated value of half of the previous level's pixel dimension (independently). Each dimension clamps at a size of 1 pixel. Thus, if the division by 2 results in 0, 1 will be taken instead.
Number of levels in the texture. If this is zero, Direct3D will generate all texture sublevels down to 1 by 1 pixels for hardware that supports mipmapped textures. Call
Usage can be 0, which indicates no usage value. However, if usage is desired, use a combination of one or more
Member of the
Member of the
Pointer to an
Reserved. Set this parameter to
If the method succeeds, the return value is
An application can discover support for Automatic Generation of Mipmaps (Direct3D 9) in a particular format by calling
In Windows Vista CreateTexture can create a texture from a system memory reference allowing the application more flexibility over the use, allocation and deletion of the system memory. For example, an application could pass a GDI system memory bitmap reference and get a Direct3D texture interface around it. Using a system memory reference with CreateTexture has the following restrictions.
Creates a volume texture resource.
+Width of the top-level of the volume texture, in pixels. This value must be a power of two if the
Height of the top-level of the volume texture, in pixels. This value must be a power of two if the
Depth of the top-level of the volume texture, in pixels. This value must be a power of two if the
Number of levels in the texture. If this is zero, Direct3D will generate all texture sublevels down to 1x1 pixels for hardware that supports mipmapped volume textures. Call
Usage can be 0, which indicates no usage value. If usage is desired, use
Member of the
Member of the
Address of a reference to an
Reserved. Set this parameter to
If the method succeeds, the return value is
Creates a cube texture resource.
+Size of the edges of all the top-level faces of the cube texture. The pixel dimensions of subsequent levels of each face will be the truncated value of half of the previous level's pixel dimension (independently). Each dimension clamps at a size of 1 pixel. Thus, if the division by 2 results in 0 (zero), 1 will be taken instead.
Number of levels in each face of the cube texture. If this is zero, Direct3D will generate all cube texture sublevels down to 1x1 pixels for each face for hardware that supports mipmapped cube textures. Call
Usage can be 0, which indicates no usage value. However, if usage is desired, use a combination of one or more
Member of the
Member of the
Address of a reference to an
Reserved. Set this parameter to
If the method succeeds, the return value is
A mipmap (texture) is a collection of successively downsampled (mipmapped) surfaces. On the other hand, a cube texture (created by
An application can discover support for Automatic Generation of Mipmaps (Direct3D 9) in a particular format by calling
Creates a vertex buffer.
+Size of the vertex buffer, in bytes. For FVF vertex buffers, Length must be large enough to contain at least one vertex, but it need not be a multiple of the vertex size. Length is not validated for non-FVF buffers. See Remarks.
Usage can be 0, which indicates no usage value. However, if usage is desired, use a combination of one or more
Combination of
Member of the
Address of a reference to an
Reserved. Set this parameter to
If the method succeeds, the return value is
A vertex buffer can be used with either hardware or software vertex processing. This is determined by how the device and the vertex buffer are created.
When a device is created, CreateDevice uses the behavior flag to determine whether to process vertices in hardware or software. There are three possibilities:
Mixed-mode devices might need to switch between software and hardware processing (using
When a vertex buffer is created, CreateVertexBuffer uses the usage parameter to decide whether to process vertices in hardware or software.
To use a vertex buffer with a mixed mode device, create a single vertex buffer which can be used for both hardware or software processing. Use
The
When set to a nonzero value, which must be a valid FVF code, the FVF parameter indicates that the buffer content is to be characterized by an FVF code. A vertex buffer that is created with an FVF code is referred to as an FVF vertex buffer. For more information, see FVF Vertex Buffers (Direct3D 9).
Non-FVF buffers can be used to interleave data during multipass rendering or multitexture rendering in a single pass. To do this, one buffer contains geometry data and the others contain texture coordinates for each texture to be rendered. When rendering, the buffer containing the geometry data is interleaved with each of the buffers containing the texture coordinates. If FVF buffers were used instead, each of them would need to contain identical geometry data in addition to the texture coordinate data specific to each texture rendered. This would result in either a speed or memory penalty, depending on the strategy used. For more information about texture coordinates, see Texture Coordinates (Direct3D 9).
+Creates an index buffer.
+Size of the index buffer, in bytes.
Usage can be 0, which indicates no usage value. However, if usage is desired, use a combination of one or more
Member of the
Item | Description |
---|---|
| Indices are 16 bits each. |
| Indices are 32 bits each. |
?
Member of the
Address of a reference to an
This parameter can be used in Direct3D 9 for Windows Vista to share resources; set it to
If the method succeeds, the return value is
Index buffers are memory resources used to hold indices; they are similar to both surfaces and vertex buffers. The use of index buffers enables Direct3D to avoid unnecessary data copying and to place the buffer in the optimal memory type for the expected usage.
To use index buffers, create an index buffer, lock it, fill it with indices, unlock it, pass it to
The MaxVertexIndex member of the
Creates a render-target surface.
+Width of the render-target surface, in pixels.
Height of the render-target surface, in pixels.
Member of the
Member of the
Quality level. The valid range is between zero and one less than the level returned by pQualityLevels used by
Render targets are not lockable unless the application specifies TRUE for Lockable.
Note that lockable render targets reduce performance on some graphics hardware. The readback performance (moving data from video memory to system memory) depends on the type of hardware used (AGP vs. PCI Express) and is usually far lower than upload performance (moving data from system to video memory). If you need read access to render targets, use GetRenderTargetData instead of lockable render targets.
Reserved. Set this parameter to
Address of a reference to an
Render-target surfaces are placed in the
The creation of lockable, multisampled render targets is not supported.
+Creates a depth-stencil resource.
+Width of the depth-stencil surface, in pixels.
Height of the depth-stencil surface, in pixels.
Member of the
Member of the
Quality level. The valid range is between zero and one less than the level returned by pQualityLevels used by
Set this flag to TRUE to enable z-buffer discarding, and
This flag has the same behavior as the constant,
Reserved. Set this parameter to
Address of a reference to an
The memory class of the depth-stencil buffer is always
Copies rectangular subsets of pixels from one surface to another.
+Pointer to an
Pointer to a rectangle on the source surface. Specifying
Pointer to an
Pointer to the upper left corner of the destination rectangle. Specifying
If the method succeeds, the return value is
This method is similar to CopyRects in DirectX 8.
This function has the following restrictions.
The following table shows the supported combinations.
Dest formats | |||||
---|---|---|---|---|---|
Texture | RT texture | RT | Off-screen plain | ||
Src formats | Texture | Yes | Yes | Yes* | Yes |
RT texture | No | No | No | No | |
RT | No | No | No | No | |
Off-screen plain | Yes | Yes | Yes | Yes |
?
* If the driver does not support the requested copy, it will be emulated using lock and copy.
If the application needs to copy data from a
Updates the dirty portions of a texture.
+Pointer to an
Pointer to an
If the method succeeds, the return value is
You can dirty a portion of a texture by locking it, or by calling one of the following methods.
For performance reasons, dirty regions are only recorded for level zero of a texture. For sublevels, it is assumed that the corresponding (scaled) rectangle or box is also dirty. Dirty regions are automatically recorded when LockRect or
This method fails if the textures are of different types, if their bottom-level buffers are of different sizes, or if their matching levels do not match. For example, consider a six-level source texture with the following dimensions.
32x16, 16x8, 8x4, 4x2, 2x1, 1x1
This six-level source texture could be the source for the following one-level destination.
1x1
For the following two-level destination.
2x1, 1x1
Or, for the following three-level destination.
4x2, 2x1, 1x1
In addition, this method will fail if the textures are of different formats. If the destination texture has fewer levels than the source, only the matching levels are copied. If the source texture has fewer levels than the destination, the method will fail.
If the source texture has dirty regions, the copy can be optimized by restricting the copy to only those regions. It is not guaranteed that only those bytes marked dirty will be copied.
Here are the possibilities for source and destination surface combinations:
Copies the render-target data from device memory to system memory.
+Pointer to an
Pointer to an
If the method succeeds, the return value is
The destination surface must be either an off-screen plain surface or a level of a texture (mipmap or cube texture) created with
The source surface must be a regular render target or a level of a render-target texture (mipmap or cube texture) created with POOL_DEFAULT.
This method will fail if:
Generates a copy of the device's front buffer and places that copy in a system memory buffer provided by the application.
+An unsigned integer specifying the swap chain.
Pointer to an
For windowed mode, the size of the destination surface should be the size of the desktop. For full-screen mode, the size of the destination surface should be the screen size.
If the method succeeds, the return value is
The buffer pointed to by pDestSurface will be filled with a representation of the front buffer, converted to the standard 32 bits per pixel format
This method is the only way to capture an antialiased screen shot.
This function is very slow, by design, and should not be used in any performance-critical path.
For more information, see Lost Devices and Retrieved Data.
+Copy the contents of the source rectangle to the destination rectangle. The source rectangle can be stretched and filtered by the copy. This function is often used to change the aspect ratio of a video stream.
+Pointer to the source surface. See
Pointer to the source rectangle. A
Pointer to the destination surface. See
Pointer to the destination rectangle. A
Filter type. Allowable values are
If the method succeeds, the return value is
StretchRect Restrictions
Additional Restrictions for Depth and Stencil Surfaces
Using StretchRect to downsample a Multisample Rendertarget
You can use StretchRect to copy from one rendertarget to another. If the source rendertarget is multisampled, this results in downsampling the source rendertarget. For instance you could:
Note that use of the extra surface involved in using StretchRect to downsample a Multisample Rendertarget will result in a performance hit.
Driver Support
There are many restrictions as to which surface combinations are valid for StretchRect. Factors include whether the driver is a Direct3D 9 driver or older, and whether the operation will result in stretching/shrinking. Since applications are not expected to recognize if the driver is a Direct3D 9 driver or not, the runtime will automatically set a new cap,
DirectX 8 Driver (no stretching) | |||||
---|---|---|---|---|---|
Dest formats | |||||
Texture | RT texture | RT | Off-screen plain | ||
Src formats | Texture | No | No | No | No |
RT texture | No | Yes | Yes | No | |
RT | No | Yes | Yes | No | |
Off-screen plain | Yes | Yes | Yes | Yes |
?
DirectX 8 Driver (stretching) | |||||
---|---|---|---|---|---|
Dest formats | |||||
Texture | RT texture | RT | Off-screen plain | ||
Src formats | Texture | No | No | No | No |
RT texture | No | No | No | No | |
RT | No | Yes | Yes | No | |
Off-screen plain | No | Yes | Yes | No |
?
Direct3D 9 Driver (no stretching) | |||||
---|---|---|---|---|---|
Dest formats | |||||
Texture | RT texture | RT | Off-screen plain | ||
Src formats | Texture | No | Yes | Yes | No |
RT texture | No | Yes | Yes | No | |
RT | No | Yes | Yes | No | |
Off-screen plain | No | Yes | Yes | Yes |
?
Direct3D 9 Driver (stretching) | |||||
---|---|---|---|---|---|
Dest formats | |||||
Texture | RT texture | RT | Off-screen plain | ||
Src formats | Texture | No | Yes | Yes | No |
RT texture | No | Yes | Yes | No | |
RT | No | Yes | Yes | No | |
Off-screen plain | No | Yes | Yes | No |
?
+Allows an application to fill a rectangular area of a
Pointer to the surface to be filled.
Pointer to the source rectangle. Using
Color used for filling.
If the method succeeds, the return value is
This method can only be applied to a render target, a render-target texture surface, or an off-screen plain surface with a pool type of
When using a DirectX 7 or DirectX 8.x driver, the only YUV formats supported are
Create an off-screen surface.
+Width of the surface.
Height of the surface.
Format of the surface. See
Surface pool type. See
Reserved. Set this parameter to
Pointer to the
Off-screen plain surfaces are always lockable, regardless of their pool types.
+Sets a new color buffer for the device.
+Index of the render target. See Remarks.
Pointer to a new color buffer. If
If the method succeeds, the return value is
The device can support multiple render targets. The number of render targets supported by a device is contained in the NumSimultaneousRTs member of
Setting a new render target will cause the viewport (see Viewports and Clipping (Direct3D 9)) to be set to the full size of the new render target.
Some hardware tests the compatibility of the depth stencil buffer with the color buffer. If this is done, it is only done in a debug build.
Restrictions for using this method include the following:
These restrictions are validated only when using the debug runtime when any of the
Cube textures differ from other surfaces in that they are collections of surfaces. To call
Retrieves a render-target surface.
+Index of the render target. See Remarks.
Address of a reference to an
Typically, methods that return state will not work on a device that is created using
The device can now support multiple render targets. The number of render targets supported by a device is contained in the NumSimultaneousRTs member of
Calling this method will increase the internal reference count on the
Sets the depth stencil surface.
+Address of a reference to an
If the method succeeds, the return value is
Restrictions for using this method include the following:
These restrictions are validated only when using the debug runtime when any of the
Cube textures differ from other surfaces in that they are collections of surfaces. To call
Gets the depth-stencil surface owned by the Direct3DDevice object.
+Address of a reference to an
If the method succeeds, the return value is
Calling this method will increase the internal reference count on the
Begins a scene.
+If the method succeeds, the return value is
Applications must call
If
There should be one
Ends a scene that was begun by calling
If the method succeeds, the return value is
When this method succeeds, the scene has been queued up for rendering by the driver. This is not a synchronous method, so the scene is not guaranteed to have completed rendering when this method returns.
Applications must call
If
There should be at most one
Clears one or more surfaces such as a render target, multiple render targets, a stencil buffer, and a depth buffer.
+If the method succeeds, the return value is
Use this method to clear a surface including: a render target, all render targets in an MRT, a stencil buffer, or a depth buffer. Flags determines how many surfaces are cleared. Use pRects to clear a subset of a surface defined by an array of rectangles.
Sets a single device transformation-related state.
+Device-state variable that is being modified. This parameter can be any member of the
Pointer to a
If the method succeeds, the return value is
Retrieves a matrix describing a transformation state.
+Device state variable that is being modified. This parameter can be any member of the
Pointer to a
This method will not return device state for a device that is created using
Multiplies a device's world, view, or projection matrices by a specified matrix.
+Member of the
Pointer to a
If the method succeeds, the return value is
The multiplication order is pMatrix times State.
An application might use the
shoulder_transformation
    upper_arm geometry
    elbow_transformation
        lower_arm geometry
        wrist_transformation
            hand geometry
An application might use the following series of calls to render this hierarchy. Not all the parameters are shown in this pseudocode.
SetTransform(D3DTS_WORLDMATRIX(0), shoulder_transform)
DrawPrimitive(upper_arm)
MultiplyTransform(D3DTS_WORLDMATRIX(0), elbow_transform)
DrawPrimitive(lower_arm)
MultiplyTransform(D3DTS_WORLDMATRIX(0), wrist_transform)
DrawPrimitive(hand)
Sets the viewport parameters for the device.
+Pointer to a
If the method succeeds, the return value is
Direct3D sets the following default values for the viewport.
D3DVIEWPORT9 vp;
vp.X      = 0;
vp.Y      = 0;
vp.Width  = RenderTarget.Width;
vp.Height = RenderTarget.Height;
vp.MinZ   = 0.0f;
vp.MaxZ   = 1.0f;
To draw multiple views within a scene, repeat the
Retrieves the viewport parameters currently set for the device.
+Pointer to a
If the method succeeds, the return value is
Typically, methods that return state will not work on a device that is created using
Sets the material properties for the device.
+Pointer to a
If the method succeeds, the return value is
Retrieves the current material properties for the device.
+Pointer to a
If the method succeeds, the return value is
This method will not return device state for a device that is created using
Assigns a set of lighting properties for this device.
+Zero-based index of the set of lighting properties to set. If a set of lighting properties exists at this index, it is overwritten by the new properties specified in pLight.
Pointer to a
If the method succeeds, the return value is
Set light properties by preparing a
The system allocates memory to accommodate a set of lighting properties each time you call the
The following example prepares and sets properties for a white point-light whose emitted light will not attenuate over distance.
// Assume d3dDevice is a valid reference to an IDirect3DDevice9 interface.
D3DLIGHT9 d3dLight;
HRESULT   hr;

// Initialize the structure.
ZeroMemory(&d3dLight, sizeof(d3dLight));

// Set up a white point light.
d3dLight.Type = D3DLIGHT_POINT;
d3dLight.Diffuse.r  = 1.0f;
d3dLight.Diffuse.g  = 1.0f;
d3dLight.Diffuse.b  = 1.0f;
d3dLight.Ambient.r  = 1.0f;
d3dLight.Ambient.g  = 1.0f;
d3dLight.Ambient.b  = 1.0f;
d3dLight.Specular.r = 1.0f;
d3dLight.Specular.g = 1.0f;
d3dLight.Specular.b = 1.0f;

// Position it high in the scene and behind the user.
// Remember, these coordinates are in world space, so
// the user could be anywhere in world space, too.
// For the purposes of this example, assume the user
// is at the origin of world space.
d3dLight.Position.x = 0.0f;
d3dLight.Position.y = 1000.0f;
d3dLight.Position.z = -100.0f;

// Don't attenuate.
d3dLight.Attenuation0 = 1.0f;
d3dLight.Range        = 1000.0f;

// Set the property information for the first light.
hr = d3dDevice->SetLight(0, &d3dLight);
if (SUCCEEDED(hr))
    ; // Handle Success
else
    ; // Handle failure
Enable a light source by calling the
Retrieves a set of lighting properties that this device uses.
+Zero-based index of the lighting property set to retrieve. This method will fail if a lighting property has not been set for this index by calling the
Pointer to a
This method will not return device state for a device that is created using
Retrieve all the properties for an existing light source by calling the
// Assume d3dDevice is a valid reference to an IDirect3DDevice9 interface.
HRESULT hr;
D3DLIGHT9 light;

// Get the property information for the first light.
hr = pd3dDevice->GetLight(0, &light);
if (SUCCEEDED(hr))
    ; // Handle Success
else
    ; // Handle failure
If you supply an index outside the range of the light sources assigned in the device, the
When you assign a set of light properties for a light source in a scene, the light source can be activated by calling the
// Assume d3dDevice is a valid reference to an interface. + hr; hr = pd3dDevice->LightEnable(0, TRUE); + if (SUCCEEDED(hr)) // Handle Success + else // Handle failure +
Check the MaxActiveLights member of the
If you enable or disable a light that has no properties that are set with
Enables or disables a set of lighting parameters within a device.
+Zero-based index of the set of lighting parameters that are the target of this method.
Value that indicates if the set of lighting parameters are being enabled or disabled. Set this parameter to TRUE to enable lighting with the parameters at the specified index, or
If the method succeeds, the return value is
If a value for LightIndex is outside the range of the light property sets assigned within the device, the
Member | Default |
---|---|
Type | |
Diffuse | (R:1, G:1, B:1, A:0) |
Specular | (R:0, G:0, B:0, A:0) |
Ambient | (R:0, G:0, B:0, A:0) |
Position | (0, 0, 0) |
Direction | (0, 0, 1) |
Range | 0 |
Falloff | 0 |
Attenuation0 | 0 |
Attenuation1 | 0 |
Attenuation2 | 0 |
Theta | 0 |
Phi | 0 |
?
+Retrieves the activity status - enabled or disabled - for a set of lighting parameters within a device.
+Zero-based index of the set of lighting parameters that are the target of this method.
Pointer to a variable to fill with the status of the specified lighting parameters. After the call, a nonzero value at this address indicates that the specified lighting parameters are enabled; a value of 0 indicates that they are disabled.
This method will not return device state for a device that is created using
Sets the coefficients of a user-defined clipping plane for the device.
+Index of the clipping plane for which the plane equation coefficients are to be set.
Pointer to an address of a four-element array of values that represent the clipping plane coefficients to be set, in the form of the general plane equation. See Remarks.
If the method succeeds, the return value is
The coefficients that this method sets take the form of the general plane equation. If the values in the array at pPlane were labeled A, B, C, and D in the order that they appear in the array, they would fit into the general plane equation so that Ax + By + Cz + Dw = 0. A point with homogeneous coordinates (x, y, z, w) is visible in the half space of the plane if Ax + By + Cz + Dw >= 0. Points that exist behind the clipping plane are clipped from the scene.
When the fixed function pipeline is used the plane equations are assumed to be in world space. When the programmable pipeline is used the plane equations are assumed to be in the clipping space (the same space as output vertices).
This method does not enable the clipping plane equation being set. To enable a clipping plane, set the corresponding bit in the DWORD value applied to the
Retrieves the coefficients of a user-defined clipping plane for the device.
+Index of the clipping plane for which the plane equation coefficients are retrieved.
Pointer to a four-element array of values that represent the coefficients of the clipping plane in the form of the general plane equation. See Remarks.
This method will not return device state for a device that is created using
The coefficients that this method reports take the form of the general plane equation. If the values in the array at pPlane were labeled A, B, C, and D in the order that they appear in the array, they would fit into the general plane equation so that Ax + By + Cz + Dw = 0. A point with homogeneous coordinates (x, y, z, w) is visible in the half space of the plane if Ax + By + Cz + Dw >= 0. Points that exist on or behind the clipping plane are clipped from the scene.
The plane equation used by this method exists in world space and is set by a previous call to the
Sets a single device render-state parameter.
+Device state variable that is being modified. This parameter can be any member of the
New value for the device render state to be set. The meaning of this parameter is dependent on the value specified for State. For example, if State were
If the method succeeds, the return value is
Retrieves a render-state value for a device.
+Device state variable that is being queried. This parameter can be any member of the
Pointer to a variable that receives the value of the queried render state variable when the method returns.
If the method succeeds, the return value is
This method will not return device state for a device that is created using
Creates a new state block that contains the values for all device states, vertex-related states, or pixel-related states.
+Type of state data that the method should capture. This parameter can be set to a value defined in the
Pointer to a state block interface. See
If the method succeeds, the return value is
Vertex-related device states typically refer to those states that affect how the system processes vertices. Pixel-related states generally refer to device states that affect how the system processes pixel or depth-buffer data during rasterization. Some states are contained in both groups.
Differences between Direct3D 9 and Direct3D 10: In Direct3D 9, a state block contains state data, for the states it was requested to capture, when the object is created. To change the value of the state block, call |
?
+Signals Direct3D to begin recording a device-state block.
+If the method succeeds, the return value is
Applications can ensure that all recorded states are valid by calling the
The following methods can be recorded in a state block, after calling
The ordering of state changes in a state block is not guaranteed. If the same state is specified multiple times in a state block, only the last value is used.
+Signals Direct3D to stop recording a device-state block and retrieve a reference to the state block interface.
+Pointer to a state block interface. See
Sets the clip status.
+Pointer to a
If the method succeeds, the return value is
Clip status is used during software vertex processing. Therefore, this method is not supported on pure or nonpure hardware processing devices. For more information about pure devices, see
When clipping is enabled during vertex processing (by
Clip status is not updated by
Retrieves the clip status.
+ Pointer to a
If the method succeeds, the return value is
When clipping is enabled during vertex processing (by
Clip status is not updated by
Clip status is used during software vertex processing. Therefore, this method is not supported on pure or nonpure hardware processing devices. For more information about pure devices, see
Retrieves a texture assigned to a stage for a device.
+Stage identifier of the texture to retrieve. Stage identifiers are zero-based.
Address of a reference to an
Typically, methods that return state will not work on a device that is created using
Calling this method will increase the internal reference count on the
Assigns a texture to a stage for a device.
+Zero based sampler number. Textures are bound to samplers; samplers define sampling state such as the filtering mode and the address wrapping mode. Textures are referenced differently by the programmable and the fixed function pipeline:
There are two other special cases for stage/sampler numbers.
Pointer to an
If the method succeeds, the return value is
SetTexture is not allowed if the texture is created with a pool type of
Retrieves a state value for an assigned texture.
+Stage identifier of the texture for which the state is retrieved. Stage identifiers are zero-based. Devices can have up to eight set textures, so the maximum value allowed for Stage is 7.
Texture state to retrieve. This parameter can be any member of the
Pointer to a variable to fill with the retrieved state value. The meaning of the retrieved value is determined by the Type parameter.
If the method succeeds, the return value is
This method will not return device state for a device that is created using
Sets the state value for the currently assigned texture.
+Stage identifier of the texture for which the state value is set. Stage identifiers are zero-based. Devices can have up to eight set textures, so the maximum value allowed for Stage is 7.
Texture state to set. This parameter can be any member of the
State value to set. The meaning of this value is determined by the Type parameter.
If the method succeeds, the return value is
Gets the sampler state value.
+The sampler stage index.
This parameter can be any member of the
State value to get. The meaning of this value is determined by the Type parameter.
If the method succeeds, the return value is
This method will not return device state for a device that is created using
Sets the sampler state value.
+The sampler stage index. For more info about sampler stage, see Sampling Stage Registers in vs_3_0 (DirectX HLSL).
This parameter can be any member of the
State value to set. The meaning of this value is determined by the Type parameter.
If the method succeeds, the return value is
Reports the device's ability to render the current texture-blending operations and arguments in a single pass.
+Pointer to a DWORD value to fill with the number of rendering passes needed to complete the desired effect through multipass rendering.
If the method succeeds, the return value is
The
Current hardware does not necessarily implement all possible combinations of operations and arguments. You can determine whether a particular blending operation can be performed with given arguments by setting the desired blending operation, and then calling the
The
For best performance, call
Using diffuse iterated values, either as an argument or as an operation (D3DTA_DIFFUSE or D3DTOP_BLENDDIFFUSEALPHA) is rarely supported on current hardware. Most hardware can introduce iterated color data only at the last texture operation stage.
Try to specify the texture (
Many cards do not support use of diffuse or scalar values at arbitrary texture stages. Often, these are available only at the first or last texture-blending stage.
Many cards do not have a blending unit associated with the first texture that is capable of more than replicating alpha to color channels or inverting the input. Therefore, your application might need to use only the second texture stage, if possible. On such hardware, the first unit is presumed to be in its default state, which has the first color argument set to
Operations on the output alpha that are more intricate than or substantially different from the color operations are less likely to be supported.
Some hardware does not support simultaneous use of
Many cards do not support simultaneous use of multiple textures and mipmapped trilinear filtering. If trilinear filtering has been requested for a texture involved in multitexture blending operations and validation fails, turn off trilinear filtering and revalidate. In this case, you might want to perform multipass rendering instead.
+Sets palette entries.
+An ordinal value identifying the particular palette upon which the operation is to be performed.
Pointer to a
If the method succeeds, the return value is
For Direct3D 9 applications, any palette sent to this method must conform to the
A single logical palette is associated with the device, and is shared by all texture stages.
+Retrieves palette entries.
+An ordinal value identifying the particular palette to retrieve.
Pointer to a
If the method succeeds, the return value is
For more information about
Note: As of Direct3D 9, the peFlags member of the
Sets the current texture palette.
+Value that specifies the texture palette to set as the current texture palette.
If the method succeeds, the return value is
A single logical palette is associated with the device, and is shared by all texture stages.
+Retrieves the current texture palette.
+Pointer to a returned value that identifies the current texture palette.
If the method succeeds, the return value is
Sets the scissor rectangle.
+Pointer to a
If the method succeeds, the return value is
The scissor rectangle is used as a rectangular clipping region.
See Rectangles (Direct3D 9) for further information on the use of rectangles in DirectX.
+Gets the scissor rectangle.
+Returns a reference to a
If the method succeeds, the return value is
The scissor rectangle is used as a rectangular clipping region.
See Rectangles (Direct3D 9) for further information on the use of rectangles in DirectX.
+Use this method to switch between software and hardware vertex processing.
+TRUE to specify software vertex processing;
If the method succeeds, the return value is
The restrictions for changing modes are as follows:
An application can create a mixed-mode device to use both the software vertex processing and the hardware vertex processing. To switch between the two vertex processing modes in DirectX 8.x, use IDirect3DDevice8::SetRenderState with the render state D3DRS_SOFTWAREVERTEXPROCESSING and the appropriate DWORD argument. The drawback of the render state approach was the difficulty in defining the semantics for state blocks. Applications and the runtime had to do extra work and be careful while recording and playing back state blocks.
In Direct3D 9, use SetSoftwareVertexProcessing instead. This new API is not recorded by StateBlocks.
+Gets the vertex processing (hardware or software) mode.
+Returns TRUE if software vertex processing is set. Otherwise, it returns
An application can create a mixed-mode device to use both the software vertex processing and the hardware vertex processing. To switch between the two vertex processing modes in DirectX 8.x, use
In Direct3D 9, use
Enable or disable N-patches.
+Specifies the number of subdivision segments. If the number of segments is less than 1.0, N-patches are disabled. The default value is 0.0.
If the method succeeds, the return value is
Gets the N-patch mode segments.
+Specifies the number of subdivision segments. If the number of segments is less than 1.0, N-patches are disabled. The default value is 0.0.
Renders a sequence of nonindexed, geometric primitives of the specified type from the current set of data input streams.
+Member of the
Index of the first vertex to load. Beginning at StartVertex the correct number of vertices will be read out of the vertex buffer.
Number of primitives to render. The maximum number of primitives allowed is determined by checking the MaxPrimitiveCount member of the
If the method succeeds, the return value is
When converting a legacy application to Direct3D 9, you must add a call to either
Based on indexing, renders the specified geometric primitive into an array of vertices.
+Member of the
Offset from the start of the vertex buffer to the first vertex. See Scenario 4.
Minimum vertex index for vertices used during this call. This is a zero based index relative to BaseVertexIndex.
Number of vertices used during this call. The first vertex is located at index: BaseVertexIndex + MinIndex.
Index of the first index to use when accessing the vertex buffer. Beginning at StartIndex to index vertices from the vertex buffer.
Number of primitives to render. The number of vertices used is a function of the primitive count and the primitive type. The maximum number of primitives allowed is determined by checking the MaxPrimitiveCount member of the
If the method succeeds, the return value is
This method draws indexed primitives from the current set of data input streams. MinIndex and all the indices in the index stream are relative to the BaseVertexIndex.
The MinIndex and NumVertices parameters specify the range of vertex indices used for each
The
When converting a legacy application to Direct3D 9, you must add a call to either
Renders data specified by a user memory reference as a sequence of geometric primitives of the specified type.
+Member of the
Number of primitives to render. The maximum number of primitives allowed is determined by checking the MaxPrimitiveCount member of the
User memory reference to the vertex data.
The number of bytes of data for each vertex. This value may not be 0.
If the method succeeds, the return value is
This method is intended for use in applications that are unable to store their vertex data in vertex buffers. This method supports only a single vertex stream. The effect of this call is to use the provided vertex data reference and stride for vertex stream 0. It is invalid to have the declaration of the current vertex shader refer to vertex streams other than stream 0.
Following any
The vertex data passed to
When converting a legacy application to Direct3D 9, you must add a call to either
Renders the specified geometric primitive with data specified by a user memory reference.
+Member of the
Minimum vertex index. This is a zero-based index.
Number of vertices used during this call. The first vertex is located at index: MinVertexIndex.
Number of primitives to render. The maximum number of primitives allowed is determined by checking the MaxPrimitiveCount member of the
User memory reference to the index data.
Member of the
User memory reference to the vertex data. The vertex data must be in stream 0.
The number of bytes of data for each vertex. This value may not be 0.
If the method succeeds, the return value is
This method is intended for use in applications that are unable to store their vertex data in vertex buffers. This method supports only a single vertex stream, which must be declared as stream 0.
Following any
The vertex data passed to
When converting a legacy application to Direct3D 9, you must add a call to either
Applies the vertex processing defined by the vertex shader to the set of input data streams, generating a single stream of interleaved vertex data to the destination vertex buffer.
+Index of first vertex to load.
Index of first vertex in the destination vertex buffer into which the results are placed.
Number of vertices to process.
Pointer to an
Pointer to an
Processing options. Set this parameter to 0 for default processing. Set to D3DPV_DONOTCOPYDATA to prevent the system from copying vertex data not affected by the vertex operation into the destination buffer. The D3DPV_DONOTCOPYDATA value may be combined with one or more
If the method succeeds, the return value is
The order of operations for this method is as follows:
The destination vertex buffer, pDestBuffer, must be created with a nonzero FVF parameter in
When Direct3D generates texture coordinates, or copies or transforms input texture coordinates, and the output texture coordinate format defines more texture coordinate components than Direct3D generates, Direct3D does not change these extra components.
+Create a vertex shader declaration from the device and the vertex elements.
+An array of
Pointer to an
If the method succeeds, the return value is
See the Vertex Declaration (Direct3D 9) page for a detailed description of how to map vertex declarations between different versions of DirectX.
+Sets a Vertex Declaration (Direct3D 9).
+If the method succeeds, the return value is
A vertex declaration is an
Gets a vertex shader declaration.
+Pointer to an
If the method succeeds, the return value is
Sets the current vertex stream declaration.
+DWORD containing the fixed function vertex type. For more information, see
If the method succeeds, the return value is
Here are the steps necessary to initialize and use vertices that have a position, diffuse and specular color, and texture coordinates:
struct LVertex + { FLOAT x, y, z;specular, diffuse; FLOAT tu, tv; + }; const DWORD VertexFVF = ( | | | ); +
g_d3dDevice->CreateVertexBuffer( 4*sizeof(LVertex),, VertexFVF, , &pBigSquareVB, null ); +
LVertex * v; + pBigSquareVB->Lock( 0, 0, (BYTE**)&v, 0 ); v[0].x = 0.0f; v[0].y = 10.0; v[0].z = 10.0f; + v[0].diffuse = 0xffff0000; + v[0].specular = 0xff00ff00; + v[0].tu = 0.0f; v[0].tv = 0.0f; v[1].x = 0.0f; v[1].y = 0.0f; v[1].z = 10.0f; + v[1].diffuse = 0xff00ff00; + v[1].specular = 0xff00ffff; + v[1].tu = 0.0f; v[1].tv = 0.0f; v[2].x = 10.0f; v[2].y = 10.0f; v[2].z = 10.0f; + v[2].diffuse = 0xffff00ff; + v[2].specular = 0xff000000; + v[2].tu = 0.0f; v[2].tv = 0.0f; v[3].x = 0.0f; v[3].y = 10.0f; v[3].z = 10.0f; + v[3].diffuse = 0xffffff00; + v[3].specular = 0xffff0000; + v[3].tu = 0.0f; v[3].tv = 0.0f; pBigSquareVB->Unlock(); +
g_d3dDevice->SetFVF(VertexFVF); + g_d3dDevice->SetStreamSource(0, pBigSquareVB, 0, sizeof(LVertex)); + g_d3dDevice->DrawPrimitive(, 0 ,2); +
Here are the steps necessary to initialize and use vertices that have a position, a normal, and texture coordinates:
struct Vertex + { FLOAT x, y, z; FLOAT nx, ny, nz; FLOAT tu, tv; + }; const DWORD VertexFVF = (| | ); +
Vertex * v; + pBigSquareVB->Lock(0, 0, (BYTE**)&v, 0); v[0].x = 0.0f; v[0].y = 10.0; v[0].z = 10.0f; + v[0].nx = 0.0f; v[0].ny = 1.0f; v[0].nz = 0.0f; + v[0].tu = 0.0f; v[0].tv = 0.0f; v[1].x = 0.0f; v[1].y = 0.0f; v[1].z = 10.0f; + v[1].nx = 0.0f; v[1].ny = 1.0f; v[1].nz = 0.0f; + v[1].tu = 0.0f; v[1].tv = 0.0f; v[2].x = 10.0f; v[2].y = 10.0f; v[2].z = 10.0f; + v[2].nx = 0.0f; v[2].ny = 1.0f; v[2].nz = 0.0f; + v[2].tu = 0.0f; v[2].tv = 0.0f; v[3].x = 0.0f; v[3].y = 10.0f; v[3].z = 10.0f; + v[3].nx = 0.0f; v[3].ny = 1.0f; v[3].nz = 0.0f; + v[3].tu = 0.0f; v[3].tv = 0.0f; pBigSquareVB->Unlock(); +
Gets the fixed vertex function declaration.
+A DWORD reference to the fixed function vertex type. For more information, see
If the method succeeds, the return value is
The fixed vertex function declaration is a set of FVF flags that determine how vertices processed by the fixed function pipeline will be used.
+Creates a vertex shader.
+Pointer to an array of tokens that represents the vertex shader, including any embedded debug and symbol table information.
Pointer to the returned vertex shader interface (see
If the method succeeds, the return value is
When a device is created,
For an example using
Sets the vertex shader.
+Vertex shader interface. For more information, see
If the method succeeds, the return value is
To set a fixed-function vertex shader (after having set a programmable vertex shader), call
Retrieves the currently set vertex shader.
+Pointer to a vertex shader interface.
If the method succeeds, the return value is
Typically, methods that return state will not work on a device that is created using
Sets a floating-point vertex shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four float vectors in the array of constants.
If the method succeeds, the return value is
Gets a floating-point vertex shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four float vectors in the array of constants.
If the method succeeds, the return value is
Sets an integer vertex shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four integer vectors in the array of constants.
If the method succeeds, the return value is
Gets an integer vertex shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four integer vectors in the array of constants.
If the method succeeds, the return value is
Sets a Boolean vertex shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of boolean values in the array of constants.
If the method succeeds, the return value is
Gets a Boolean vertex shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of Boolean values in the array of constants.
If the method succeeds, the return value is
Binds a vertex buffer to a device data stream. For more information, see Setting the Stream Source (Direct3D 9).
+If the method succeeds, the return value is
When a FVF vertex shader is used, the stride of the vertex stream must match the vertex size, computed from the FVF. When a declaration is used, the stride should be greater than or equal to the stream size computed from the declaration.
When calling SetStreamSource, the stride is normally required to be equal to the vertex size. However, there are times when you may want to draw multiple instances of the same or similar geometry (such as when using instancing to draw). For this case, use a zero stride to tell the runtime not to increment the vertex buffer offset (i.e., use the same vertex data for all instances). For more information about instancing, see Efficiently Drawing Multiple Instances of Geometry (Direct3D 9).
+Retrieves a vertex buffer bound to the specified data stream.
+Specifies the data stream, in the range from 0 to the maximum number of streams minus one.
Address of a reference to an
Pointer containing the offset from the beginning of the stream to the beginning of the vertex data. The offset is measured in bytes. See Remarks.
Pointer to a returned stride of the component, in bytes. See Remarks.
If the method succeeds, the return value is
A stream is defined as a uniform array of component data, where each component consists of one or more elements representing a single entity such as position, normal, color, and so on.
When a FVF vertex shader is used, the stride of the vertex stream must match the vertex size, computed from the FVF. When a declaration is used, the stride should be greater than or equal to the stream size computed from the declaration.
Calling this method will increase the internal reference count on the
Sets the stream source frequency divider value. This may be used to draw several instances of geometry.
+Stream source number.
This parameter may have two different values. See remarks.
If the method succeeds, the return value is
There are two constants defined in d3d9types.h that are designed to use with SetStreamSourceFreq:
Gets the stream source frequency divider value.
+Stream source number.
Returns the frequency divider value.
If the method succeeds, the return value is
Vertex shaders can now be invoked more than once per vertex. See Drawing Non-Indexed Geometry.
+Sets index data.
+Pointer to an
If the method succeeds, the return value is
When an application no longer holds a reference to this interface, the interface will automatically be freed.
The
Retrieves index data.
+Address of a reference to an
If the method succeeds, the return value is
Calling this method will increase the internal reference count on the
Creates a pixel shader.
+Pointer to the pixel shader function token array, specifying the blending operations. This value cannot be
Pointer to the returned pixel shader interface. See
If the method succeeds, the return value is
Sets the current pixel shader to a previously created pixel shader.
+Pixel shader interface.
If the method succeeds, the return value is
Retrieves the currently set pixel shader.
+Pointer to a pixel shader interface.
If the method succeeds, the return value is
This method will not work on a device that is created using
Sets a floating-point shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four float vectors in the array of constants.
If the method succeeds, the return value is
Gets a floating-point shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four float vectors in the array of constants.
If the method succeeds, the return value is
Sets an integer shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four integer vectors in the array of constants.
If the method succeeds, the return value is
Gets an integer shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of four integer vectors in the array of constants.
If the method succeeds, the return value is
Sets a Boolean shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of boolean values in the array of constants.
If the method succeeds, the return value is
Gets a Boolean shader constant.
+Register number that will contain the first constant value.
Pointer to an array of constants.
Number of Boolean values in the array of constants.
If the method succeeds, the return value is
Draws a rectangular patch using the currently set streams.
+Handle to the rectangular patch to draw.
Pointer to an array of four floating-point values that identify the number of segments each edge of the rectangle patch should be divided into when tessellated. See
Pointer to a
If the method succeeds, the return value is
For static patches: Set the vertex shader, set the appropriate streams, supply patch information in the pRectPatchInfo parameter, and specify a handle so that Direct3D can capture and cache information. Call
Calling
For dynamic patches, the patch data changes for every rendering of the patch, so it is not efficient to cache information. The application can convey this to Direct3D by setting Handle to 0. In this case, Direct3D draws the patch using the currently set streams and the pNumSegs values, and does not cache any information. It is not valid to simultaneously set Handle to 0 and pRectPatchInfo to
Draws a triangular patch using the currently set streams.
+Handle to the triangular patch to draw.
Pointer to an array of three floating-point values that identify the number of segments each edge of the triangle patch should be divided into when tessellated. See
Pointer to a
If the method succeeds, the return value is
For static patches: Set the vertex shader, set the appropriate streams, supply patch information in the pTriPatchInfo parameter, and specify a handle so that Direct3D can capture and cache information. To efficiently draw the patch, call
Calling
For dynamic patches, the patch data changes for every rendering of the patch so it is not efficient to cache information. The application can convey this to Direct3D by setting Handle to 0. In this case, Direct3D draws the patch using the currently set streams and the pNumSegs values, and does not cache any information. It is not valid to simultaneously set Handle to 0 and pTriPatchInfo to
Frees a cached high-order patch.
+Handle of the cached high-order patch to delete.
If the method succeeds, the return value is
Creates a status query.
+Identifies the query type. For more information, see
Returns a reference to the query interface that manages the query object. See
This parameter can be set to
If the method succeeds, the return value is
This method is provided for both synchronous and asynchronous queries. It takes the place of GetInfo, which is no longer supported in Direct3D 9.
Synchronous and asynchronous queries are created with
Returns an interface to the instance of the Direct3D object that created the device.
+Calling
Retrieves the capabilities of the rendering device.
+Retrieves the creation parameters of the device.
+You can query the AdapterOrdinal member of the returned
Gets the number of implicit swap chains.
+Implicit swap chains are created by the device during
An application may create additional swap chains using
This method allows the use of GDI dialog boxes in full-screen mode applications.
+The GDI dialog boxes must be created as child to the device window. They should also be created within the same thread that created the device because this enables the parent window to manage redrawing the child window.
The method has no effect for windowed mode applications, but this setting will be respected if the application resets the device into full-screen mode. If SetDialogBoxMode succeeds in a windowed mode application, any subsequent reset to full-screen mode will be checked against the restrictions listed above. Also, SetDialogBoxMode causes all back buffers on the swap chain to be discarded, so an application is expected to refresh its content for all back buffers after this call.
+Gets or sets the depth-stencil surface owned by the Direct3DDevice object.
+Calling this method will increase the internal reference count on the
Retrieves or sets the viewport parameters currently set for the device.
+Typically, methods that return state will not work on a device that is created using
Retrieves or sets the current material properties for the device.
+This method will not return device state for a device that is created using
Retrieves or sets the clip status.
+When clipping is enabled during vertex processing (by
Clip status is not updated by
Clip status is used during software vertex processing. Therefore, this method is not supported on pure or nonpure hardware processing devices. For more information about pure devices, see
Retrieves or sets the current texture palette.
+Gets or sets the scissor rectangle.
+The scissor rectangle is used as a rectangular clipping region.
See Rectangles (Direct3D 9) for further information on the use of rectangles in DirectX.
+Gets or sets the vertex processing (hardware or software) mode.
+An application can create a mixed-mode device to use both the software vertex processing and the hardware vertex processing. To switch between the two vertex processing modes in DirectX 8.x, use
In Direct3D 9, use
Gets or sets the N-patch mode segments.
+Gets or sets a vertex shader declaration.
+Gets or sets the fixed vertex function declaration.
+The fixed vertex function declaration is a set of FVF flags that determine how vertices processed by the fixed function pipeline will be used.
+Retrieves or sets the currently set vertex shader.
+Typically, methods that return state will not work on a device that is created using
Retrieves or sets index data.
+ Calling this method will increase the internal reference count on the
Retrieves or sets the currently set pixel shader.
+This method will not work on a device that is created using
Applications use the methods of the
The
The LPDIRECT3DDEVICE9EX and PDIRECT3DDEVICE9EX types are defined as references to the
typedef struct IDirect3DDevice9Ex *LPDIRECT3DDEVICE9EX, *PDIRECT3DDEVICE9EX;
Prepare the texture sampler for monochrome convolution filtering on a single-color texture.
+The width of the filter kernel; ranging from 1 - D3DCONVOLUTIONMONO_MAXWIDTH. The default value is 1.
The height of the filter kernel; ranging from 1 - D3DCONVOLUTIONMONO_MAXHEIGHT. The default value is 1.
An array of weights, one weight for each kernel sub-element in the width. This parameter must be
An array of weights, one weight for each kernel sub-element in the height. This parameter must be
If the method succeeds, the return value is
This method is designed to filter a single color texture. A monochrome convolution filter is a 2D box filter with all of the weights set to 1.0; the filter kernel resolution ranges from 1 x 1 to 7 x 7. When monochrome texture filtering is set to a texture sampler and texture sampling is performed at location, then Direct3D performs convolution.
Restrictions include:
Copy a text string to one surface using an alphabet of glyphs on another surface. Composition is done by the GPU using bitwise operations.
+A reference to a source surface (prepared by
A reference to the destination surface (prepared by
A reference to a vertex buffer (see
The number of rectangles or glyphs that are used in the operation. The number applies to both the source and destination surfaces. The range is 0 to D3DCOMPOSERECTS_MAXNUMRECTS.
A reference to a vertex buffer (see
Specifies how to combine the source and destination surfaces. See
A value added to the x coordinates of all destination rectangles. This value can be negative, which may cause the glyph to be rejected or clipped if the result is beyond the bounds of the surface.
A value added to the y coordinates of all destination rectangles. This value can be negative, which may cause the glyph to be rejected or clipped if the result is beyond the bounds of the surface.
If the method succeeds, the return value is
Glyphs from a one-bit source surface are put together into another one-bit texture surface with this method. The destination surface can then be used as the source for a normal texturing operation that will filter and scale the strings of text onto some other non-monochrome surface.
This method has several constraints (which are similar to StretchRect):
The method is not recorded in state blocks.
+Swap the swapchain's next buffer with the front buffer.
+Pointer to a
Pointer to
Pointer to a destination window whose client area is taken as the target for this presentation. If this value is
Note??If you create a swap chain with
Pointer to a
If this value is non-
Allows the application to request that the method return immediately when the driver reports that it cannot schedule a presentation. Valid values are 0, or any combination of
Possible return values include:
Differences between Direct3D 9 and Direct3D 9Ex: |
?
Similar to the
When the swapchain is created with
Get the priority of the GPU thread.
+Current GPU priority. Valid values range from -7 to 7.
Possible return values include:
Use
This method will retrieve the priority of the thread stored with the Direct3D device even if it was created with the
Set the priority on the GPU thread.
+The thread priority, ranging from -7 to 7.
Possible return values include:
GPU thread priority is not reset when a device is lost. The effects of calls to this method are not recorded in state blocks.
+Suspend execution of the calling thread until the next vertical blank signal.
+Swap chain index. This is an optional, zero-based index used to specify a swap chain on a multihead card.
This method will always return
This method allows applications to efficiently throttle their frame rate to that of the monitor associated with the device. Following a vertical blank, the amount of time it takes for the thread to wake up is typically very short.
In some scenarios the hardware may stop generating vertical blank signals when nothing is being displayed on the monitor. In this case, the method will wait approximately 100ms and return with
Checks an array of resources to determine if it is likely that they will cause a large stall at Draw time because the system must make the resources GPU-accessible.
+An array of
A value indicating the number of resources passed into the pResourceArray parameter up to a maximum of 65535.
If all the resources are in GPU-accessible memory, the method will return
If no allocation that comprises the resources is on disk, but at least one allocation is not in GPU-accessible memory, the method will return
If at least one allocation that comprises the resources is on disk, this method will return S_NOT_RESIDENT. The system may need to perform a copy to promote the resource.
This API is no more than a reasonable guess at residency, since resources may have been demoted by the time the application uses them.
The expected usage pattern is as follows. If the application determines that a set of resources are not resident, then the application will substitute a lower-LOD version of the resource and continue with rendering. The video memory manager API, offers a feature to allow the application to express that it would like these lower-LOD resources to be made more likely to stay resident in GPU-accessible memory. It is the app's responsibility to create, fill and destroy these lower-LOD versions, if it so chooses.
The application also needs to begin promotion of the higher-LOD versions when the residency check indicates that the resource is not resident in GPU-accessible memory. Since a per-process lock exists in kernel mode, a performant implementation will spawn a separate process whose sole job is to promote resources. The application communicates resource identity between the two processes by means of the Sharing Resources shared surfaces API and promotes them by means of the SetPriority.
+Checks an array of resources to determine if it is likely that they will cause a large stall at Draw time because the system must make the resources GPU-accessible.
+An array of
A value indicating the number of resources passed into the pResourceArray parameter up to a maximum of 65535.
If all the resources are in GPU-accessible memory, the method will return
If no allocation that comprises the resources is on disk, but at least one allocation is not in GPU-accessible memory, the method will return
If at least one allocation that comprises the resources is on disk, this method will return S_NOT_RESIDENT. The system may need to perform a copy to promote the resource.
This API is no more than a reasonable guess at residency, since resources may have been demoted by the time the application uses them.
The expected usage pattern is as follows. If the application determines that a set of resources are not resident, then the application will substitute a lower-LOD version of the resource and continue with rendering. The video memory manager API, offers a feature to allow the application to express that it would like these lower-LOD resources to be made more likely to stay resident in GPU-accessible memory. It is the app's responsibility to create, fill and destroy these lower-LOD versions, if it so chooses.
The application also needs to begin promotion of the higher-LOD versions when the residency check indicates that the resource is not resident in GPU-accessible memory. Since a per-process lock exists in kernel mode, a performant implementation will spawn a separate process whose sole job is to promote resources. The application communicates resource identity between the two processes by means of the Sharing Resources shared surfaces API and promotes them by means of the SetPriority.
+Set the number of frames that the system is allowed to queue for rendering.
+The maximum number of back buffer frames that a driver can queue. The value is typically 3, but can range from 1 to 20. A value of 0 will reset latency to the default. For multi-head devices, MaxLatency is specified per-head.
Possible return values include:
Frame latency is the number of frames that are allowed to be stored in a queue, before submission for rendering. Latency is often used to control how the CPU chooses between responding to user input and frames that are in the render queue.
It is often beneficial for applications that have no user input (for example, video playback) to queue more than 3 frames of data.
+Retrieves the number of frames of data that the system is allowed to queue.
+Returns the number of frames that can be queued for render. The value is typically 3, but can range from 1 to 20.
Possible return values include:
Frame latency is the number of frames that are allowed to be stored in a queue, before submission for rendering. Latency is often used to control how the CPU chooses between responding to user input and frames that are in the render queue.
It is often beneficial for applications that have no user input (for example, video playback) to queue more than 3 frames of data.
+Reports the current cooperative-level status of the Direct3D device for a windowed or full-screen application.
+The destination window handle to check for occlusion. When this parameter is
Possible return values include:
This method replaces
We recommend not to call CheckDeviceState every frame. Instead, call CheckDeviceState only if the
See Lost Device Behavior Changes for more information about lost, hung, and removed devices.
+Creates a render-target surface.
+Width of the render-target surface, in pixels.
Height of the render-target surface, in pixels.
Member of the
Member of the
Quality level. The valid range is between zero and one less than the level returned by pQualityLevels used by
Render targets are not lockable unless the application specifies TRUE for Lockable.
Note that lockable render targets reduce performance on some graphics hardware. The readback performance (moving data from video memory to system memory) depends on the type of hardware used (AGP vs. PCI Express) and is usually far lower than upload performance (moving data from system to video memory). If you need read access to render targets, use GetRenderTargetData instead of lockable render targets.
Reserved. Set this parameter to
Combination of one or more
Address of a reference to an
Render-target surfaces are placed in the
The creation of lockable, multisampled render targets is not supported.
+Create an off-screen surface.
+Width of the surface.
Height of the surface.
Format of the surface. See
Surface pool type. See
Reserved. Set this parameter to
Combination of one or more
Pointer to the
Off-screen plain surfaces are always lockable, regardless of their pool types.
+Creates a depth-stencil surface.
+Width of the depth-stencil surface, in pixels.
Height of the depth-stencil surface, in pixels.
Member of the
Member of the
Quality level. The valid range is between zero and one less than the level returned by pQualityLevels used by
Set this flag to TRUE to enable z-buffer discarding, and
This flag has the same behavior as the constant,
Reserved. Set this parameter to
Combination of one or more
Address of a reference to an
The memory class of the depth-stencil buffer is always
Resets the type, size, and format of the swap chain with all other surfaces persistent.
+Pointer to a
When switching to full-screen mode, Direct3D will try to find a desktop format that matches the back buffer format, so that back buffer and front buffer formats will be identical (to eliminate the need for color conversion).
When this method returns:
Pointer to a
The method can return:
If this method returns
If a call to
Unlike previous versions of DirectX, calling
Pixel shaders and vertex shaders survive
There are two different types of swap chains: full-screen or windowed. If the new swap chain is full-screen, the adapter will be placed in the display mode that matches the new size.
Applications can expect messages to be sent to them during this call (for example, before this call is returned); applications should take precautions not to call into Direct3D at this time.
A call to
When trying to reset more than one display adapter in a group, set pPresentationParameters to point to an array of
If a multihead device was created with
Retrieves the display mode's spatial resolution, color resolution, refresh frequency, and rotation settings.
+An unsigned integer specifying the swap chain.
Pointer to a
Pointer to a
Gets or sets the priority of the GPU thread.
+Use
This method will retrieve the priority of the thread stored with the Direct3D device even if it was created with the
Retrieves or sets the number of frames of data that the system is allowed to queue.
+Frame latency is the number of frames that are allowed to be stored in a queue, before submission for rendering. Latency is often used to control how the CPU chooses between responding to user input and frames that are in the render queue.
It is often beneficial for applications that have no user input (for example, video playback) to queue more than 3 frames of data.
+Applications use the methods of the
The
The LPDIRECT3D9 and PDIRECT3D9 types are defined as references to the
typedef struct IDirect3D9 *LPDIRECT3D9, *PDIRECT3D9;
LPDIRECT3D9 g_pD3D = NULL; if( NULL == (g_pD3D = Direct3DCreate9(D3D_SDK_VERSION))) return E_FAIL;
+
+ The IDirect3D9 interface supports enumeration of active display adapters and allows the creation of Direct3D device objects. Registers a pluggable software device. Software devices provide software rasterization enabling applications to access a variety of software rasterizers.
+Pointer to the initialization function for the software device to be registered.
If the method succeeds, the return value is
If the user's computer provides no special hardware acceleration for 3D operations, your application might emulate 3D hardware in software. Software rasterization devices emulate the functions of color 3D hardware in software. A software device runs more slowly than a HAL. However, software devices take advantage of any special instructions supported by the CPU to increase performance. Instruction sets include the AMD 3DNow! instruction set on some AMD processors and the MMX instruction set supported by many Intel processors. Direct3D uses the 3DNow! instruction set to accelerate transformation and lighting operations and the MMX instruction set to accelerate rasterization.
Software devices communicate with Direct3D through an interface similar to the hardware device driver interface (DDI).
Software devices are loaded by the application and registered with the
The Direct3D Driver Development Kit (DDK) provides the documentation and headers for developing pluggable software devices.
+Returns the number of adapters on the system.
+A UINT value that denotes the number of adapters on the system at the time this
Describes the physical display adapters present in the system when the
Returns the number of display modes available on this adapter.
+Ordinal number that denotes the display adapter. D3DADAPTER_DEFAULT is always the primary display adapter.
Identifies the format of the surface type using
This method returns the number of display modes on this adapter or zero if Adapter is greater than or equal to the number of adapters on the system.
Queries the device to determine whether the specified adapter supports the requested format and display mode. This method could be used in a loop to enumerate all the available adapter modes.
+Ordinal number denoting the display adapter to enumerate. D3DADAPTER_DEFAULT is always the primary display adapter. This method returns
Allowable pixel formats. See Remarks.
Represents the display-mode index which is an unsigned integer between zero and the value returned by GetAdapterModeCount minus one.
A reference to the available display mode of type
An application supplies a display mode and a format to EnumAdapterModes which returns a display mode. This method could be used in a loop to enumerate all available display modes.
The application specifies a format and the enumeration is restricted to those display modes that exactly match the format (alpha is ignored). Allowed formats (which are members of
In addition, EnumAdapterModes treats pixel formats 565 and 555 as equivalent, and returns the correct version. The difference comes into play only when the application locks the back buffer and there is an explicit flag that the application must set in order to accomplish this.
+Retrieves the current display mode of the adapter.
+Ordinal number that denotes the display adapter to query. D3DADAPTER_DEFAULT is always the primary display adapter.
Pointer to a
GetAdapterDisplayMode will not return the correct format when the display is in an extended format, such as 2:10:10:10. Instead, it returns the format X8R8G8B8.
+Verifies whether a hardware accelerated device type can be used on this adapter.
+Ordinal number denoting the display adapter to enumerate. D3DADAPTER_DEFAULT is always the primary display adapter. This method returns
Member of the
Member of the
Back buffer format. For more information about formats, see
Value indicating whether the device type will be used in full-screen or windowed mode. If set to TRUE, the query is performed for windowed applications; otherwise, this value should be set
If the device can be used on this adapter,
A HAL device type requires hardware acceleration. Applications can use CheckDeviceType to determine if the needed hardware and drivers are present to support a HAL device.
Full-screen applications should not specify a DisplayFormat that contains an alpha channel. This will result in a failed call. Note that an alpha channel can be present in the back buffer but the two display formats must be identical in all other respects. For example, if DisplayFormat =
The following code fragment shows how you could use CheckDeviceType to test whether a certain device type can be used on this adapter.
if(SUCCEEDED(pD3Device->CheckDeviceType(D3DADAPTER_DEFAULT,, DisplayFormat, BackBufferFormat, bIsWindowed))) return ; + // There is no HAL on this adapter using this render-target format. + // Try again, using another format. +
This code returns
Using CheckDeviceType to test for compatibility between a back buffer that differs from the display format will return appropriate values. This means that the call will reflect device capabilities. If the device cannot render to the requested back-buffer format, the call will still return
Determines whether a surface format is available as a specified resource type and can be used as a texture, depth-stencil buffer, or render target, or any combination of the three, on a device representing this adapter.
+Ordinal number denoting the display adapter to query. D3DADAPTER_DEFAULT is always the primary display adapter. This method returns
Member of the
Member of the
Requested usage options for the surface. Usage options are any combination of
Resource type requested for use with the queried format. Member of
Format of the surfaces which may be used, as defined by Usage. Member of
If the format is compatible with the specified device for the requested usage, this method returns
Here are some examples using CheckDeviceFormat to check for hardware support of:
IsDepthFormatExisting( DepthFormat, AdapterFormat ) + { hr = pD3D->CheckDeviceFormat( D3DADAPTER_DEFAULT, , AdapterFormat, , , DepthFormat); return SUCCEEDED( hr ); + }
See Selecting a Device (Direct3D 9) for more detail on the enumeration process.
IsTextureFormatOk( TextureFormat, AdapterFormat ) + { hr = pD3D->CheckDeviceFormat( D3DADAPTER_DEFAULT, , AdapterFormat, 0, , TextureFormat); return SUCCEEDED( hr ); + }
When migrating code from Direct3D 9 to Direct3D 10, the Direct3D 10 equivalent to CheckDeviceFormat is CheckFormatSupport.
+Determines if a multisampling technique is available on this device.
+Ordinal number denoting the display adapter to query. D3DADAPTER_DEFAULT is always the primary display adapter. This method returns
Member of the
Member of the
bool value. Specify TRUE to inquire about windowed multisampling, and specify
Member of the
pQualityLevels returns the number of device-specific sampling variations available with the given sample type. For example, if the returned value is 3, then quality levels 0, 1 and 2 can be used when creating resources with the given sample count. The meanings of these quality levels are defined by the device manufacturer and cannot be queried through D3D. For example, for a particular device different quality levels at a fixed sample count might refer to different spatial layouts of the sample locations or different methods of resolving. This can be
If the device can perform the specified multisampling method, this method returns
This method is intended for use with both render-target and depth-stencil surfaces because you must create both surfaces multisampled if you want to use them together.
The following code fragment shows how you could use CheckDeviceMultiSampleType to test for devices that support a specific multisampling method.
if( SUCCEEDED(pD3D->CheckDeviceMultiSampleType( pCaps->AdapterOrdinal, pCaps->DeviceType, BackBufferFormat,, , null ) ) && SUCCEEDED(pD3D->CheckDeviceMultiSampleType( pCaps->AdapterOrdinal, pCaps->DeviceType, DepthBufferFormat,, , null ) ) ) return; +
The preceding code will return
See the remarks in
Determines whether a depth-stencil format is compatible with a render-target format in a particular display mode.
+Ordinal number denoting the display adapter to query. D3DADAPTER_DEFAULT is always the primary display adapter.
Member of the
Member of the
Member of the
Member of the
If the depth-stencil format is compatible with the render-target format in the display mode, this method returns
This method is provided to enable applications to work with hardware requiring that certain depth formats can only work with certain render-target formats.
The behavior of this method has been changed for DirectX 8.1. This method now pays attention to the D24x8 and D32 depth-stencil formats. The previous version assumed that these formats would always be usable with 32- or 16-bit render targets. This method will now return
The following code fragment shows how you could use CheckDeviceFormat to validate a depth stencil format.
IsDepthFormatOk( DepthFormat, AdapterFormat, BackBufferFormat) + { // Verify that the depth format exists hr = pD3D->CheckDeviceFormat(D3DADAPTER_DEFAULT, , AdapterFormat, , , DepthFormat); if(FAILED(hr)) return ; // Verify that the depth format is compatible hr = pD3D->CheckDepthStencilMatch(D3DADAPTER_DEFAULT, , AdapterFormat, BackBufferFormat, DepthFormat); return SUCCEEDED(hr); } +
The preceding call will return
Tests the device to see if it supports conversion from one display format to another.
+Display adapter ordinal number. D3DADAPTER_DEFAULT is always the primary display adapter. This method returns
Device type. Member of the
Source adapter format. Member of the
Target adapter format. Member of the
If the method succeeds, the return value is
Using CheckDeviceType to test for compatibility between a back buffer that differs from the display format will return appropriate values. This means that the call will reflect device capabilities. If the device cannot render to the requested back buffer format, the call will still return
CheckDeviceFormatConversion can also be used to determine which combinations of source surface formats and destination surface formats are permissible in calls to StretchRect.
Color conversion is restricted to the following source and target formats.
?
Retrieves device-specific information about a device.
+Ordinal number that denotes the display adapter. D3DADAPTER_DEFAULT is always the primary display adapter.
Member of the
Pointer to a
The application should not assume the persistence of vertex processing capabilities across Direct3D device objects. The particular capabilities that a physical device exposes may depend on parameters supplied to CreateDevice. For example, the capabilities may yield different vertex processing capabilities before and after creating a Direct3D Device Object with hardware vertex processing enabled. For more information see the description of
Returns the handle of the monitor associated with the Direct3D object.
+Ordinal number that denotes the display adapter. D3DADAPTER_DEFAULT is always the primary display adapter.
Handle of the monitor associated with the Direct3D object.
As shown in the following code fragment, which illustrates how to obtain a handle to the monitor associated with a given device, use GetDirect3D to return the Direct3D enumerator from the device and use GetCreationParameters to retrieve the value for Adapter.
if( FAILED( pDevice->GetCreationParameters( &Parameters ) ) ) return+; if( FAILED( pDevice->GetDirect3D(&pD3D) ) ) return ; hMonitor = pD3D->GetAdapterMonitor(Parameters.AdapterOrdinal); pD3D->Release(); +
Creates a device to represent the display adapter.
+Ordinal number that denotes the display adapter. D3DADAPTER_DEFAULT is always the primary display adapter.
Member of the
The focus window alerts Direct3D when an application switches from foreground mode to background mode. See Remarks.
Combination of one or more options that control device creation. For more information, see
Pointer to a
For Windows 2000 and Windows XP, the full-screen device display refresh rate is set in the following order:
An unsupported refresh rate will default to the closest supported refresh rate below it. For example, if the application specifies 63 hertz, 60 hertz will be used. There are no supported refresh rates below 57 hertz.
pPresentationParameters is both an input and an output parameter. Calling this method may change several members including:
Address of a reference to the returned
If the method succeeds, the return value is
This method returns a fully working device interface, set to the required display mode (or windowed), and allocated with the appropriate back buffers. To begin rendering, the application needs only to create and set a depth buffer (assuming EnableAutoDepthStencil is
When you create a Direct3D device, you supply two different window parameters: a focus window (hFocusWindow) and a device window (the hDeviceWindow in
This method should not be run during the handling of WM_CREATE. An application should never pass a window handle to Direct3D while handling WM_CREATE. Any call to create, release, or reset the device must be done using the same thread as the window procedure of the focus window.
Note that
Back buffers created as part of the device are only lockable if
The methods Reset,
If you attempt to create a device on a 0x0 sized window, CreateDevice will fail.
+Returns the number of adapters on the system.
+Applications use the methods of the
The
The LPDIRECT3D9EX and PDIRECT3D9EX types are defined as references to the
typedef struct IDirect3D9Ex *LPDIRECT3D9EX, *PDIRECT3D9EX;
Returns the number of display modes available.
+Ordinal number denoting the display adapter from which to retrieve the display mode count.
Specifies the characteristics of the desired display mode. See
The number of display modes available. A return of value zero from this method is an indication that no such display mode is supported or simply this monitor is no longer available.
Events such as display mode changes on other heads of the same hardware, monitor change or its connection status change, and desktop extension/unextension could all affect the number of display modes available.
To fullscreen applications,
To increase the chance of setting a currently available display mode successfully, fullscreen applications should try to requery the available display mode list upon receiving
This method returns the actual display mode info based on the given mode index.
+Ordinal number denoting the display adapter to enumerate. D3DADAPTER_DEFAULT is always the primary display adapter. This method returns
See
Represents the display-mode index which is an unsigned integer between zero and the value returned by GetAdapterModeCount minus one.
A reference to the available display mode of type
Retrieves the current display mode and rotation settings of the adapter.
+Ordinal number that denotes the display adapter to query. D3DADAPTER_DEFAULT is always the primary display adapter.
Pointer to a
Pointer to a
GetAdapterDisplayModeEx does not return the correct format when the display is in an extended format, such as 2:10:10:10. Instead, it returns the format X8R8G8B8.
To windowed applications, a value of
Creates a device to represent the display adapter.
+Ordinal number that denotes the display adapter. D3DADAPTER_DEFAULT is always the primary display adapter.
Specifies the type of device. See
The focus window alerts Direct3D when an application switches from foreground mode to background mode. For full-screen mode, the window specified must be a top-level window. For windowed mode, this parameter may be
Combination of one or more options (see
Pointer to a
This parameter is both an input and an output parameter. Calling this method may change several members including:
The display mode for when the device is set to fullscreen. See
Address of a reference to the returned
This method returns
This method returns a unique identifier for the adapter that is specific to the adapter hardware. Applications can use this identifier to define robust mappings across various APIs (Direct3D 9, DXGI).
+Ordinal number denoting the display adapter from which to retrieve the
A unique identifier for the given adapter.
Used to set and query effects, and to choose techniques. An effect object can contain multiple techniques to render the same effect.
+The
The LPD3DXEFFECT type is defined as a reference to this interface.
typedef interface ID3DXEffect ID3DXEffect; typedef interface ID3DXEffect *LPD3DXEFFECT;
Gets a reference to the pool of shared parameters.
+Pointer to a
This method always returns the value
Pools contain shared parameters between effects. See Cloning and Sharing (Direct3D 9).
+Sets the active technique.
+Unique handle to the technique. See Handles (Direct3D 9).
If the method succeeds, the return value is
Gets the current technique.
+A unique identifier to the current technique. See Handles (Direct3D 9).
Validate a technique.
+Unique identifier. See Handles (Direct3D 9).
If the method succeeds, the return value is
Searches for the next valid technique, starting at the technique after the specified technique.
+Unique identifier to a technique. See Handles (Direct3D 9). Specify
Pointer to an identifier for the next technique.
Determines if a parameter is used by the technique.
+Unique identifier for the parameter. See Handles (Direct3D 9).
Unique identifier for the technique. See Handles (Direct3D 9).
Returns TRUE if the parameter is being used and returns
Starts an active technique.
+DWORD that determines if state modified by an effect is saved and restored. The default value 0 specifies that
Pointer to a value returned that indicates the number of passes needed to render the current technique.
An application sets one active technique in the effect system by calling
Within the
Begins a pass, within the active technique.
+A zero-based integer index into the technique.
If the method succeeds, the return value is
An application sets one active pass (within one active technique) in the effect system by calling
If the application changes any effect state using any of the Effect::Setx methods inside of a
Propagate state changes that occur inside of an active pass to the device before rendering.
+If the method succeeds, the return value is
If the application changes any effect state using any of the ID3DXEffect::Setx methods inside of an
This is slightly different for any shared parameters in a cloned effect. When a technique is active on a cloned effect (that is, when
End an active pass.
+This method always returns the value
An application signals the end of rendering an active pass by calling
Each matching pair of
If the application changes any effect state using any of the Effect::Setx methods inside of a
Ends an active technique.
+This method always returns the value
All rendering in an effect is done within a matching pair of
By default, the effect system takes care of saving state prior to a technique, and restoring state after a technique. If you choose to disable this save and restore functionality, see
Retrieves the device associated with the effect.
+Address of a reference to an
If the method succeeds, the return value is
Calling this method will increase the internal reference count for the
Use this method to release all references to video memory resources and delete all stateblocks. This method should be called whenever a device is lost, or before resetting a device.
+If the method succeeds, the return value is
This method should be called whenever the device is lost or before the user calls
Use this method to re-acquire resources and save initial state.
+If the method succeeds, the return value is
Set the effect state manager.
+A reference to the state manager. See
If the method succeeds, the return value is
The
Get the effect state manager.
+Returns a reference to the state manager. See
If the method succeeds, the return value is
The
Start capturing state changes in a parameter block.
+If the method succeeds, the return value is
Capture effect parameter state changes until EndParameterBlock is called. Effect parameters include any state changes outside of a pass. Delete parameter blocks if they are no longer needed by calling DeleteParameterBlock.
+Stop capturing effect parameter state changes.
+Returns a handle to the parameter state block.
All effect parameters that change state (after calling BeginParameterBlock and before calling EndParameterBlock) will be saved in an effect parameter state block. Use ApplyParameterBlock to apply this block of state changes to the effect system. Once you are finished with a state block use DeleteParameterBlock to free the memory.
+Apply the values in a state block to the current effect system state.
+A handle to the parameter block. This is the handle returned by
If the method succeeds, the return value is
Capture effect parameter state changes in a parameter block by calling BeginParameterBlock; stop capturing state changes by calling EndParameterBlock. These state changes include any effect parameter changes that occur inside of a technique (including those outside of a pass). Once you are done with the parameter block, call DeleteParameterBlock to recover memory.
+Delete a parameter block.
+A handle to the parameter block. This is the handle returned by
If the method succeeds, the return value is
Parameter blocks are blocks of effect states. Use a parameter block to record state changes so that they can be applied later on with a single API call. When no longer needed, delete the parameter block to reduce memory usage.
+Creates a copy of an effect.
+Pointer to an
Pointer to an
Note??This function will not clone an effect if the user specifies
To update shared and non-shared parameters in an active technique of a cloned effect, see
Set a contiguous range of shader constants with a memory copy.
+Handle to the value to set, or the name of the value passed in as a string. Passing in a handle is more efficient. See Handles (Direct3D 9).
Pointer to a buffer containing the data to be set. SetRawValue checks for valid memory, but does not do any checking for valid data.
Number of bytes between the beginning of the effect data and the beginning of the effect constants you are going to set.
The size of the buffer to be set, in bytes.
If the method succeeds, the return value is
SetRawValue is a very fast way to set effect constants since it performs a memory copy without performing validation or any data conversion (like converting a row-major matrix to a column-major matrix). Use SetRawValue to set a series of contiguous effect constants. For instance, you could set an array of twenty matrices with 20 calls to
All values are expected to be either matrix4x4s or float4s and all matrices are expected to be in column-major order. Int or float values are cast into a float4; therefore, it is highly recommended that you use SetRawValue with only float4 or matrix4x4 data.
+Gets a reference to the pool of shared parameters.
+Pools contain shared parameters between effects. See Cloning and Sharing (Direct3D 9).
+Retrieves the device associated with the effect.
+Calling this method will increase the internal reference count for the
Get or sets the effect state manager.
+The
The
The
The LPD3DXEFFECTCOMPILER type is defined as a reference to this interface.
typedef interface+; + typedef interface *LPD3DXEFFECTCOMPILER; +
Toggles the literal status of a parameter. A literal parameter has a value that doesn't change during the lifetime of an effect.
+Unique identifier to a parameter. See Handles (Direct3D 9).
Set to TRUE to make the parameter a literal, and
If the method succeeds, the return value is
This method only changes whether the parameter is a literal or not. To change the value of a parameter, use a method like
This function must be called before the effect is compiled. Here is an example of how one might use this function:
LPD3DXEFFECTCOMPILER pEffectCompiler; char errors[1000];+hr; hr = ("shader.fx", null ,null , 0, &pEffectCompiler, &errors); //In the fx file, literalInt is declared as an int. //By calling this function, the compiler will treat //it as a literal (i.e. #define) hr = pEffectCompiler->SetLiteral("literalInt", TRUE); //create ten different variations of the same effect LPD3DXBUFFER pEffects[10]; LPD3DXBUFFER pErrors; for(int i = 0; i < 10; ++i) { hr = pEffectCompiler->SetInt("literalInt", i); hr = pEffectCompiler->CompileEffect(0, &pEffects[i], &pErrors); } +
Gets a literal status of a parameter. A literal parameter has a value that doesn't change during the lifetime of an effect.
+Unique identifier to a parameter. See Handles (Direct3D 9).
Returns True if the parameter is a literal, and False otherwise.
This method only changes whether the parameter is a literal or not. To change the value of a parameter, use a method like
Compile an effect.
+Compile options identified by various flags. The Direct3D 10 HLSL compiler is now the default. See
Buffer containing the compiled effect. For more information about accessing the buffer, see
Buffer containing at least the first compile error message that occurred. This includes effect compiler errors and high-level language compile errors. For more information about accessing the buffer, see
If the method succeeds, the return value is
If the arguments are invalid, the method will return
If the method fails, the return value will be E_FAIL.
Compiles a shader from an effect that contains one or more functions.
+Unique identifier to the function to be compiled. This value must not be
Pointer to a shader profile which determines the shader instruction set. See
Compile options identified by various flags. The Direct3D 10 HLSL compiler is now the default. See
Buffer containing the compiled shader. The compiler shader is an array of DWORDs. For more information about accessing the buffer, see
Buffer containing at least the first compile error message that occurred. This includes effect compiler errors and high-level language compile errors. For more information about accessing the buffer, see
Returns an
If the method succeeds, the return value is
If the arguments are invalid, the method will return
If the method fails, the return value will be E_FAIL.
Targets can be specified for vertex shaders, pixel shaders, and texture fill functions.
Vertex shader targets | vs_1_1, vs_2_0, vs_2_sw, vs_3_0 |
Pixel shader targets | ps_1_1, ps_1_2, ps_1_3, ps_1_4, ps_2_0, ps_2_sw, ps_3_0 |
Texture fill targets | tx_0, tx_1 |
?
This method compiles a shader from a function that is written in a C-like language. For more information, see HLSL.
+Data type for managing a set of default effect parameters.
+Name of the effect file.
Number of default parameters.
Pointer to an array of
Applications use the
The
The LPD3DXEFFECTPOOL type is defined as a reference to this interface.
typedef interface+; + typedef interface *LPD3DXEFFECTPOOL; +
The
The
The LPD3DXFONT type is defined as a reference to the
typedef interface+; + typedef interface *LPD3DXFONT; +
Retrieves the Direct3D device associated with the font object.
+Address of a reference to an
If the method succeeds, the return value is
Note??Calling this method will increase the internal reference count on the
Gets a description of the current font object. GetDescW and GetDescA are identical to this method, except that a reference is returned to a
If the method succeeds, the return value is
This method describes Unicode font objects if UNICODE is defined. Otherwise GetDescA is called, which returns a reference to the D3DXFONT_DESCA structure.
+Retrieves font characteristics that are identified in a
Nonzero if the function is successful; otherwise 0.
The compiler setting also determines the structure type. If Unicode is defined, the function returns a
Returns a handle to a display device context (DC) that has the font set.
+Handle to a display DC.
Returns information about the placement and orientation of a glyph in a character cell.
+Glyph identifier.
Address of a reference to a
Pointer to the smallest rectangle object that completely encloses the glyph.
Pointer to the two-dimensional vector that connects the origin of the current character cell to the origin of the next character cell. See
If the method succeeds, the return value is
Loads a series of characters into video memory to improve the efficiency of rendering to the device.
+ID of the first character to be loaded into video memory.
ID of the last character to be loaded into video memory.
If the method succeeds, the return value is
This method generates textures containing glyphs that represent the input characters. The glyphs are drawn as a series of triangles.
Characters will not be rendered to the device; DrawText must still be called to render the characters. However, by pre-loading characters into video memory, DrawText will use substantially fewer CPU resources.
This method internally converts characters to glyphs using the GDI function GetCharacterPlacement.
+Loads a series of glyphs into video memory to improve the efficiency of rendering to the device.
+ID of the first glyph to be loaded into video memory.
ID of the last glyph to be loaded into video memory.
If the method succeeds, the return value is
This method generates textures that contain the input glyphs. The glyphs are drawn as a series of triangles.
Glyphs will not be rendered to the device; DrawText must still be called to render the glyphs. However, by pre-loading glyphs into video memory, DrawText will use substantially fewer CPU resources.
+Loads formatted text into video memory to improve the efficiency of rendering to the device. This method supports ANSI and Unicode strings.
+Pointer to a string of characters to be loaded into video memory. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR; otherwise, the data type resolves to LPCSTR. See Remarks.
Number of characters in the text string.
If the method succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to PreloadTextW. Otherwise, the function call resolves to PreloadTextA because ANSI strings are being used.
This method generates textures that contain glyphs that represent the input text. The glyphs are drawn as a series of triangles.
Text will not be rendered to the device; DrawText must still be called to render the text. However, by preloading text into video memory, DrawText will use substantially fewer CPU resources.
This method internally converts characters to glyphs using the GDI function GetCharacterPlacement.
+Draws formatted text. This method supports ANSI and Unicode strings.
+Pointer to an
Pointer to a string to draw. If the Count parameter is -1, the string must be null-terminated.
Specifies the number of characters in the string. If Count is -1, then the pString parameter is assumed to be a reference to a null-terminated string and DrawText computes the character count automatically.
Pointer to a
Specifies the method of formatting the text. It can be any combination of the following values:
Value | Meaning |
---|---|
| Justifies the text to the bottom of the rectangle. This value must be combined with DT_SINGLELINE. |
| Determines the width and height of the rectangle. If there are multiple lines of text, DrawText uses the width of the rectangle pointed to by the pRect parameter and extends the base of the rectangle to bound the last line of text. If there is only one line of text, DrawText modifies the right side of the rectangle so that it bounds the last character in the line. In either case, DrawText returns the height of the formatted text but does not draw the text. |
| Centers text horizontally in the rectangle. |
| Expands tab characters. The default number of characters per tab is eight. |
| Aligns text to the left. |
| Draws without clipping. DrawText is somewhat faster when DT_NOCLIP is used. |
| Aligns text to the right. |
| Displays text in right-to-left reading order for bidirectional text when a Hebrew or Arabic font is selected. The default reading order for all text is left-to-right. |
| Displays text on a single line only. Carriage returns and line feeds do not break the line. |
| Top-justifies text. |
| Centers text vertically (single line only). |
| Breaks words. Lines are automatically broken between words if a word would extend past the edge of the rectangle specified by the pRect parameter. A carriage return/line feed sequence also breaks the line. |
?
Color of the text. For more information, see
If the function succeeds, the return value is the height of the text in logical units. If DT_VCENTER or DT_BOTTOM is specified, the return value is the offset from pRect (top to the bottom) of the drawn text. If the function fails, the return value is zero.
The parameters of this method are very similar to those of the GDI DrawText function.
This method supports both ANSI and Unicode strings.
This method must be called inside a BeginScene ... EndScene block. The only exception is when an application calls DrawText with DT_CALCRECT to calculate the size of a given block of text.
Unless the DT_NOCLIP format is used, this method clips the text so that it does not appear outside the specified rectangle. All formatting is assumed to have multiple lines unless the DT_SINGLELINE format is specified.
If the selected font is too large for the rectangle, this method does not attempt to substitute a smaller font.
This method supports only fonts whose escapement and orientation are both zero.
+Use this method to release all references to video memory resources and delete all stateblocks. This method should be called whenever a device is lost, or before resetting a device.
+If the method succeeds, the return value is
This method should be called whenever the device is lost or before the user calls Reset. Even if the device was not actually lost, OnLostDevice is responsible for freeing stateblocks and other resources that may need to be released before resetting the device. As a result, the font object cannot be used again before calling Reset and then OnResetDevice.
+Use this method to re-acquire resources and save initial state.
+If the method succeeds, the return value is
OnResetDevice should be called each time the device is reset (using Reset), before any other methods are called. This is a good place to re-acquire video-memory resources and capture state blocks.
+Retrieves the Direct3D device associated with the font object.
+Note??Calling this method will increase the internal reference count on the
Gets a description of the current font object. GetDescW and GetDescA are identical to this method, except that a reference is returned to a
This method describes Unicode font objects if UNICODE is defined. Otherwise GetDescA is called, which returns a reference to the D3DXFONT_DESCA structure.
+Returns a handle to a display device context (DC) that has the font set.
+Defines constants that describe the type of back buffer.
+Direct3D 9 does not support stereo view, so Direct3D does not use the
Specifies a nonstereo swap chain.
Specifies the left side of a stereo pair in a swap chain.
Specifies the right side of a stereo pair in a swap chain.
Defines the basis type of a high-order patch surface.
+The members of
Input vertices are treated as a series of Bézier patches. The number of vertices specified must be divisible by 4. Portions of the mesh beyond this criterion will not be rendered. Full continuity is assumed between sub-patches in the interior of the surface rendered by each call. Only the vertices at the corners of each sub-patch are guaranteed to lie on the resulting surface.
Input vertices are treated as control points of a B-spline surface. The number of apertures rendered is two fewer than the number of apertures in that direction. In general, the generated surface does not contain the control vertices specified.
An interpolating basis defines the surface so that the surface goes through all the input vertices specified. In DirectX 8, this was D3DBASIS_INTERPOLATE.
Defines the supported blend mode.
+In the preceding member descriptions, the RGBA values of the source and destination are indicated by the s and d subscripts.
The values in this enumerated type are used by the following render states:
See
Blend factor is (0, 0, 0, 0).
Blend factor is (1, 1, 1, 1).
Blend factor is (Rs, Gs, Bs, As).
Blend factor is (1 - Rs, 1 - Gs, 1 - Bs, 1 - As).
Blend factor is (As, As, As, As).
Blend factor is ( 1 - As, 1 - As, 1 - As, 1 - As).
Blend factor is (Ad, Ad, Ad, Ad).
Blend factor is (1 - Ad, 1 - Ad, 1 - Ad, 1 - Ad).
Blend factor is (Rd, Gd, Bd, Ad).
Blend factor is (1 - Rd, 1 - Gd, 1 - Bd, 1 - Ad).
Blend factor is (f, f, f, 1); where f = min(As, 1 - Ad).
Obsolete. Starting with DirectX 6, you can achieve the same effect by setting the source and destination blend factors to
Obsolete. Source blend factor is (1 - As, 1 - As, 1 - As, 1 - As), and destination blend factor is (As, As, As, As); the destination blend selection is overridden. This blend mode is supported only for the
Constant color blending factor used by the frame-buffer blender. This blend mode is supported only if
Inverted constant color-blending factor used by the frame-buffer blender. This blend mode is supported only if the
Blend factor is (PSOutColor[1]r, PSOutColor[1]g, PSOutColor[1]b, not used). See Render.
Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. |
?
Blend factor is (1 - PSOutColor[1]r, 1 - PSOutColor[1]g, 1 - PSOutColor[1]b, not used). See Render.
Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. |
?
Represents the capabilities of the hardware exposed through the Direct3D object.
+The MaxTextureBlendStages and MaxSimultaneousTextures members might seem similar, but they contain different information. The MaxTextureBlendStages member contains the total number of texture-blending stages supported by the current device, and the MaxSimultaneousTextures member describes how many of those stages can have textures bound to them by using the SetTexture method.
When the driver fills this structure, it can set values for execute-buffer capabilities, even when the interface being used to retrieve the capabilities (such as
In general, performance problems may occur if you use a texture and then modify it during a scene. Ensure that no texture used in the current BeginScene and EndScene block is evicted unless absolutely necessary. In the case of extremely high texture usage within a scene, the results are undefined. This occurs when you modify a texture that you have used in the scene and there is no spare texture memory available. For such systems, the contents of the z-buffer become invalid at EndScene. Applications should not call UpdateSurface to or from the back buffer on this type of hardware inside a BeginScene/EndScene pair. In addition, applications should not try to access the z-buffer if the
The following flags concerning mipmapped textures are not supported in Direct3D 9.
Member of the
Adapter on which this Direct3D device was created. This ordinal is valid only to pass to methods of the
The following driver-specific capability.
Value | Meaning |
---|---|
Display hardware is capable of returning the current scan line. | |
The display driver supports an overlay DDI that allows for verification of overlay capabilities. For more information about the overlay DDI, see Overlay DDI. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? |
?
Driver-specific capabilities identified in
Driver-specific capabilities identified in
Bit mask of values representing what presentation swap intervals are available.
Value | Meaning |
---|---|
The driver supports an immediate presentation swap interval. | |
The driver supports a presentation swap interval of every screen refresh. | |
The driver supports a presentation swap interval of every second screen refresh. | |
The driver supports a presentation swap interval of every third screen refresh. | |
The driver supports a presentation swap interval of every fourth screen refresh. |
?
Bit mask indicating what hardware support is available for cursors. Direct3D 9 does not define alpha-blending cursor capabilities.
Value | Meaning |
---|---|
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports at least a hardware color cursor in high-resolution modes (with scan lines greater than or equal to 400). | |
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports a hardware color cursor in both high-resolution and low-resolution modes (with scan lines less than 400). |
?
Flags identifying the capabilities of the device.
Value | Meaning |
---|---|
Device supports blits from system-memory textures to nonlocal video-memory textures. | |
Device can queue rendering commands after a page flip. Applications do not change their behavior if this flag is set; this capability means that the device is relatively fast. | |
Device can support at least a DirectX 5-compliant driver. | |
Device can support at least a DirectX 7-compliant driver. | |
Device exports an | |
Device can use execute buffers from system memory. | |
Device can use execute buffers from video memory. | |
Device has hardware acceleration for scene rasterization. | |
Device can support transformation and lighting in hardware. | |
Device supports N patches. | |
Device can support rasterization, transform, lighting, and shading in hardware. | |
Device supports quintic Bézier curves and B-splines. |
Device supports rectangular and triangular patches. | |
When this device capability is set, the hardware architecture does not require caching of any information, and uncached patches (handle zero) will be drawn as efficiently as cached ones. Note that setting | |
Device is texturing from separate memory pools. | |
Device can retrieve textures from non-local video memory. | |
Device can retrieve textures from system memory. | |
Device can retrieve textures from device memory. | |
Device can use buffers from system memory for transformed and lit vertices. | |
Device can use buffers from video memory for transformed and lit vertices. |
?
Miscellaneous driver primitive capabilities. See
Information on raster-drawing capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports anisotropic filtering. | |
Device iterates colors perspective correctly. | |
Device can dither to improve color resolution. | |
Device supports legacy depth bias. For true depth bias, see | |
Device supports range-based fog. In range-based fog, the distance of an object from the viewer is used to compute fog effects, not the depth of the object (that is, the z-coordinate) in the scene. | |
Device calculates the fog value by referring to a lookup table containing fog values that are indexed to the depth of a given pixel. | |
Device calculates the fog value during the lighting operation and interpolates the fog value during rasterization. | |
Device supports level-of-detail bias adjustments. These bias adjustments enable an application to make a mipmap appear crisper or less sharp than it normally would. For more information about level-of-detail bias in mipmaps, see | |
Device supports toggling multisampling on and off between | |
Device supports scissor test. See Scissor Test (Direct3D 9). | |
Device performs true slope-scale based depth bias. This is in contrast to the legacy style depth bias. | |
Device supports depth buffering using w. | |
Device supports w-based fog. W-based fog is used when a perspective projection matrix is specified, but affine projections still use z-based fog. The system considers a projection matrix that contains a nonzero value in the [3][4] element to be a perspective projection matrix. | |
Device can perform hidden-surface removal (HSR) without requiring the application to sort polygons and without requiring the allocation of a depth-buffer. This leaves more video memory for textures. The method used to perform HSR is hardware-dependent and is transparent to the application. Z-bufferless HSR is performed if no depth-buffer surface is associated with the rendering-target surface and the depth-buffer comparison test is enabled (that is, when the state value associated with the | |
Device supports z-based fog. | |
Device can perform z-test operations. This effectively renders a primitive and indicates whether any z pixels have been rendered. |
?
Z-buffer comparison capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Always pass the z-test. | |
Pass the z-test if the new z equals the current z. | |
Pass the z-test if the new z is greater than the current z. | |
Pass the z-test if the new z is greater than or equal to the current z. | |
Pass the z-test if the new z is less than the current z. | |
Pass the z-test if the new z is less than or equal to the current z. | |
Always fail the z-test. | |
Pass the z-test if the new z does not equal the current z. |
?
Source-blending capabilities. This member can be one or more of the following flags. (The RGBA values of the source and destination are indicated by the subscripts s and d.)
Value | Meaning |
---|---|
The driver supports both | |
Source blend factor is (1 - As, 1 - As, 1 - As, 1 - As) and destination blend factor is (As, As, As, As); the destination blend selection is overridden. | |
The driver supports the | |
Blend factor is (Ad, Ad, Ad, Ad). | |
Blend factor is (Rd, Gd, Bd, Ad). | |
Blend factor is (1 - Ad, 1 - Ad, 1 - Ad, 1 - Ad). | |
Blend factor is (1 - Rd, 1 - Gd, 1 - Bd, 1 - Ad). | |
Blend factor is (1 - As, 1 - As, 1 - As, 1 - As). | |
Blend factor is (1 - Rs, 1 - Gs, 1 - Bs, 1 - As). | |
Blend factor is (1 - PSOutColor[1]r, 1 - PSOutColor[1]g, 1 - PSOutColor[1]b, not used). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? |
Blend factor is (1, 1, 1, 1). | |
Blend factor is (As, As, As, As). | |
Blend factor is (f, f, f, 1); f = min(As, 1 - Ad). | |
Blend factor is (Rs, Gs, Bs, As). | |
Blend factor is (PSOutColor[1]r, PSOutColor[1]g, PSOutColor[1]b, not used). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (0, 0, 0, 0). |
?
Destination-blending capabilities. This member can be the same capabilities that are defined for the SrcBlendCaps member.
Alpha-test comparison capabilities. This member can include the same capability flags defined for the ZCmpCaps member. If this member contains only the
Shading operations capabilities. It is assumed, in general, that if a device supports a given command at all, it supports the
The color, specular highlights, fog, and alpha interpolants of a triangle each have capability flags that an application can use to find out how they are implemented by the device driver.
This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device can support an alpha component for Gouraud-blended transparency (the | |
Device can support colored Gouraud shading. In this mode, the per-vertex color components (red, green, and blue) are interpolated across a triangle face. | |
Device can support fog in the Gouraud shading mode. | |
Device supports Gouraud shading of specular highlights. |
?
Miscellaneous texture-mapping capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Alpha in texture pixels is supported. | |
Device can draw alpha from texture palettes. | |
Supports cube textures. | |
Device requires that cube texture maps have dimensions specified as powers of two. | |
Device supports mipmapped cube textures. | |
Device supports mipmapped textures. | |
Device supports mipmapped volume textures. | |
If this flag is not set, and A texture that is not a power of two cannot be set at a stage that will be read based on a shader computation (such as the bem - ps and texm3x3 - ps instructions in pixel shaders versions 1_0 to 1_3). For example, these textures can be used to store bumps that will be fed into texture reads, but not the environment maps that are used in texbem - ps, texbeml - ps, and texm3x3spec - ps. This means that a texture with dimensions that are not powers of two cannot be addressed or sampled using texture coordinates computed within the shader. This type of operation is known as a dependent read and cannot be performed on these types of textures. | |
Device does not support a projected bump-environment lookup operation in programmable and fixed function shaders. |
Perspective correction texturing is supported. | |
If this flag is not set, and | |
Supports the | |
All textures must be square. | |
Texture indices are not scaled by the texture size prior to interpolation. | |
Device supports volume textures. | |
Device requires that volume texture maps have dimensions specified as powers of two. |
?
Defines the supported blend operations. See Remarks for definitions of terms.
+Source, Destination, and Result are defined as:
Term | Type | Description |
---|---|---|
Source | Input | Color of the source pixel before the operation. |
Destination | Input | Color of the pixel in the destination buffer before the operation. |
Result | Output | Returned value that is the blended color resulting from the operation. |
?
This enumerated type defines values used by the following render states:
The result is the destination added to the source. Result = Source + Destination
The result is the destination subtracted from the source. Result = Source - Destination
The result is the source subtracted from the destination. Result = Destination - Source
The result is the minimum of the source and destination. Result = MIN(Source, Destination)
The result is the maximum of the source and destination. Result = MAX(Source, Destination)
Flags used to obtain callback information.
+Exclude callbacks located at the initial position from the search.
Reverse the callback search direction.
Obsolete in DirectX 8.0 and later versions; see Remarks.
The D3DLIGHTINGCAPS structure describes the lighting capabilities of a device.
+This structure has been replaced by D3DCAPS8 (see the DirectX 8.0 SDK documentation) for DirectX 8.0 and later runtimes, but is required for DirectX 7.0 and earlier runtime compatibility. See Reporting DirectX 8.0 Style Direct3D Capabilities for details.
This structure is a member of the D3DDEVICEDESC_V1 structure.
+Specifies the size, in bytes, of the D3DLIGHTINGCAPS structure.
Specifies flags describing the capabilities of the lighting module. The following flags are defined:
Value | Meaning |
---|---|
D3DLIGHTCAPS_DIRECTIONAL | Directional lights are supported. + |
D3DLIGHTCAPS_GLSPOT | OpenGL-style spotlights are supported. |
D3DLIGHTCAPS_PARALLELPOINT | Parallel-point lights are supported. |
D3DLIGHTCAPS_POINT | Point lights are supported. |
D3DLIGHTCAPS_SPOT | Spotlights are supported. + |
?
Driver capability flags.
+Driver capability flags.
+The following flags are used to specify which channels in a texture to operate on.
+Defines operations to perform on vertices in preparation for mesh cleaning.
+Merge triangles that share the same vertex indices but have face normals pointing in opposite directions (back-facing triangles). Unless the triangles are split by adding a replicated vertex, mesh adjacency data from the two triangles may conflict.
If a vertex is the apex of two triangle fans (a bowtie) and mesh operations will affect one of the fans, then split the shared vertex into two new vertices. Bowties can cause problems for operations such as mesh simplification that remove vertices, because removing one vertex affects two distinct sets of triangles.
Use this flag to prevent infinite loops during skinning setup mesh operations.
Use this flag to prevent infinite loops during mesh optimization operations.
Use this flag to prevent infinite loops during mesh simplification operations.
These flags identify a surface to reset when calling Clear.
+Describes the current clip status.
+When clipping is enabled during vertex processing (by ProcessVertices, DrawPrimitive, or other drawing functions), Direct3D computes a clip code for every vertex. The clip code is a combination of D3DCS_* bits. When a vertex is outside a particular clipping plane, the corresponding bit is set in the clipping code. Direct3D maintains the clip status using
Clip status is not updated by DrawRectPatch and DrawTriPatch because there is no software emulation for them.
+Clip union flags that describe the current clip status. This member can be one or more of the following flags:
Value | Meaning |
---|---|
Combination of all clip flags. | |
All vertices are clipped by the left plane of the viewing frustum. | |
All vertices are clipped by the right plane of the viewing frustum. | |
All vertices are clipped by the top plane of the viewing frustum. | |
All vertices are clipped by the bottom plane of the viewing frustum. | |
All vertices are clipped by the front plane of the viewing frustum. | |
All vertices are clipped by the back plane of the viewing frustum. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. |
?
Clip intersection flags that describe the current clip status. This member can take the same flags as ClipUnion.
Render states define set-up states for all kinds of vertex and pixel processing. Some render states set up vertex processing, and some set up pixel processing (see Render States (Direct3D 9)). Render states can be saved and restored using stateblocks (see State Blocks Save and Restore State (Direct3D 9)).
+Render States | |
---|---|
ps_1_1 to ps_1_3 | 4 texture samplers |
?
Direct3D defines the D3DRENDERSTATE_WRAPBIAS constant as a convenience for applications to enable or disable texture wrapping, based on the zero-based integer of a texture coordinate set (rather than explicitly using one of the D3DRS_WRAP n state values). Add the D3DRENDERSTATE_WRAPBIAS value to the zero-based index of a texture coordinate set to calculate the D3DRS_WRAP n value that corresponds to that index, as shown in the following example.
// Enable U/V wrapping for textures that use the texture
// coordinate set at the index within the dwIndex variable.
hr = pd3dDevice->SetRenderState(
    dwIndex + D3DRENDERSTATE_WRAPBIAS,
    D3DWRAP_U | D3DWRAP_V); // If dwIndex is 3, the value that results from
                            // the addition equals D3DRS_WRAP3 (131).
Defines the supported compare functions.
+The values in this enumerated type define the supported compare functions for the
Always fail the test.
Accept the new pixel if its value is less than the value of the current pixel.
Accept the new pixel if its value equals the value of the current pixel.
Accept the new pixel if its value is less than or equal to the value of the current pixel.
Accept the new pixel if its value is greater than the value of the current pixel.
Accept the new pixel if its value does not equal the value of the current pixel.
Accept the new pixel if its value is greater than or equal to the value of the current pixel.
Always pass the test.
C++ applications can use alpha testing to control when pixels are written to the render-target surface. By using the
The most common use for alpha testing is to improve performance when rasterizing objects that are nearly transparent. If the color data being rasterized is more opaque than the color at a given pixel (
// This code example assumes that pCaps is a D3DCAPS9
// structure that was filled with a
// previous call to IDirect3DDevice9::GetDeviceCaps.
if (pCaps.AlphaCmpCaps & D3DPCMPCAPS_GREATEREQUAL)
{
    dev->SetRenderState(D3DRS_ALPHAREF, (DWORD)0x00000001);
    dev->SetRenderState(D3DRS_ALPHATESTENABLE, TRUE);
    dev->SetRenderState(D3DRS_ALPHAFUNC, D3DCMP_GREATEREQUAL);
}
// If the comparison is not supported, render anyway.
// The only drawback is no performance gain.
Not all hardware supports all alpha-testing features. You can check the device capabilities by calling the
?
?
+Specifies how to combine the glyph data from the source and destination surfaces in a call to ComposeRects.
+Defines the compression mode used for storing compressed animation set data.
+Implements fast compression.
A combination of one or more flags that control the device create behavior.
+Defines the faces of a cubemap.
+Positive x-face of the cubemap.
Negative x-face of the cubemap.
Positive y-face of the cubemap.
Negative y-face of the cubemap.
Positive z-face of the cubemap.
Negative z-face of the cubemap.
Defines the supported culling modes.
+The values in this enumerated type are used by the
Do not cull back faces.
Cull back faces with clockwise vertices.
Cull back faces with counterclockwise vertices.
Driver cursor capability flags.
+Defines the vertex declaration method which is a predefined operation performed by the tessellator (or any procedural geometry routine on the vertex data during tessellation).
+The tessellator looks at the method to determine what data to calculate from the vertex data during tessellation. Mesh data should use the default value. Patches can use any of the other implemented types.
Vertex data is declared with an array of
In addition to using
Default value. The tessellator copies the vertex data (spline data for patches) as is, with no additional calculations. When the tessellator is used, this element is interpolated. Otherwise vertex data is copied into the input register. The input and output type can be any value, but are always the same type.
Computes the tangent at a point on the rectangle or triangle patch in the U direction. The input type can be one of the following: +
The output type is always
Computes the tangent at a point on the rectangle or triangle patch in the V direction. The input type can be one of the following: +
The output type is always
Computes the normal at a point on the rectangle or triangle patch by taking the cross product of two tangents. The input type can be one of the following: +
The output type is always
Copy out the U, V values at a point on the rectangle or triangle patch. This results in a 2D float. The input type must be set to
Look up a displacement map. The input type can be one of the following: +
Only the .x and .y components are used for the texture map lookup. The output type is always
Look up a presampled displacement map. The input type must be set to
Defines a vertex declaration data type.
+Vertex data is declared with an array of
Use the DirectX Caps Viewer Tool tool to see which types are supported on your device.
+One-component float expanded to (float, 0, 0, 1).
Two-component float expanded to (float, float, 0, 1).
Three-component float expanded to (float, float, float, 1).
Four-component float expanded to (float, float, float, float).
Four-component, packed, unsigned bytes mapped to 0 to 1 range. Input is a
Four-component, unsigned byte.
Two-component, signed short expanded to (value, value, 0, 1).
Four-component, signed short expanded to (value, value, value, value).
Four-component byte with each byte normalized by dividing with 255.0f.
Normalized, two-component, signed short, expanded to (first short/32767.0, second short/32767.0, 0, 1).
Normalized, four-component, signed short, expanded to (first short/32767.0, second short/32767.0, third short/32767.0, fourth short/32767.0).
Normalized, two-component, unsigned short, expanded to (first short/65535.0, second short/65535.0, 0, 1).
Normalized, four-component, unsigned short, expanded to (first short/65535.0, second short/65535.0, third short/65535.0, fourth short/65535.0).
Three-component, unsigned, 10 10 10 format expanded to (value, value, value, 1).
Three-component, signed, 10 10 10 format normalized and expanded to (v[0]/511.0, v[1]/511.0, v[2]/511.0, 1).
Two-component, 16-bit, floating point expanded to (value, value, 0, 1).
Four-component, 16-bit, floating point expanded to (value, value, value, value).
Type field in the declaration is unused. This is designed for use with
Constants describing the vertex data types supported by a device.
+Identifies the intended use of vertex data.
+Vertex data is declared with an array of
For more information about vertex declarations, see Vertex Declaration (Direct3D 9).
+Position data ranging from (-1,-1) to (1,1). Use
Blending weight data. Use
Blending indices data. Use
Vertex normal data. Use
Point size data. Use
Texture coordinate data. Use
Vertex tangent data.
Vertex binormal data.
Single positive floating point value. Use
Vertex data contains transformed position data ranging from (0,0) to (viewport width, viewport height). Use
Vertex data contains diffuse or specular color. Use
Vertex data contains fog data. Use
Vertex data contains depth data.
Vertex data contains sampler data. Use
Defines the degree of the variables in the equation that describes a curve.
+The values in this enumeration are used to describe the curves used by rectangle and triangle patches. For more information, see
Curve is described by variables of first order.
Curve is described by variables of second order.
Curve is described by variables of third order.
Curve is described by variables of fourth order.
Represents the capabilities of the hardware exposed through the Direct3D object.
+The MaxTextureBlendStages and MaxSimultaneousTextures members might seem similar, but they contain different information. The MaxTextureBlendStages member contains the total number of texture-blending stages supported by the current device, and the MaxSimultaneousTextures member describes how many of those stages can have textures bound to them by using the SetTexture method.
When the driver fills this structure, it can set values for execute-buffer capabilities, even when the interface being used to retrieve the capabilities (such as
In general, performance problems may occur if you use a texture and then modify it during a scene. Ensure that no texture used in the current BeginScene and EndScene block is evicted unless absolutely necessary. In the case of extremely high texture usage within a scene, the results are undefined. This occurs when you modify a texture that you have used in the scene and there is no spare texture memory available. For such systems, the contents of the z-buffer become invalid at EndScene. Applications should not call UpdateSurface to or from the back buffer on this type of hardware inside a BeginScene/EndScene pair. In addition, applications should not try to access the z-buffer if the
The following flags concerning mipmapped textures are not supported in Direct3D 9.
Member of the
Adapter on which this Direct3D device was created. This ordinal is valid only to pass to methods of the
The following driver-specific capability.
Value | Meaning |
---|---|
Display hardware is capable of returning the current scan line. | |
The display driver supports an overlay DDI that allows for verification of overlay capabilities. For more information about the overlay DDI, see Overlay DDI. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? |
?
Driver-specific capabilities identified in
Driver-specific capabilities identified in
Bit mask of values representing what presentation swap intervals are available.
Value | Meaning |
---|---|
The driver supports an immediate presentation swap interval. | |
The driver supports a presentation swap interval of every screen refresh. | |
The driver supports a presentation swap interval of every second screen refresh. | |
The driver supports a presentation swap interval of every third screen refresh. | |
The driver supports a presentation swap interval of every fourth screen refresh. |
?
Bit mask indicating what hardware support is available for cursors. Direct3D 9 does not define alpha-blending cursor capabilities.
Value | Meaning |
---|---|
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports at least a hardware color cursor in high-resolution modes (with scan lines greater than or equal to 400). | |
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports a hardware color cursor in both high-resolution and low-resolution modes (with scan lines less than 400). |
?
Flags identifying the capabilities of the device.
Value | Meaning |
---|---|
Device supports blits from system-memory textures to nonlocal video-memory textures. | |
Device can queue rendering commands after a page flip. Applications do not change their behavior if this flag is set; this capability means that the device is relatively fast. | |
Device can support at least a DirectX 5-compliant driver. | |
Device can support at least a DirectX 7-compliant driver. | |
Device exports an | |
Device can use execute buffers from system memory. | |
Device can use execute buffers from video memory. | |
Device has hardware acceleration for scene rasterization. | |
Device can support transformation and lighting in hardware. | |
Device supports N patches. | |
Device can support rasterization, transform, lighting, and shading in hardware. | |
Device supports quintic Bézier curves and B-splines. | |
Device supports rectangular and triangular patches. | |
When this device capability is set, the hardware architecture does not require caching of any information, and uncached patches (handle zero) will be drawn as efficiently as cached ones. Note that setting | |
Device is texturing from separate memory pools. | |
Device can retrieve textures from non-local video memory. | |
Device can retrieve textures from system memory. | |
Device can retrieve textures from device memory. | |
Device can use buffers from system memory for transformed and lit vertices. | |
Device can use buffers from video memory for transformed and lit vertices. |
?
Miscellaneous driver primitive capabilities. See
Information on raster-drawing capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports anisotropic filtering. | |
Device iterates colors perspective correctly. | |
Device can dither to improve color resolution. | |
Device supports legacy depth bias. For true depth bias, see | |
Device supports range-based fog. In range-based fog, the distance of an object from the viewer is used to compute fog effects, not the depth of the object (that is, the z-coordinate) in the scene. | |
Device calculates the fog value by referring to a lookup table containing fog values that are indexed to the depth of a given pixel. | |
Device calculates the fog value during the lighting operation and interpolates the fog value during rasterization. | |
Device supports level-of-detail bias adjustments. These bias adjustments enable an application to make a mipmap appear crisper or less sharp than it normally would. For more information about level-of-detail bias in mipmaps, see | |
Device supports toggling multisampling on and off between | |
Device supports scissor test. See Scissor Test (Direct3D 9). | |
Device performs true slope-scale based depth bias. This is in contrast to the legacy style depth bias. | |
Device supports depth buffering using w. | |
Device supports w-based fog. W-based fog is used when a perspective projection matrix is specified, but affine projections still use z-based fog. The system considers a projection matrix that contains a nonzero value in the [3][4] element to be a perspective projection matrix. | |
Device can perform hidden-surface removal (HSR) without requiring the application to sort polygons and without requiring the allocation of a depth-buffer. This leaves more video memory for textures. The method used to perform HSR is hardware-dependent and is transparent to the application. Z-bufferless HSR is performed if no depth-buffer surface is associated with the rendering-target surface and the depth-buffer comparison test is enabled (that is, when the state value associated with the | |
Device supports z-based fog. | |
Device can perform z-test operations. This effectively renders a primitive and indicates whether any z pixels have been rendered. |
?
Z-buffer comparison capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Always pass the z-test. | |
Pass the z-test if the new z equals the current z. | |
Pass the z-test if the new z is greater than the current z. | |
Pass the z-test if the new z is greater than or equal to the current z. | |
Pass the z-test if the new z is less than the current z. | |
Pass the z-test if the new z is less than or equal to the current z. | |
Always fail the z-test. | |
Pass the z-test if the new z does not equal the current z. |
?
Source-blending capabilities. This member can be one or more of the following flags. (The RGBA values of the source and destination are indicated by the subscripts s and d.)
Value | Meaning |
---|---|
The driver supports both | |
Source blend factor is (1 - As, 1 - As, 1 - As, 1 - As) and destination blend factor is (As, As, As, As); the destination blend selection is overridden. | |
The driver supports the | |
Blend factor is (Ad, Ad, Ad, Ad). | |
Blend factor is (Rd, Gd, Bd, Ad). | |
Blend factor is (1 - Ad, 1 - Ad, 1 - Ad, 1 - Ad). | |
Blend factor is (1 - Rd, 1 - Gd, 1 - Bd, 1 - Ad). | |
Blend factor is (1 - As, 1 - As, 1 - As, 1 - As). | |
Blend factor is (1 - Rs, 1 - Gs, 1 - Bs, 1 - As). | |
Blend factor is (1 - PSOutColor[1]r, 1 - PSOutColor[1]g, 1 - PSOutColor[1]b, not used)). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (1, 1, 1, 1). | |
Blend factor is (As, As, As, As). | |
Blend factor is (f, f, f, 1); f = min(As, 1 - Ad). | |
Blend factor is (Rs, Gs, Bs, As). | |
Blend factor is (PSOutColor[1]r, PSOutColor[1]g, PSOutColor[1]b, not used). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (0, 0, 0, 0). |
?
Destination-blending capabilities. This member can be the same capabilities that are defined for the SrcBlendCaps member.
Alpha-test comparison capabilities. This member can include the same capability flags defined for the ZCmpCaps member. If this member contains only the
Shading operations capabilities. It is assumed, in general, that if a device supports a given command at all, it supports the
The color, specular highlights, fog, and alpha interpolants of a triangle each have capability flags that an application can use to find out how they are implemented by the device driver.
This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device can support an alpha component for Gouraud-blended transparency (the | |
Device can support colored Gouraud shading. In this mode, the per-vertex color components (red, green, and blue) are interpolated across a triangle face. | |
Device can support fog in the Gouraud shading mode. | |
Device supports Gouraud shading of specular highlights. |
?
Miscellaneous texture-mapping capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Alpha in texture pixels is supported. | |
Device can draw alpha from texture palettes. | |
Supports cube textures. | |
Device requires that cube texture maps have dimensions specified as powers of two. | |
Device supports mipmapped cube textures. | |
Device supports mipmapped textures. | |
Device supports mipmapped volume textures. | |
If this flag is not set, and A texture that is not a power of two cannot be set at a stage that will be read based on a shader computation (such as the bem - ps and texm3x3 - ps instructions in pixel shaders versions 1_0 to 1_3). For example, these textures can be used to store bumps that will be fed into texture reads, but not the environment maps that are used in texbem - ps, texbeml - ps, and texm3x3spec - ps. This means that a texture with dimensions that are not powers of two cannot be addressed or sampled using texture coordinates computed within the shader. This type of operation is known as a dependent read and cannot be performed on these types of textures. | |
Device does not support a projected bump-environment lookup operation in programmable and fixed function shaders. | |
Perspective correction texturing is supported. | |
If this flag is not set, and | |
Supports the | |
All textures must be square. | |
Texture indices are not scaled by the texture size prior to interpolation. | |
Device supports volume textures. | |
Device requires that volume texture maps have dimensions specified as powers of two. |
?
Texture-filtering capabilities for a texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-filtering capabilities for a cube texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-filtering capabilities for a volume texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-addressing capabilities for texture objects. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports setting coordinates outside the range [0.0, 1.0] to the border color, as specified by the | |
Device can clamp textures to addresses. | |
Device can separate the texture-addressing modes of the u and v coordinates of the texture. This ability corresponds to the | |
Device can mirror textures to addresses. | |
Device can take the absolute value of the texture coordinate (thus, mirroring around 0) and then clamp to the maximum value. | |
Device can wrap textures to addresses. |
?
Defines device types.
+All methods of the
A
If D3dref9.dll is installed, Direct3D will use the reference rasterizer to create a
Hardware rasterization. Shading is done with software, hardware, or mixed transform and lighting.
Direct3D features are implemented in software; however, the reference rasterizer does make use of special CPU instructions whenever it can.
The reference device is installed by the Windows SDK 8.0 or later and is intended as an aid in debugging for development only.
A pluggable software device that has been registered with
Initialize Direct3D on a computer that has neither hardware nor reference rasterization available, and enable resources for 3D content creation. See Remarks.
Specifies how the monitor being used to display a full-screen application is rotated.
+This enumeration is used in
Applications may choose to handle monitor rotation themselves by using the
Display is not rotated.
Display is rotated 90 degrees.
Display is rotated 180 degrees.
Display is rotated 270 degrees.
Effect data types. The data is contained in the pValue member of
Describes the type of events that can be keyed by the animation controller.
+Track speed.
Track weight.
Track position.
Enable flag.
Priority blend value.
Defines constants describing the fill mode.
+The values in this enumerated type are used by the
Fill points.
Fill wireframes.
Fill solids.
The following flags are used to specify which channels in a texture to operate on.
+Texture filtering constants.
+Defines constants that describe the fog mode.
+The values in this enumerated type are used by the
Fog can be considered a measure of visibility: the lower the fog value produced by a fog equation, the less visible an object is.
+No fog effect.
Fog effect intensifies exponentially, according to the following formula. +
Fog effect intensifies exponentially with the square of the distance, according to the following formula. +
Fog effect intensifies linearly between the start and end points, according to the following formula.
This is the only fog mode currently supported.
Defines the various types of surface formats.
Declared as typedef enum _D3DFORMAT. There are several types of formats:
All formats are listed from left to right, most-significant bit to least-significant bit. For example, D3DFORMAT_ARGB is ordered from the most-significant bit channel A (alpha), to the least-significant bit channel B (blue). When traversing surface data, the data is stored in memory from least-significant bit to most-significant bit, which means that the channel order in memory is from least-significant bit (blue) to most-significant bit (alpha).
The default value for formats that contain undefined channels (G16R16, A8, and so on) is 1. The only exception is the A8 format, which is initialized to 000 for the three color channels.
The order of the bits is from the most significant byte first, so
Pixel formats have been chosen to enable the expression of hardware-vendor-defined extension formats, as well as to include the well-established FOURCC method. The set of formats understood by the Direct3D runtime is defined by
Note that formats are supplied by independent hardware vendors (IHVs) and many FOURCC codes are not listed. The formats in this enumeration are unique in that they are sanctioned by the runtime, meaning that the reference rasterizer will operate on all these types. IHV-supplied formats will be supported by the individual IHVs on a card-by-card basis.
+Options for saving and creating effects.
The constants in the following table are defined in d3dx9effect.h.
+Describes the supported image file formats. See Remarks for descriptions of these formats.
+Functions that begin with D3DXLoadxxx support all of the formats listed. Functions that begin with D3DXSavexxx support all of the formats listed except the Truevision (.tga) and portable pixmap (.ppm) formats.
The following table lists the available input and output formats.
File Extension | Description |
---|---|
.bmp | Windows bitmap format. Contains a header that describes the resolution of the device on which the rectangle of pixels was created, the dimensions of the rectangle, the size of the array of bits, a logical palette, and an array of bits that defines the relationship between pixels in the bitmapped image and entries in the logical palette. |
.dds | DirectDraw Surface file format. Stores textures, volume textures, and cubic environment maps, with or without mipmap levels, and with or without pixel compression. See DDS. |
.dib | Windows DIB. Contains an array of bits combined with structures that specify width and height of the bitmapped image, color format of the device where the image was created, and resolution of the device used to create that image. |
.hdr | HDR format. Encodes each pixel as an RGBE 32-bit color, with 8 bits of mantissa for red, green, and blue, and a shared 8-bit exponent. Each channel is separately compressed with run-length encoding (RLE). |
.jpg | JPEG standard. Specifies variable compression of 24-bit RGB color and 8-bit gray-scale Tagged Image File Format (TIFF) image document files. |
.pfm | Portable float map format. A raw floating point image format, without any compression. The file header specifies image width, height, monochrome or color, and machine word order. Pixel data is stored as 32-bit floating point values, with 3 values per pixel for color, and one value per pixel for monochrome. |
.png | PNG format. A non-proprietary bitmap format using lossless compression. |
.ppm | Portable Pixmap format. A binary or ASCII file format for color images that includes image height and width and the maximum color component value. |
.tga | Targa or Truevision Graphics Adapter format. Supports depths of 8, 15, 16, 24, and 32 bits, including 8-bit gray scale, and contains optional color palette data, image (x, y) origin and size data, and pixel data. |
?
See Types of Bitmaps for more information on some of these formats.
+Windows bitmap (BMP) file format.
Joint Photographics Experts Group (JPEG) compressed file format.
Truevision (Targa, or TGA) image file format.
Portable Network Graphics (PNG) file format.
DirectDraw surface (DDS) file format.
Portable pixmap (PPM) file format.
Windows device-independent bitmap (DIB) file format.
High dynamic range (HDR) file format.
Portable float map file format.
Describes the location for the include file.
+Look in the local project for the include file.
Look in the system path for the include file.
This macro creates a value used by Issue to issue a query end.
This macro changes the query state to nonsignaled.
Defines the light type.
+Directional lights are slightly faster than point light sources, but point lights look a little better. Spotlights offer interesting visual effects but are computationally time-consuming.
+Light is a point source. The light has a position in space and radiates light in all directions.
Light is a spotlight source. This light is like a point light, except that the illumination is limited to a cone. This light type has a direction and several other parameters that determine the shape of the cone it produces. For information about these parameters, see the
Light is a directional light source. This is equivalent to using a point light source at an infinite distance.
A combination of zero or more locking options that describe the type of lock to perform.
+Defines the type of mesh data present in
Flags used to specify creation options for a mesh.
+A 32-bit mesh (
The mesh has 32-bit indices instead of 16-bit indices. See Remarks.
Use the
Use the
Use the
Specifying this flag causes the vertex and index buffer of the mesh to be created with
Use the
Use the
Use the
Use the
Use the
Use the
Use the
Use the
Use the
Use the
Forces the cloned meshes to share vertex buffers.
Use hardware processing only. For mixed-mode device, this flag will cause the system to use hardware (if supported in hardware) or will default to software processing.
Equivalent to specifying both
Equivalent to specifying both
Equivalent to specifying both
Equivalent to specifying both
Equivalent to specifying both
Specifies the type of mesh optimization to be performed.
+The
The D3DXMESHOPT_SHAREVB flag has been removed from this enumeration. Use
Reorders faces to remove unused vertices and faces.
Reorders faces to optimize for fewer attribute bundle state changes and enhanced
Reorders faces to increase the cache hit rate of vertex caches.
Reorders faces to maximize length of adjacent triangles.
Optimize the faces only; do not optimize the vertices.
While attribute sorting, do not split vertices that are shared between attribute groups.
Affects the vertex cache size. Using this flag specifies a default vertex cache size that works well on legacy hardware.
Specifies simplification options for a mesh.
+The mesh will be simplified by the number of vertices specified in the MinValue parameter.
The mesh will be simplified by the number of faces specified in the MinValue parameter.
Defines the levels of full-scene multisampling that the device can apply.
+In addition to enabling full-scene multisampling at
Multisampling is valid only on a swap chain that is being created or reset with the
The multisample antialiasing value can be set with the parameters (or sub-parameters) in the following methods.
Method | Parameters | Sub-parameters |
---|---|---|
| MultiSampleType and pQualityLevels | |
| pPresentationParameters | MultiSampleType and pQualityLevels |
| pPresentationParameters | MultiSampleType and pQualityLevels |
| MultiSampleType and pQualityLevels | |
| MultiSampleType and pQualityLevels | |
| pPresentationParameters | MultiSampleType and pQualityLevels |
?
It is not good practice to switch from one multisample type to another to raise the quality of the antialiasing.
Whether the display device supports maskable multisampling (more than one sample for a multiple-sample render-target format plus antialias support) or just non-maskable multisampling (only antialias support), the driver for the device provides the number of quality levels for the
The quality levels supported by the device can be obtained with the pQualityLevels parameter of
See
No level of full-scene multisampling is available.
Enables the multisample quality value. See Remarks.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Level of full-scene multisampling available.
Normal maps generation constants.
+The type of object.
+Constant is a scalar.
Constant is a vector.
Constant is a row major matrix.
Constant is a column major matrix.
Constant is either a texture, shader, or a string.
Constant is a structure.
These flags provide additional information about effect parameters.
Effect parameter constants are used by
Describes the data contained by the enumeration.
+Parameter is a void reference.
Parameter is a Boolean. Any non-zero value passed into
Parameter is an integer. Any floating-point values passed into
Parameter is a floating-point number.
Parameter is a string.
Parameter is a texture.
Parameter is a 1D texture.
Parameter is a 2D texture.
Parameter is a 3D texture.
Parameter is a cube texture.
Parameter is a sampler.
Parameter is a 1D sampler.
Parameter is a 2D sampler.
Parameter is a 3D sampler.
Parameter is a cube sampler.
Parameter is a pixel shader.
Parameter is a vertex shader.
Parameter is a pixel shader fragment.
Parameter is a vertex shader fragment.
Parameter is not supported.
Defines whether the current tessellation mode is discrete or continuous.
+Note that continuous tessellation produces a completely different tessellation pattern from the discrete one for the same tessellation values (this is more apparent in wireframe mode). Thus, 4.0 continuous is not the same as 4 discrete.
+Discrete edge style. In discrete mode, you can specify float tessellation but it will be truncated to integers.
Continuous edge style. In continuous mode, tessellation is specified as float values that can be smoothly varied to reduce "popping" artifacts.
Mesh patch types.
+Triangle patches have three sides and are described in
Rectangle patch mesh type.
Triangle patch mesh type.
N-patch mesh type.
Defines the type of animation set looping modes used for playback.
+The animation repeats endlessly.
The animation plays once, and then it stops on the last frame.
The animation alternates endlessly between playing forward and playing backward.
Defines the memory class that holds the buffers for a resource.
+All pool types are valid with all resources including: vertex buffers, index buffers, textures, and surfaces.
The following tables indicate restrictions on pool types for render targets, depth stencils, and dynamic and mipmap usages. An x indicates a compatible combination; lack of an x indicates incompatibility.
Pool | ||
---|---|---|
x | x | |
?
Pool | ||
---|---|---|
x | x | |
x | ||
x |
?
For more information about usage types, see
Pools cannot be mixed for different objects contained within one resource (mip levels in a mipmap) and, when a pool is chosen, it cannot be changed.
Applications should use
For dynamic textures, it is sometimes desirable to use a pair of video memory and system memory textures, allocating the video memory using
Resources are placed in the memory pool most appropriate for the set of usages requested for the given resource. This is usually video memory, including both local video memory and AGP memory. The
When creating resources with
Resources are copied automatically to device-accessible memory as needed. Managed resources are backed by system memory and do not need to be recreated when a device is lost. See Managing Resources (Direct3D 9) for more information. Managed resources can be locked. Only the system-memory copy is directly modified. Direct3D copies your changes to driver-accessible memory as needed.
Differences between Direct3D 9 and Direct3D 9Ex: |
?
Resources are placed in memory that is not typically accessible by the Direct3D device. This memory allocation consumes system RAM but does not reduce pageable RAM. These resources do not need to be recreated when a device is lost. Resources in this pool can be locked and can be used as the source for a
Resources are placed in system RAM and do not need to be recreated when a device is lost. These resources are not bound by device size or format restrictions. Because of this, these resources cannot be accessed by the Direct3D device nor set as textures or render targets. However, these resources can always be created, locked, and copied.
Describes the relationship between the adapter refresh rate and the rate at which Present or Present operations are completed. These values also serve as flag values for the PresentationIntervals field of
Windowed mode supports
Full-screen mode supports similar usage as windowed mode by supporting
Constants used by
Describes the relationship between the adapter refresh rate and the rate at which Present or Present operations are completed. These values also serve as flag values for the PresentationIntervals field of
Windowed mode supports
Full-screen mode supports similar usage as windowed mode by supporting
Miscellaneous driver primitive capability flags.
+Defines the primitives supported by Direct3D.
+Using Triangle Strips or Triangle Fans (Direct3D 9) is often more efficient than using triangle lists because fewer vertices are duplicated.
+Renders the vertices as a collection of isolated points. This value is unsupported for indexed primitives.
Renders the vertices as a list of isolated straight line segments.
Renders the vertices as a single polyline.
Renders the specified vertices as a sequence of isolated triangles. Each group of three vertices defines a separate triangle.
Back-face culling is affected by the current winding-order render state.
Renders the vertices as a triangle strip. The backface-culling flag is automatically flipped on even-numbered triangles.
Renders the vertices as a triangle fan.
Identifies the query type. For information about queries, see Queries (Direct3D 9)
+A programmable pixel shader is made up of a set of instructions that operate on pixel data. Registers transfer data in and out of the ALU. Additional control can be applied to modify the instruction, the results, or what data gets written out.
Data type of the register.
+Boolean value.
4D integer number.
4D floating-point number.
The register contains 4D sampler data.
The
#define D3DRENDERSTATE_EVICTMANAGEDTEXTURES 61 + #define D3DRENDERSTATE_SCENECAPTURE 62 + #define D3DRS_DELETERTPATCH 169 + #define D3DRS_MAXVERTEXSHADERINST 196 + #define D3DRS_MAXPIXELSHADERINST 197
Enumerators
Determines whether textures are evicted from memory.
+ The driver uses a
This render state determines whether the driver evicts textures that it manages (as opposed to textures managed by the Direct3D runtime) from video memory. If the render state value is TRUE, the driver evicts the textures. Otherwise, the driver does not evict those textures.
Specifies either begin scene information or end scene information for geometric data captured within a frame.
+ The driver uses a
The driver responds to D3DRENDERSTATE_SCENECAPTURE first with TRUE for begin scene information and next with
Care must be taken in updating a driver that implements the legacy D3DHALCallbacks->D3dSceneCapture callback routine to one using the D3DRENDERSTATE_SCENECAPTURE render state. The D3dSceneCapture callback routine uses the constants D3DHAL_SCENE_CAPTURE_START and D3DHAL_SCENE_CAPTURE_END to indicate, respectively, the beginning and end of a scene. The values of these constants are, respectively, 0 and 1. If you use these constants in place of TRUE and
DirectX 8.0 and later versions only.
Deletes either a rectangular or triangular patch from memory. + The driver uses a DWORD data type without a default value to detect the patch to delete.
This render state notifies the driver that a patch is to be deleted. The value of this render state is the handle to the patch affected. All cached information should be freed and the handle should be removed from the driver's patch handle table. This render state is not visible to applications but is generated internally when an application calls the DeletePatch function. This render state is sent to the driver only when patches are deleted by DeletePatch explicitly. All other patches should be cleaned up when the device is destroyed.
DirectX 9.0 and later versions only.
Determines the maximum number of instructions that the vertex shader assembler can execute.
The driver uses a DWORD data type with a default value of D3DINFINITEINSTRUCTIONS (0xffffffff) to report the maximum number of vertex-shader instructions. + This maximum number depends on the version of the vertex shader that the display device supports as shown in the following table.
Version | Maximum number |
---|---|
earlier than 2_0 | 0 |
2_0 and later | From 2^16 (0x0000ffff) to D3DINFINITEINSTRUCTIONS |
?
D3DINFINITEINSTRUCTIONS represents a virtually unlimited amount.
Valid values for this render state are numbers that are powers of 2; if the driver sets any other integer, the runtime uses the next nearest power of 2 number.
The runtime sets the MaxVShaderInstructionsExecuted member of the
DirectX 9.0 and later versions only.
Determines the maximum number of instructions that the pixel shader assembler can execute.
The driver uses a DWORD data type with a default value of D3DINFINITEINSTRUCTIONS (0xffffffff) to report the maximum number of pixel-shader instructions. + This maximum number depends on the version of the pixel shader that the display device supports as shown in the following table.
Version | Maximum number |
---|---|
earlier than 2_0 | 0 |
2_0 | From 96 to D3DINFINITEINSTRUCTIONS |
3_0 and later | From 2^16 (0x0000ffff) to D3DINFINITEINSTRUCTIONS |
?
D3DINFINITEINSTRUCTIONS represents a virtually unlimited amount.
Valid values for this render state are numbers that are powers of 2; if the driver sets any other integer, the runtime uses the next nearest power of 2 number.
The runtime sets the MaxPShaderInstructionsExecuted member of the
The driver uses these render states when it performs graphics rendering. Only render states that are specific to drivers are included in the Windows Driver Kit (WDK) documentation. The render states accessible to DirectX applications are included in the DirectX SDK documentation. These application-level render states include such characteristics as whether alpha blending is enabled, whether dithering is enabled, whether Direct3D lighting is used, and the type of shading to be used.
To update a particular render state, Direct3D stores information about the render state, and then calls the driver's D3dDrawPrimitives2 callback routine. The information provided to the driver enables it to:
Determine that it should update one or more render states.
Identify which render states to update, and what the new render state values should be.
Note that for certain render states to be honored, the driver must have previously set capability flags in the relevant member of the D3DPRIMCAPS structure.
In order to indicate a specific render state update, Direct3D inserts a D3DHAL_DP2COMMAND structure into the command buffer, setting the bCommand member of this structure to D3DDP2OP_RENDERSTATE (see the description for D3DDP2OP_RENDERSTATE in D3DHAL_DP2OPERATION), and setting the wStateCount member of the same structure to the number of render states to be updated.
Immediately following the D3DHAL_DP2COMMAND structure, Direct3D inserts one D3DHAL_DP2RENDERSTATE structure into the command buffer for each render state to be updated. The RenderState member of this structure identifies the render state to be changed; the new value of this render state is specified in either the dwState member (for DWORD values) or the fState member (for D3DVALUE values).
The following figure shows a portion of the command buffer containing a D3DDP2OP_RENDERSTATE command and two D3DHAL_DP2RENDERSTATE structures. The first of the three structures indicates that two render states are to be updated. The second structure indicates that the D3DRENDERSTATE_FILLMODE render state is to be changed to
Additional Notes
See the
Some changes have been made to the
Multitexture macro ops and D3DRENDERSTATE_TEXTUREFACTOR override all of the per-texture stage blending controls (COLOR{OP,ARG1,ARG2} & ALPHA{OP,ARG1,ARG2}).
+ Determines whether textures are evicted from memory.
+ The driver uses a
This render state determines whether the driver evicts textures that it manages (as opposed to textures managed by the Direct3D runtime) from video memory. If the render state value is TRUE, the driver evicts the textures. Otherwise, the driver does not evict those textures.
Specifies either begin scene information or end scene information for geometric data captured within a frame.
+ The driver uses a
The driver responds to D3DRENDERSTATE_SCENECAPTURE first with TRUE for begin scene information and next with
Care must be taken in updating a driver that implements the legacy D3DHALCallbacks->D3dSceneCapture callback routine to one using the D3DRENDERSTATE_SCENECAPTURE render state. The D3dSceneCapture callback routine uses the constants D3DHAL_SCENE_CAPTURE_START and D3DHAL_SCENE_CAPTURE_END to indicate, respectively, the beginning and end of a scene. The values of these constants are, respectively, 0 and 1. If you use these constants in place of TRUE and
DirectX 8.0 and later versions only.
Deletes either a rectangular or triangular patch from memory. + The driver uses a DWORD data type without a default value to detect the patch to delete.
This render state notifies the driver that a patch is to be deleted. The value of this render state is the handle to the patch affected. All cached information should be freed and the handle should be removed from the driver's patch handle table. This render state is not visible to applications but is generated internally when an application calls the DeletePatch function. This render state is sent to the driver only when patches are deleted by DeletePatch explicitly. All other patches should be cleaned up when the device is destroyed.
DirectX 9.0 and later versions only.
Determines the maximum number of instructions that the vertex shader assembler can execute.
The driver uses a DWORD data type with a default value of D3DINFINITEINSTRUCTIONS (0xffffffff) to report the maximum number of vertex-shader instructions. + This maximum number depends on the version of the vertex shader that the display device supports as shown in the following table.
Version | Maximum number |
---|---|
earlier than 2_0 | 0 |
2_0 and later | From 2^16 (0x0000ffff) to D3DINFINITEINSTRUCTIONS |
?
D3DINFINITEINSTRUCTIONS represents a virtually unlimited amount.
Valid values for this render state are numbers that are powers of 2; if the driver sets any other integer, the runtime uses the next nearest power of 2 number.
The runtime sets the MaxVShaderInstructionsExecuted member of the
DirectX 9.0 and later versions only.
Determines the maximum number of instructions that the pixel shader assembler can execute.
The driver uses a DWORD data type with a default value of D3DINFINITEINSTRUCTIONS (0xffffffff) to report the maximum number of pixel-shader instructions. + This maximum number depends on the version of the pixel shader that the display device supports as shown in the following table.
Version | Maximum number |
---|---|
earlier than 2_0 | 0 |
2_0 | From 96 to D3DINFINITEINSTRUCTIONS |
3_0 and later | From 2^16 (0x0000ffff) to D3DINFINITEINSTRUCTIONS |
?
D3DINFINITEINSTRUCTIONS represents a virtually unlimited amount.
Valid values for this render state are numbers that are powers of 2; if the driver sets any other integer, the runtime uses the next nearest power of 2 number.
The runtime sets the MaxPShaderInstructionsExecuted member of the
Sampler states define texture sampling operations such as texture addressing and texture filtering. Some sampler states set-up vertex processing, and some set-up pixel processing. Sampler states can be saved and restored using stateblocks (see State Blocks Save and Restore State (Direct3D 9)).
+Defines the sampler texture types for vertex shaders.
+Uninitialized value. The value of this element is D3DSP_TEXTURETYPE_SHIFT.
Declaring a 2D texture. The value of this element is D3DSP_TEXTURETYPE_SHIFT * 4.
Declaring a cube texture. The value of this element is D3DSP_TEXTURETYPE_SHIFT * 8.
Declaring a volume texture. The value of this element is D3DSP_TEXTURETYPE_SHIFT * 16.
Flags indicating the method the rasterizer uses to create an image on a surface.
+This enumeration is used as a member in
The image is created from the first scanline to the last without skipping any.
The image is created using the interlaced method in which odd-numbered lines are drawn on odd-numbered passes and even lines are drawn on even-numbered passes.
The image is created using the interlaced method in which odd-numbered lines are drawn on odd-numbered passes and even lines are drawn on even-numbered passes.
The following page provides a basic outline of key differences between Direct3D 9 and Direct3D 10. The outline below provides some insight to assist developers with Direct3D 9 experience to explore and relate to Direct3D 10.
Although the info in this topic compares Direct3D 9 with Direct3D 10, because Direct3D 11 builds on the improvements made in Direct3D 10 and 10.1, you also need this info to migrate from Direct3D 9 to Direct3D 11. For info about moving beyond Direct3D 10 to Direct3D 11, see Migrating to Direct3D 11.
Defines constants that describe the supported shading modes.
+The first vertex of a triangle for flat shading mode is defined in the following manner.
The members of this enumerated type define the values for the
Flat shading mode. The color and specular component of the first vertex in the triangle are used to determine the color and specular component of the face. These colors remain constant across the triangle; that is, they are not interpolated. The specular alpha is interpolated. See Remarks.
Gouraud shading mode. The color and specular components of the face are determined by a linear interpolation between all three of the triangle's vertices.
Not supported.
The
Parser flags
Parse time flags are only used by the effect system (before effect compilation) when you create an effect compiler. For example, you could create a compiler object with
The effect system will use parser flags when called by the following functions:
The effect system will use compiler flags when called by the following functions:
In addition, you can use compiler flags when creating an effect by calling
The effect system will use assembler flags when called by the following functions:
Applying compiler flags or assembler flags to the incorrect API will fail shader validation. Check the Direct3D error code return value from the function (with the DirectX Error Lookup Tool) to help track down this error.
+The following flags are used to specify sprite rendering options to the flags parameter in the Begin method:
+Predefined sets of pipeline state used by state blocks (see State Blocks Save and Restore State (Direct3D 9)).
+As the following diagram shows, vertex and pixel state are both subsets of device state.
There are only a few states that are considered both vertex and pixel state. These states are:
Driver stencil capability flags.
+Defines stencil-buffer operations.
+Stencil-buffer entries are integer values ranging from 0 through 2^n - 1, where n is the bit depth of the stencil buffer.
+Do not update the entry in the stencil buffer. This is the default value.
Set the stencil-buffer entry to 0.
Replace the stencil-buffer entry with a reference value.
Increment the stencil-buffer entry, clamping to the maximum value.
Decrement the stencil-buffer entry, clamping to zero.
Invert the bits in the stencil-buffer entry.
Increment the stencil-buffer entry, wrapping to zero if the new value exceeds the maximum value.
Decrement the stencil-buffer entry, wrapping to the maximum value if the new value is less than zero.
Given a scene that contains many objects that use the same geometry, you can draw many instances of that geometry at different orientations, sizes, colors, and so on with dramatically better performance by reducing the amount of data you need to supply to the renderer.
This can be accomplished through the use of two techniques: the first for drawing indexed geometry and the second for non-indexed geometry. Both techniques use two vertex buffers: one to supply geometry data and one to supply per-object instance data. The instance data can be a wide variety of information such as a transform, color data, or lighting data - basically anything that you can describe in a vertex declaration. Drawing many instances of geometry with these techniques can dramatically reduce the amount of data sent to the renderer.
Defines swap effects.
+The state of the back buffer after a call to Present is well-defined by each of these swap effects, and whether the Direct3D device was created with a full-screen swap chain or a windowed swap chain has no effect on this state. In particular, the
Applications that use
An invisible window cannot receive user-mode events; furthermore, an invisible-fullscreen window will interfere with the presentation of another applications' windowed-mode window. Therefore, each application needs to ensure that a device window is visible when a swapchain is presented in fullscreen mode.
+When a swap chain is created with a swap effect of
Like a swap chain that uses
An application that uses this swap effect cannot make any assumptions about the contents of a discarded back buffer and should therefore update an entire back buffer before invoking a Present operation that would display it. Although this is not enforced, the debug version of the runtime will overwrite the contents of discarded back buffers with random data to enable developers to verify that their applications are updating the entire back buffer surfaces correctly.
The swap chain might include multiple back buffers and is best envisaged as a circular queue that includes the front buffer. Within this queue, the back buffers are always numbered sequentially from 0 to (n - 1), where n is the number of back buffers, so that 0 denotes the least recently presented buffer. When Present is invoked, the queue is "rotated" so that the front buffer becomes back buffer (n - 1), while the back buffer 0 becomes the new front buffer.
This swap effect may be specified only for a swap chain comprising a single back buffer. Whether the swap chain is windowed or full-screen, the runtime will guarantee the semantics implied by a copy-based Present operation, namely that the operation leaves the content of the back buffer unchanged, instead of replacing it with the content of the front buffer as a flip-based Present operation would.
For a full-screen swap chain, the runtime uses a combination of flip operations and copy operations, supported if necessary by hidden back buffers, to accomplish the Present operation. Accordingly, the presentation is synchronized with the display adapter's vertical retrace and its rate is constrained by the chosen presentation interval. A swap chain specified with the
Use a dedicated area of video memory that can be overlayed on the primary surface. No copy is performed when the overlay is displayed. The overlay operation is performed in hardware, without modifying the data in the primary surface.
Differences between Direct3D 9 and Direct3D 9Ex: |
?
Designates when an application is adopting flip mode, during which time an application's frame is passed instead of copied to the Desktop Window Manager(DWM) for composition when the application is presenting in windowed mode. Flip mode allows an application to more efficiently use memory bandwidth as well as enabling an application to take advantage of full-screen-present statistics. Flip mode does not affect full-screen behavior. A sample application that uses
Note??If you create a swap chain with
Differences between Direct3D 9 and Direct3D 9Ex: |
?
Defines settings used for mesh tangent frame computations.
+Texture coordinate values in the u direction are between 0 and 1. In this case a texture coordinate set will be chosen that minimizes the perimeter of the triangle. See Texture Wrapping (Direct3D 9).
Texture coordinate values in the v direction are between 0 and 1. In this case a texture coordinate set will be chosen that minimizes the perimeter of the triangle. See Texture Wrapping (Direct3D 9).
Texture coordinate values in both u and v directions are between 0 and 1. In this case a texture coordinate set will be chosen that minimizes the perimeter of the triangle. See Texture Wrapping (Direct3D 9).
Do not normalize partial derivatives with respect to texture coordinates. If not normalized, the scale of the partial derivatives is proportional to the scale of the 3D model divided by the scale of the triangle in (u, v) space. This scale value provides a measure of how much the texture is stretched in a given direction. The resulting vector length is a weighted sum of the lengths of the partial derivatives.
Do not transform texture coordinates to orthogonal Cartesian coordinates. Mutually exclusive with
Compute the partial derivative with respect to texture coordinate v independently for each vertex, and then compute the partial derivative with respect to u as the cross product of the partial derivative with respect to v and the normal vector. Mutually exclusive with
Compute the partial derivative with respect to texture coordinate u independently for each vertex, and then compute the partial derivative with respect to v as the cross product of the normal vector and the partial derivative with respect to u. Mutually exclusive with
Weight the direction of the computed per-vertex normal or partial derivative vector according to the areas of triangles attached to that vertex. Mutually exclusive with
Compute a unit-length normal vector for each triangle of the input mesh. Mutually exclusive with
Vertices are ordered in a clockwise direction around each triangle. The computed normal vector direction is therefore inverted 180 degrees from the direction computed using counterclockwise vertex ordering.
Compute the per-vertex normal vector for each triangle of the input mesh, and ignore any normal vectors already in the input mesh.
The results are stored in the original input mesh, and the output mesh is not used.
Defines constants that describe the supported texture-addressing modes.
+Tile the texture at every integer junction. For example, for u values between 0 and 3, the texture is repeated three times; no mirroring is performed.
Similar to
Texture coordinates outside the range [0.0, 1.0] are set to the texture color at 0.0 or 1.0, respectively.
Texture coordinates outside the range [0.0, 1.0] are set to the border color.
Similar to
A programmable pixel shader is made up of a set of instructions that operate on pixel data. Registers transfer data in and out of the ALU. Additional control can be applied to modify the instruction, the results, or what data gets written out.
Texture argument constants are used as values for the following members of the
Set and retrieve texture arguments by calling the SetTextureStageState and GetTextureStageState methods.
Argument flags
You can combine an argument flag with a modifier, but two argument flags cannot be combined.
+A programmable pixel shader is made up of a set of instructions that operate on pixel data. Registers transfer data in and out of the ALU. Additional control can be applied to modify the instruction, the results, or what data gets written out.
Driver texture coordinate capability flags.
+Defines texture filtering modes for a texture stage.
+To check if a format supports texture filter types other than
Set a texture stage's magnification filter by calling
Set a texture stage's minification filter by calling
Set the texture filter to use between-mipmap levels by calling
Not all valid filtering modes for a device will apply to volume maps. In general,
When used with
When used with D3DSAMP_MAGFILTER or
When used with D3DSAMP_MAGFILTER or
When used with D3DSAMP_MAGFILTER or
A 4-sample tent filter used as a texture magnification or minification filter. Use with
A 4-sample Gaussian filter used as a texture magnification or minification filter. Use with
Convolution filter for monochrome textures. See
Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. |
?
Use with
Defines per-stage texture-blending operations.
+ The members of this type are used when setting color or alpha operations by using the
In the above formulas, SRGBA is the RGBA color produced by a texture operation, and Arg1, Arg2, and Arg3 represent the complete RGBA color of the texture arguments. Individual components of an argument are shown with subscripts. For example, the alpha component for argument 1 would be shown as Arg1A.
+Disables output from this texture stage and all stages with a higher index. To disable texture mapping, set this as the color operation for the first texture stage (stage 0). Alpha operations cannot be disabled when color operations are enabled. Setting the alpha operation to
Use this texture stage's first color or alpha argument, unmodified, as the output. This operation affects the color argument when used with the
Use this texture stage's second color or alpha argument, unmodified, as the output. This operation affects the color argument when used with the
Multiply the components of the arguments.
Multiply the components of the arguments, and shift the products to the left 1 bit (effectively multiplying them by 2) for brightening.
Multiply the components of the arguments, and shift the products to the left 2 bits (effectively multiplying them by 4) for brightening.
Add the components of the arguments.
Add the components of the arguments with a -0.5 bias, making the effective range of values from -0.5 through 0.5.
Add the components of the arguments with a -0.5 bias, and shift the products to the left 1 bit.
Subtract the components of the second argument from those of the first argument.
Add the first and second arguments; then subtract their product from the sum.
Linearly blend this texture stage, using the interpolated alpha from each vertex.
Linearly blend this texture stage, using the alpha from this stage's texture.
Linearly blend this texture stage, using a scalar alpha set with the
Linearly blend a texture stage that uses a premultiplied alpha.
Linearly blend this texture stage, using the alpha taken from the previous texture stage.
Modulate the color of the second argument, using the alpha of the first argument; then add the result to argument one. This operation is supported only for color operations (
Modulate the arguments; then add the alpha of the first argument. This operation is supported only for color operations (
Similar to
Similar to
Perform per-pixel bump mapping, using the environment map in the next texture stage, without luminance. This operation is supported only for color operations (
Perform per-pixel bump mapping, using the environment map in the next texture stage, with luminance. This operation is supported only for color operations (
Modulate the components of each argument as signed components, add their products; then replicate the sum to all color channels, including alpha. This operation is supported for color and alpha operations.
In DirectX 6 and DirectX 7 multitexture operations, the above inputs are all shifted down by half (y = x - 0.5) before use to simulate signed data, and the scalar result is automatically clamped to positive values and replicated to all three output channels. Also, note that as a color operation this does not update the alpha; it just updates the RGB components.
However, in DirectX 8.1 shaders you can specify that the output be routed to the .rgb or the .a components or both (the default). You can also specify a separate scalar operation on the alpha channel.
Performs a multiply-accumulate operation. It takes the last two arguments, multiplies them together, and adds them to the remaining input/source argument, and places that into the result.
SRGBA = Arg1 + Arg2 * Arg3
Linearly interpolates between the second and third source arguments by a proportion specified in the first source argument.
SRGBA = (Arg1) * Arg2 + (1 - Arg1) * Arg3.
Represents the capabilities of the hardware exposed through the Direct3D object.
+The MaxTextureBlendStages and MaxSimultaneousTextures members might seem similar, but they contain different information. The MaxTextureBlendStages member contains the total number of texture-blending stages supported by the current device, and the MaxSimultaneousTextures member describes how many of those stages can have textures bound to them by using the SetTexture method.
When the driver fills this structure, it can set values for execute-buffer capabilities, even when the interface being used to retrieve the capabilities (such as
In general, performance problems may occur if you use a texture and then modify it during a scene. Ensure that no texture used in the current BeginScene and EndScene block is evicted unless absolutely necessary. In the case of extremely high texture usage within a scene, the results are undefined. This occurs when you modify a texture that you have used in the scene and there is no spare texture memory available. For such systems, the contents of the z-buffer become invalid at EndScene. Applications should not call UpdateSurface to or from the back buffer on this type of hardware inside a BeginScene/EndScene pair. In addition, applications should not try to access the z-buffer if the
The following flags concerning mipmapped textures are not supported in Direct3D 9.
Member of the
Adapter on which this Direct3D device was created. This ordinal is valid only to pass to methods of the
The following driver-specific capability.
Value | Meaning |
---|---|
Display hardware is capable of returning the current scan line. | |
The display driver supports an overlay DDI that allows for verification of overlay capabilities. For more information about the overlay DDI, see Overlay DDI. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? |
?
Driver-specific capabilities identified in
Driver-specific capabilities identified in
Bit mask of values representing what presentation swap intervals are available.
Value | Meaning |
---|---|
The driver supports an immediate presentation swap interval. | |
The driver supports a presentation swap interval of every screen refresh. | |
The driver supports a presentation swap interval of every second screen refresh. | |
The driver supports a presentation swap interval of every third screen refresh. | |
The driver supports a presentation swap interval of every fourth screen refresh. |
?
Bit mask indicating what hardware support is available for cursors. Direct3D 9 does not define alpha-blending cursor capabilities.
Value | Meaning |
---|---|
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports at least a hardware color cursor in high-resolution modes (with scan lines greater than or equal to 400). | |
A full-color cursor is supported in hardware. Specifically, this flag indicates that the driver supports a hardware color cursor in both high-resolution and low-resolution modes (with scan lines less than 400). |
?
Flags identifying the capabilities of the device.
Value | Meaning |
---|---|
Device supports blits from system-memory textures to nonlocal video-memory textures. | |
Device can queue rendering commands after a page flip. Applications do not change their behavior if this flag is set; this capability means that the device is relatively fast. | |
Device can support at least a DirectX 5-compliant driver. | |
Device can support at least a DirectX 7-compliant driver. | |
Device exports an | |
Device can use execute buffers from system memory. | |
Device can use execute buffers from video memory. | |
Device has hardware acceleration for scene rasterization. | |
Device can support transformation and lighting in hardware. | |
Device supports N patches. | |
Device can support rasterization, transform, lighting, and shading in hardware. | |
Device supports quintic Bézier curves and B-splines. | |
Device supports rectangular and triangular patches. | |
When this device capability is set, the hardware architecture does not require caching of any information, and uncached patches (handle zero) will be drawn as efficiently as cached ones. Note that setting | |
Device is texturing from separate memory pools. | |
Device can retrieve textures from non-local video memory. | |
Device can retrieve textures from system memory. | |
Device can retrieve textures from device memory. | |
Device can use buffers from system memory for transformed and lit vertices. | |
Device can use buffers from video memory for transformed and lit vertices. |
?
Miscellaneous driver primitive capabilities. See
Information on raster-drawing capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports anisotropic filtering. | |
Device iterates colors perspective correctly. | |
Device can dither to improve color resolution. | |
Device supports legacy depth bias. For true depth bias, see | |
Device supports range-based fog. In range-based fog, the distance of an object from the viewer is used to compute fog effects, not the depth of the object (that is, the z-coordinate) in the scene. | |
Device calculates the fog value by referring to a lookup table containing fog values that are indexed to the depth of a given pixel. | |
Device calculates the fog value during the lighting operation and interpolates the fog value during rasterization. | |
Device supports level-of-detail bias adjustments. These bias adjustments enable an application to make a mipmap appear crisper or less sharp than it normally would. For more information about level-of-detail bias in mipmaps, see | |
Device supports toggling multisampling on and off between | |
Device supports scissor test. See Scissor Test (Direct3D 9). | |
Device performs true slope-scale based depth bias. This is in contrast to the legacy style depth bias. | |
Device supports depth buffering using w. | |
Device supports w-based fog. W-based fog is used when a perspective projection matrix is specified, but affine projections still use z-based fog. The system considers a projection matrix that contains a nonzero value in the [3][4] element to be a perspective projection matrix. | |
Device can perform hidden-surface removal (HSR) without requiring the application to sort polygons and without requiring the allocation of a depth-buffer. This leaves more video memory for textures. The method used to perform HSR is hardware-dependent and is transparent to the application. Z-bufferless HSR is performed if no depth-buffer surface is associated with the rendering-target surface and the depth-buffer comparison test is enabled (that is, when the state value associated with the | |
Device supports z-based fog. | |
Device can perform z-test operations. This effectively renders a primitive and indicates whether any z pixels have been rendered. |
?
Z-buffer comparison capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Always pass the z-test. | |
Pass the z-test if the new z equals the current z. | |
Pass the z-test if the new z is greater than the current z. | |
Pass the z-test if the new z is greater than or equal to the current z. | |
Pass the z-test if the new z is less than the current z. | |
Pass the z-test if the new z is less than or equal to the current z. | |
Always fail the z-test. | |
Pass the z-test if the new z does not equal the current z. |
?
Source-blending capabilities. This member can be one or more of the following flags. (The RGBA values of the source and destination are indicated by the subscripts s and d.)
Value | Meaning |
---|---|
The driver supports both | |
Source blend factor is (1 - As, 1 - As, 1 - As, 1 - As) and destination blend factor is (As, As, As, As); the destination blend selection is overridden. | |
The driver supports the | |
Blend factor is (Ad, Ad, Ad, Ad). | |
Blend factor is (Rd, Gd, Bd, Ad). | |
Blend factor is (1 - Ad, 1 - Ad, 1 - Ad, 1 - Ad). | |
Blend factor is (1 - Rd, 1 - Gd, 1 - Bd, 1 - Ad). | |
Blend factor is (1 - As, 1 - As, 1 - As, 1 - As). | |
Blend factor is (1 - Rs, 1 - Gs, 1 - Bs, 1 - As). | |
Blend factor is (1 - PSOutColor[1]r, 1 - PSOutColor[1]g, 1 - PSOutColor[1]b, not used)). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (1, 1, 1, 1). | |
Blend factor is (As, As, As, As). | |
Blend factor is (f, f, f, 1); f = min(As, 1 - Ad). | |
Blend factor is (Rs, Gs, Bs, As). | |
Blend factor is (PSOutColor[1]r, PSOutColor[1]g, PSOutColor[1]b, not used). See Render Target Blending. Differences between Direct3D 9 and Direct3D 9Ex: This flag is available in Direct3D 9Ex only. ? | |
Blend factor is (0, 0, 0, 0). |
?
Destination-blending capabilities. This member can be the same capabilities that are defined for the SrcBlendCaps member.
Alpha-test comparison capabilities. This member can include the same capability flags defined for the ZCmpCaps member. If this member contains only the
Shading operations capabilities. It is assumed, in general, that if a device supports a given command at all, it supports the
The color, specular highlights, fog, and alpha interpolants of a triangle each have capability flags that an application can use to find out how they are implemented by the device driver.
This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device can support an alpha component for Gouraud-blended transparency (the | |
Device can support colored Gouraud shading. In this mode, the per-vertex color components (red, green, and blue) are interpolated across a triangle face. | |
Device can support fog in the Gouraud shading mode. | |
Device supports Gouraud shading of specular highlights. |
?
Miscellaneous texture-mapping capabilities. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Alpha in texture pixels is supported. | |
Device can draw alpha from texture palettes. | |
Supports cube textures. | |
Device requires that cube texture maps have dimensions specified as powers of two. | |
Device supports mipmapped cube textures. | |
Device supports mipmapped textures. | |
Device supports mipmapped volume textures. | |
If this flag is not set, and A texture that is not a power of two cannot be set at a stage that will be read based on a shader computation (such as the bem - ps and texm3x3 - ps instructions in pixel shaders versions 1_0 to 1_3). For example, these textures can be used to store bumps that will be fed into texture reads, but not the environment maps that are used in texbem - ps, texbeml - ps, and texm3x3spec - ps. This means that a texture with dimensions that are not powers of two cannot be addressed or sampled using texture coordinates computed within the shader. This type of operation is known as a dependent read and cannot be performed on these types of textures. | |
Device does not support a projected bump-environment lookup operation in programmable and fixed function shaders. | |
Perspective correction texturing is supported. | |
If this flag is not set, and | |
Supports the | |
All textures must be square. | |
Texture indices are not scaled by the texture size prior to interpolation. | |
Device supports volume textures. | |
Device requires that volume texture maps have dimensions specified as powers of two. |
?
Texture-filtering capabilities for a texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-filtering capabilities for a cube texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-filtering capabilities for a volume texture. Per-stage filtering capabilities reflect which filtering modes are supported for texture stages when performing multiple-texture blending. This member can be any combination of the per-stage texture-filtering flags defined in
Texture-addressing capabilities for texture objects. This member can be one or more of the following flags.
Value | Meaning |
---|---|
Device supports setting coordinates outside the range [0.0, 1.0] to the border color, as specified by the | |
Device can clamp textures to addresses. | |
Device can separate the texture-addressing modes of the u and v coordinates of the texture. This ability corresponds to the | |
Device can mirror textures to addresses. | |
Device can take the absolute value of the texture coordinate (thus, mirroring around 0) and then clamp to the maximum value. | |
Device can wrap textures to addresses. |
?
Texture-addressing capabilities for a volume texture. This member can be one or more of the flags defined for the TextureAddressCaps member.
Defines the capabilities for line-drawing primitives.
Value | Meaning |
---|---|
Supports alpha-test comparisons. | |
Antialiased lines are supported. | |
Supports source-blending. | |
Supports fog. | |
Supports texture-mapping. | |
Supports z-buffer comparisons. |
?
Maximum texture width for this device.
Maximum texture height for this device.
Maximum value for any of the three dimensions (width, height, and depth) of a volume texture.
This number represents the maximum range of the integer bits of the post-normalized texture coordinates. A texture coordinate is stored as a 32-bit signed integer using 27 bits to store the integer part and 5 bits for the floating point fraction. The maximum integer index, 2^27, is used to determine the maximum texture coordinate, depending on how the hardware does texture-coordinate scaling.
Some hardware reports the cap
Less desirably, on some hardware
For example, assume that MaxTextureRepeat is equal to 32k and the size of the texture is 4k. If the hardware sets
Texture stage states define multi-blender texture operations. Some sampler states set up vertex processing, and some set up pixel processing. Texture stage states can be saved and restored using stateblocks (see State Blocks Save and Restore State (Direct3D 9)).
+Members of this enumerated type are used with the
The valid range of values for the
Defines texture coordinate transformation values.
+Texture coordinates can be transformed using a 4 x 4 matrix before the results are passed to the rasterizer. The texture coordinate transforms are set by calling
Texture coordinates are passed directly to the rasterizer.
The rasterizer should expect 1D texture coordinates. This value is used by fixed function vertex processing; it should be set to 0 when using a programmable vertex shader.
The rasterizer should expect 2D texture coordinates. This value is used by fixed function vertex processing; it should be set to 0 when using a programmable vertex shader.
The rasterizer should expect 3D texture coordinates. This value is used by fixed function vertex processing; it should be set to 0 when using a programmable vertex shader.
The rasterizer should expect 4D texture coordinates. This value is used by fixed function vertex processing; it should be set to 0 when using a programmable vertex shader.
This flag is honored by the fixed function pixel pipeline, as well as the programmable pixel pipeline in versions ps_1_1 to ps_1_3. When texture projection is enabled for a texture stage, all four floating point values must be written to the corresponding texture register. Each texture coordinate is divided by the last element before being passed to the rasterizer. For example, if this flag is specified with the
In short, texture wrapping changes the basic way that Direct3D rasterizes textured polygons using the texture coordinates specified for each vertex. While rasterizing a polygon, the system interpolates between the texture coordinates at each of the polygon's vertices to determine the texels that should be used for every pixel of the polygon. Normally, the system treats the texture as a 2D plane, interpolating new texels by taking the shortest route from point A within a texture to point B. If point A represents the u, v position (0.8, 0.1), and point B is at (0.1,0.1), the line of interpolation looks like the following diagram.
Note that the shortest distance between A and B in this illustration runs roughly through the middle of the texture. Enabling u-texture or v-texture coordinate wrapping changes how Direct3D perceives the shortest route between texture coordinates in the u-direction and v-direction. By definition, texture wrapping causes the rasterizer to take the shortest route between texture coordinate sets, assuming that 0.0 and 1.0 are coincident. The last bit is the tricky part: You can imagine that enabling texture wrapping in one direction causes the system to treat a texture as though it were wrapped around a cylinder. For example, consider the following diagram.
The preceding illustration shows how wrapping in the u - direction affects how the system interpolates texture coordinates. Using the same points as in the example for normal, or nonwrapped, textures, you can see that the shortest route between points A and B is no longer across the middle of the texture; it's now across the border where 0.0 and 1.0 exist together. Wrapping in the v-direction is similar, except that it wraps the texture around a cylinder that is lying on its side. Wrapping in both the u-direction and v-direction is more complex. In this situation, you can envision the texture as a torus, or doughnut.
The most common practical application for texture wrapping is to perform environment mapping. Usually, an object textured with an environment map appears very reflective, showing a mirrored image of the object's surroundings in the scene. For the sake of this discussion, picture a room with four walls, each one painted with a letter R, G, B, Y and the corresponding colors: red, green, blue, and yellow. The environment map for such a simple room might look like the following illustration.
Imagine that the room's ceiling is held up by a perfectly reflective, four-sided, pillar. Mapping the environment map texture to the pillar is simple; making the pillar look as though it is reflecting the letters and colors is not as easy. The following diagram shows a wire frame of the pillar with the applicable texture coordinates listed near the top vertices. The seam where wrapping will cross the edges of the texture is shown with a dotted line.
With wrapping enabled in the u-direction, the textured pillar shows the colors and symbols from the environment map appropriately and, at the seam in the front of the texture, the rasterizer properly chooses the shortest route between the texture coordinates, assuming that u-coordinates 0.0 and 1.0 share the same location. The textured pillar looks like the following illustration.
If texture wrapping isn't enabled, the rasterizer does not interpolate in the direction needed to generate a believable, reflected image. Rather, the area at the front of the pillar contains a horizontally compressed version of the texels between u-coordinates 0.175 and 0.875, as they pass through the center of the texture. The wrap effect is ruined.
+Defines the priority type to which an animation track is assigned.
+Tracks with the same priority are blended together, and the two resulting values are then blended using the priority blend factor.
+Track should be blended with all the low-priority tracks before the low-priority blend is mixed with the high-priority blend.
Track should be blended with all the high-priority tracks before the high-priority blend is mixed with the low-priority blend.
Defines the transition style between values of a mesh animation.
+The calculation for the ramp from ease in to ease out is calculated as follows:
where the ramp is a function Q(t) with the following properties:
Mathematically, this translates into:
Solving for A, B, C, D:
Therefore:
Linear transition between values.
Ease-in, ease-out spline transition between values.
Usage options that identify how resources are to be used.
The following table summarizes the available usage options.
+Texture wrapping options for IMT computation APIs.
+The texture wraps in the U direction.
The texture wraps in the V direction.
The texture wraps in both the U and V direction.
Defines flags used to control the number of matrices that the system applies when performing multimatrix vertex blending.
+Members of this type are used with the
Geometry blending (multimatrix vertex blending) requires that your application use a vertex format that has blending (beta) weights for each vertex.
+Disable vertex blending; apply only the world matrix set by the D3DTS_WORLDMATRIX macro, where the index value for the transformation state is 0.
Enable vertex blending between the two matrices set by the D3DTS_WORLDMATRIX macro, where the index value for the transformation states are 0 and 1.
Enable vertex blending between the three matrices set by the D3DTS_WORLDMATRIX macro, where the index value for the transformation states are 0, 1, and 2.
Enable vertex blending between the four matrices set by the D3DTS_WORLDMATRIX macro, where the index value for the transformation states are 0, 1, 2, and 3.
Vertex blending is done by using the value assigned to
Use a single matrix with a weight of 1.0.
Flexible Vertex Format Constants, or FVF codes, are used to describe the contents of vertices interleaved in a single data stream that will be processed by the fixed-function pipeline.
+This constant is the maximum number of vertex declarators for a mesh.
+MAXD3DDECLLENGTH is defined as a maximum of 64 (see d3d9types.h). This does not include the "end" marker vertex element.
+The maximum number of elements in the vertex declaration. The additional (+1) is for D3DDECL_END.
A combination of one or more flags that control the device create behavior.
+Vertex shader caps constants. These constants are used by the VS20Caps member of
Vertex texture sampler constants.
These constants identify the texture samplers used by vertex shaders.
+Specifies the type of I/O bus used by the graphics adapter.
+As many as three flags can be set. Flags in the range 0x00 through 0x04 (D3DBUSTYPE_Xxx) provide the basic bus type. Flags in the range 0x10000 through 0x50000 (D3DBUSIMPL_MODIFIER_Xxx) modify the basic description. The driver sets one bus-type flag, and can set zero or one modifier flag. If the driver sets a modifier flag, it also sets the
Indicates a type of bus other than the types listed here. +
PCI bus. +
PCI-X bus. +
PCI Express bus. +
Accelerated Graphics Port (AGP) bus. +
The implementation for the graphics adapter is in a motherboard chipset's north bridge. This flag implies that data never goes over an expansion bus (such as PCI or AGP) when it is transferred from main memory to the graphics adapter.
Indicates that the graphics adapter is connected to a motherboard chipset's north bridge by tracks on the motherboard and all of the graphics adapter's chips are soldered to the motherboard. This flag implies that data never goes over an expansion bus (such as PCI or AGP) when it is transferred from main memory to the graphics adapter.
The graphics adapter is connected to a motherboard chipset's north bridge by tracks on the motherboard, and all of the graphics adapter's chips are connected through sockets to the motherboard. +
The graphics adapter is connected to the motherboard through a daughterboard connector. +
The graphics adapter is connected to the motherboard through a daughterboard connector, and the graphics adapter is inside an enclosure that is not user accessible. +
One of the D3DBUSIMPL_MODIFIER_Xxx flags is set. +
Options for welding together vertices.
+Weld together all vertices that are at the same location. Using this flag avoids an epsilon comparison between vertex components.
If a given vertex component is within epsilon, modify partially matched vertices so that both components are identical. If all components are equal, remove one of the vertices.
Instructs the weld to allow only modifications to vertices and not removal. This flag is valid only if
Instructs the weld not to split vertices that are in separate attribute groups. When the
Creates an
The
The
Pass the
Note: Direct3DCreate9Ex is supported only in Windows Vista, Windows Server 2008, and Windows 7. Earlier versions of the D3D9.dll library do not include Direct3D9Ex and Direct3DCreate9Ex.
+Create an
The value of this parameter should be
If successful, this function returns a reference to an
The Direct3D object is the first Direct3D COM object that your graphical application needs to create and the last object that your application needs to release. Functions for enumerating and retrieving capabilities of a device are accessible through the Direct3D object. This enables applications to select devices without creating them.
Create an
LPDIRECT3D9 g_pD3D = NULL; if (NULL == (g_pD3D = Direct3DCreate9(D3D_SDK_VERSION))) return E_FAIL; +
The
For an example, see Creating a Device (Direct3D 9).
+Marks the beginning of a section of event code.
+A
Returns the number of previous calls to BeginEvent that have not yet been finalized by calls to the ID3DUserDefinedAnnotation::EndEvent method.
The return value is -1 if the calling application is not running under a Direct3D profiling tool.
You call the EndEvent method to mark the end of the section of event code.
A user can visualize the event when the calling application is running under an enabled Direct3D profiling tool such as Microsoft Visual Studio Ultimate 2012.
BeginEvent has no effect if the calling application is not running under an enabled Direct3D profiling tool.
+Adds a child frame to a frame.
+Pointer to the parent node.
Pointer to the child node.
If the function succeeds, the return value is
Loads the first frame hierarchy from a .x file.
+Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Combination of one or more flags from the D3DXMESH enumeration that specify creation options for the mesh.
Pointer to an
Pointer to an
Application provided interface that allows loading of user data. See
Returns a reference to the loaded frame hierarchy. See
Returns a reference to the animation controller corresponding to animation in the .x file. This is created with default tracks and events. See
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
All the meshes in the file will be collapsed into one output mesh. If the file contains a frame hierarchy, all the transformations will be applied to the mesh.
During the load, CreateFrame and LoadFrameChildData are called back on each frame to control loading and allocation of the frame. The application defines these methods to control how frames are stored. CreateMeshContainer and LoadMeshChildData are called back on each mesh object to control loading and allocation of mesh objects. LoadTopLevelData is called back for each top level object that doesn't get loaded by the other methods.
To free this data, call ID3DXAnimationController::Release to free the animation sets, and D3DXFRAMEDestroy, passing in the root node of the frame hierarchy and an object of your derived
Given a frame hierarchy, registers all the named matrices in the animation mixer.
+The top level node in the frame hierarchy.
Pointer to the animation controller object.
If the function succeeds, the return value is
Loads the first frame hierarchy from a .x file.
+Pointer to a buffer that contains the mesh hierarchy.
Size of the pMemory buffer, in bytes.
Combination of one or more flags from the D3DXMESH enumeration that specify creation options for the mesh.
Pointer to an
Pointer to an
Application provided interface that allows loading of user data. See
Returns a reference to the loaded frame hierarchy. See
Returns a reference to the animation controller corresponding to animation in the .x file. This is created with default tracks and events. See
If the function succeeds, the return value is
All the meshes in the file will be collapsed into one output mesh. If the file contains a frame hierarchy, all the transformations will be applied to the mesh.
+Creates a
If the function succeeds, the return value is
Creates a .x file and saves the mesh hierarchy and corresponding animations in it.
+Pointer to a string that specifies the name of the .x file identifying the saved mesh. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Format of the .x file (text or binary, compressed or not). See
Root node of the hierarchy to be saved. See
Animation controller that has animation sets to be stored. See
Application-provided interface that allows saving of user data. See
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function does not save compressed animation sets.
+Computes the bounding sphere of all the meshes in the frame hierarchy.
+Pointer to the root node.
Returns the center of the bounding sphere.
Returns the radius of the bounding sphere.
If the function succeeds, the return value is
Creates an animation controller object.
+Maximum number of animation outputs the controller can support.
Maximum number of animation sets that can be mixed.
Maximum number of animation sets that can be mixed simultaneously.
Maximum number of outstanding events that the controller will support.
Pointer to the animation controller object created. See
If the function succeeds, the return value is
An animation controller controls an animation mixer. The controller adds methods to modify blending parameters over time to enable smooth transitions.
+Finds the child frame of a root frame.
+Pointer to the root frame. See
Name of the child frame to find.
Returns the child frame if it is found, or
Creates a
If the function succeeds, the return value is
Destroys the subtree of frames under the root, including the root.
+Pointer to the root node.
Allocation interface used to deallocate nodes of the frame hierarchy.
If the function succeeds, the return value is
Counts number of frames in a subtree that have non-null names.
+Pointer to the root node of the subtree.
Returns the frame count.
Creates a render environment map.
+Pointer to an
Size of the render surface.
The number of mipmap levels.
Member of the
If TRUE, the render surface supports a depth-stencil surface. Otherwise, this member is set to
If DepthStencil is set to TRUE, this parameter is a member of the
Address of a reference to an
If the function succeeds, the return value is
Returns the driver level.
+Pointer to an
The driver level. See remarks.
This method returns the driver version, which is one of the following:
Creates a font object for a device and font.
+Pointer to an
The height of the characters in logical units.
The width of the characters in logical units.
Typeface weight. One example is bold.
The number of mipmap levels.
True for italic font, false otherwise.
The character set of the font.
Specifies how Windows should attempt to match the desired font sizes and characteristics with actual fonts. Use OUT_TT_ONLY_PRECIS for instance, to ensure that you always get a TrueType font.
Specifies how Windows should match the desired font with a real font. It applies to raster fonts only and should not affect TrueType fonts.
Pitch and family index.
String containing the typeface name. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Returns a reference to an
If the function succeeds, the return value is
The creation of an
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
If you want more information about font parameters, see The Logical Font.
+Creates a render surface.
+Pointer to an
Width of the render surface, in pixels.
Height of the render surface, in pixels.
Member of the
If TRUE, the render surface supports a depth-stencil surface. Otherwise, this member is set to
If DepthStencil is set to TRUE, this parameter is a member of the
Address of a reference to an
If the function succeeds, the return value is
Creates a sprite object which is associated with a particular device. Sprite objects are used to draw 2D images to the screen.
+Pointer to an
Address of a reference to an
If the function succeeds, the return value is
This interface can be used to draw two dimensional images in screen space of the associated device.
+Creates a font object indirectly for both a device and a font.
+Pointer to an
Pointer to a
Returns a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Uses a left-handed coordinate system to create a line.
+Pointer to an
Pointer to an
If the function succeeds, the return value is
This function creates a mesh with the
Turns on or off all D3DX debug output.
+If TRUE, debugger output is halted; if
Returns the previous value of Mute.
Verify that the version of D3DX you compiled with is the version that you are running.
+Use
Use
Returns TRUE if the version of D3DX you compiled against is the version you are running with; otherwise,
Use this function during the initialization of your application like this:
HRESULT CD3DXMyApplication::Initialize( HINSTANCE hInstance, LPCSTR szWindowName, LPCSTR szClassName, UINT uWidth, UINT uHeight )
{
    HRESULT hr;

    if ( !D3DXCheckVersion( D3D_SDK_VERSION, D3DX_SDK_VERSION ) )
        return E_FAIL;
    ...
}
Use Direct3DCreate9 to verify that the correct runtime is installed.
+Create an effect from an ASCII or binary effect description.
+Pointer to the device that will create the effect. See
Pointer to a buffer containing an effect description.
Length of the effect data, in bytes.
An optional
Optional interface reference,
If pSrcData contains a text effect, flags can be a combination of
Pointer to a
Returns a reference to an
Returns a buffer containing a listing of compile errors.
If the function succeeds, the return value is
Disassemble an effect.
+Pointer to an
Enable color coding to make the disassembly easier to read.
Returns a buffer containing the disassembled shader. See
If the function succeeds, the return value is
Create an effect from an ASCII or binary effect description. This is an extended version of
If the function succeeds, the return value is
This function is an extended version of
This function checks each constant in pSkipConstants to see that:
If a constant is named in the string that is not present in the effect, it is ignored.
If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the LPCTSTR data type resolves to LPCSTR.
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Create an effect from an ASCII or binary effect description.
+Pointer to the device.
Handle to a module containing the effect description. If this parameter is
Pointer to the resource. This parameter supports both Unicode and ANSI strings. See Remarks.
An optional
Optional interface reference,
If hSrcModule contains a text effect, flags can be a combination of
Pointer to a
Returns a buffer containing the compiled effect.
Returns a buffer containing a listing of compile errors.
If the function succeeds, the return value is
If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the LPCTSTR data type resolves to LPCSTR.
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Creates an effect compiler from an ASCII effect description.
+Pointer to a buffer containing an effect description.
Length, in bytes, of the effect data.
An optional
Optional interface reference,
Compile options identified by various flags (see
Address of a reference to an
Address of a reference to an
If the function succeeds, the return value is
Create an effect from an ASCII or binary effect description. This function is an extended version of
If the function succeeds, the return value is
This function is an extended version of
This function checks each constant in pSkipConstants to see that:
If a constant is named in the string that is not present in the effect, it is ignored.
If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the LPCTSTR data type resolves to LPCSTR.
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Creates an effect compiler from an ASCII effect description.
+Pointer to the filename. This parameter supports both Unicode and ANSI strings. See Remarks.
An optional
Optional interface reference,
Compile options identified by various flags (see
Address of a reference to an
Address of a reference to an
If the function succeeds, the return value is
If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the LPCTSTR data type resolves to LPCSTR.
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Creates an
If the function succeeds, the return value is
If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the LPCTSTR data type resolves to LPCSTR.
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Create an effect from an ASCII or binary effect description.
+Pointer to the device that will create the effect. See
Pointer to the filename. This parameter supports both Unicode and ANSI strings. See Remarks.
Optional
Optional interface reference,
If pSrcFile contains a text effect, flags can be a combination of
Pointer to a
Returns a reference to a buffer containing the compiled effect. See
Returns a reference to a buffer containing a listing of compile errors. See
If the function succeeds, the return value is
If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the LPCTSTR data type resolves to LPCSTR.
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Create an effect pool. A pool is used to share parameters between effects.
+Returns a reference to the created pool.
If the method succeeds, the return value is
If the arguments are invalid, the method will return
If the method fails, the return value will be E_FAIL.
For effects within a pool, shared parameters with the same name share values.
+Creates an effect from an ASCII or binary effect description. This function is an extended version of
If the function succeeds, the return value is
This function is an extended version of
This function checks each constant in pSkipConstants to see that:
If a constant is named in the string that is not present in the effect, it is ignored.
+Saves a mesh to a .x file.
+Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh. This parameter may be
Pointer to an array of
Pointer to an array of effect instances, one per attribute group in the mesh. This parameter may be
Number of
A combination of file format and save options when saving an .x file. See D3DX X File Constants.
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
The default file format is binary; however, if a file is specified as both a binary and a text file, it will be saved as a text file. Regardless of the file format, you may also use the compressed format to reduce the file size.
The following is a typical code example of how to use this function.
LPD3DXMESH    m_pMesh;          // Mesh object to be saved to a .x file
D3DXMATERIAL* m_pMaterials;     // Array of material structs in the mesh
DWORD         m_dwNumMaterials; // Number of material structs in the mesh

DWORD dwFormat = D3DXF_FILEFORMAT_BINARY; // Binary-format .x file (default)
// DWORD dwFormat = D3DXF_FILEFORMAT_TEXT; // Text-format .x file

// Load mesh into m_pMesh and determine values of m_pMaterials and
// m_dwNumMaterials with calls to D3DXLoadMeshxxx or other D3DX functions
// ...

D3DXSaveMeshToX( L"outputxfilename.x", m_pMesh, NULL, m_pMaterials,
                 NULL, m_dwNumMaterials, dwFormat );
Creates an N-patch mesh from a triangle mesh.
+Address of a reference to an
Address of a reference to an
If the function succeeds, the return value is
Returns a declarator from a flexible vertex format (FVF) code.
+Combination of
An array of
If the function succeeds, the return value is
Compute tangent, binormal, and normal vectors for a mesh.
+Pointer to an input
Combination of one or more
Use
If the function succeeds, the return value is
This function simply calls
D3DXComputeTangentFrameEx( pMesh,
                           D3DDECLUSAGE_TEXCOORD, 0,
                           D3DDECLUSAGE_BINORMAL, 0,
                           D3DDECLUSAGE_TANGENT, 0,
                           D3DDECLUSAGE_NORMAL, 0,
                           dwOptions | D3DXTANGENT_GENERATE_IN_PLACE,
                           NULL, 0.01f, 0.25f, 0.01f, NULL, NULL );
Singularities are handled as required by grouping edges and splitting vertices. If any vertices need to be split, the function will fail. The computed normal vector at each vertex is always normalized to have unit length.
The most robust solution for computing orthogonal Cartesian coordinates is to not set flags
Generates an optimized face remapping for a triangle list.
+Pointer to triangle list indices to use for ordering vertices.
Number of faces in the triangle list. For 16-bit meshes, this is limited to 2^16 - 1 (65535) or fewer faces.
Number of vertices referenced by the triangle list.
Flag indicating index type: TRUE if indices are 32-bit (more than 65535 indices),
Pointer to the original mesh face that was split to generate the current face.
If the function succeeds, the return value is
This function's optimization procedure is functionally equivalent to calling
Welds together replicated vertices that have equal attributes. This method uses specified epsilon values for equality comparisons.
+Pointer to an
Combination of one or more flags from D3DXWELDEPSILONSFLAGS.
Pointer to a D3DXWeldEpsilons structure, specifying the epsilon values to be used for this method. Use
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the source mesh. If the edge has no adjacent faces, the value is 0xffffffff. If this parameter is set to
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the optimized mesh. If the edge has no adjacent faces, the value is 0xffffffff.
An array of DWORDs, one per face, that identifies the original mesh face that corresponds to each face in the welded mesh.
Address of a reference to an
If the function succeeds, the return value is
This function uses supplied adjacency information to determine the points that are replicated. Vertices are merged based on an epsilon comparison. Vertices with equal position must already have been calculated and represented by point-representative data.
This function combines logically-welded vertices that have similar components, such as normals or texture coordinates within pEpsilons.
The following example code calls this function with welding enabled. Vertices are compared using epsilon values for normal vector and vertex position. A reference is returned to a face remapping array (pFaceRemap).
TCHAR strMediaPath[512];                   // X-file path
LPD3DXBUFFER pAdjacencyBuffer = NULL;      // adjacency data buffer
LPD3DXBUFFER pD3DXMtrlBuffer  = NULL;      // material buffer
LPD3DXMESH   pMesh            = NULL;      // mesh object
DWORD        m_dwNumMaterials;             // number of materials
D3DXWELDEPSILONS Epsilons;                 // structure with epsilon values
DWORD        *pFaceRemap[65536];           // face remapping array
DWORD        i;                            // internal variable

// Load the mesh from the specified file
hr = D3DXLoadMeshFromX( strMediaPath, D3DXMESH_MANAGED, m_pd3dDevice,
                        &pAdjacencyBuffer, &pD3DXMtrlBuffer, NULL,
                        &m_dwNumMaterials, &pMesh );
if( FAILED( hr ) )
    goto End;  // Go to error handling

// Set epsilon values
Epsilons.Normal = 0.001;
Epsilons.Position = 0.1;

// Weld the vertices
for( i = 0; i < 65536; i++ )
{
    pFaceRemap[i] = 0;
}
hr = D3DXWeldVertices( pMesh, D3DXWELDEPSILONS_WELDPARTIALMATCHES, &Epsilons,
                       (DWORD*)pAdjacencyBuffer->GetBufferPointer(),
                       (DWORD*)pAdjacencyBuffer->GetBufferPointer(),
                       (DWORD*)pFaceRemap, NULL );
if( FAILED( hr ) )
    goto End;  // Go to error handling
Generates an output vertex declaration from the input declaration. The output declaration is intended for use by the mesh tessellation functions.
+Pointer to the output vertex declaration. See
Pointer to the input vertex declaration. See
If the function succeeds, the return value is
Creates a buffer object.
+Size of the buffer to create, in bytes.
Address of a reference to an
If the function succeeds, the return value is
Loads a patch mesh from an
If the function succeeds, the return value is
For mesh files that do not contain effect instance information, default effect instances will be generated from the material information in the .x file. A default effect instance will have default values that correspond to the members of the
The default texture name is also filled in, but is handled differently. The name will be Texture0@Name, which corresponds to an effect variable by the name of "Texture0" with an annotation called "Name." This will contain the string file name for the texture.
+Tessellates the given mesh using the N-patch tessellation scheme.
+Pointer to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the source mesh. This parameter may be
Number of segments per edge to tessellate.
Set to TRUE to use quadratic interpolation for normals; set to
Address of a reference to an
Address of a reference to an
If the function succeeds, the return value is
This function tessellates by using the N-patch algorithm.
+Calculates per-triangle IMTs from a custom application-specified signal that varies over the surface of the mesh (generally at a higher frequency than vertex data). The signal is evaluated via a user-specified callback function.
+A reference to an input mesh (see
Zero-based texture coordinate index that identifies which set of texture coordinates to use.
The number of components in each data point in the signal.
The maximum distance between vertices; the algorithm continues subdividing until the distance between all vertices is less than or equal to fMaxUVDistance.
Texture wrap options. This is a combination of one or more
A reference to a user-provided evaluator function, which will be used to compute the signal value at arbitrary U,V coordinates. The function follows the prototype of LPD3DXIMTSIGNALCALLBACK.
A reference to a user-defined value which is passed to the signal callback function. Typically used by an application to pass a reference to a data structure that provides context information for the callback function.
A reference to a callback function to monitor IMT computation progress.
A reference to a user-defined variable which is passed to the status callback function. Typically used by an application to pass a reference to a data structure that provides context information for the callback function.
A reference to the buffer (see
If the function succeeds, the return value is
This function requires that the input mesh contain a signal-to-mesh texture mapping (i.e., texture coordinates). It allows the user to define a signal arbitrarily over the surface of the mesh.
+Validates a patch mesh, returning the number of degenerate vertices and patches.
+Pointer to an
Returns the number of degenerate vertices in the patch mesh.
Returns the number of degenerate patches in the patch mesh.
Returns a reference to a buffer containing a string of errors and warnings that explain the problems found in the patch mesh.
If the function succeeds, the return value is
This method validates the mesh by checking for invalid indices. Error information is available from the debugger output.
+Validates a mesh.
+Pointer to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh to be tested.
Returns a buffer containing a string of errors and warnings, which explain the problems found in the mesh.
If the function succeeds, the return value is
This method validates the mesh by checking for invalid indices. Error information is available from the debugger output.
+Splits a mesh into meshes smaller than the specified size.
+Pointer to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh to be simplified.
Maximum number of vertices in the resulting mesh.
Option flags for the new meshes.
Number of meshes returned.
Buffer containing an array of
Buffer containing an array of adjacency arrays (DWORDs) for the new meshes. See
Buffer containing an array of face remap arrays (DWORDs) for the new meshes. See
Buffer containing an array of vertex remap arrays for the new meshes. See
If the function succeeds, the return value is
A common use of this function is to split a mesh with 32-bit indices (more than 65535 vertices) into more than one mesh, each of which has 16-bit indices.
The adjacency, vertex remap and face remap arrays are arrays of DWORDs where each array contains n DWORD references, followed by the DWORD data referenced by the references. For example, to obtain the face remap information for face 3 in mesh 2, the following code could be used, assuming the face remap data was returned in a variable named ppFaceRemapArrayOut.
const DWORD **face_remaps = static_cast<DWORD **>(ppFaceRemapArrayOut->GetBufferPointer());
const DWORD remap = face_remaps[2][3];
Computes the tangent vectors for the texture coordinates given in the texture stage. Provided to support legacy applications. Use
If the function succeeds, the return value is
If the mesh vertex declaration specifies tangent or binormal fields,
This function simply calls
D3DXComputeTangentFrameEx( Mesh,
                           D3DDECLUSAGE_TEXCOORD, TexStageIndex,
                           ( BinormIndex == D3DX_DEFAULT ) ? D3DX_DEFAULT
                               : D3DDECLUSAGE_BINORMAL,  // provides backward function compatibility
                           BinormIndex,
                           ( TangentIndex == D3DX_DEFAULT ) ? D3DX_DEFAULT
                               : D3DDECLUSAGE_TANGENT,
                           TangentIndex,
                           D3DX_DEFAULT,  // do not store normals
                           0,
                           ( Wrap ? D3DXTANGENT_WRAP_UV : 0 )
                               | D3DXTANGENT_GENERATE_IN_PLACE
                               | D3DXTANGENT_ORTHOGONALIZE_FROM_U,
                           pAdjacency, -1.01f, -0.01f, -1.01f, NULL, NULL );
Determines if a ray intersects with a mesh.
+Pointer to an
Pointer to a
Pointer to a
Pointer to a
Pointer to an index value of the face closest to the ray origin, if pHit is TRUE.
Pointer to a barycentric hit coordinate, U.
Pointer to a barycentric hit coordinate, V.
Pointer to a ray intersection parameter distance.
Pointer to an
Pointer to a DWORD that contains the number of entries in the ppAllHits array.
If the function succeeds, the return value is
The
Any point in the plane V1V2V3 can be represented by the barycentric coordinate (U,V). The parameter U controls how much V2 gets weighted into the result, and the parameter V controls how much V3 gets weighted into the result. Lastly, the value of [1 - (U + V)] controls how much V1 gets weighted into the result.
Barycentric coordinates are a form of general coordinates. In this context, using barycentric coordinates represents a change in coordinate systems. What holds true for Cartesian coordinates holds true for barycentric coordinates.
Barycentric coordinates define a point inside a triangle in terms of the triangle's vertices. For a more in-depth description of barycentric coordinates, see Mathworld's Barycentric Coordinates Description.
+Pack mesh partitioning data into an atlas.
+Pointer to an input mesh (see
Texture width.
Texture height.
The minimum distance, in texels, between two charts on the atlas. The gutter is always scaled by the width; so, if a gutter of 2.5 is used on a 512x512 texture, then the minimum distance between two charts is 2.5 / 512.0 texels.
Zero-based texture coordinate index that identifies which set of texture coordinates to use.
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh. It should be derived from the ppPartitionResultAdjacency returned from
A reference to a callback function (see LPD3DXUVATLASCB) that is useful for monitoring progress.
Specify how often D3DX will call the callback; a reasonable default value is 0.0001f.
A void reference to be passed back to the callback function.
This options parameter is currently reserved.
A reference to an
If the function succeeds, the return value is
Loads a skin mesh from a DirectX .x file data object.
+Pointer to an
Combination of one or more flags, from the D3DXMESH enumeration, specifying creation options for the mesh.
Pointer to an
Address of a reference to an
Address of a reference to an
Pointer to a buffer containing an array of effect instances, one per attribute group in the returned mesh. An effect instance is a particular instance of state information used to initialize an effect. See
Pointer to the number of
Address of a reference to an
Address of a reference to an
If the function succeeds, the return value is
This method takes a reference to an internal object in the .x file, enabling you to load the frame hierarchy.
For mesh files that do not contain effect instance information, default effect instances will be generated from the material information in the .x file. A default effect instance will have default values that correspond to the members of the
The default texture name is also filled in, but is handled differently. The name will be Texture0@Name, which corresponds to an effect variable by the name of "Texture0" with an annotation called "Name." This will contain the string file name for the texture.
+Creates a mesh object using a declarator.
+Number of faces for the mesh. The valid range for this number is greater than 0, and one less than the maximum DWORD (typically 65534), because the last index is reserved.
Number of vertices for the mesh. This parameter must be greater than 0.
Combination of one or more flags from the D3DXMESH enumeration, specifying options for the mesh.
Array of
Pointer to an
Address of a reference to an
If the function succeeds, the return value is
Creates an empty skin mesh object using a flexible vertex format (FVF) code.
+Number of vertices for the skin mesh.
Combination of
Number of bones for the skin mesh.
Address of a reference to an
If the function succeeds, the return value is
Use SetBoneInfluence to populate the empty skin mesh object returned by this method.
+Intersects the specified ray with the given mesh subset. This provides similar functionality to
If the function succeeds, the return value is
The
Any point in the plane V1V2V3 can be represented by the barycentric coordinate (U,V). The parameter U controls how much V2 gets weighted into the result and the parameter V controls how much V3 gets weighted into the result. Lastly, the value of [1 - (U + V)] controls how much V1 gets weighted into the result.
Barycentric coordinates are a form of general coordinates. In this context, using barycentric coordinates represents a change in coordinate systems. What holds true for Cartesian coordinates holds true for barycentric coordinates.
Barycentric coordinates define a point inside a triangle in terms of the triangle's vertices. For a more in-depth description of barycentric coordinates, see Mathworld's Barycentric Coordinates Description.
+Returns the size of a vertex for a flexible vertex format (FVF).
+FVF to be queried. A combination of
The FVF vertex size, in bytes.
Returns the number of elements in the vertex declaration.
+A reference to the vertex declaration. See
The number of elements in the vertex declaration.
Cleans a mesh, preparing it for simplification.
+Vertex operations to perform in preparation for mesh cleaning. See
Pointer to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh to be cleaned.
Address of a reference to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the output mesh.
Returns a buffer containing a string of errors and warnings, which explain the problems found in the mesh.
If the function succeeds, the return value is
This function cleans a mesh using the cleaning method and options specified in the CleanType parameter. See the
Computes the intersection of a ray and a triangle.
+Pointer to a
Pointer to a
Pointer to a
Pointer to a
Pointer to a
Barycentric hit coordinates, U.
Barycentric hit coordinates, V.
Ray-intersection parameter distance.
Returns TRUE if the ray intersects the area of the triangle. Otherwise, returns
Any point in the plane V1V2V3 can be represented by the barycentric coordinate (U,V). The parameter U controls how much V2 gets weighted into the result, and the parameter V controls how much V3 gets weighted into the result. Lastly, the value of [1 - (U + V)] controls how much V1 gets weighted into the result.
Barycentric coordinates are a form of general coordinates. In this context, using barycentric coordinates represents a change in coordinate systems. What holds true for Cartesian coordinates holds true for barycentric coordinates.
Barycentric coordinates define a point inside a triangle in terms of the triangle's vertices. For a more in-depth description of barycentric coordinates, see Mathworld's Barycentric Coordinates Description.
+Converts the specified mesh subset into a single triangle strip.
+Pointer to an
Attribute ID of the mesh subset to convert to strips.
Combination of one or more flags from the D3DXMESH enumeration, specifying options for creating the index buffer. Cannot be
Pointer to an
Number of indices in the buffer returned in the ppIndexBuffer parameter.
If the function succeeds, the return value is
Before running this function, call Optimize or
Creates an empty skin mesh object using a declarator.
+Number of vertices for the skin mesh.
Array of
Number of bones for the skin mesh.
Address of a reference to an
If the function succeeds, the return value is
Use SetBoneInfluence to populate the empty skin mesh object returned by this method.
+Returns the size of a vertex from the vertex declaration.
+A reference to the vertex declaration. See
The zero-based stream index.
The vertex declaration size, in bytes.
Tessellates a rectangular higher-order surface patch into a triangle mesh.
+Vertex buffer containing the patch data.
Pointer to an array of four floating-point values that identify the number of segments into which each edge of the rectangle patch should be divided when tessellated. See
Vertex declaration structure that defines the vertex data. See
Describes a rectangular patch. See
Pointer to the created mesh. See
If the function succeeds, the return value is
Use
Returns a flexible vertex format (FVF) code from a declarator.
+Array of
Pointer to a DWORD value, representing the returned combination of
This function will fail for any declarator that does not map directly to an FVF.
+Computes a coordinate-axis oriented bounding box.
+Pointer to the first position.
Number of vertices.
Count or number of bytes between vertices.
Pointer to a
Pointer to a
If the function succeeds, the return value is
Creates a skin mesh from another mesh.
+Pointer to an
The length of the array attached to the BoneId. See
Pointer to an array of bone combinations. See
Address of a reference to an
If the function succeeds, the return value is
Performs tangent frame computations on a mesh. Tangent, binormal, and optionally normal vectors are generated. Singularities are handled as required by grouping edges and splitting vertices.
+Pointer to an input
Specifies the texture coordinate input semantic. If D3DX_DEFAULT, the function assumes that there are no texture coordinates, and the function will fail unless normal vector calculation is specified.
If a mesh has multiple texture coordinates, specifies the texture coordinate to use for the tangent frame computations. If zero, the mesh has only a single texture coordinate.
Specifies the output semantic for the type, typically
Specifies the semantic index at which to store the partial derivative with respect to the U texture coordinate.
Specifies the
Specifies the semantic index at which to store the partial derivative with respect to the V texture coordinate.
Specifies the output normal semantic, typically
Specifies the semantic index at which to store the normal vector at each vertex.
Combination of one or more
Description | |
---|---|
Weight the normal vector length by the angle, in radians, subtended by the two edges leaving the vertex. | & !( |
Compute orthogonal Cartesian coordinates from texture coordinates (u, v). See Remarks. | & !( |
Textures are not wrapped in either u or v directions | & !( |
Partial derivatives with respect to texture coordinates are normalized. | & !( |
Vertices are ordered in a counterclockwise direction around each triangle. | & !( |
Use per-vertex normal vectors already present in the input mesh. | & !( |
?
If
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh. The number of bytes in this array must be at least 3 * GetNumFaces * sizeof(DWORD).
Specifies the maximum cosine of the angle at which two partial derivatives are deemed to be incompatible with each other. If the dot product of the direction of the two partial derivatives in adjacent triangles is less than or equal to this threshold, then the vertices shared between these triangles will be split.
Specifies the maximum magnitude of a partial derivative at which a vertex will be deemed singular. As multiple triangles are incident on a point that have nearby tangent frames, but altogether cancel each other out (such as at the top of a sphere), the magnitude of the partial derivative will decrease. If the magnitude is less than or equal to this threshold, then the vertex will be split for every triangle that contains it.
Similar to fPartialEdgeThreshold, specifies the maximum cosine of the angle between two normals that is a threshold beyond which vertices shared between triangles will be split. If the dot product of the two normals is less than the threshold, the shared vertices will be split, forming a hard edge between neighboring triangles. If the dot product is more than the threshold, neighboring triangles will have their normals interpolated.
Address of a reference to an output
Address of a reference to an output
If the function succeeds, the return value is
A simplified version of this function is available as
The computed normal vector at each vertex is always normalized to have unit length.
The most robust solution for computing orthogonal Cartesian coordinates is to not set flags
Determines if a ray intersects the volume of a sphere's bounding box.
+Pointer to a
Radius of the sphere.
Pointer to a
Pointer to a
Returns TRUE if the ray intersects the volume of the sphere's bounding box. Otherwise, returns
Create a UV atlas for a mesh.
+Pointer to an input mesh (see
The maximum number of charts to partition the mesh into. See remarks about the partitioning modes. Use 0 to tell D3DX that the atlas should be parameterized based on stretch.
The amount of stretching allowed. 0 means no stretching is allowed, 1 means any amount of stretching can be used.
Texture width.
Texture height.
The minimum distance, in texels, between two charts on the atlas. The gutter is always scaled by the width; so, if a gutter of 2.5 is used on a 512x512 texture, then the minimum distance between two charts is 2.5 / 512.0 texels.
Zero-based texture coordinate index that identifies which set of texture coordinates to use.
A reference to an array of adjacency data with 3 DWORDs per face, indicating which triangles are adjacent to each other (see
An array with 3 DWORDS per face. Each face indicates if an edge is false or not. A non-false edge is indicated by -1, a false edge is indicated by any other value. This enables the parameterization of a mesh of quads where the edges down the middle of each quad will not be cut.
A reference to an array of integrated metric tensors that describes how to stretch a triangle (see IntegratedMetricTensor).
A reference to a callback function (see LPD3DXUVATLASCB) that is useful for monitoring progress.
Specify how often D3DX will call the callback; a reasonable default value is 0.0001f.
Pointer to a user-defined value which is passed to the callback function; typically used by an application to pass a reference to a data structure that provides context information for the callback function.
Specify the quality of the charts generated. See D3DXUVATLAS.
Pointer to the created mesh with the atlas (see
A reference to an array of the final face-partitioning data. Each element contains one DWORD per face (see
A reference to an array of remapped vertices. Each array element identifies the original vertex that each final vertex came from (if the vertex was split during remapping). Each array element contains one DWORD per vertex.
A reference to the maximum stretch value generated by the atlas algorithm. The range is between 0.0 and 1.0.
A reference to the number of charts created by the atlas algorithm. If dwMaxChartNumber is too low, this parameter will return the minimum number of charts required to create an atlas.
If the function succeeds, the return value is
Create a UV atlas for a mesh.
+Pointer to an input mesh (see
The maximum number of charts to partition the mesh into. See remarks about the partitioning modes. Use 0 to tell D3DX that the atlas should be parameterized based on stretch.
The amount of stretching allowed. 0 means no stretching is allowed, 1 means any amount of stretching can be used.
Zero-based texture coordinate index that identifies which set of texture coordinates to use.
A reference to an array of adjacency data with 3 DWORDs per face, indicating which triangles are adjacent to each other (see
An array with 3 DWORDS per face. Each face indicates if an edge is false or not. A non-false edge is indicated by -1, a false edge is indicated by any other value. This enables the parameterization of a mesh of quads where the edges down the middle of each quad will not be cut.
A reference to an array of integrated metric tensors that describes how to stretch a triangle (see IntegratedMetricTensor).
A reference to a callback function (see LPD3DXUVATLASCB) that is useful for monitoring progress.
Specify how often D3DX will call the callback; a reasonable default value is 0.0001f.
Pointer to a user-defined value that is passed to the callback function; typically used by an application to pass a reference to a data structure that provides context information for the callback function.
Specify the quality of the charts generated by combining one or more D3DXUVATLAS flags.
Pointer to the created mesh with the atlas (see
A reference to an array of the final face-partitioning data. Each element contains one DWORD per face (see
A reference to an array of remapped vertices. Each array element identifies the original vertex each final vertex came from (if the vertex was split during remapping). Each array element contains one DWORD per vertex.
Address of a reference to an
A reference to the maximum stretch value generated by the atlas algorithm. The range is between 0.0 and 1.0.
A reference to the number of charts created by the atlas algorithm. If dwMaxChartNumber is too low, this parameter will return the minimum number of charts required to create an atlas.
If the function succeeds, the return value is
Calculates per-triangle IMT's from a texture mapped onto a mesh, to be used optionally as input to the D3DX UVAtlas Functions.
+If the function succeeds, the return value is
Given a texture that maps over the surface of the mesh, the algorithm computes the IMT for each face. This will cause triangles containing lower-frequency signal data to take up less space in the final texture atlas when parameterized with the UVAtlas functions. The texture is assumed to be interpolated over the mesh bilinearly.
+Concatenates a group of meshes into one common mesh. This method can optionally apply a matrix transformation to each input mesh and its texture coordinates.
+Array of input mesh references (see
Number of input meshes to concatenate.
Mesh creation options; this is a combination of one or more D3DXMESH flags. The mesh creation options are equivalent to the options parameter required by
Optional array of geometry transforms. The number of elements in the array is NumMeshes; each element is a transformation matrix (see
Optional array of texture transforms. The number of elements in the array is NumMeshes; each element is a transformation matrix (see
Optional reference to a vertex declaration (see
Pointer to a
Address of a reference to the mesh created (see
If the function succeeds, the return value is
If no vertex declaration is given as part of the Options mesh creation parameter, the method will generate a union of all of the vertex declarations of the submeshes, promoting channels and types if necessary. The method will create an attribute table from attribute tables of the input meshes. To ensure creation of an attribute table, call Optimize with Flags set to
Determines whether a ray intersects the volume of a box's bounding box.
+Pointer to a
Pointer to a
Pointer to a
Pointer to a
Returns TRUE if the ray intersects the volume of the box's bounding box. Otherwise, returns
The values passed to
xmax, ymax, zmax + xmax, ymax, zmin + xmax, ymin, zmax + xmax, ymin, zmin + xmin, ymax, zmax + xmin, ymax, zmin + xmin, ymin, zmax + xmin, ymin, zmin +
The depth of the bounding box in the z direction is zmax - zmin, in the y direction is ymax - ymin, and in the x direction is xmax - xmin. For example, with the following minimum and maximum vectors, min (-1, -1, -1) and max (1, 1, 1), the bounding box is defined in the following manner.
1, 1, 1 1, 1, -1 1, -1, 1 1, -1, -1 + -1, 1, 1 + -1, 1, -1 + -1, -1, 1 + -1, -1, -1 ++
Loads a mesh from a resource.
+Handle to the module where the resource is located, or
Pointer to a string that specifies the resource to create the mesh from. See remarks.
Pointer to a string that specifies the resource type. See remarks.
Combination of one or more flags from the D3DXMESH enumeration that specify creation options for the mesh.
Pointer to an
Address of a reference to an
Address of a reference to an
Pointer to a buffer containing an array of effect instances, one per attribute group in the returned mesh. An effect instance is a particular instance of state information used to initialize an effect. See
Pointer to the number of
Address of a reference to an
If the function succeeds, the return value is
See FindResource to find out more about the Module, Name and Type parameters.
All the meshes in the file will be collapsed into one output mesh. If the file contains a frame hierarchy, all the transformations will be applied to the mesh.
For mesh files that do not contain effect instance information, default effect instances will be generated from the material information in the .x file. A default effect instance will have default values that correspond to the members of the
The default texture name is also filled in, but is handled differently. The name will be Texture0@Name, which corresponds to an effect variable by the name of "Texture0" with an annotation called "Name." This will contain the string file name for the texture.
+Generates a simplified mesh using the provided weights that come as close as possible to the given MinValue.
+Pointer to an
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh to be simplified.
Pointer to a
Pointer to an array of vertex weights. If this parameter is set to
Number of vertices or faces, depending on the flag set in the Options parameter, by which to simplify the source mesh.
Specifies simplification options for the mesh. One of the flags in D3DXMESHSIMP can be set.
Address of a reference to an
If the function succeeds, the return value is
This function generates a mesh that has MinValue vertices or faces.
If the simplification process cannot reduce the mesh to MinValue, the call still succeeds because MinValue is a desired minimum, not an absolute minimum.
If pVertexAttributeWeights is set to
AttributeWeights; AttributeWeights.Position = 1.0; + AttributeWeights.Boundary = 1.0; + AttributeWeights.Normal = 1.0; + AttributeWeights.Diffuse = 0.0; + AttributeWeights.Specular = 0.0; + AttributeWeights.Tex[8] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; +
This default structure is what most applications should use because it considers only geometric and normal adjustment. Only in special cases will the other member fields need to be modified.
+Creates a mesh object using a flexible vertex format (FVF) code.
+Number of faces for the mesh. The valid range for this number is greater than 0, and one less than the max DWORD value, typically 2^32 - 1, because the last index is reserved.
Number of vertices for the mesh. This parameter must be greater than 0.
Combination of one or more flags from the D3DXMESH enumeration, specifying creation options for the mesh.
Combination of
Pointer to an
Address of a reference to an
If the function succeeds, the return value is
Calculate per-triangle IMT's from per-vertex data. This function allows you to calculate the IMT based off of any value in a mesh (color, normal, etc).
+A reference to an input mesh (see
A reference to an array of per-vertex data from which IMT will be computed. The array size is uSignalStride * v, where v is the number of vertices in the mesh.
The number of floats per vertex.
The number of bytes per vertex in the array. This must be a multiple of sizeof(float)
Texture wrap options. This is a combination of one or more
A reference to a callback function to monitor IMT computation progress.
A reference to a user-defined variable which is passed to the status callback function. Typically used by an application to pass a reference to a data structure that provides context information for the callback function.
A reference to the buffer (see
If the function succeeds, the return value is
Generates an optimized vertex remapping for a triangle list. This function is commonly used after applying the face remapping generated by
If the function succeeds, the return value is
By default, a mesh uses 16 bit indices when it is created unless the application specifies otherwise. To check whether an existing mesh uses 16-bit or 32-bit indices, call
Loads a mesh from a DirectX .x file.
+Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Combination of one or more flags from the D3DXMESH enumeration, which specifies creation options for the mesh.
Pointer to an
Pointer to a buffer that contains adjacency data. The adjacency data contains an array of three DWORDs per face that specify the three neighbors for each face in the mesh. For more information about accessing the buffer, see
Pointer to a buffer containing materials data. The buffer contains an array of
Pointer to a buffer containing an array of effect instances, one per attribute group in the returned mesh. An effect instance is a particular instance of state information used to initialize an effect. See
Pointer to the number of
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
All the meshes in the file will be collapsed into one output mesh. If the file contains a frame hierarchy, all the transformations will be applied to the mesh.
For mesh files that do not contain effect instance information, default effect instances will be generated from the material information in the .x file. A default effect instance will have default values that correspond to the members of the
The default texture name is also filled in, but is handled differently. The name will be Texture0@Name, which corresponds to an effect variable by the name of "Texture0" with an annotation called "Name." This will contain the string file name for the texture.
+Creates a mesh from a control-patch mesh.
+Patch information structure. For more information, see
Number of patches.
Number of control vertices in the patch.
Unused. Reserved for later use.
Array of
Pointer to the device that creates the patch mesh. See
Pointer to the
If the function succeeds, the return value is
This method takes an input patch mesh and converts it to a tessellated mesh. Patch meshes use 16-bit index buffers. Therefore, indices to LockIndexBuffer are 16 bits.
+Gets the size of the rectangle patch.
+Number of segments per edge to tessellate.
Pointer to a DWORD that contains the number of triangles in the patch.
Pointer to a DWORD that contains the number of vertices in the patch.
If the function succeeds, the return value is
Tessellates a triangular higher-order surface patch into a triangle mesh.
+Vertex buffer containing the patch data.
Pointer to an array of three floating-point values that identify the number of segments into which each edge of the triangle patch should be divided when tessellated. See
Vertex declaration structure that defines the vertex data. See
Describes a triangle patch. See
Pointer to the created mesh. See
If the function succeeds, the return value is
Use
Computes a bounding sphere for the mesh.
+Pointer to first position.
Number of vertices.
Number of bytes between position vectors. Use GetNumBytesPerVertex,
Radius of the returned bounding sphere.
If the function succeeds, the return value is
Loads a mesh from a DirectX .x file.
+Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Combination of one or more flags from the D3DXMESH enumeration, which specifies creation options for the mesh.
Pointer to an
Pointer to a buffer that contains adjacency data. The adjacency data contains an array of three DWORDs per face that specify the three neighbors for each face in the mesh. For more information about accessing the buffer, see
Pointer to a buffer containing materials data. The buffer contains an array of
Pointer to a buffer containing an array of effect instances, one per attribute group in the returned mesh. An effect instance is a particular instance of state information used to initialize an effect. See
Pointer to the number of
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
All the meshes in the file will be collapsed into one output mesh. If the file contains a frame hierarchy, all the transformations will be applied to the mesh.
For mesh files that do not contain effect instance information, default effect instances will be generated from the material information in the .x file. A default effect instance will have default values that correspond to the members of the
The default texture name is also filled in, but is handled differently. The name will be Texture0@Name, which corresponds to an effect variable by the name of "Texture0" with an annotation called "Name." This will contain the string file name for the texture.
+Loads a mesh from memory.
+Pointer to the memory buffer which contains the mesh data.
Size of the file in memory, in bytes.
Combination of one or more flags from the D3DXMESH enumeration, specifying creation options for the mesh.
Pointer to an
Address of a reference to an
Address of a reference to an
Pointer to a buffer containing an array of effect instances, one per attribute group in the returned mesh. An effect instance is a particular instance of state information used to initialize an effect. See
Pointer to the number of
Address of a reference to an
If the function succeeds, the return value is
All the meshes in the file will be collapsed into one output mesh. If the file contains a frame hierarchy, all the transformations will be applied to the mesh.
For mesh files that do not contain effect instance information, default effect instances will be generated from the material information in the .x file. A default effect instance will have default values that correspond to the members of the
The default texture name is also filled in, but is handled differently. The name will be Texture0@Name, which corresponds to an effect variable by the name of "Texture0" with an annotation called "Name." This will contain the string file name for the texture.
+Convert the specified mesh subset into a series of strips.
+Pointer to an
Attribute ID of the mesh subset to convert to strips.
Combination of one or more flags from the D3DXMESH enumeration, specifying options for creating the index buffer. Cannot be
Pointer to an
Number of indices in the buffer returned in the ppIndexBuffer parameter.
Buffer containing an array of one DWORD per strip, which specifies the number of triangles in that strip.
Number of individual strips in the index buffer and corresponding strip length array.
If the function succeeds, the return value is
Before running this function, call Optimize or
Calculate per-triangle IMT's from per-texel data. This function is similar to
If the function succeeds, the return value is
Gets the size of the triangle patch.
+Number of segments per edge to tessellate.
Pointer to a DWORD that contains the number of triangles in the patch.
Pointer to a DWORD that contains the number of vertices in the triangle patch.
If the function succeeds, the return value is
Computes unit normals for each vertex in a mesh. Provided to support legacy applications. Use
If the function succeeds, the return value is
The input mesh must have the
A normal for a vertex is generated by averaging the normals of all faces that share that vertex.
If adjacency is provided, replicated vertices are ignored and "smoothed" over. If adjacency is not provided, replicated vertices will have normals averaged in from only the faces explicitly referencing them.
This function simply calls
+( pMesh, D3DX_DEFAULT, 0, D3DX_DEFAULT, 0, D3DX_DEFAULT, 0, , 0, | , pAdjacency, -1.01f, -0.01f, -1.01f, null, null ); +
Disassemble a shader.
Note: Instead of using this legacy function, we recommend that you use the
If the function succeeds, the return value is
Preprocesses a shader without performing compilation. This resolves all #defines and #includes, providing a self-contained shader for subsequent compilation.
Note: Instead of using this legacy function, we recommend that you use the
If the function succeeds, the return value is
Compile a shader file.
Note: Instead of using this legacy function, we recommend that you compile offline by using the Fxc.exe command-line compiler or use the
If the function succeeds, the return value is
E_NOTIMPL is returned if you're using 1.1 shaders (vs_1_1 and ps_1_1).
Creates a texture shader object from the compiled shader.
+Pointer to the function DWORD stream.
Returns an
If the function succeeds, the return value is
Get the semantics for all shader output elements.
+Pointer to the shader function DWORD stream.
Pointer to an array of
Returns the number of elements in pSemantics.
If the function succeeds, the return value is
Gets the semantics for the shader inputs. Use this method to determine the input vertex format.
+Pointer to the shader function DWORD stream.
Pointer to an array of
Returns the number of elements in pSemantics.
If the function succeeds, the return value is
Use
Returns the name of the highest high-level shader language (HLSL) profile supported by a given device.
+Pointer to the device. See
The HLSL profile name.
If the device does not support pixel shaders then the function returns
A shader profile specifies the assembly shader version to use and the capabilities available to the HLSL compiler when compiling a shader. The following table lists the pixel shader profiles that are supported.
Shader Profile | Description |
---|---|
ps_1_1 | Compile to ps_1_1 version. |
ps_1_2 | Compile to ps_1_2 version. |
ps_1_3 | Compile to ps_1_3 version. |
ps_1_4 | Compile to ps_1_4 version. |
ps_2_0 | Compile to ps_2_0 version. |
ps_2_a | Same as the ps_2_0 profile, with the following additional capabilities available for the compiler to target:
|
ps_2_b | Same as the ps_2_0 profile, with the following additional capabilities available for the compiler to target:
|
ps_3_0 | Compile to ps_3_0 version. |
?
For more information about the differences between shader versions, see Pixel Shader Differences.
+Searches through a shader for a particular comment. The comment is identified by a four-character code (FOURCC) in the first DWORD of the comment.
+Pointer to the shader function DWORD stream.
FOURCC code that identifies the comment block. See FourCC Formats.
Returns a reference to the comment data (not including the comment token and FOURCC code). This value can be
Returns the size of the comment data in bytes. This value can be
If the function succeeds, the return value is
Assemble a shader.
+Handle to a module containing the effect description. If this parameter is
Pointer to a string that specifies the resource name. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
An optional
Optional interface reference,
Compile options identified by various flags. The Direct3D 10 HLSL compiler is now the default. See
Returns a buffer containing the created shader. This buffer contains the compiled shader code, as well as any embedded debug and symbol table information.
Returns a buffer containing a listing of errors and warnings that were encountered during the compile. These are the same messages the debugger displays when running in debug mode. This value may be
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Assemble a shader.
+Pointer to a memory buffer that contains the shader data.
Length of the effect data, in bytes.
An optional
Optional interface reference,
Compile options identified by various flags. The Direct3D 10 HLSL compiler is now the default. See
Returns a buffer containing the created shader. This buffer contains the compiled shader code, as well as any embedded debug and symbol table information.
Returns a buffer containing a listing of errors and warnings that were encountered during the compile. These are the same messages the debugger displays when running in debug mode. This value may be
If the function succeeds, the return value is
Compile a shader file.
Note: Instead of using this legacy function, we recommend that you compile offline by using the Fxc.exe command-line compiler or use the
If the function succeeds, the return value is
Get the sampler names referenced in a shader.
+Pointer to the shader function DWORD stream.
Pointer to an array of LPCSTRs. The function will fill this array with references to the sampler names contained within pFunction. The maximum array size is the maximum number of sampler registers (16 for vs_3_0 and ps_3_0).
To find the number of samplers used, check pCount after calling
Returns the number of samplers referenced by the shader.
If the function succeeds, the return value is
Gets the shader-constant table embedded inside a shader.
+Pointer to the function DWORD stream.
Returns the constant table interface (see
A constant table is generated by
Assemble a shader.
+Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
An optional
Optional interface reference,
Compile options identified by various flags. The Direct3D 10 HLSL compiler is now the default. See
Returns a buffer containing the created shader. This buffer contains the compiled shader code, as well as any embedded debug and symbol table information.
Returns a buffer containing a listing of errors and warnings that were encountered during the compile. These are the same messages the debugger displays when running in debug mode. This value may be
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Gets the shader-constant table embedded inside a shader.
+Pointer to the function DWORD stream.
Use the D3DXCONSTTABLE_LARGEADDRESSAWARE flag to access up to 4 GB of virtual address space (instead of the default of 2 GB). If you do not need the additional virtual address space, use
Returns the constant table interface (see
If the function succeeds, the return value is
A constant table is generated by
Returns the name of the highest high-level shader language (HLSL) profile supported by a given device.
+Pointer to the device. See
The HLSL profile name.
If the device does not support vertex shaders then the function returns
A shader profile specifies the assembly shader version to use and the capabilities available to the HLSL compiler when compiling a shader. The following table lists the vertex shader profiles that are supported.
Shader Profile | Description |
---|---|
vs_1_1 | Compile to vs_1_1 version. |
vs_2_0 | Compile to vs_2_0 version. |
vs_2_a | Same as the vs_2_0 profile, with the following additional capabilities available for the compiler to target:
|
vs_3_0 | Compile to vs_3_0 version. |
?
For more information about the differences between shader versions, see Vertex Shader Differences.
+Preprocesses a shader resource without performing compilation. This resolves all #defines and #includes, providing a self-contained shader for subsequent compilation.
Note: Instead of using this legacy function, we recommend that you use the
If the function succeeds, the return value is
Compile a shader file.
Note: Instead of using this legacy function, we recommend that you compile offline by using the Fxc.exe command-line compiler or use the
If the function succeeds, the return value is
Preprocesses a shader file without performing compilation. This resolves all #defines and #includes, providing a self-contained shader for subsequent compilation.
Note: Instead of using this legacy function, we recommend that you use the
If the function succeeds, the return value is
Returns the size of the shader byte code, in bytes.
+Pointer to the function DWORD stream.
Returns the size of the shader byte code, in bytes.
Returns the shader version of the compiled shader.
+Pointer to the function DWORD stream.
Returns the shader version of the given shader, or zero if the shader function is
Uses a left-handed coordinate system to create a mesh containing a cylinder.
+Pointer to an
Radius at the negative Z end. Value should be greater than or equal to 0.0f.
Radius at the positive Z end. Value should be greater than or equal to 0.0f.
Length of the cylinder along the z-axis.
Number of slices about the main axis.
Number of stacks along the main axis.
Address of a reference to the output shape, an
Address of a reference to an
If the function succeeds, the return value is
The created cylinder is centered at the origin, and its axis is aligned with the z-axis.
This function creates a mesh with the
Uses a left-handed coordinate system to create a mesh containing a torus.
+Pointer to an
Inner-radius of the torus. Value should be greater than or equal to 0.0f.
Outer-radius of the torus. Value should be greater than or equal to 0.0f.
Number of sides in a cross-section. Value must be greater than or equal to 3.
Number of rings making up the torus. Value must be greater than or equal to 3.
Address of a reference to the output shape, an
Address of a reference to an
If the function succeeds, the return value is
The created torus is centered at the origin, and its axis is aligned with the z-axis. The inner radius of the torus is the radius of the cross-section (the minor radius), and the outer radius of the torus is the radius of the central hole.
This function returns a mesh that can be used later for drawing or manipulation by the application.
This function creates a mesh with the
Uses a left-handed coordinate system to create a mesh containing a sphere.
+Pointer to an
Radius of the sphere. This value should be greater than or equal to 0.0f.
Number of slices about the main axis.
Number of stacks along the main axis.
Address of a reference to the output shape, an
Address of a reference to an
If the function succeeds, the return value is
The created sphere is centered at the origin, and its axis is aligned with the z-axis.
This function creates a mesh with the
Uses a left-handed coordinate system to create a mesh containing a teapot.
+Pointer to an
Address of a reference to the output shape, an
Address of a reference to an
If the function succeeds, the return value is
This function creates a mesh with the
Uses a left-handed coordinate system to create a mesh containing an axis-aligned box.
+Pointer to an
Width of the box, along the x-axis.
Height of the box, along the y-axis.
Depth of the box, along the z-axis.
Address of a reference to the output shape, an
Address of a reference to an
If the function succeeds, the return value is
The created box is centered at the origin.
This function creates a mesh with the
Uses a left-handed coordinate system to create a mesh containing an n-sided polygon.
+Pointer to an
Length of each side.
Number of sides for the polygon. Value must be greater than or equal to 3.
Address of a reference to the output shape, an
Address of a reference to an
If the function succeeds, the return value is
The created polygon is centered at the origin.
This function creates a mesh with the
Creates a mesh containing the specified text, using the font associated with the device context.
+Pointer to the device that created the mesh.
Device context, containing the font for output. The font selected by the device context must be a TrueType font.
Pointer to a string that specifies the text to generate. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Maximum chordal deviation from TrueType font outlines.
Amount to extrude text in the negative z-direction.
Pointer to the returned mesh.
Pointer to a buffer containing adjacency information. May be
Pointer to an array of GLYPHMETRICSFLOAT structures that contain the glyph metric data. Each element contains information about the position and orientation of the corresponding glyph in the string. The number of elements in the array should be equal to the number of characters in the string. Note that the origin in each structure is not relative to the entire string, but rather is relative to that character cell. To compute the entire bounding box, add the increment for each glyph while traversing the string. If you are not concerned with the glyph sizes, set this parameter to
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function creates a mesh with the
Retrieves information about a given image file in memory.
+VOID reference to the source file in memory.
Size of file in memory, in bytes.
Pointer to a
Uses a compiled high-level shader language (HLSL) function to fill each texel of each mipmap level of a texture.
+Pointer to an
Pointer to a
If the function succeeds, the return value is
The texture target must be an HLSL function that contains the following semantics:
The input parameters can be in any order. For an example, see
Creates an empty cube texture, adjusting the calling parameters as needed.
+Pointer to an
Width and height of the cube texture, in pixels. For example, if the cube texture is an 8-pixel by 8-pixel cube, the value for this parameter should be 8.
Number of mip levels requested. If this value is zero or D3DX_DEFAULT, a complete mipmap chain is created.
0,
Member of the
Member of the
Address of a reference to an
If the function succeeds, the return value is
Cube textures differ from other surfaces in that they are collections of surfaces.
Internally,
Creates a volume texture from a file. This is a more advanced function than
If the function succeeds, the return value is
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
When skipping mipmap levels while loading a .dds file, use the D3DX_SKIP_DDS_MIP_LEVELS macro to generate the MipFilter value. This macro takes the number of levels to skip and the filter type and returns the filter value, which would then be passed into the MipFilter parameter.
+Uses a compiled high-level shader language (HLSL) function to fill each texel of each mipmap level of a texture.
+Pointer to an
Pointer to a
If the function succeeds, the return value is
The texture target must be an HLSL function that contains the following semantics:
The following is an example of such an HLSL function:
float4 TextureGradientFill(
    float2 vTexCoord : POSITION,
    float2 vTexelSize : PSIZE) : COLOR
{
    float r, g, b, xSq, ySq, a;
    xSq = 2.f * vTexCoord.x - 1.f;
    xSq *= xSq;
    ySq = 2.f * vTexCoord.y - 1.f;
    ySq *= ySq;
    a = sqrt(xSq + ySq);
    if (a > 1.0f)
    {
        a = 1.0f - (a - 1.0f);
    }
    else if (a < 0.2f)
    {
        a = 0.2f;
    }
    r = 1 - vTexCoord.x;
    g = 1 - vTexCoord.y;
    b = vTexCoord.x;
    return float4(r, g, b, a);
}
Note that the input parameters can be in any order, but both input semantics must be represented.
+Checks cube-texture-creation parameters.
+Pointer to an
Pointer to the requested width and height in pixels, or
Pointer to the number of requested mipmap levels, or
0 or
Pointer to a member of the
Member of the
If the function succeeds, the return value is
If parameters to this function are invalid, this function returns corrected parameters.
Cube textures differ from other surfaces in that they are collections of surfaces. To call SetRenderTarget with a cube texture, you must select an individual face using GetCubeMapSurface and pass the resulting surface to SetRenderTarget.
+Creates a texture from a file.
+Pointer to an
Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
The function is equivalent to
Mipmapped textures automatically have each level filled with the loaded texture.
When loading images into mipmapped textures, some devices are unable to go to a 1x1 image and this function will fail. If this happens, the images need to be loaded manually.
Note that a resource created with this function will be placed in the memory class denoted by
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
For the best performance when using
Loads a surface from a file.
+Pointer to an
Pointer to a
Pointer to a
Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to a
Combination of one or more
Pointer to a
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function handles conversion to and from compressed texture formats and supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Writing to a non-level-zero surface will not cause the dirty rectangle to be updated. If
Uses a user-provided function to fill each texel of each mip level of a given cube texture.
+Pointer to an
Pointer to a user-provided evaluator function, which will be used to compute the value of each texel. The function follows the prototype of LPD3DXFILL3D.
Pointer to an arbitrary block of user-defined data. This reference will be passed to the function provided in pFunction.
If the function succeeds, the return value is
Here is an example that creates a function called ColorCubeFill, which relies on
// Define a function that matches the prototype of LPD3DXFILL3D + VOID WINAPI ColorCubeFill (+* pOut, const * pTexCoord, + const * pTexelSize, LPVOID pData) + { *pOut = (pTexCoord->x, pTexCoord->y, pTexCoord->z, 0.0f); + } // Fill the texture using + if (FAILED (hr = (m_pTexture, ColorCubeFill, null ))) + { return hr; + } +
Creates an empty volume texture, adjusting the calling parameters as needed.
+Pointer to an
Width in pixels. This value must be nonzero. The maximum dimension that a driver supports (for width, height, and depth) can be found in MaxVolumeExtent in
Height in pixels. This value must be nonzero. The maximum dimension that a driver supports (for width, height, and depth) can be found in MaxVolumeExtent in
Depth in pixels. This value must be nonzero. The maximum dimension that a driver supports (for width, height, and depth) can be found in MaxVolumeExtent in
Number of mip levels requested. If this value is zero or D3DX_DEFAULT, a complete mipmap chain is created.
0 or
Member of the
Member of the
Address of a reference to an
If the function succeeds, the return value is
Internally,
Loads a volume from a file.
+Pointer to an
Pointer to a
Pointer to a
Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to a
Combination of one or more
Pointer to a
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function handles conversion to and from compressed texture formats and supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Writing to a non-level-zero surface of the volume texture will not cause the dirty rectangle to be updated. If
Creates a texture from a file. This is a more advanced function than
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Use
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Mipmapped textures automatically have each level filled with the loaded texture. When loading images into mipmapped textures, some devices are unable to go to a 1x1 image and this function will fail. If this happens, then the images need to be loaded manually.
For the best performance when using
When skipping mipmap levels while loading a .dds file, use the D3DX_SKIP_DDS_MIP_LEVELS macro to generate the MipFilter value. This macro takes the number of levels to skip and the filter type and returns the filter value, which would then be passed into the MipFilter parameter.
+Converts a height map into a normal map. The (x,y,z) components of each normal are mapped to the (r,g,b) channels of the output texture.
+Pointer to an
Pointer to an
Pointer to a
One or more
One
Constant value multiplier that increases (or decreases) the values in the normal map. Higher values usually make bumps more visible, lower values usually make bumps less visible.
If the function succeeds, the return value is
This method computes the normal by using the central difference with a kernel size of 3x3. The central differencing denominator used is 2.0. RGB channels in the destination contain biased (x,y,z) components of the normal.
+Checks volume-texture-creation parameters.
+Pointer to an
Pointer to the requested width in pixels, or
Pointer to the requested height in pixels, or
Pointer to the requested depth in pixels, or
Pointer to the number of requested mipmap levels, or
Currently not used, set to 0.
Pointer to a member of the
Member of the
If the function succeeds, the return value is
If parameters to this function are invalid, this function returns corrected parameters.
+Creates a texture from a file in memory.
+Pointer to an
Pointer to the file in memory from which to create the texture.
Size in bytes of the file in memory.
Address of a reference to an
If the function succeeds, the return value is
The function is equivalent to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Note that a resource created with this function when called from a
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Creates a cube texture from a resource specified by a string. This is a more advanced function than
If the function succeeds, the return value is
The compiler setting determines the function version. If Unicode is defined, the function call resolves to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Cube textures differ from other surfaces in that they are collections of surfaces. To call SetRenderTarget with a cube texture, you must select an individual face using GetCubeMapSurface and pass the resulting surface to SetRenderTarget.
Creates a volume texture from a file in memory.
+Pointer to an
Pointer to the file in memory from which to create the volume texture.
Size of the file in memory, in bytes.
Address of a reference to an
If the function succeeds, the return value is
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
The function is equivalent to
Note that a resource created with this function when called from a
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Loads a volume from another volume.
+Pointer to an
Pointer to a
Pointer to a
A Pointer to an
Pointer to a
Pointer to a
A combination of one or more
If the function succeeds, the return value is
Writing to a non-level-zero surface of the volume texture will not cause the dirty rectangle to be updated. If
Saves a texture to a file.
+Pointer to a string that specifies the file name of the destination image. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to
Pointer to a
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function handles conversion to and from compressed texture formats.
If the volume is nondynamic (because of a usage parameter set to 0 at creation time) and located in video memory (the memory pool set to
Creates a texture from a file.
+Pointer to an
Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
The function is equivalent to
Mipmapped textures automatically have each level filled with the loaded texture.
When loading images into mipmapped textures, some devices are unable to go to a 1x1 image and this function will fail. If this happens, the images need to be loaded manually.
Note that a resource created with this function will be placed in the memory class denoted by
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
For the best performance when using
Creates a cube texture from a file in memory. This is a more advanced function than
If the function succeeds, the return value is
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Cube textures differ from other surfaces in that they are collections of surfaces. To call SetRenderTarget with a cube texture, you must select an individual face using GetCubeMapSurface and pass the resulting surface to SetRenderTarget.
This method is designed to be used for loading image files stored as RT_RCDATA, which is an application-defined resource (raw data). Otherwise this method will fail.
For details on
When skipping mipmap levels while loading a .dds file, use the D3DX_SKIP_DDS_MIP_LEVELS macro to generate the MipFilter value. This macro takes the number of levels to skip and the filter type and returns the filter value, which would then be passed into the MipFilter parameter.
+Creates a texture from a resource. This is a more advanced function than
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
The resource being loaded must be of type RT_BITMAP or RT_RCDATA. Resource type RT_RCDATA is used to load formats other than bitmaps (such as .tga, .jpg, and .dds).
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Retrieves information about a given image file.
+File name of image to retrieve information about. If UNICODE or _UNICODE are defined, this parameter type is LPCWSTR, otherwise, the type is LPCSTR.
Pointer to a
This function supports both Unicode and ANSI strings.
+Loads a surface from memory.
+Pointer to an
Pointer to a
Pointer to a
Pointer to the upper left corner of the source image in memory.
Member of the
Pitch of source image, in bytes. For DXT formats, this number should represent the width of one row of cells, in bytes.
Pointer to a
Pointer to a
Combination of one or more
If the function succeeds, the return value is
This function handles conversion to and from compressed texture formats.
Writing to a non-level-zero surface will not cause the dirty rectangle to be updated. If
Saves a texture to an image file.
+
Pointer to
Pointer to a
Address of a reference to an
This function handles conversion to and from compressed texture formats.
+Checks texture-creation parameters.
+Pointer to an
Pointer to the requested width in pixels, or
Pointer to the requested height in pixels, or
Pointer to number of requested mipmap levels, or
0 or
Pointer to a member of the
Member of the
If the function succeeds, the return value is
If parameters to this function are invalid, this function returns corrected parameters.
This function uses the following heuristics when comparing the requested requirements against available formats:
Saves a volume to a file on disk.
+Pointer to a string that specifies the file name of the destination image. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to
Pointer to a
Pointer to a
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function handles conversion to and from compressed texture formats.
If the volume is nondynamic (because of a usage parameter set to 0 at creation time) and located in video memory (the memory pool set to
Loads a volume from a resource.
+Pointer to an
Pointer to a
Pointer to a
Handle to the module where the resource is located, or
Pointer to a string that specifies the file name of the source image. If UNICODE or _UNICODE are defined, this parameter type is LPCWSTR, otherwise, the type is LPCSTR.
Pointer to a
Combination of one or more
Pointer to a
If the function succeeds, the return value is
The resource being loaded must be a bitmap resource (RT_BITMAP).
This function handles conversion to and from compressed texture formats.
Writing to a non-level-zero surface of the volume texture will not cause the dirty rectangle to be updated. If
This function supports both Unicode and ANSI strings.
+Creates a volume texture from a resource specified by a string. This is a more advanced function than
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
The resource being loaded must be an application-defined resource (RT_RCDATA).
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Creates a cube texture from a resource.
+Pointer to an
Handle to the module where the resource is located, or
Pointer to a string that specifies the resource name. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Address of a reference to an
If the function succeeds, the return value is
The compiler setting determines the function version. If Unicode is defined, the function call resolves to
The function is equivalent to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Note that a resource created with this function when called from a
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Saves a surface to a file.
+Pointer to a string that specifies the file name of the destination image. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to
Pointer to a
Pointer to a
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function handles conversion to and from compressed texture formats.
+Creates a cube texture from a file in memory.
+Pointer to an
Pointer to the file in memory from which to create the cubemap. See Remarks.
Size of the file in memory, in bytes.
Address of a reference to an
If the function succeeds, the return value is
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
The function is equivalent to
Note that a resource created with this function when called from a
This method is designed to be used for loading image files stored as RT_RCDATA, which is an application-defined resource (raw data). Otherwise this method will fail.
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Creates a cube texture from a file. This is a more advanced function than
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Cube textures differ from other surfaces in that they are collections of surfaces. To call SetRenderTarget with a cube texture, you must select an individual face using GetCubeMapSurface and pass the resulting surface to SetRenderTarget.
When skipping mipmap levels while loading a .dds file, use the D3DX_SKIP_DDS_MIP_LEVELS macro to generate the MipFilter value. This macro takes the number of levels to skip and the filter type and returns the filter value, which would then be passed into the MipFilter parameter.
+Retrieves information about a given image in a resource.
+Module where the resource is loaded. Set this parameter to
Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Pointer to a
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
Uses a compiled high-level shader language (HLSL) function to fill each texel of each mipmap level of a texture.
+Pointer to an
Pointer to a
If the function succeeds, the return value is
The texture target must be an HLSL function that contains the following semantics:
The input parameters can be in any order. For an example, see
Uses a user-provided function to fill each texel of each mip level of a given texture.
+Pointer to an
Pointer to a user-provided evaluator function, which will be used to compute the value of each texel. The function follows the prototype of LPD3DXFILL2D.
Pointer to an arbitrary block of user-defined data. This reference will be passed to the function provided in pFunction.
If the function succeeds, the return value is
Here is an example that creates a function called ColorFill, which relies on
// Define a function that matches the prototype of LPD3DXFILL3D + VOID WINAPI ColorFill (+* pOut, const * pTexCoord, + const * pTexelSize, LPVOID pData) + { *pOut = (pTexCoord->x, pTexCoord->y, 0.0f, 0.0f); + } // Fill the texture using + if (FAILED (hr = (m_pTexture, ColorFill, null ))) + { return hr; + } +
Saves a surface to an image file.
+Address of a reference to an
Pointer to
Pointer to a
Pointer to a
If the function succeeds, the return value is
This function handles conversion to and from compressed texture formats.
+Creates a volume texture from a file.
+Pointer to an
Pointer to a string that specifies the file name. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
The function is equivalent to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Mipmapped textures automatically have each level filled with the loaded texture.
When loading images into mipmapped textures, some devices are unable to go to a 1x1 image and this function will fail. If this happens, then the images need to be loaded manually.
Note that a resource created with this function when called from a
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Creates a texture from a resource.
+Pointer to an
Handle to the module where the resource is located, or
Pointer to a string that specifies the resource name. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
The function is equivalent to
The resource being loaded must be of type RT_BITMAP or RT_RCDATA. Resource type RT_RCDATA is used to load formats other than bitmaps (such as .tga, .jpg, and .dds).
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Note that a resource created with this function when called from a
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Loads a volume from memory.
+Pointer to an
Pointer to a
Pointer to a
Pointer to the top-left corner of the source volume in memory.
Member of the
Pitch of source image, in bytes. For DXT formats (compressed texture formats), this number should represent the size of one row of cells, in bytes.
Pitch of source image, in bytes. For DXT formats (compressed texture formats), this number should represent the size of one slice of cells, in bytes.
Pointer to a
Pointer to a
A combination of one or more
If the function succeeds, the return value is
Writing to a non-level-zero surface of the volume texture will not cause the dirty rectangle to be updated. If
Creates a volume texture from a file.
+Pointer to an
Pointer to a string that specifies the filename. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Width in pixels. If this value is zero or D3DX_DEFAULT, the dimensions are taken from the file. The maximum dimension that a driver supports (for width, height, and depth) can be found in MaxVolumeExtent in
Height, in pixels. If this value is zero or D3DX_DEFAULT, the dimensions are taken from the file. The maximum dimension that a driver supports (for width, height, and depth) can be found in MaxVolumeExtent in
Depth, in pixels. If this value is zero or D3DX_DEFAULT, the dimensions are taken from the file. The maximum dimension that a driver supports (for width, height, and depth) can be found in MaxVolumeExtent in
Number of mip levels requested. If this value is zero or D3DX_DEFAULT, a complete mipmap chain is created.
0,
Member of the
Member of the
A combination of one or more
A combination of one or more
Pointer to a
Pointer to a
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Mipmapped textures automatically have each level filled with the loaded volume texture. When loading images into mipmapped textures, some devices are unable to go to a 1x1 image and this function will fail. If this happens, then the images need to be loaded manually.
When skipping mipmap levels while loading a .dds file, use the D3DX_SKIP_DDS_MIP_LEVELS macro to generate the MipFilter value. This macro takes the number of levels to skip and the filter type and returns the filter value, which would then be passed into the MipFilter parameter.
+Creates a texture from a file in memory. This is a more advanced function than
If the function succeeds, the return value is
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
For details about
When skipping mipmap levels while loading a .dds file, use the D3DX_SKIP_DDS_MIP_LEVELS macro to generate the MipFilter value. This macro takes the number of levels to skip and the filter type and returns the filter value, which would then be passed into the MipFilter parameter.
+Saves a volume to a buffer. The method creates an
If the function succeeds, the return value is
Creates a volume texture from a resource.
+Pointer to an
Handle to the module where the resource is located, or
Pointer to a string that specifies the resource name. If the compiler settings require Unicode, the data type LPCTSTR resolves to LPCWSTR. Otherwise, the string data type resolves to LPCSTR. See Remarks.
Address of a reference to an
If the function succeeds, the return value is
The compiler setting also determines the function version. If Unicode is defined, the function call resolves to
The function is equivalent to
The resource being loaded must be an application-defined resource (RT_RCDATA).
This function supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Note that a resource created with this function when called from a
Filtering is automatically applied to a texture created using this method. The filtering is equivalent to
Creates an empty texture, adjusting the calling parameters as needed.
+Pointer to an
Width in pixels. If this value is 0, a value of 1 is used. See Remarks.
Height in pixels. If this value is 0, a value of 1 is used. See Remarks.
Number of mip levels requested. If this value is zero or D3DX_DEFAULT, a complete mipmap chain is created.
0,
Member of the
Member of the
Address of a reference to an
If the function succeeds, the return value is
Internally,
If both Height and Width are set to D3DX_DEFAULT, a value of 256 is used for both parameters. If either Height or Width is set to D3DX_DEFAULT and the other parameter is set to a numeric value, the texture will be square with both the height and width equal to the numeric value.
+Uses a user-provided function to fill each texel of each mip level of a given volume texture.
+Pointer to an
Pointer to a user-provided evaluator function, which will be used to compute the value of each texel. The function follows the prototype of LPD3DXFILL3D.
Pointer to an arbitrary block of user-defined data. This reference will be passed to the function provided in pFunction.
If the function succeeds, the return value is
If the volume is non-dynamic (because usage is set to 0 when it is created), and located in video memory (the memory pool set to
This example creates a function called ColorVolumeFill, which relies on
// Define a function that matches the prototype of LPD3DXFILL3D
VOID WINAPI ColorVolumeFill (D3DXVECTOR4* pOut, const D3DXVECTOR3* pTexCoord,
    const D3DXVECTOR3* pTexelSize, LPVOID pData)
{
    *pOut = D3DXVECTOR4(pTexCoord->x, pTexCoord->y, pTexCoord->z, 0.0f);
}

// Fill volume texture
if (FAILED (hr = D3DXFillVolumeTexture (m_pTexture, ColorVolumeFill, NULL)))
{
    return hr;
}
Loads a surface from another surface with color conversion.
+Pointer to an
Pointer to a
Pointer to a
Pointer to an
Pointer to a
Pointer to a
A combination of one or more
If the function succeeds, the return value is
This function handles conversion to and from compressed texture formats.
Writing to a non-level-zero surface will not cause the dirty rectangle to be updated. If
Loads a volume from a file in memory.
+Pointer to an
Pointer to a
Pointer to a
Pointer to the file in memory from which to load the volume.
Size in bytes of the file in memory.
Pointer to a
Combination of one or more
Pointer to a
If the function succeeds, the return value is
This function handles conversion to and from compressed texture formats and supports the following file formats: .bmp, .dds, .dib, .hdr, .jpg, .pfm, .png, .ppm, and .tga. See
Writing to a non-level-zero surface of the volume texture will not cause the dirty rectangle to be updated. If
Creates an instance of an
If the function succeeds, the return value is
After using this function, use RegisterTemplates or RegisterEnumTemplates to register templates, CreateEnumObject to create an enumerator object, or CreateSaveObject to create a save object.
+An application implements this interface to handle callbacks in animation sets generated by calls to
The LPD3DXANIMATIONCALLBACKHANDLER type is defined as a reference to this interface.
typedef interface ID3DXAnimationCallbackHandler ID3DXAnimationCallbackHandler;
typedef interface ID3DXAnimationCallbackHandler *LPD3DXANIMATIONCALLBACKHANDLER;
The application implements this method. This method is called when a callback occurs for an animation set in one of the tracks during a call to
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
This interface is used to control animation functionality, connecting animation sets with the transformation frames that are being animated. The interface has methods to mix multiple animations and to modify blending parameters over time to enable smooth transitions and other effects.
+Create an animation controller object with
The LPD3DXANIMATIONCONTROLLER type is defined as a reference to the
typedef interface ID3DXAnimationController ID3DXAnimationController;
typedef interface ID3DXAnimationController *LPD3DXANIMATIONCONTROLLER;
The D3DXEVENTHANDLE type is defined as an event handle to animation controller events.
typedef DWORD D3DXEVENTHANDLE;
The LPD3DXEVENTHANDLE type is defined as a reference to an event handle to animation controller events.
typedef D3DXEVENTHANDLE *LPD3DXEVENTHANDLE;
Get the maximum number of animation outputs the animation controller can support.
+The maximum number of animation outputs the controller can manage.
Gets the maximum number of animation sets the animation controller can support.
+The maximum number of animation sets the controller can manage.
Gets the maximum number of tracks in the animation controller.
+Number of tracks.
The maximum number of tracks the controller can manage.
+Gets the maximum number of events the animation controller can support.
+The maximum number of events the controller can manage.
Adds an animation output to the animation controller and registers references for scale, rotate, and translate (SRT) transformations.
+Name of the animation output.
Pointer to a
Pointer to a
Pointer to a
Pointer to a
If the method succeeds, the return value is
If the animation output is already registered, pMatrix will be filled with the input transformation data.
Animation sets created with
Adds an animation set to the animation controller.
+Pointer to the
If the method succeeds, the return value is
Removes an animation set from the animation controller.
+Pointer to the
If the method succeeds, the return value is
Returns the number of animation sets currently registered in the animation controller.
+Number of animation sets.
The controller contains any number of animation sets and tracks. Animation sets can be registered with RegisterAnimationOutput. An animation controller created by a call to
Gets an animation set.
+Index of the animation set.
Pointer to the
If the method succeeds, the return value is
The animation controller contains an array of animation sets. This method returns one of them at the given index.
+Gets an animation set, given its name.
+String containing the name of the animation set.
Pointer to the
If the method succeeds, the return value is
The animation controller contains an array of animation sets. This method returns an animation set that has the given name.
+Animates the mesh and advances the global animation time by a specified amount.
+Amount, in seconds, by which to advance the global animation time. TimeDelta value must be non-negative or zero.
Pointer to a user-defined animation callback handler interface,
If the method succeeds, the return value is
Resets the global animation time to zero. Any pending events will retain their original schedules, but in the new timeframe.
+If the method succeeds, the return value is
This method is typically used when the global animation time value is nearing the maximum precision of DOUBLE storage, or 2^64 - 1.
+Gets the global animation time.
+Returns the global animation time.
Animations are designed using a local animation time and mixed into global time with AdvanceTime.
+Applies the animation set to the specified track.
+Identifier of the track to which the animation set is applied.
Pointer to the
If the method succeeds, the return value is
This method sets the animation set to the specified track for mixing. The animation set for each track is blended according to the track weight and speed when AdvanceTime is called.
+Gets the animation set for the given track.
+Track identifier.
Pointer to the
If the method succeeds, the return value is
Sets the priority blending weight for the specified animation track.
+Track identifier.
Track priority. This parameter should be set to one of the constants from
If the method succeeds, the return value is
Sets the track speed. The track speed is similar to a multiplier that is used to speed up or slow down the playback of the track.
+Identifier of the track to set the speed on.
New speed.
If the method succeeds, the return value is
Sets the track weight. The weight is used to determine how to blend multiple tracks together.
+Identifier of the track to set the weight on.
Weight value.
If the method succeeds, the return value is
Sets the track to the specified local animation time.
+Track identifier.
Local animation time value to assign to the track.
If the method succeeds, the return value is
Enables or disables a track in the animation controller.
+Identifier of the track to be mixed.
Enable value. Set to TRUE to enable this track in the controller, or to
If the method succeeds, the return value is
To mix a track with other tracks, the Enable flag must be set to TRUE. Conversely, setting the flag to
Sets the track description.
+Identifier of the track to modify.
Description of the track.
If the method succeeds, the return value is
Gets the track description.
+Track identifier.
Pointer to the track description. See
If the method succeeds, the return value is
Sets the priority blending weight used by the animation controller.
+Priority blending weight used by the animation controller.
If the method succeeds, the return value is
The blend weight is used to blend high and low priority tracks together.
+Gets the current priority blending weight used by the animation controller.
+Returns the current priority blending weight.
The priority blending weight is used to blend high and low priority tracks together.
+Sets an event key that changes the rate of play of an animation track.
+Identifier of the track to modify.
New speed of the animation track.
Global time key. Specifies the global time when the change will take place.
Transition time, which specifies how long the smooth transition will take to complete.
Specifies the transition type used for transitioning between speeds. See
Event handle to the priority blend event.
Sets an event key that changes the weight of an animation track. The weight is used as a multiplier when combining multiple tracks together.
+Identifier of the track to modify.
New weight of the track.
Global time key. Specifies the global time when the change will take place.
Transition time, which specifies how long the smooth transition will take to complete.
Specifies the transition type used for transitioning between weights. See
Event handle to the priority blend event.
The weight is used like a multiplier to determine how much of this track to blend together with other tracks.
+Sets an event key that changes the local time of an animation track.
+Identifier of the track to modify.
New local time of the animation track.
Global time key. Specifies the global time when the change will take place.
Event handle to the priority blend event.
Sets an event key that enables or disables an animation track.
+Identifier of the animation track to modify.
Enable flag. Set this to TRUE to enable the animation track, or to
Global time key. Specifies the global time when the change will take place.
Event handle to the priority blend event.
Sets blending event keys for the specified animation track.
+Number between 0 and 1 that is used to blend tracks together.
Global time to start the blend.
Global time duration of the blend.
Specifies the transition type used for the duration of the blend. See
Event handle to the priority blend event.
The animation controller blends in three phases: low priority tracks are blended first, high priority tracks are blended second, and then the results of both are blended.
+Removes a specified event from an animation track, preventing the execution of the event.
+Event handle to the event to be removed from the animation track.
If the method succeeds, the return value is
Removes all events from a specified animation track.
+Identifier of the track on which all events should be removed.
If the method succeeds, the return value is
This method prevents the execution of all events previously scheduled on the track, and discards all data associated with those events.
+Removes all scheduled priority blend events from the animation controller.
+If the method succeeds, the return value is
Returns an event handle to the event currently running on the specified animation track.
+Track identifier.
Type of event to query.
Event handle to the event currently running on the specified track.
Returns an event handle to a priority blend event that is currently running.
+Event handle to the currently running priority blend event.
Returns an event handle to the next event scheduled to occur after a specified event on an animation track.
+Track identifier.
Event handle to a specified event after which to search for a following event. If set to
Event handle to the next event scheduled to run on the specified track.
This method can be used iteratively to locate a desired event by repeatedly passing in
Note: Do not iterate further after the method has returned
Returns an event handle to the next priority blend event scheduled to occur after a specified event.
+Event handle to a specified event after which to search for a following priority blend event. If set to
Event handle to the next scheduled priority blend event.
This method can be used iteratively to locate a desired priority blend event by repeatedly passing in
Note: Do not iterate further after the method has returned
Checks whether a specified event handle is valid and the animation event has not yet completed.
+Event handle to an animation event.
Returns
Returns E_FAIL if the event handle is invalid and/or the event has completed.
The method will indicate that an event handle is valid even if the event is running but has not yet completed.
+Gets a description of a specified animation event.
+Event handle to an animation event to describe.
Pointer to a
If the method succeeds, the return value is
Clones, or copies, an animation controller.
+Maximum number of animation outputs the controller can support.
Maximum number of animation sets the controller can support.
Maximum number of tracks the controller can support.
Maximum number of events the controller can support.
Address of a reference to the cloned
If the method succeeds, the return value is
Get the maximum number of animation outputs the animation controller can support.
+Gets the maximum number of animation sets the animation controller can support.
+Gets the maximum number of tracks in the animation controller.
+The maximum number of tracks the controller can manage.
+Gets the maximum number of events the animation controller can support.
+Returns the number of animation sets currently registered in the animation controller.
+The controller contains any number of animation sets and tracks. Animation sets can be registered with RegisterAnimationOutput. An animation controller created by a call to
Gets the global animation time.
+Animations are designed using a local animation time and mixed into global time with AdvanceTime.
+Gets or sets the current priority blending weight used by the animation controller.
+The priority blending weight is used to blend high and low priority tracks together.
+Returns an event handle to a priority blend event that is currently running.
+This interface encapsulates the minimum functionality required of an animation set by an animation controller. Advanced users might want to implement this interface themselves to suit their specialized needs; for most users, however, the derived
An animation set consists of animations for many nodes for the same animation.
The LPD3DXANIMATIONSET type is defined as a reference to this interface.
typedef interface ID3DXAnimationSet ID3DXAnimationSet;
typedef interface ID3DXAnimationSet *LPD3DXANIMATIONSET;
Gets the animation set name.
+Name of the animation set.
Gets the period of the animation set.
+Period of the animation set.
The period is the range of time that the animation key frames are valid. For looping animations, this is the period of the loop. The time units that the key frames are specified in (for example, seconds) is determined by the application.
+Returns time position in the local timeframe of an animation set.
+Local time of the animation set.
Time position as measured in the timeframe of the animation set. This value will be bounded by the period of the animation set.
The time position returned by this method can be used as the PeriodicPosition parameter of
Gets the number of animations in the animation set.
+Number of animations in the animation set.
Gets the name of an animation, given its index.
+Index of the animation.
Address of a reference to a string that receives the animation name.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Gets the index of an animation, given its name.
+Name of the animation.
Pointer to the animation index.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Gets the scale, rotation, and translation values of the animation set.
+Position of the animation set. The position can be obtained by calling
Animation index.
Pointer to the
Pointer to the
Pointer to the
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Gets information about a specific callback in the animation set.
+Position from which to find callbacks.
Callback search flags. This parameter can be set to a combination of one or more flags from
Pointer to the position of the callback.
Address of the callback data reference.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Gets the animation set name.
+Gets the period of the animation set.
+The period is the range of time that the animation key frames are valid. For looping animations, this is the period of the loop. The time units that the key frames are specified in (for example, seconds) is determined by the application.
+Gets the number of animations in the animation set.
+Locks a vertex buffer and obtains a reference to the vertex buffer memory.
+When working with vertex buffers, you are allowed to make multiple lock calls; however, you must ensure that the number of lock calls match the number of unlock calls. DrawPrimitive calls will not succeed with any outstanding lock count on any currently set vertex buffer.
+Draws a subset of a mesh.
+DWORD that specifies which subset of the mesh to draw. This value is used to differentiate faces in a mesh as belonging to one or more attribute groups.
If the method succeeds, the return value is
The subset that is specified by AttribId will be rendered by the
An attribute table is used to identify areas of the mesh that need to be drawn with different textures, render states, materials, and so on. In addition, the application can use the attribute table to hide portions of a mesh by not drawing a given attribute identifier (AttribId) when drawing the frame.
+Retrieves the number of faces in the mesh.
+Returns the number of faces in the mesh.
Retrieves the number of vertices in the mesh.
+Returns the number of vertices in the mesh.
Gets the fixed function vertex value.
+Returns the flexible vertex format (FVF) codes.
This method can return 0 if the vertex format cannot be mapped directly to an FVF code. This will occur for a mesh created from a vertex declaration that doesn't have the same order and elements supported by the FVF codes.
+Retrieves a declaration describing the vertices in the mesh.
+Array of
If the method succeeds, the return value is
The array of elements includes the D3DDECL_END macro, which ends the declaration.
+Gets the number of bytes per vertex.
+Returns the number of bytes per vertex.
Retrieves the mesh options enabled for this mesh at creation time.
+Returns a combination of one or more of the following flags, indicating the options enabled for this mesh at creation time.
Value | Description |
---|---|
Use 32-bit indices. | |
Use the | |
Equivalent to specifying both | |
Use the | |
Specifying this flag causes the vertex and index buffer of the mesh to be created with | |
Equivalent to specifying both | |
Use the | |
Use the | |
Use the | |
Use the | |
Use the | |
Equivalent to specifying both | |
Use the | |
Use the | |
Use the | |
Use the | |
Equivalent to specifying both |
?
Retrieves the device associated with the mesh.
+Address of a reference to an
If the method succeeds, the return value is
Calling this method will increase the internal reference count on the
Clones a mesh using a flexible vertex format (FVF) code.
+A combination of one or more D3DXMESH flags specifying creation options for the mesh.
Combination of FVF codes, which specifies the vertex format for the vertices in the output mesh. For the values of the codes, see
Pointer to an
Address of a reference to an
If the method succeeds, the return value is
Clones a mesh using a declarator.
+A combination of one or more D3DXMESH flags specifying creation options for the mesh.
An array of
Pointer to an
Address of a reference to an
If the method succeeds, the return value is
Retrieves the vertex buffer associated with the mesh.
+Address of a reference to an
If the method succeeds, the return value is
Retrieves the data in an index buffer.
+Address of a reference to an
If the method succeeds, the return value is
Locks a vertex buffer and obtains a reference to the vertex buffer memory.
+Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
VOID* reference to a buffer containing the vertex data.
If the method succeeds, the return value is
When working with vertex buffers, you are allowed to make multiple lock calls; however, you must ensure that the number of lock calls match the number of unlock calls. DrawPrimitive calls will not succeed with any outstanding lock count on any currently set vertex buffer.
+Unlocks a vertex buffer.
+If the method succeeds, the return value is
Locks an index buffer and obtains a reference to the index buffer memory.
+Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
VOID* reference to a buffer containing the index data. The count of indices in this buffer will be equal to
If the method succeeds, the return value is
When working with index buffers, you are allowed to make multiple lock calls. However, you must ensure that the number of lock calls match the number of unlock calls. DrawPrimitive calls will not succeed with any outstanding lock count on any currently set index buffer.
+Unlocks an index buffer.
+If the method succeeds, the return value is
Retrieves either an attribute table for a mesh, or the number of entries stored in an attribute table for a mesh.
+Pointer to an array of
Pointer to either the number of entries stored in pAttribTable or a value to be filled in with the number of entries stored in the attribute table for the mesh.
If the method succeeds, the return value is
An attribute table is created by
An attribute table is used to identify areas of the mesh that need to be drawn with different textures, render states, materials, and so on. In addition, the application can use the attribute table to hide portions of a mesh by not drawing a given attribute identifier when drawing the frame.
+Converts point representative data to mesh adjacency information.
+Pointer to an array of one DWORD per vertex of the mesh that contains point representative data. This parameter is optional. Supplying a
Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh. The number of bytes in this array must be at least 3 *
If the method succeeds, the return value is
Converts mesh adjacency information to an array of point representatives.
+Pointer to an array of three DWORDs per face that specify the three neighbors for each face in the mesh. The number of bytes in this array must be at least 3 *
Pointer to an array of one DWORD per vertex of the mesh that will be filled with point representative data.
If the method succeeds, the return value is
Generate a list of mesh edges, as well as a list of faces that share each edge.
+Specifies that vertices that differ in position by less than epsilon should be treated as coincident.
Pointer to an array of three DWORDs per face to be filled with the indices of adjacent faces. The number of bytes in this array must be at least 3 *
If the method succeeds, the return value is
After an application generates adjacency information for a mesh, the mesh data can be optimized for better drawing performance.
The order of the entries in the adjacency buffer is determined by the order of the vertex indices in the index buffer. The adjacent triangle 0 always corresponds to the edge between the indices of the corners 0 and 1. The adjacent triangle 1 always corresponds to the edge between the indices of the corners 1 and 2 while the adjacent triangle 2 corresponds to the edge between the indices of the corners 2 and 0.
+This method allows the user to change the mesh declaration without changing the data layout of the vertex buffer. The call is valid only if the old and new declaration formats have the same vertex size.
+An array of
If the method succeeds, the return value is
Gets the fixed function vertex value.
+This method can return 0 if the vertex format cannot be mapped directly to an FVF code. This will occur for a mesh created from a vertex declaration that doesn't have the same order and elements supported by the FVF codes.
+Gets the number of bytes per vertex.
+Retrieves the mesh options enabled for this mesh at creation time.
+Retrieves the device associated with the mesh.
+Calling this method will increase the internal reference count on the
Retrieves the vertex buffer associated with the mesh.
+Retrieves the data in an index buffer.
+An application uses the methods of this interface to implement a key frame animation set stored in a compressed data format.
+Create a compressed-format key frame animation set with
The LPD3DXCOMPRESSEDANIMATIONSET type is defined as a reference to this interface.
typedef interface ID3DXCompressedAnimationSet ID3DXCompressedAnimationSet;
typedef interface ID3DXCompressedAnimationSet *LPD3DXCOMPRESSEDANIMATIONSET;
Gets the type of the animation set playback loop.
+Type of the animation set playback loop. See
Gets the number of animation key frame ticks that occur per second.
+Number of animation key frame ticks that occur per second.
Gets the data buffer that stores compressed key frame animation data.
+Address of a reference to the
If the method succeeds, the return value is
Gets the number of callback keys in the animation set.
+Number of callback keys in the animation set.
Fills an array with callback key data used for key frame animation.
+Pointer to a user-allocated array of
If the method succeeds, the return value is
Gets the type of the animation set playback loop.
+Gets the number of animation key frame ticks that occur per second.
+Gets the data buffer that stores compressed key frame animation data.
+Gets the number of callback keys in the animation set.
+This is a user-implemented interface that allows a user to set the device state from an effect. Each of the methods in this interface must be implemented by the user and will then be used as callbacks to the application when either of the following occur:
When an application uses the state manager to implement custom callbacks, an effect no longer automatically saves and restores state when calling
A user creates an
The LPD3DXEFFECTSTATEMANAGER type is defined as a reference to this interface.
typedef interface ID3DXEffectStateManager ID3DXEffectStateManager;
typedef interface ID3DXEffectStateManager *LPD3DXEFFECTSTATEMANAGER;
A callback function that must be implemented by a user to set a transform.
+The type of transform to apply the matrix to. See
A transformation matrix. See
The user-implemented method should return
A callback function that must be implemented by a user to set material state.
+A reference to the material state. See
The user-implemented method should return
A callback function that must be implemented by a user to set a light.
+The zero-based index of the light. This is the same index in
The light object. See
The user-implemented method should return
A callback function that must be implemented by a user to enable/disable a light.
+The zero-based index of the light. This is the same index in
True to enable the light, false otherwise.
The user-implemented method should return
A callback function that must be implemented by a user to set render state.
+The render state to set.
The render state value. See Effect States (Direct3D 9).
The user-implemented method should return
A callback function that must be implemented by a user to set a texture.
+The stage to which the texture is assigned. This is the index value in
A reference to the texture object. This can be any of the Direct3D texture types (cube, volume, etc.). See
The user-implemented method should return
A callback function that must be implemented by a user to set the texture stage state.
+The stage that the texture is assigned to. This is the index value in
Defines the type of operation that a texture stage will perform. See
Can be either an operation (
The user-implemented method should return
A callback function that must be implemented by a user to set a sampler.
+The zero-based sampler number.
Identifies sampler state, which can specify the filtering, addressing, or the border color. See
A value from one of the sampler state types in Type.
The user-implemented method should return
A callback function that must be implemented by a user to set the number of subdivision segments for N-patches.
+Break the surface into this number of subdivisions. This is the same as the number used by
The user-implemented method should return
A callback function that must be implemented by a user to set an FVF code.
+The FVF constant, that determines how to interpret vertex data. See
The user-implemented method should return
A callback function that must be implemented by a user to set a vertex shader.
+A reference to a vertex shader object. See
The user-implemented method should return
A callback function that must be implemented by a user to set an array of vertex shader floating-point constants.
+The zero-based index of the first constant register.
An array of floating-point constants.
The number of registers in pConstantData.
The user-implemented method should return
A callback function that must be implemented by a user to set an array of vertex shader integer constants.
+The zero-based index of the first constant register.
An array of integer constants.
The number of registers in pConstantData.
The user-implemented method should return
A callback function that must be implemented by a user to set an array of vertex shader Boolean constants.
+The zero-based index of the first constant register.
An array of Boolean constants.
The number of registers in pConstantData.
The user-implemented method should return
A callback function that must be implemented by a user to set a pixel shader.
+A reference to a pixel shader object. See
The user-implemented method should return
A callback function that must be implemented by a user to set an array of vertex shader floating-point constants.
+The zero-based index of the first constant register.
An array of floating-point constants.
The number of registers in pConstantData.
The user-implemented method should return
A callback function that must be implemented by a user to set an array of vertex shader integer constants.
+The zero-based index of the first constant register.
An array of integer constants.
The number of registers in pConstantData.
The user-implemented method should return
A callback function that must be implemented by a user to set an array of vertex shader Boolean constants.
+The zero-based index of the first constant register.
An array of Boolean constants.
The number of registers in pConstantData.
The user-implemented method should return
A callback function that must be implemented by a user to set material state.
+A callback function that must be implemented by a user to set the number of subdivision segments for N-patches.
+A callback function that must be implemented by a user to set an FVF code.
+A callback function that must be implemented by a user to set a vertex shader.
+A callback function that must be implemented by a user to set a pixel shader.
+This interface is implemented by the application to allocate or free frame and mesh container objects. Methods on this are called during loading and destroying frame hierarchies.
+The LPD3DXALLOCATEHIERARCHY type is defined as a reference to this interface.
typedef interface ID3DXAllocateHierarchy ID3DXAllocateHierarchy;
typedef interface ID3DXAllocateHierarchy *LPD3DXALLOCATEHIERARCHY;
Requests allocation of a frame object.
+Name of the frame to be created.
Returns the created frame object.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Requests allocation of a mesh container object.
+Name of the mesh.
Pointer to the mesh data structure. See
Array of materials used in the mesh.
Array of effect instances used in the mesh. See
Number of materials in the materials array.
Adjacency array for the mesh.
Pointer to the skin mesh object if skin data is found. See
Returns the created mesh container. See
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Requests deallocation of a frame object.
+Pointer to the frame to be deallocated.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Requests deallocation of a mesh container object.
+Pointer to the mesh container object to be deallocated.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
This interface is implemented by the application to save any additional user data embedded in .x files. An instance of this interface is passed to
The LPD3DXLOADUSERDATA type is defined as a reference to this interface.
typedef interface ID3DXLoadUserData ID3DXLoadUserData; typedef interface ID3DXLoadUserData *LPD3DXLOADUSERDATA;
Load top level data from a .x file.
+Pointer to a .x file data structure. This is defined in Dxfile.h.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Load frame child data from a .x file.
+Pointer to a mesh container. See
Pointer to a .x file data structure. This is defined in Dxfile.h.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Load mesh child data from a .x file.
+Pointer to a mesh container. See
Pointer to a .x file data structure. This is defined in Dxfile.h.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
A user creates an
The LPD3DXINCLUDE type is defined as a reference to this interface.
typedef interface ID3DXInclude ID3DXInclude; typedef interface ID3DXInclude *LPD3DXINCLUDE;
Applications use the methods of the
The
This interface inherits additional functionality from the
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DINDEXBUFFER9 and PDIRECT3DINDEXBUFFER9 types are defined as references to the
typedef struct IDirect3DIndexBuffer9 *LPDIRECT3DINDEXBUFFER9, *PDIRECT3DINDEXBUFFER9;
Locks a range of index data and obtains a reference to the index buffer memory.
+Offset into the index data to lock, in bytes. Lock the entire index buffer by specifying 0 for both parameters, SizeToLock and OffsetToLock.
Size of the index data to lock, in bytes. Lock the entire index buffer by specifying 0 for both parameters, SizeToLock and OffsetToLock.
VOID* reference to a memory buffer containing the returned index data.
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
If the method succeeds, the return value is
As a general rule, do not hold a lock across more than one frame. When working with index buffers, you are allowed to make multiple lock calls. However, you must ensure that the number of lock calls match the number of unlock calls.
The
See Programming Tips (Direct3D 9) for information about using
Unlocks index data.
+If the method succeeds, the return value is
Retrieves a description of the index buffer resource.
+Pointer to a
If the method succeeds, the return value is
Retrieves a description of the index buffer resource.
+This interface is implemented by the application to save any additional user data embedded in .x files. An instance of this interface is passed to
The LPD3DXSAVEUSERDATA type is defined as a reference to this interface.
typedef interface ID3DXSaveUserData ID3DXSaveUserData; typedef interface ID3DXSaveUserData *LPD3DXSAVEUSERDATA;
Add child data to the frame.
+Pointer to a mesh container. See
Pointer to a .x file save object. Use the reference to call
Pointer to a .x file data node. Use the reference to call
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Add child data to the mesh.
+Pointer to a mesh container. See
Pointer to a .x file save object. Use the reference to call
Pointer to a .x file data node. Use the reference to call
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Add a top level object before the frame hierarchy.
+Pointer to a .x file save object. Use this reference to call IDirectXFileSaveObject::CreateDataObject to create the data object to be saved. Then call IDirectXFileSaveObject::SaveData to save the data.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
Add a top level object after the frame hierarchy.
+Pointer to a .x file save object. Use this reference to call IDirectXFileSaveObject::CreateDataObject to create the data object to be saved. Then call IDirectXFileSaveObject::SaveData to save the data.
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
A callback for the user to register a .x file template.
+Use this reference to register user-defined .x file templates. See
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
A callback for the user to save a .x file template.
+Pointer to a .x file save object. Do not use this parameter to add data objects. See
The return values of this method are implemented by an application programmer. In general, if no error occurs, program the method to return
An application uses the methods of this interface to implement a key frame animation set.
+Create a keyframed animation set with
The LPD3DXKEYFRAMEDANIMATIONSET type is defined as a reference to this interface.
typedef interface ID3DXKeyframedAnimationSet ID3DXKeyframedAnimationSet; typedef interface ID3DXKeyframedAnimationSet *LPD3DXKEYFRAMEDANIMATIONSET;
Gets the type of the animation set playback loop.
+Type of the animation set playback loop. See
Gets the number of animation key frame ticks that occur per second.
+Number of animation key frame ticks that occur per second.
Gets the number of scale keys in the specified key frame animation.
+Animation index.
Number of scale keys in the specified key frame animation.
Fills an array with scale key data used for key frame animation.
+Animation index.
Pointer to a user-allocated array of
If the method succeeds, the return value is
Get scale information for a specific key frame in the animation set.
+Animation index.
Key frame.
Pointer to the scale data. See
If the method succeeds, the return value is
Set scale information for a specific key frame in the animation set.
+Animation index.
Key frame.
Pointer to the scale data. See
If the method succeeds, the return value is
Gets the number of rotation keys in the specified key frame animation.
+Animation index.
Number of rotation keys in the specified key frame animation.
Fills an array with rotational key data used for key frame animation.
+Animation index.
Pointer to a user-allocated array of
If the method succeeds, the return value is
Get rotation information for a specific key frame in the animation set.
+Animation index.
Key frame.
Pointer to the rotation data. See
If the method succeeds, the return value is
Set rotation information for a specific key frame in the animation set.
+Animation index.
Key frame.
Pointer to the rotation data. See
If the method succeeds, the return value is
Gets the number of translation keys in the specified key frame animation.
+Animation index.
Number of translation keys in the specified key frame animation.
Fills an array with translational key data used for key frame animation.
+Animation index.
Pointer to a user-allocated array of
If the method succeeds, the return value is
Get translation information for a specific key frame in the animation set.
+Animation index.
Key Frame.
Pointer to the rotation information. See
If the method succeeds, the return value is
Set translation information for a specific key frame in the animation set.
+Animation index.
Key Frame.
Pointer to the translation data. See
If the method succeeds, the return value is
Gets the number of callback keys in the animation set.
+Number of callback keys in the animation set.
Fills an array with callback key data used for key frame animation.
+Pointer to a user-allocated array of
If the method succeeds, the return value is
Gets information about a specific callback in the animation set.
+Animation index.
Pointer to the callback function.
If the method succeeds, the return value is
Sets information about a specific callback in the animation set.
+Animation index.
Pointer to the callback function.
If the method succeeds, the return value is
Removes the scale data at the specified key frame.
+Animation identifier.
Key frame.
If the method succeeds, the return value is
This method is slow and should not be used after an animation has begun to play.
+Removes the rotation data at the specified key frame.
+Animation identifier.
Key frame.
If the method succeeds, the return value is
This method is slow and should not be used after an animation has begun to play.
+Removes the translation data at the specified key frame.
+Animation identifier.
Key frame.
If the method succeeds, the return value is
This method is slow and should not be used after an animation has begun to play.
+Register the scale, rotate, and translate (SRT) key frame data for an animation.
+Pointer to the animation name.
Number of scale keys.
Number of rotation keys.
Number of translation keys.
Address of a reference to a user-allocated array of
Address of a reference to a user-allocated array of
Address of a reference to a user-allocated array of
Returns the animation index.
If the method succeeds, the return value is
Transforms animations in an animation set into a compressed format and returns a reference to the buffer that stores the compressed data.
+One of the
Desired compression loss ratio, in the range from 0 to 1.
Pointer to a
Address of a reference to the
If the method succeeds, the return value is
Remove the animation data from the animation set.
+The animation index.
If the method succeeds, the return value is
Gets the type of the animation set playback loop.
+Gets the number of animation key frame ticks that occur per second.
+Gets the number of callback keys in the animation set.
+The
Create a line drawing object with
The LPD3DXLINE type is defined as a reference to the
typedef interface ID3DXLine ID3DXLine; typedef interface ID3DXLine *LPD3DXLINE;
Retrieves the Direct3D device associated with the line object.
+Address of a reference to an
If the method succeeds, the return value is
Prepares a device for drawing lines.
+If the method succeeds, the return value is
Calling
This method must be called from inside an
Draws a line strip in screen space. Input is in the form of an array that defines points (of
If the method succeeds, the return value is
Draws a line strip in screen space with a specified input transformation matrix.
+Array of vertices that make up the line. See
Number of vertices in the vertex list.
A scale, rotate, and translate (SRT) matrix for transforming the points. See
Color of the line. See
If the method succeeds, the return value is
Applies a stipple pattern to the line.
+Describes the stipple pattern: 1 is opaque, 0 is transparent.
If the method succeeds, the return value is
Gets the line stipple pattern.
+Returns the line stipple pattern: 1 is opaque, 0 is transparent.
Stretches the stipple pattern along the line direction.
+Stipple pattern scaling value. 1.0f is the default value and represents no scaling. A value less than 1.0f shrinks the pattern, and a value greater than 1.0 stretches the pattern.
If the method succeeds, the return value is
Gets the stipple-pattern scale value.
+Returns the value used to scale the stipple-pattern. 1.0f is the default value and represents no scaling. A value less than 1.0f shrinks the pattern, and a value greater than 1.0 stretches the pattern.
Specifies the thickness of the line.
+Describes the line width.
If the method succeeds, the return value is
Gets the thickness of the line.
+The line thickness.
Toggles line antialiasing.
+Toggles antialiasing on and off. TRUE turns antialiasing on, and
If the method succeeds, the return value is
Gets the line antialiasing state.
+Returns the antialiasing switch value. TRUE means antialiasing is on, and
Toggles the mode to draw OpenGL-style lines.
+Toggles OpenGL-style line drawing. TRUE enables OpenGL-style lines, and
If the method succeeds, the return value is
Gets the OpenGL-style line-drawing mode.
+Returns TRUE if OpenGL-style lines are enabled, and
Restores the device state to how it was when
If the method succeeds, the return value is
Use this method to release all references to video memory resources and delete all stateblocks. This method should be called whenever a device is lost, or before resetting a device.
+If the method succeeds, the return value is
This method should be called whenever the device is lost or before the user calls
Use this method to re-acquire resources and save initial state.
+If the method succeeds, the return value is
Retrieves the Direct3D device associated with the line object.
+Gets or sets the line stipple pattern.
+Gets or sets the stipple-pattern scale value.
+Gets or sets the thickness of the line.
+Gets or sets the line antialiasing state.
+Gets or sets the OpenGL-style line-drawing mode.
+Applications use the methods of the
To obtain the
This interface inherits additional functionality from the
The LPD3DXMESH type is defined as a reference to the
typedef struct ID3DXMesh *LPD3DXMESH;
Locks the mesh buffer that contains the mesh attribute data, and returns a reference to it.
+Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
Address of a reference to a buffer containing a DWORD for each face in the mesh.
If the method succeeds, the return value is
If
Unlocks an attribute buffer.
+If the method succeeds, the return value is
Generates a new mesh with reordered faces and vertices to optimize drawing performance.
+Specifies the type of optimization to perform. This parameter can be set to a combination of one or more flags from D3DXMESHOPT and D3DXMESH (except
Pointer to an array of three DWORDs per face that specifies the three neighbors for each face in the source mesh. If the edge has no adjacent faces, the value is 0xffffffff. See Remarks.
Pointer to an array of three DWORDs per face that specifies the three neighbors for each face in the optimized mesh. If the edge has no adjacent faces, the value is 0xffffffff.
An array of DWORDs, one per face, that identifies the original mesh face that corresponds to each face in the optimized mesh. If the value supplied for this argument is
Address of a reference to an
Address of a reference to an
If the method succeeds, the return value is
This method generates a new mesh. Before running Optimize, an application must generate an adjacency buffer by calling
This method is very similar to the
Generates a mesh with reordered faces and vertices to optimize drawing performance. This method reorders the existing mesh.
+Combination of one or more D3DXMESHOPT flags, specifying the type of optimization to perform.
Pointer to an array of three DWORDs per face that specifies the three neighbors for each face in the source mesh. If the edge has no adjacent faces, the value is 0xffffffff.
Pointer to an array of three DWORDs per face that specifies the three neighbors for each face in the optimized mesh. If the edge has no adjacent faces, the value is 0xffffffff. If the value supplied for this argument is
An array of DWORDs, one per face, that identifies the original mesh face that corresponds to each face in the optimized mesh. If the value supplied for this argument is
Address of a reference to an
If the method succeeds, the return value is
Before running
Note: This method will fail if the mesh is sharing its vertex buffer with another mesh, unless the
Sets the attribute table for a mesh and the number of entries stored in the table.
+Pointer to an array of
Number of attributes in the mesh attribute table.
If the method succeeds, the return value is
If an application keeps track of the information in an attribute table, and rearranges the table as a result of changes to attributes or faces, this method allows the application to update the attribute tables instead of calling
This interface encapsulates patch mesh functionality.
+A patch mesh is a mesh that consists of a series of patches.
To obtain the
The LPD3DXPATCHMESH type is defined as a reference to the
typedef struct ID3DXPatchMesh *LPD3DXPATCHMESH;
Gets the number of patches in the mesh.
+The number of patches.
Gets the number of vertices in the mesh.
+The number of vertices.
Gets the vertex declaration.
+Array of
If the method succeeds, the return value is
The array of elements includes the D3DDECL_END macro, which ends the declaration.
+Gets the number of control vertices per patch.
+The number of control vertices per patch.
Gets the type of patch.
+The patch type.
For more information about patch types, see
Gets the device that created the mesh.
+Pointer to the device.
If the method succeeds, the return value is
Gets the attributes of the patch.
+Pointer to the structures containing the patch attributes. For more information about patch attributes, see
If the method succeeds, the return value is
Gets the mesh vertex buffer.
+Pointer to the vertex buffer.
If the method succeeds, the return value is
This method assumes uniform tessellation.
+Gets the mesh index buffer.
+Pointer to the index buffer.
If the method succeeds, the return value is
The index buffer contains the vertex ordering in the vertex buffer. The index buffer is used to access the vertex buffer when the mesh is rendered.
+Lock the vertex buffer.
+Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
VOID* reference to a memory buffer containing the returned vertex data.
If the method succeeds, the return value is
The vertex buffer is usually locked, written to, and then unlocked for reading.
Patch meshes use 16-bit index buffers.
+Unlock the vertex buffer.
+If the method succeeds, the return value is
The vertex buffer is usually locked, written to, and then unlocked for reading.
+Lock the index buffer.
+Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
VOID* reference to a memory buffer containing the returned index data.
If the method succeeds, the return value is
The index buffer is usually locked, written to, and then unlocked for reading. Patch mesh index buffers are 16-bit buffers.
+Unlock the index buffer.
+If the method succeeds, the return value is
The index buffer is usually locked, written to, and then unlocked for reading.
+Locks the attribute buffer.
+Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
Address of a reference to a buffer containing a DWORD for each face in the mesh.
If the method succeeds, the return value is
The attribute buffer is usually locked, written to, and then unlocked for reading.
+Unlock the attribute buffer.
+If the method succeeds, the return value is
The attribute buffer is usually locked, written to, and then unlocked for reading.
+Gets the size of the tessellated mesh, given a tessellation level.
+Tessellation level.
Adaptive tessellation. For adaptive tessellation, set this value to TRUE and set fTessLevel to the maximum tessellation value. This will result in the maximum mesh size necessary for adaptive tessellation.
Pointer to the number of triangles generated by the tessellated mesh.
Pointer to the number of vertices generated by the tessellated mesh.
If the method succeeds, the return value is
This method assumes uniform tessellation.
+Generate a list of mesh edges and the patches that share each edge.
+Specifies that vertices that differ in position by less than the tolerance should be treated as coincident.
If the method succeeds, the return value is
After an application generates adjacency information for a mesh, the mesh data can be optimized for better drawing performance. This method determines which patches are adjacent (within the provided tolerance). This information is used internally to optimize tessellation.
+Creates a new patch mesh with the specified vertex declaration.
+Combination of one or more D3DXMESH flags that specify creation options for the mesh.
Array of
Address of a reference to an
If the method succeeds, the return value is
CloneMesh converts the vertex buffer to the new vertex declaration. Entries in the vertex declaration that are new to the original mesh are set to 0. If the current mesh has adjacency, the new mesh will also have adjacency.
+Optimizes the patch mesh for efficient tessellation.
+Currently unused.
If the method succeeds, the return value is
After an application generates adjacency information for a mesh, the mesh data can be optimized (reordered) for better drawing performance. This method determines which patches are adjacent (within the provided tolerance).
Adjacency information is also used to optimize tessellation. Generate adjacency information once and tessellate repeatedly by calling
Sets mesh geometry displacement parameters.
+Texture containing the displacement data.
Minification level. For more information, see
Magnification level. For more information, see
Mip filter level. For more information, see
Texture address wrap mode. For more information, see
Level of detail bias value.
If the method succeeds, the return value is
Displacement maps can only be 2D textures. Mipmapping is ignored for nonadaptive tessellation.
+Gets mesh geometry displacement parameters.
+Texture containing the displacement data.
Minification level. For more information, see
Magnification level. For more information, see
Mip filter level. For more information, see
Texture address wrap mode. For more information, see
Level of detail bias value.
If the method succeeds, the return value is
Displacement maps can only be 2D textures. Mipmapping is ignored for nonadaptive tessellation.
+Performs uniform tessellation based on the tessellation level.
+Tessellation level. This is the number of vertices introduced between existing vertices. The range of this float parameter is 0 < fTessLevel <= 32.
Resulting tessellated mesh. See
If the method succeeds, the return value is
This function will perform more efficiently if the patch mesh has been optimized using
Performs adaptive tessellation based on the z-based adaptive tessellation criterion.
+Specifies a 4D vector that is dotted with the vertices to get the per-vertex adaptive tessellation amount. Each edge is tessellated to the average value of the tessellation levels for the two vertices it connects.
Maximum limit for adaptive tessellation. This is the number of vertices introduced between existing vertices. This integer value can range from 1 to 32, inclusive.
Minimum limit for adaptive tessellation. This is the number of vertices introduced between existing vertices. This integer value can range from 1 to 32, inclusive.
Resulting tessellated mesh. See
If the method succeeds, the return value is
This function will perform more efficiently if the patch mesh has been optimized using
Gets the number of patches in the mesh.
+Gets the number of vertices in the mesh.
+Gets the number of control vertices per patch.
+Gets the type of patch.
+For more information about patch types, see
Gets the device that created the mesh.
+Gets the mesh vertex buffer.
+This method assumes uniform tessellation.
+Gets the mesh index buffer.
+The index buffer contains the vertex ordering in the vertex buffer. The index buffer is used to access the vertex buffer when the mesh is rendered.
+Applications use the methods of the
The LPDIRECT3DPIXELSHADER9 and PDIRECT3DPIXELSHADER9 types are defined as references to the
typedef struct IDirect3DPixelShader9 *LPDIRECT3DPIXELSHADER9, *PDIRECT3DPIXELSHADER9;
Gets the device.
+Pointer to the
If the method succeeds, the return value is
Gets a reference to the shader data.
+Pointer to a buffer that contains the shader data. The application needs to allocate enough room for this.
Size of the data, in bytes. To get the buffer size that is needed to retrieve the data, set pData =
If the method succeeds, the return value is
Gets the device.
+Generates a new mesh with reordered faces and vertices to optimize drawing performance.
+Specifies the type of optimization to perform. This parameter can be set to a combination of one or more flags from D3DXMESHOPT and D3DXMESH (except
Pointer to an array of three DWORDs per face that specifies the three neighbors for each face in the optimized mesh. If the edge has no adjacent faces, the value is 0xffffffff.
An array of DWORDs, one per face, that identifies the original mesh face that corresponds to each face in the optimized mesh. If the value supplied for this argument is
Address of a reference to an
Address of a reference to an
If the method succeeds, the return value is
This method generates a new mesh. Before running Optimize, an application must generate an adjacency buffer by calling
This method is very similar to the
Applications use the methods of the
The LPDIRECT3DQUERY9 and PDIRECT3DQUERY9 types are defined as references to the
typedef struct IDirect3DQuery9 *LPDIRECT3DQUERY9, *PDIRECT3DQUERY9;
Gets the device that is being queried.
+Pointer to the device being queried. See
If the method succeeds, the return value is
Gets the query type.
+Returns the query type. See
Gets the number of bytes in the query data.
+Returns the number of bytes of query data.
Issue a query.
+Query flags specify the type of state change for the query. See
If the method succeeds, the return value is
A signaled query means the query has completed, the data is available, and
Polls a queried resource to get the query state or a query result. For more information about queries, see Queries (Direct3D 9).
+The return type identifies the query state (see Queries (Direct3D 9)). The method returns
It is possible to lose the device while polling for query status. When D3DGETDATA_FLUSH is specified, this method will return
An application must never write code that only invokes GetData ( ... , 0 ), expecting that GetData will eventually return
// Enables an infinite loop:
while( pQuery->GetData( ... , 0 ) == S_FALSE )
    ;

// Still enables an infinite loop:
pQuery->GetData( ... , D3DGETDATA_FLUSH );
while( pQuery->GetData( ... , 0 ) == S_FALSE )
    ;

// Does not enable an infinite loop because eventually the command
// buffer will fill up and that will cause a flush to occur.
while( pQuery->GetData( ..., 0 ) == S_FALSE )
{
    pDevice->SetTexture(...);
    pDevice->Draw(...);
}
Gets the device that is being queried.
+Gets the number of bytes in the query data.
+The
An environment map is used to texture-map scene geometry to provide a more sophisticated scene without using complex geometry. This interface supports creating surfaces for the following kinds of geometry: cube, half sphere or hemispheric, parabolic, or sphere.
The
The LPD3DXRenderToEnvMap type is defined as a reference to the
typedef interface ID3DXRenderToEnvMap ID3DXRenderToEnvMap; typedef interface ID3DXRenderToEnvMap *LPD3DXRenderToEnvMap;
Retrieves the Direct3D device associated with the environment map.
+Address of a reference to an
If the method succeeds, the return value is
Retrieves the description of the render surface.
+Pointer to a
If the method succeeds, the return value is
Initiate the rendering of a cubic environment map.
+Pointer to an
If the method succeeds, the return value is
See
Initiate the rendering of a spherical environment map.
+Pointer to an
If the method succeeds, the return value is
See
Initiate the rendering of a hemispheric environment map.
+Pointer to an
Pointer to an
If the method succeeds, the return value is
See
Initiate the rendering of a parabolic environment map.
+Pointer to an
Pointer to an
If the function succeeds, the return value is
See
Initiate the drawing of each face of an environment map.
+The first face of the environmental cube map. See
A valid combination of one or more
If the method succeeds, the return value is
This method must be called once for each type of environment map. The only exception is a cubic environment map which requires this method to be called six times, once for each face in
Restore all render targets and, if needed, compose all the rendered faces into the environment map surface.
+A valid combination of one or more
If the method succeeds, the return value is
Use this method to release all references to video memory resources and delete all stateblocks. This method should be called whenever a device is lost, or before resetting a device.
+If the method succeeds, the return value is
This method should be called whenever the device is lost or before the user calls
Use this method to re-acquire resources and save initial state.
+If the method succeeds, the return value is
Retrieves the Direct3D device associated with the environment map.
+Retrieves the description of the render surface.
+The
Surfaces can be used in a variety of ways including render targets, off-screen rendering, or rendering to textures.
A surface can be configured using a separate viewport using the
The
The LPD3DXRENDERTOSURFACE type is defined as a reference to the
typedef interface ID3DXRenderToSurface ID3DXRenderToSurface; typedef interface ID3DXRenderToSurface *LPD3DXRENDERTOSURFACE;
Retrieves the Direct3D device associated with the render surface.
+Address of a reference to an
If the method succeeds, the return value is
Retrieves the parameters of the render surface.
+Pointer to a
If the method succeeds, the return value is
Begins a scene.
+Pointer to an
Pointer to a
If the method succeeds, the return value is
Ends a scene.
+Filter options, enumerated in
If the method succeeds, the return value is
Use this method to release all references to video memory resources and delete all stateblocks. This method should be called whenever a device is lost or before resetting a device.
+If the method succeeds, the return value is
This method should be called whenever the device is lost or before the user calls
Use this method to re-acquire resources and save initial state.
+If the method succeeds, the return value is
Pointer to an
Width of the render surface, in pixels.
Height of the render surface, in pixels.
Member of the
If TRUE, the render surface supports a depth-stencil surface. Otherwise, this member is set to
If DepthStencil is set to TRUE, this parameter is a member of the
Retrieves the Direct3D device associated with the render surface.
+Retrieves the parameters of the render surface.
+Sets the influence value for a bone.
+Bone number.
Number of influences.
The array of vertices influenced by a bone.
The array of weights influenced by a bone.
If the method succeeds, the return value is
Sets an influence value of a bone on a single vertex.
+Index of the bone. Must be between 0 and the number of bones.
Index of the influence array of the specified bone.
Blend factor of the specified bone influence.
If the method succeeds, the return value is
Gets the number of influences for a bone.
+Bone number.
Returns the number of influences for a bone.
Gets the vertices and weights that a bone influences.
+Bone number.
Get the array of vertices influenced by a bone.
Get the array of weights influenced by a bone.
If the method succeeds, the return value is
Use
Retrieves the blend factor and vertex affected by a specified bone influence.
+Index of the bone. Must be between 0 and the number of bones.
Index of the influence array of the specified bone.
Pointer to the blend factor influenced by influenceNum.
Pointer to the vertex influenced by influenceNum.
If the method succeeds, the return value is
Gets the maximum number of influences for any vertex in the mesh.
+Pointer to the maximum vertex influence.
If the method succeeds, the return value is
Gets the number of bones.
+Returns the number of bones.
Retrieves the index of the bone influence affecting a single vertex.
+Index of the bone. Must be between 0 and the number of bones.
Index of the vertex for which the bone influence is to be found. Must be between 0 and the number of vertices in the mesh.
Pointer to the index of the bone influence that affects vertexNum.
If the method succeeds, the return value is
Gets the maximum face influences in a triangle mesh with the specified index buffer.
+Pointer to the index buffer that contains the mesh index data.
Number of faces in the mesh.
Pointer to the maximum face influences.
If the method succeeds, the return value is
Sets the minimum bone influence. Influence values smaller than this are ignored.
+Minimum influence value. Influence values smaller than this are ignored.
If the method succeeds, the return value is
Gets the minimum bone influence. Influence values smaller than this are ignored.
+Returns the minimum bone influence value.
Sets the bone name.
+Bone number
Bone name
If the method succeeds, the return value is
Bone names are returned by
Gets the bone name, from the bone index.
+Bone number.
Returns the bone name. Do not free this string.
Sets the bone offset matrix.
+Bone number.
Pointer to the bone offset matrix.
If the method succeeds, the return value is
Bone names are returned by
Gets the bone offset matrix.
+Bone number.
Returns a reference to the bone offset matrix. Do not free this reference.
Clones a skin info object.
+Address of a reference to an
If the method succeeds, the return value is
Updates bone influence information to match vertices after they are reordered. This method should be called if the target vertex buffer has been reordered externally.
+Number of vertices to remap.
Array of DWORDs whose length is specified by NumVertices.
If the method succeeds, the return value is
Each element in pVertexRemap specifies the previous vertex index for that position. For example, if a vertex was in position 3 but has been remapped to position 5, then the fifth element of pVertexRemap should contain 3. The vertex remap array returned by
Sets the flexible vertex format (FVF) type.
+Flexible vertex format. See
If the method succeeds, the return value is
Sets the vertex declaration.
+Pointer to an array of
If the method succeeds, the return value is
Gets the fixed function vertex value.
+Returns the flexible vertex format (FVF) codes.
This method can return 0 if the vertex format cannot be mapped directly to an FVF code. This will occur for a mesh created from a vertex declaration that doesn't have the same order and elements supported by the FVF codes.
+Gets the vertex declaration.
+Array of
If the method succeeds, the return value is
The array of elements includes the D3DDECL_END macro, which ends the declaration.
+Applies software skinning to the target vertices based on the current matrices.
+Bone transform matrix.
Inverse transpose of the bone transform matrix.
Pointer to the buffer containing the source vertices.
Pointer to the buffer containing the destination vertices.
If the method succeeds, the return value is
When used to skin vertices with two position elements, this method skins the second position element with the inverse of the bone instead of the bone itself.
+Takes a mesh and returns a new mesh with per-vertex blend weights and a bone combination table. The table describes which bones affect which subsets of the mesh.
+Input mesh. See
Currently unused.
Input mesh adjacency information.
Output mesh adjacency information.
An array of DWORDs, one per face, that identifies the original mesh face that corresponds to each face in the blended mesh. If the value supplied for this argument is
Address of a reference to an
Pointer to a DWORD that will contain the maximum number of bone influences required per vertex for this skinning method.
Pointer to the number of bones in the bone combination table.
Pointer to the bone combination table. The data is organized in a
Pointer to the new mesh.
If the method succeeds, the return value is
Each element in the remap array specifies the previous index for that position. For example, if a vertex was in position 3 but has been remapped to position 5, then the fifth element of pVertexRemap will contain 3.
This method does not run on hardware that does not support fixed-function vertex blending.
+Takes a mesh and returns a new mesh with per-vertex blend weights, indices, and a bone combination table. The table describes which bone palettes affect which subsets of the mesh.
+The input mesh. See
Currently unused.
Number of bone matrices available for matrix palette skinning.
Input mesh adjacency information.
Output mesh adjacency information.
An array of DWORDs, one per face, that identifies the original mesh face that corresponds to each face in the blended mesh. If the value supplied for this argument is
Address of a reference to an
Pointer to a DWORD that will contain the maximum number of bone influences required per vertex for this skinning method.
Pointer to the number of bones in the bone combination table.
Pointer to the bone combination table. The data is organized in a
Pointer to the new mesh.
If the method succeeds, the return value is
Each element in the remap arrays specifies the previous index for that position. For example, if a vertex was in position 3 but has been remapped to position 5, then the fifth element of pVertexRemap will contain 3.
This method does not run on hardware that does not support fixed-function vertex blending.
+Gets the maximum number of influences for any vertex in the mesh.
+Gets the number of bones.
+Gets or sets the minimum bone influence. Influence values smaller than this are ignored.
+Gets or sets the fixed function vertex value.
+This method can return 0 if the vertex format cannot be mapped directly to an FVF code. This will occur for a mesh created from a vertex declaration that doesn't have the same order and elements supported by the FVF codes.
+The
The
The application typically first calls
The LPD3DXSPRITE type is defined as a reference to the
typedef interface ID3DXSprite ID3DXSprite; typedef interface ID3DXSprite *LPD3DXSPRITE;
Retrieves the device associated with the sprite object.
+Address of a reference to an
If the method succeeds, the return value is
Calling this method will increase the internal reference count on the
Gets the sprite transform.
+Pointer to a
If the method succeeds, the return value is
Sets the sprite transform.
+Pointer to a
If the method succeeds, the return value is
Sets the right-handed world-view transform for a sprite. A call to this method is required before billboarding or sorting sprites.
+Pointer to a
Pointer to a
If the method succeeds, the return value is
A call to this method (or to
Sets the left-handed world-view transform for a sprite. A call to this method is required before billboarding or sorting sprites.
+Pointer to a
Pointer to a
If the method succeeds, the return value is
A call to this method (or to
Prepares a device for drawing sprites.
+Combination of zero or more flags that describe sprite rendering options. For this method, the valid flags are:
For a description of the flags and for information on how to control device state capture and device view transforms, see
If the method succeeds, the return value is
This method must be called from inside a
This method will set the following states on the device.
Render States:
Type (D3DRENDERSTATETYPE) | Value |
---|---|
TRUE | |
0x00 | |
AlphaCmpCaps | |
TRUE | |
0 |
?
Texture Stage States:
Stage Identifier | Type (D3DTEXTURESTAGESTATETYPE) | Value |
---|---|---|
0 | ||
0 | ||
0 | ||
0 | ||
0 | ||
0 | ||
0 | 0 | |
0 | ||
1 | ||
1 |
?
Sampler States:
Sampler Stage Index | Type (D3DSAMPLERSTATETYPE) | Value |
---|---|---|
0 | ||
0 | ||
0 | ||
0 | 0 | |
0 | MaxAnisotropy | |
0 | ||
0 | ||
0 | 0 | |
0 | 0 |
?
Note: This method disables N-patches.
+Adds a sprite to the list of batched sprites.
+Pointer to an
Pointer to a
Pointer to a
Pointer to a
If the method succeeds, the return value is
To scale, rotate, or translate a sprite, call
Forces all batched sprites to be submitted to the device. Device states remain as they were after the last call to
If the method succeeds, the return value is
Calls
If the method succeeds, the return value is
Use this method to release all references to video memory resources and delete all stateblocks. This method should be called whenever a device is lost or before resetting a device.
+If the method succeeds, the return value is
This method should be called whenever the device is lost or before the user calls
Use this method to re-acquire resources and save initial state.
+If the method succeeds, the return value is
Adds a sprite to the list of batched sprites.
+Pointer to an
If the method succeeds, the return value is
To scale, rotate, or translate a sprite, call
Adds a sprite to the list of batched sprites.
+Pointer to an
Pointer to a
Pointer to a
Pointer to a
If the method succeeds, the return value is
To scale, rotate, or translate a sprite, call
Retrieves the device associated with the sprite object.
+Calling this method will increase the internal reference count on the
Gets or sets the sprite transform.
+Applications use the methods of the
This interface can be used to save and restore pipeline state. It can also be used to capture the current state.
The LPDIRECT3DSTATEBLOCK9 and PDIRECT3DSTATEBLOCK9 types are defined as references to the
typedef struct IDirect3DStateBlock9 *LPDIRECT3DSTATEBLOCK9, *PDIRECT3DSTATEBLOCK9;
Gets the device.
+Pointer to the
If the method succeeds, the return value is
Capture the current value of states that are included in a stateblock.
+If the method succeeds, the return value is
The Capture method captures current values for states within an existing state block. It does not capture the entire state of the device. For example:
IDirect3DStateBlock9* pStateBlock = NULL;
pd3dDevice->BeginStateBlock();
// Add the ZENABLE state to the stateblock
pd3dDevice->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE);
pd3dDevice->EndStateBlock(&pStateBlock);
// Change the current value that is stored in the state block
pd3dDevice->SetRenderState(D3DRS_ZENABLE, D3DZB_FALSE);
pStateBlock->Capture();
pStateBlock->Release();
Creating an empty stateblock and calling the Capture method does nothing if no states have been set.
The Capture method will not capture information for lights that are explicitly or implicitly created after the stateblock is created.
+Apply the state block to the current device state.
+If the method succeeds, the return value is
Gets the device.
+Applications use the methods of the
The LPDIRECT3DSURFACE9 and PDIRECT3DSURFACE9 types are defined as references to the
typedef struct IDirect3DSurface9 *LPDIRECT3DSURFACE9, *PDIRECT3DSURFACE9;
Provides access to the parent cube texture or texture (mipmap) object, if this surface is a child level of a cube texture or a mipmap. This method can also provide access to the parent swap chain if the surface is a back-buffer child.
+Reference identifier of the container being requested.
Address of a reference to fill with the container reference if the query succeeds. See Remarks.
If the method succeeds, the return value is
If the surface is created using CreateRenderTarget or CreateOffscreenPlainSurface or CreateDepthStencilSurface, the surface is considered stand alone. In this case, GetContainer will return the Direct3D device used to create the surface.
If the call succeeds, the reference count of the container is increased by one.
Here's an example getting the parent texture of a mip surface.
// Assumes pSurface is a valid IDirect3DSurface9 reference
void *pContainer = NULL;
IDirect3DTexture9 *pTexture = NULL;
HRESULT hr = pSurface->GetContainer(IID_IDirect3DTexture9, &pContainer);
if (SUCCEEDED(hr) && pContainer)
{
    pTexture = (IDirect3DTexture9 *)pContainer;
}
Retrieves a description of the surface.
+Pointer to a
If the method succeeds, the return value is
Locks a rectangle on a surface.
+Pointer to a
Pointer to a rectangle to lock. Specified by a reference to a
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
You may not specify a subrect when using
If the method succeeds, the return value is
If the method fails, the return value can be
If the
The only lockable format for a depth-stencil surface is
For performance reasons, dirty regions are recorded only for level zero of a texture. Dirty regions are automatically recorded when
A multisample back buffer cannot be locked.
This method cannot retrieve data from a surface that is contained by a texture resource created with
Unlocks a rectangle on a surface.
+If the method succeeds, the return value is
Retrieves a device context.
+Pointer to the device context for the surface.
The following restrictions apply.
When a device context is outstanding on a surface, the application may not call these methods:
| |
| |
| |
| |
| |
| |
| |
|
?
* (on a swap chain that contains the surface)
It is valid to call
The hdc provides access to Win32 and GDI functionality.
+Release a device context handle.
+Handle to a device context.
If the method succeeds, the return value is
An hdc is a Windows resource. It must be released after use so Windows can return it to the pool of available resources.
This method will release only the device context returned by
Retrieves a description of the surface.
+Presents the contents of the next buffer in the sequence of back buffers owned by the swap chain.
+The Present method is a shortcut to Present. Present has been updated to take a flag allowing the application to request that the method return immediately when the driver reports that it cannot schedule a presentation.
If necessary, a stretch operation is applied to transfer the pixels within the source rectangle to the destination rectangle in the client area of the target window.
Present will fail if called between BeginScene and EndScene pairs unless the render target is not the current render target (such as the back buffer you get from creating an additional swap chain). This is a new behavior for Direct3D 9.
+Presents the contents of the next buffer in the sequence of back buffers owned by the swap chain.
+Pointer to the source rectangle (see
Pointer to the destination rectangle in client coordinates (see
Destination window whose client area is taken as the target for this presentation. If this value is
This value must be
Allows the application to request that the method return immediately when the driver reports that it cannot schedule a presentation. Valid values are 0, or any combination of
If the method succeeds, the return value is
The Present method is a shortcut to Present. Present has been updated to take a flag allowing the application to request that the method return immediately when the driver reports that it cannot schedule a presentation.
If necessary, a stretch operation is applied to transfer the pixels within the source rectangle to the destination rectangle in the client area of the target window.
Present will fail if called between BeginScene and EndScene pairs unless the render target is not the current render target (such as the back buffer you get from creating an additional swap chain). This is a new behavior for Direct3D 9.
+Generates a copy of the swapchain's front buffer and places that copy in a system memory buffer provided by the application.
+Pointer to an
If the method succeeds, the return value is
Calling this method will increase the internal reference count on the
Retrieves a back buffer from the swap chain of the device.
+Index of the back buffer object to return. Back buffers are numbered from 0 to the total number of back buffers - 1. A value of 0 returns the first back buffer, not the front buffer. The front buffer is not accessible through this method. Use
Stereo view is not supported in Direct3D 9, so the only valid value for this parameter is
Address of a reference to an
Calling this method will increase the internal reference count on the
Returns information describing the raster of the monitor on which the swap chain is presented.
+Pointer to a
If the method succeeds, the return value is
Retrieves the display mode's spatial resolution, color resolution, and refresh frequency.
+Pointer to a
If the method succeeds, the return value is
Retrieves the device associated with the swap chain.
+Address of a reference to an
If the method succeeds, the return value is
This method allows navigation to the owning device object.
Calling this method will increase the internal reference count on the
Retrieves the presentation parameters associated with a swap chain.
+Pointer to the presentation parameters. See
If the method succeeds, the return value is
This method can be used to see the presentation parameters of the parent swap chain of a surface (a back buffer, for instance). The parent swap chain can be retrieved with
Returns information describing the raster of the monitor on which the swap chain is presented.
+Retrieves the display mode's spatial resolution, color resolution, and refresh frequency.
+Retrieves the device associated with the swap chain.
+This method allows navigation to the owning device object.
Calling this method will increase the internal reference count on the
Retrieves the presentation parameters associated with a swap chain.
+This method can be used to see the presentation parameters of the parent swap chain of a surface (a back buffer, for instance). The parent swap chain can be retrieved with
Applications use the methods of the
There is always at least one swap chain for each device, known as the implicit swap chain. However, an additional swap chain for rendering multiple views from the same device can be created by calling the CreateAdditionalSwapChain method.
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DSWAPCHAIN9 and PDIRECT3DSWAPCHAIN9 types are defined as references to the
Returns the number of times the swapchain has been processed.
+Pointer to a UINT to be filled with the number of times the
Retrieves the display mode's spatial resolution, color resolution, refresh frequency, and rotation settings.
+Pointer to a
Pointer to a
If the method succeeds, the return value is
Returns the number of times the swapchain has been processed.
+Applications use the methods of the
The
This interface inherits additional functionality from the
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DTEXTURE9 and PDIRECT3DTEXTURE9 types are defined as references to the
typedef struct IDirect3DTexture9 *LPDIRECT3DTEXTURE9, *PDIRECT3DTEXTURE9;
Retrieves a level description of a texture resource.
+Identifies a level of the texture resource. This method returns a surface description for the level specified by this parameter.
Pointer to a
Retrieves the specified texture surface level.
+Identifies a level of the texture resource. This method returns a surface for the level specified by this parameter. The top-level surface is denoted by 0.
Address of a reference to an
Calling this method will increase the internal reference count on the
Locks a rectangle on a texture resource.
+Specifies the level of the texture resource to lock.
Pointer to a
Pointer to a rectangle to lock. Specified by a reference to a
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
You may not specify a subrect when using
If the method succeeds, the return value is
Textures created with
For performance reasons, dirty regions are recorded only for level zero of a texture. Dirty regions are automatically recorded when
The only lockable format for a depth-stencil texture is D3DFMT_D16_LOCKABLE.
Video memory textures cannot be locked, but must be modified by calling
This method cannot retrieve data from a texture resource created with
Unlocks a rectangle on a texture resource.
+Specifies the level of the texture resource to unlock.
If the method succeeds, the return value is
Adds a dirty region to a texture resource.
+Pointer to a
If the method succeeds, the return value is
For performance reasons, dirty regions are only recorded for level zero of a texture. For sublevels, it is assumed that the corresponding (scaled) rectangle or box is also dirty. Dirty regions are automatically recorded when
Using
The
The
The
The LPD3DXTEXTURESHADER type is defined as a reference to the
typedef interface ID3DXTextureShader *LPD3DXTEXTURESHADER;
Gets a reference to the function DWORD stream.
+A reference to the function DWORD stream. See
If the method succeeds, the return value is
Get a reference to the constant table.
+Pointer to an
If the method succeeds, the return value is
Gets a description of the constant table.
+The attributes of the constant table. See
If the method succeeds, the return value is
Gets a reference to the array of constants in the constant table.
+Unique identifier to a constant. See
Returns a reference to an array of descriptions. See
The input supplied must be the maximum size of the array. The output is the number of elements that are filled in the array when the function returns.
If the method succeeds, the return value is
Samplers can appear more than once in a constant table, therefore, this method can return an array of descriptions each with a different register index.
+Gets a constant by looking up its index.
+A handle to the parent data structure. If the constant is a top-level parameter (there is no parent data structure), use
Zero-based index of the constant.
Returns a unique identifier to the constant.
To get a constant from an array of constants, use
Gets a constant by looking up its name.
+A handle to the parent data structure. If the constant is a top-level parameter (there is no parent data structure), use
A string containing the name of the constant.
Returns a unique identifier to the constant.
Get a constant from the constant table.
+A handle to the array of constants. This value may not be
Zero-based index of the element in the constant table.
Returns a unique identifier to the constant.
To get a constant that is not part of an array, use
Sets the constants to the default values declared in the shader.
+If the method succeeds, the return value is
Sets the constant table with the data in the buffer.
+Unique identifier to a constant. See
A reference to a buffer containing the constant data.
Size of the buffer, in bytes.
If the method succeeds, the return value is
Sets a
Unique identifier to the constant. See
If the method succeeds, the return value is
Sets an array of
Unique identifier to the array of constants. See
Array of
Number of
If the method succeeds, the return value is
Sets an integer value.
+Unique identifier to the constant. See
Integer value.
If the method succeeds, the return value is
Sets an array of integers.
+Unique identifier to the array of constants. See
Array of integers.
Number of integers in the array.
If the method succeeds, the return value is
Sets a floating-point number.
+Unique identifier to the constant. See
Floating-point number.
If the method succeeds, the return value is
Sets an array of floating-point numbers.
+Unique identifier to the array of constants. See
Array of floating-point numbers.
Number of floating-point values in the array.
If the method succeeds, the return value is
Sets a 4D vector.
+Unique identifier to the vector constant. See
Pointer to a 4D vector. See
If the method succeeds, the return value is
Sets an array of 4D vectors.
+Unique identifier to the array of vector constants. See
Array of 4D vectors. See
Number of vectors in the array.
If the method succeeds, the return value is
Sets a non-transposed matrix.
+Unique identifier to the matrix of constants. See
Pointer to a non-transposed matrix. See
If the method succeeds, the return value is
A non-transposed matrix contains row-major data; that is, each vector is contained in a row.
+Sets an array of non-transposed matrices.
+Unique identifier to the array of constant matrices. See
Array of non-transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A non-transposed matrix contains row-major data; that is, each vector is contained in a row.
+Sets an array of references to non-transposed matrices.
+Unique identifier to an array of constant matrices. See
Array of references to non-transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A non-transposed matrix contains row-major data; that is, each vector is contained in a row.
+Sets a transposed matrix.
+Unique identifier to the matrix of constants. See
Pointer to a transposed matrix. See
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
+Sets an array of transposed matrices.
+Unique identifier to the array of matrix constants. See
Array of transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
+Sets an array of references to transposed matrices.
+Unique identifier to the array of matrix constants. See
Array of references to transposed matrices. See
Number of matrices in the array.
If the method succeeds, the return value is
A transposed matrix contains column-major data; that is, each vector is contained in a column.
+Gets a reference to the function DWORD stream.
+Get a reference to the constant table.
+Gets a description of the constant table.
+Applications use the methods of the
The
This interface inherits additional functionality from the
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DVERTEXBUFFER9 and PDIRECT3DVERTEXBUFFER9 types are defined as references to the
typedef struct IDirect3DVertexBuffer9 *LPDIRECT3DVERTEXBUFFER9, *PDIRECT3DVERTEXBUFFER9;
Locks a range of vertex data and obtains a reference to the vertex buffer memory.
+Offset into the vertex data to lock, in bytes. To lock the entire vertex buffer, specify 0 for both parameters, SizeToLock and OffsetToLock.
Size of the vertex data to lock, in bytes. To lock the entire vertex buffer, specify 0 for both parameters, SizeToLock and OffsetToLock.
VOID* reference to a memory buffer containing the returned vertex data.
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
If the method succeeds, the return value is
As a general rule, do not hold a lock across more than one frame. When working with vertex buffers, you are allowed to make multiple lock calls; however, you must ensure that the number of lock calls match the number of unlock calls. DrawPrimitive calls will not succeed with any outstanding lock count on any currently set vertex buffer.
The
For information about using
Unlocks vertex data.
+If the method succeeds, the return value is
Retrieves a description of the vertex buffer resource.
+Pointer to a
If the method succeeds, the return value is
Retrieves a description of the vertex buffer resource.
+Applications use the methods of the
A vertex shader declaration is made up of an array of vertex elements.
The LPDIRECT3DVERTEXDECLARATION9 and PDIRECT3DVERTEXDECLARATION9 types are defined as references to the
typedef struct IDirect3DVertexDeclaration9 *LPDIRECT3DVERTEXDECLARATION9, *PDIRECT3DVERTEXDECLARATION9;
Gets the current device.
+Pointer to the
If the method succeeds, the return value is
Gets the vertex shader declaration.
+Array of vertex elements (see
Number of elements in the array. The application needs to allocate enough room for this.
If the method succeeds, the return value is
The number of elements, pNumElements, includes the D3DDECL_END macro, which ends the declaration. So the element count is actually one higher than the number of valid vertex elements.
Here's an example that will return the vertex declaration array of up to 256 elements:
D3DVERTEXELEMENT9 decl[MAXD3DDECLLENGTH];
UINT numElements;
HRESULT hr = m_pVertexDeclaration->GetDeclaration(decl, &numElements);
Specify
Gets the current device.
+Applications use the methods of the
The LPDIRECT3DVERTEXSHADER9 and PDIRECT3DVERTEXSHADER9 types are defined as references to the
typedef struct IDirect3DVertexShader9 *LPDIRECT3DVERTEXSHADER9, *PDIRECT3DVERTEXSHADER9;
Gets the device.
+Pointer to the
If the method succeeds, the return value is
Gets a reference to the shader data.
+Pointer to a buffer that contains the shader data. The application needs to allocate enough room for this.
Size of the data, in bytes. To get the buffer size that is needed to retrieve the data, set pData =
If the method succeeds, the return value is
Gets the device.
+Applications use the methods of the
The
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DVOLUME9 and PDIRECT3DVOLUME9 types are defined as references to the
typedef struct IDirect3DVolume9 *LPDIRECT3DVOLUME9, *PDIRECT3DVOLUME9;
Retrieves the device associated with a volume.
+Address of a reference to an
If the method succeeds, the return value is
This method allows navigation to the owning device object.
Calling this method will increase the internal reference count on the
Associates data with the volume that is intended for use by the application, not by Direct3D.
+Reference to the globally unique identifier that identifies the private data to set.
Pointer to a buffer that contains the data to associate with the volume.
Size of the buffer at pData in bytes.
Value that describes the type of data being passed, or indicates to the application that the data should be invalidated when the resource changes.
Item | Description |
---|---|
(none) | If no flags are specified, Direct3D allocates memory to hold the data within the buffer and copies the data into the new buffer. The buffer allocated by Direct3D is automatically freed, as appropriate. |
D3DSPD_IUNKNOWN | The data at pData is a reference to an |
?
If the method succeeds, the return value is
Direct3D does not manage the memory at pData. If this buffer was dynamically allocated, it is the calling application's responsibility to free the memory.
Data is passed by value, and multiple sets of data can be associated with a single volume.
+Copies the private data associated with the volume to a provided buffer.
+Reference to (C++) or address of (C) the globally unique identifier that identifies the private data to retrieve.
Pointer to a previously allocated buffer to fill with the requested private data if the call succeeds. The application calling this method is responsible for allocating and releasing this buffer. If this parameter is
Pointer to the size of the buffer at pData, in bytes. If this value is less than the actual size of the private data, such as 0, the method sets this parameter to the required buffer size, and the method returns
If the method succeeds, the return value is
Frees the specified private data associated with this volume.
+Reference to the globally unique identifier that identifies the private data to free.
If the method succeeds, the return value is
Direct3D calls this method automatically when a volume is released.
+Provides access to the parent volume texture object, if this surface is a child level of a volume texture.
+Reference identifier of the volume being requested.
Address of a reference to fill with the container reference, if the query succeeds.
If the method succeeds, the return value is
If the call succeeds, the reference count of the container is increased by one.
Here's an example getting the parent volume texture of a volume texture.
// Assumes pVolume is a valid IDirect3DVolume9 reference
void *pContainer = NULL;
IDirect3DVolumeTexture9 *pVolumeTexture = NULL;
hr = pVolume->GetContainer(IID_IDirect3DVolumeTexture9, &pContainer);
if (SUCCEEDED(hr) && pContainer)
{
    pVolumeTexture = (IDirect3DVolumeTexture9 *)pContainer;
}
Retrieves a description of the volume.
+Pointer to a
If the method succeeds, the return value is
Locks a box on a volume resource.
+Pointer to a
Pointer to a box to lock. Specified by a reference to a
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
If the method succeeds, the return value is
For performance reasons, dirty regions are only recorded for level zero of a texture. Dirty regions are automatically recorded when
Unlocks a box on a volume resource.
+If the method succeeds, the return value is
Retrieves the device associated with a volume.
+This method allows navigation to the owning device object.
Calling this method will increase the internal reference count on the
Retrieves a description of the volume.
+Applications use the methods of the
The
This interface inherits additional functionality from the
This interface, like all COM interfaces, inherits from the
The LPDIRECT3DVOLUMETEXTURE9 and PDIRECT3DVOLUMETEXTURE9 types are defined as references to the
typedef struct IDirect3DVolumeTexture9 *LPDIRECT3DVOLUMETEXTURE9, *PDIRECT3DVOLUMETEXTURE9;
Retrieves a level description of a volume texture resource.
+Identifies a level of the volume texture resource. This method returns a volume description for the level specified by this parameter.
Pointer to a
Retrieves the specified volume texture level.
+Identifies a level of the volume texture resource. This method returns a volume for the level specified by this parameter.
Address of a reference to an
Calling this method will increase the internal reference count on the
Locks a box on a volume texture resource.
+Specifies the level of the volume texture resource to lock.
Pointer to a
Pointer to the volume to lock. This parameter is specified by a reference to a
Combination of zero or more locking flags that describe the type of lock to perform. For this method, the valid flags are:
For a description of the flags, see
If the method succeeds, the return value is
For performance reasons, dirty regions are only recorded for level zero of a texture. Dirty regions are automatically recorded when LockBox is called without
Unlocks a box on a volume texture resource.
+Specifies the level of the volume texture resource to unlock.
If the method succeeds, the return value is
Adds a dirty region to a volume texture resource.
+Pointer to a
If the method succeeds, the return value is
For performance reasons, dirty regions are only recorded for level zero of a texture. For sublevels, it is assumed that the corresponding (scaled) box is also dirty. Dirty regions are automatically recorded when LockBox is called without
Using
Applications use the methods of the
An
The
The globally unique identifier (
The LPD3DXFILE type is defined as a reference to the
typedef interface ID3DXFile *LPD3DXFILE;
Creates an enumerator object that will read a .x file.
+The data source. Either:
Depending on the value of loadflags.
Value that specifies the source of the data. This value can be one of the D3DXF_FILELOADOPTIONS flags.
Address of a reference to an
If the method succeeds, the return value is
After using this method, use one of the
Creates a save object that will be used to save data to a .x file.
+Pointer to the name of the file to use for saving data.
Value that specifies the name of the file to which data is to be saved. This value can be one of the File Save Options flags.
Indicates the format to use when saving the .x file. This value can be one of the File Formats flags. For more information, see Remarks.
Address of a reference to an
If the method succeeds, the return value is
After using this method, use methods of the
For the saved file format dwFileFormat, one of the binary, legacy binary, or text flags in File Formats must be specified. The file can be compressed by using the optional
The file format values can be combined in a logical OR to create compressed text or compressed binary files. If you indicate that the file format should be text and compressed, the file will be written out first as text and then compressed. However, compressed text files are not as efficient as binary text files; in most cases, therefore, you will want to indicate binary and compressed.
+Registers custom templates.
+Pointer to a buffer consisting of a .x file in text or binary format that contains templates.
Size of the buffer pointed to by pvData, in bytes.
If the method succeeds, the return value is
The following code fragment provides an example call to RegisterTemplates and example contents for the buffer to which pvData points.
#define XSKINEXP_TEMPLATES \
    "xof 0303txt 0032\
    template XSkinMeshHeader \
    { \
        <3CF169CE-FF7C-44ab-93C0-F78F62D172E2> \
        WORD nMaxSkinWeightsPerVertex; \
        WORD nMaxSkinWeightsPerFace; \
        WORD nBones; \
    } \
    template VertexDuplicationIndices \
    { \
        <B8D65549-D7C9-4995-89CF-53A9A8B031E3> \
        DWORD nIndices; \
        DWORD nOriginalVertices; \
        array DWORD indices[nIndices]; \
    } \
    template SkinWeights \
    { \
        <6F0D123B-BAD2-4167-A0D0-80224F25FABB> \
        STRING transformNodeName; \
        DWORD nWeights; \
        array DWORD vertexIndices[nWeights]; \
        array float weights[nWeights]; \
        Matrix4x4 matrixOffset; \
    }"
    .
    .
    .
LPD3DXFILE pD3DXFile = NULL;
if ( FAILED (hr = pD3DXFile->RegisterTemplates(
        (LPVOID)XSKINEXP_TEMPLATES, sizeof( XSKINEXP_TEMPLATES ) - 1 ) ) )
    goto End;
All templates must specify a name and a UUID.
This method calls the RegisterEnumTemplates method, obtaining an
Registers custom templates, given an
If the method succeeds, the return value is
If the method fails, the following value will be returned: D3DXFERR_BADVALUE.
When this method is called, it copies templates stored with the
If an
Applications use the methods of the
Data types allowed by the template are called optional members. The optional members are not required, but an object might miss important information without them. These optional members are saved as children of the data object. A child can be another data object or a reference to an earlier data object.
The
The LPD3DXFILEDATA type is defined as a reference to this interface.
typedef interface ID3DXFileData *LPD3DXFILEDATA;
Retrieves the enumeration object in this file data object.
+Address of a reference to receive the enumeration object in this file data object.
If the method succeeds, the return value is
Retrieves the name of this file data object.
+Address of a reference to receive the name of this file data object. If this parameter is
Pointer to the size of the string that represents the name of this file data object. This parameter can be
If the method succeeds, the return value is
For this method to succeed, either szName or puiSize must be non-
Retrieves the
Pointer to a
If the method succeeds, the return value is
Accesses the .x file data.
+Pointer to the size of the .x file data.
Address of a reference to receive the
If the method succeeds, the return value is
The ppData reference is only valid during a
Because file data is not guaranteed to be aligned properly with byte boundaries, you should access ppData with UNALIGNED references.
Returned parameter values are not guaranteed to be valid due to possible file corruption; therefore, your code should verify the returned parameter values.
+Ends the lifespan of the ppData reference returned by
The return value is
You must ensure that the number of
Retrieves the template ID in this file data object.
+Pointer to the
If the method succeeds, the return value is
Indicates whether this file data object is a reference object that points to another child data object.
+Returns TRUE if the file data object is a reference object; returns
Retrieves the number of children in this file data object.
+Address of a reference to receive the number of children in this file data object.
If the method succeeds, the return value is
Retrieves a child object in this file data object.
+ID of the child object to retrieve.
Address of a reference to receive the child object's interface reference.
If the method succeeds, the return value is
Retrieves the enumeration object in this file data object.
+Retrieves the template ID in this file data object.
+Indicates whether this file data object is a reference object that points to another child data object.
+Retrieves the number of children in this file data object.
+Applications use the methods of the
The
The LPD3DXFILEENUMOBJECT type is defined as a reference to this interface.
typedef interface ID3DXFileEnumObject *LPD3DXFILEENUMOBJECT;
Retrieves the
If the method succeeds, the return value is
Retrieves the number of child objects in this file data object.
+Address of a reference to receive the number of child objects in this file data object.
If the method succeeds, the return value is
Retrieves a child object in this file data object.
+ID of the child object to retrieve.
Address of a reference to receive the child object's interface reference.
If the method succeeds, the return value is
Retrieves the data object that has the specified
Reference to the requested
Address of a reference to an
If the method succeeds, the return value is
Obtain the
Retrieves the data object that has the specified name.
+Pointer to the requested name.
Address of a reference to an
If the method succeeds, the return value is
Obtain the name szName of the current file data object with the
Retrieves the
Retrieves the number of child objects in this file data object.
+Applications use the methods of the
The
The LPD3DXFileSaveData type is defined as a reference to this interface.
typedef interface ID3DXFileSaveData *LPD3DXFILESAVEDATA;
Retrieves a reference to this
If the method succeeds, the return value is
Retrieves the name of this
If the method succeeds, the return value is
For this method to succeed, either szName or puiSize must be non-
Retrieves the
If the method succeeds, the return value is
Retrieves the template ID of this file data node.
+Pointer to the
If the method succeeds, the return value is
Adds a data object as a child of the
If the method succeeds, the return value is
Adds a data reference as a child of this
If the method succeeds, the return value is
The file data object being referenced must have either a name or a
Retrieves a reference to this
Retrieves the template ID of this file data node.
+Applications use the methods of the
Templates are not required in every file. For example, you could put all templates into a single .x file rather than duplicating them in every .x file.
The
The globally unique identifier (
The LPD3DXFILESAVEOBJECT type is defined as a reference to this interface.
typedef interface ID3DXFileSaveObject *LPD3DXFILESAVEOBJECT;
Gets the
If the method succeeds, the return value is
Adds a data object as a child of the
If the method succeeds, the return value is
If a data reference object will reference the data object, either the szName or pId parameter must be non-
Save the created data to disk by using the
Saves a data object and its children to a .x file on disk.
+If the method succeeds, the return value is
After this method succeeds,
Gets the
Stores an attribute table entry.
+An attribute table is used to identify areas of the mesh that need to be drawn with different textures, render states, materials, and so on. In addition, the application can use the attribute table to hide portions of a mesh by not drawing a given attribute identifier (AttribId) when drawing the frame.
The LPD3DXATTRIBUTERANGE type is defined as a reference to the
typedef struct D3DXATTRIBUTERANGE *LPD3DXATTRIBUTERANGE;
Attribute table identifier.
Starting face.
Face count.
Starting vertex.
Vertex count.
Specifies mesh weight attributes.
+This structure describes how a simplification operation will consider vertex data when calculating relative costs between collapsing edges. For example, if the Normal field is 0.0, the simplification operation will ignore the vertex normal component when calculating the error for the collapse. However, if the Normal field is 1.0, the simplification operation will use the vertex normal component. If the Normal field is 2.0, the error for the collapse is doubled; if the Normal field is 4.0, the error is quadrupled, and so on.
The LPD3DXATTRIBUTEWEIGHTS type is defined as a reference to the
typedef struct D3DXATTRIBUTEWEIGHTS *LPD3DXATTRIBUTEWEIGHTS;
Position.
Blend weight.
Normal.
Diffuse lighting value.
Specular lighting value.
Tangent.
Binormal.
Eight texture coordinates.
Throughput metrics for help in understanding the performance of an application.
+The bandwidth or maximum data transfer rate from the host CPU to the GPU. This is typically the bandwidth of the PCI or AGP bus which connects the CPU and the GPU.
Memory utilized percentage when uploading data from the host CPU to the GPU.
Vertex throughput percentage. This is the number of vertices processed compared to the theoretical maximum vertex processing rate.
Triangle set-up throughput percentage. This is the number of triangles that are set up for rasterization compared to the theoretical maximum triangle set-up rate.
Pixel fill throughput percentage. This is the number of pixels that are filled compared to the theoretical pixel fill.
Defines a volume.
+The restrictions on side ordering observed for
Position of the left side of the box on the x-axis.
Position of the top of the box on the y-axis.
Position of the right side of the box on the x-axis.
Position of the bottom of the box on the y-axis.
Position of the front of the box on the z-axis.
Position of the back of the box on the z-axis.
Measure the cache hit rate performance for textures and indexed vertices.
+An efficient cache is typically closer to a 90 percent hit rate, and an inefficient cache is typically closer to a 10 percent hit rate (although a low percentage is not necessarily a problem).
+The hit rate for finding a texture in the texture cache. This assumes there is a texture cache. Increasing the level-of-detail bias to use the most detailed texture, using many large textures, or producing a near random texture access pattern on large textures with custom shader code can dramatically affect the texture cache hit rate.
The hit rate for finding transformed vertices in the vertex cache. The GPU is designed to transform indexed vertices and may store them in a vertex cache. If you are using meshes,
Describes a callback key for use in key frame animation.
+Key frame time stamp.
Pointer to user callback data.
Describes the current clip status.
+When clipping is enabled during vertex processing (by ProcessVertices, DrawPrimitive, or other drawing functions), Direct3D computes a clip code for every vertex. The clip code is a combination of D3DCS_* bits. When a vertex is outside a particular clipping plane, the corresponding bit is set in the clipping code. Direct3D maintains the clip status using
Clip status is not updated by DrawRectPatch and DrawTriPatch because there is no software emulation for them.
+Clip union flags that describe the current clip status. This member can be one or more of the following flags:
Value | Meaning |
---|---|
Combination of all clip flags. | |
All vertices are clipped by the left plane of the viewing frustum. | |
All vertices are clipped by the right plane of the viewing frustum. | |
All vertices are clipped by the top plane of the viewing frustum. | |
All vertices are clipped by the bottom plane of the viewing frustum. | |
All vertices are clipped by the front plane of the viewing frustum. | |
All vertices are clipped by the back plane of the viewing frustum. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. | |
Application-defined clipping planes. |
?
Clip intersection flags that describe the current clip status. This member can take the same flags as ClipUnion.
A description of a constant in a constant table.
+Name of the constant.
Constant data type. See
Zero-based index of the constant in the table.
Number of registers that contain data.
Parameter class. See
Parameter type. See
Number of rows.
Number of columns.
Number of elements in the array.
Number of structure member sub-parameters.
Data size in number of bytes.
Pointer to the default value.
A description of the constant table.
+Name of the constant table creator.
Shader version.
Number of constants in the constant table.
Describes the creation parameters for a device.
+Ordinal number that denotes the display adapter. D3DADAPTER_DEFAULT is always the primary display adapter. Use this ordinal as the Adapter parameter for any of the
Member of the
Window handle to which focus belongs for this Direct3D device. The value of this parameter mirrors the value passed to the CreateDevice call that created this device.
A combination of one or more
Specifies types of display modes to filter out.
+The size of this structure. This should always be set to sizeof(
The display mode format to filter out. See
Whether the scanline ordering is interlaced or progressive. See
Describes an effect object.
+An effect object can contain multiple rendering techniques and parameters for the same effect.
+String that contains the name of the effect creator.
Number of parameters used for effect.
Number of techniques that can render the effect.
Number of functions that can render the effect.
Describes an animation event.
+Event type, as defined in
Event track identifier.
Start time of the event in global time.
Duration of the event in global time.
Transition style of the event, as defined in
Track weight for the event.
Track speed for the event.
Track position for the event.
Enable flag.
Returns material information saved in Direct3D (.x) files.
+The
The LPD3DXMATERIAL type is defined as a reference to the
typedef struct D3DXMATERIAL *LPD3DXMATERIAL;
Pointer to a string that specifies the file name of the texture.
Defines the attributes of a font.
+The compiler setting also determines the structure type. If Unicode is defined, the
Possible values of the above members are given in the GDI
Height, in logical units, of the font's character cell or character.
Width, in logical units, of characters in the font.
Weight of the font in the range from 0 through 1000.
Number of mip levels requested. If this value is zero or D3DX_DEFAULT, a complete mipmap chain is created. If the value is 1, the texture space is mapped identically to the screen space.
Set to TRUE for an Italic font.
Character set.
Output precision. The output precision defines how closely the output must match the requested font height, width, character orientation, escapement, pitch, and font type.
Output quality.
Pitch and family of the font.
A null-terminated string or characters that specifies the typeface name of the font. The length of the string must not exceed 32 characters, including the terminating null character. If FaceName is an empty string, the first font that matches the other specified attributes will be used. If the compiler settings require Unicode, the data type TCHAR resolves to WCHAR; otherwise, the data type resolves to CHAR. See Remarks.
Encapsulates a transform frame in a transformation frame hierarchy.
+An application can derive from this structure to add other data.
+Name of the frame.
Transformation matrix.
Pointer to the mesh container.
Pointer to a sibling frame.
Pointer to a child frame.
Contains red, green, and blue ramp data.
+Array of 256 WORD elements that describes the red gamma ramp.
Array of 256 WORD elements that describes the green gamma ramp.
Array of 256 WORD elements that describes the blue gamma ramp.
Returns a description of the original contents of an image file.
+Width of original image in pixels.
Height of original image in pixels.
Depth of original image in pixels.
Number of mip levels in original image.
A value from the
Represents the type of the texture stored in the file. It is either
Represents the format of the image file.
Describes an index buffer.
+Member of the
Member of the
Combination of one or more of the following flags, specifying the usage for this resource.
Value | Meaning |
---|---|
Set to indicate that the index buffer content will never require clipping. | |
Set to indicate that the index buffer requires dynamic memory use. This is useful for drivers because it enables them to decide where to place the buffer. In general, static index buffers are placed in video memory and dynamic index buffers are placed in AGP memory. Note that there is no separate static usage; if you do not specify For more information about using dynamic index buffers, see Using Dynamic Vertex and Index Buffers. Note that | |
Set to indicate when the index buffer is to be used for drawing high-order primitives. | |
Set to indicate when the index buffer is to be used for drawing N patches. | |
Set to indicate when the index buffer is to be used for drawing point sprites or indexed point lists. | |
Set to indicate that the buffer is to be used with software processing. | |
Informs the system that the application writes only to the index buffer. Using this flag enables the driver to choose the best memory location for efficient write operations and rendering. Attempts to read from an index buffer that is created with this capability can result in degraded performance. |
?
Member of the
Size of the index buffer, in bytes.
Percent of time processing data in the driver. These statistics may help identify cases when the driver is waiting for other resources.
+These metrics help identify when a driver is waiting and what it is waiting for. High percentages are not necessarily a problem.
These system-global metrics may or may not be implemented. Depending on the specific hardware, these metrics may not support multiple queries simultaneously.
+Percentage of time the driver spent waiting for the GPU to finish using a locked resource (and
Percentage of time the driver spent waiting for the GPU to finish processing some commands before the driver could send more. This indicates the driver has run out of room to send commands to the GPU.
Percentage of time the driver spent waiting for the GPU latency to reduce to less than three rendering frames.
If an application is GPU-limited, the driver must stall the CPU until the GPU gets within three frames. This prevents an application from queuing up many seconds' worth of rendering calls which may dramatically increase the latency between when the user inputs new data and when the user sees the results of that input. In general, the driver can track the number of times Present is called to prevent queuing up more than three frames of rendering work.
Percentage of time the driver spent waiting for a resource that cannot be pipelined (that is operated in parallel). An application may want to avoid using a non-pipelined resource for performance reasons.
Percentage of time the driver spent waiting for other GPU processing.
Defines a set of lighting properties.
+Type of the light source. This value is one of the members of the
Diffuse color emitted by the light. This member is a
Specular color emitted by the light. This member is a
Ambient color emitted by the light. This member is a
Position of the light in world space, specified by a
Direction that the light is pointing in world space, specified by a
Distance beyond which the light has no effect. The maximum allowable value for this member is the square root of FLT_MAX. This member does not affect directional lights.
Decrease in illumination between a spotlight's inner cone (the angle specified by Theta) and the outer edge of the outer cone (the angle specified by Phi).
The effect of falloff on the lighting is subtle. Furthermore, a small performance penalty is incurred by shaping the falloff curve. For these reasons, most developers set this value to 1.0.
Value specifying how the light intensity changes over distance. Attenuation values are ignored for directional lights. This member represents an attenuation constant. For information about attenuation, see Light Properties (Direct3D 9). Valid values for this member range from 0.0 to infinity. For non-directional lights, all three attenuation values should not be set to 0.0 at the same time.
Value specifying how the light intensity changes over distance. Attenuation values are ignored for directional lights. This member represents an attenuation constant. For information about attenuation, see Light Properties (Direct3D 9). Valid values for this member range from 0.0 to infinity. For non-directional lights, all three attenuation values should not be set to 0.0 at the same time.
Value specifying how the light intensity changes over distance. Attenuation values are ignored for directional lights. This member represents an attenuation constant. For information about attenuation, see Light Properties (Direct3D 9). Valid values for this member range from 0.0 to infinity. For non-directional lights, all three attenuation values should not be set to 0.0 at the same time.
Angle, in radians, of a spotlight's inner cone - that is, the fully illuminated spotlight cone. This value must be in the range from 0 through the value specified by Phi.
Angle, in radians, defining the outer edge of the spotlight's outer cone. Points outside this cone are not lit by the spotlight. This value must be between 0 and pi.
Describes a locked rectangular region.
+The pitch for DXTn formats is different from what was returned in DirectX 7. It now refers to the number of bytes in a row of blocks. For example, if you have a width of 16, then you will have a pitch of 4 blocks (4*8 for DXT1, 4*16 for DXT2-5.)
+Number of bytes in one row of the surface.
Pointer to the locked bits. If a
Describes preprocessor definitions used by an effect object.
+To use
D3DXMACRO macro;
macro.Name = "DO_CODE_BLOCK";
macro.Definition = "/* here is a block of code */\\\n"
                   "{ do something ... }\\\n";
Notice the 3 backslash characters at the end of the line. The first two are required to output a single '\', followed by the newline character "\n". Optionally, you may also want to terminate your lines using "\\\r\n".
+Preprocessor name.
Definition name.
Specifies material properties.
+To turn off specular highlights, set
For more information about using the lighting engine to calculate specular lighting, see Specular Lighting (Direct3D 9).
+Value specifying the diffuse color of the material. See
Value specifying the ambient color of the material. See
Value specifying the specular color of the material. See
Value specifying the emissive color of the material. See
Floating-point value specifying the sharpness of specular highlights. The higher the value, the sharper the highlight.
Mesh data structure.
+Defines the mesh data type. See
Pointer to a mesh. See
Pointer to a patch mesh. See
Pointer to a patch mesh. See
Describes a parameter used for an effect object.
+Name of the parameter.
Semantic meaning, also called the usage.
Parameter class. Set this to one of the values in
Parameter type. Set this to one of the values in
Number of rows in the array.
Number of columns in the array.
Number of elements in the array.
Number of annotations.
Number of structure members.
Parameter attributes. See Effect Constants.
The size of the parameter, in bytes.
Describes a pass for an effect object.
+String value used for the pass.
Annotations are user-specific data that can be attached to any technique, pass, or parameter. See Add Information to Effect Parameters with Annotations.
Pointer to the vertex shader function. If an effect is created with
Pointer to the pixel shader function. If an effect is created with
Structure that contains the attributes of a patch mesh.
+A mesh is a set of faces, each of which is described by a simple polygon. Objects can be created by connecting several meshes together. A patch mesh is constructed from patches. A patch is a four-sided piece of geometry constructed from curves. The type of curve used and the order of the curve can be varied so that the patch surface will fit almost any surface shape.
The following types of patch combinations are supported:
Patch Type | Basis | Degree |
---|---|---|
Rectangle | Bezier | 2,3,5 |
Rectangle | B-Spline | 2,3,5 |
Rectangle | Catmull-Rom | 3 |
Triangle | Bezier | 2,3,5 |
N-patch | N/A | 3 |
?
+The patch type. For information about patch types, see
Degree of the curves used to construct the patch. For information about the degrees supported, see
Type of curve used to construct the patch. For information about the basis types supported, see
Percent of time processing data in the pipeline.
+For best performance, a balanced load is recommended.
+Percent of time spent running vertex shaders.
Percent of time spent running pixel shaders.
Percent of time spent doing other processing.
Percent of time not processing anything.
Pixel shader driver caps.
+Instruction predication is supported if this value is nonzero. See setp_comp - vs.
Either 0 or 24, which represents the depth of the dynamic flow control instruction nesting. See
The number of temporary registers supported. See
The depth of nesting of the loop - vs/rep - vs and call - vs/callnz bool - vs instructions. See
The number of instruction slots supported. See
Describes swapchain statistics relating to PresentEx calls.
+When a 9Ex application adopts Flip Mode present (
Applications can determine whether a frame has been dropped by sampling any two instances of PresentCount and GetPresentStats (by calling GetPresentStats API at any two points in time). For example, a media application that is presenting at the same rate as the monitor refresh rate (for example, monitor refresh rate is 60Hz, the application presents a frame every 1/60 seconds) wants to present frames A, B, C, D, E, each corresponding to Present IDs (PresentCount) 1, 2, 3, 7, 8.
The application code looks like the following sequence.
Note that GetPresentStatistics will be processed after it is called, regardless of the state of FLIPEX mode PresentEx calls.
Windows Vista: The Present calls will be queued and then processed before the GetPresentStats call will be processed.
When an application detects that the presentation of certain frames are behind, it can skip those frames and correct the presentation to re-synchronize with the vblank. To do this, an application can simply not render the late frames and start rendering with the next correct frame in the queue. However, if an application has already started the rendering of late frames, it can use a new Present parameter in D3D9Ex called
Applications can synchronize video and audio streams in the same manner because the behavior of GetPresentStatistics does not change in that scenario.
D3D9Ex Flip Mode provides frame statistics information to windowed applications and full screen 9Ex applications.
Windows Vista: Use the DWM APIs for retrieving present statistics.
When Desktop Window Manager is turned off, windowed mode 9Ex applications using flip mode will receive present statistics information of limited accuracy.
Windows Vista:
If an application is not fast enough to keep up with the monitor's refresh rate, possibly due to slow hardware or lack of system resources, then it can experience a graphics glitch. A glitch is a so-called visual hiccup. If a monitor is set to refresh at 60 Hz, and the application can only manage 30 fps, then half of the frames will have glitches.
Applications can detect a glitch by keeping track of SynchRefreshCount. For example, an application might perform the following sequence of actions.
Describes the presentation parameters.
+Width of the new swap chain's back buffers, in pixels. If Windowed is
Height of the new swap chain's back buffers, in pixels. If Windowed is
The back buffer format. For more information about formats, see
In fact,
For windowed applications, the back buffer format no longer needs to match the display-mode format because color conversion can now be done by the hardware (if the hardware supports color conversion). The set of possible back buffer formats is constrained, but the runtime will allow any valid back buffer format to be presented to any desktop format. (There is the additional requirement that the device be operable in the desktop; devices typically do not operate in 8 bits per pixel modes.)
Full-screen applications cannot do color conversion.
This value can be between 0 and
The method fails if one back buffer cannot be created. The value of BackBufferCount influences what set of swap effects are allowed. Specifically, any
Member of the
Quality level. The valid range is between zero and one less than the level returned by pQualityLevels used by CheckDeviceMultiSampleType. Passing a larger value returns the error
Member of the
Differences between Direct3D9 and Direct3D9Ex In Direct3D9Ex, |
?
The device window determines the location and size of the back buffer on screen. This is used by Direct3D when the back buffer contents are copied to the front buffer during Present.
For a full-screen application, this is a handle to the top window (which is the focus window).
For applications that use multiple full-screen devices (such as a multimonitor system), exactly one device can use the focus window as the device window. All other devices must have unique device windows.
Note that no attempt is made by the runtime to reflect user changes in window size. The back buffer is not implicitly reset when this window is reset. However, the Present method does automatically track window position changes.
TRUE if the application runs windowed;
If this value is TRUE, Direct3D will manage depth buffers for the application. The device will create a depth-stencil buffer when it is created. The depth-stencil buffer will be automatically set as the render target of the device. When the device is reset, the depth-stencil buffer will be automatically destroyed and recreated in the new size.
If EnableAutoDepthStencil is TRUE, then AutoDepthStencilFormat must be a valid depth-stencil format.
Member of the
One of the
The rate at which the display adapter refreshes the screen. The value depends on the mode in which the application is running:
The maximum rate at which the swap chain's back buffers can be presented to the front buffer. For a detailed explanation of the modes and the intervals that are supported, see
Describes the raster status.
+TRUE if the raster is in the vertical blank period.
If InVBlank is
If InVBlank is TRUE, then this value is set to zero and can be ignored.
Describes a rectangular high-order patch.
+The following diagram identifies the parameters that specify a rectangle patch.
Each of the vertices in the vertex buffer is shown as a black dot. In this case, the vertex buffer has 20 vertices in it, 16 of which are in the rectangle patch. The stride is the number of vertices in the width of the vertex buffer, in this case five. The x offset to the first vertex is called the StartIndexVertexWidth and is in this case 1. The y offset to the first patch vertex is called the StartIndexVertexHeight and is in this case 0.
To render a stream of individual rectangular patches (non-mosaic), you should interpret your geometry as a long narrow (1 x N) rectangular patch. The
+RectInfo; RectInfo.Width = 4; + RectInfo.Height = 4; + RectInfo.Stride = 4; + RectInfo.Basis = ; + RectInfo.Order = D3DORDER_CUBIC; + RectInfo.StartVertexOffsetWidth = 0; + RectInfo.StartVertexOffsetHeight = 4*i; // The variable i is the index of the + // patch you want to render. +
Starting vertex offset width, in number of vertices.
Starting vertex offset height, in number of vertices.
Width of each vertex, in number of vertices.
Height of each vertex, in number of vertices.
Width of the imaginary two-dimensional vertex array, which occupies the same space as the vertex buffer. For an example, see the diagram below.
Member of the
Value | Order supported | Width and height |
---|---|---|
Linear, cubic, and quintic | Width = height = (DWORD)order + 1 | |
Linear, cubic, and quintic | Width = height > (DWORD)order | |
D3DBASIS_INTERPOLATE | Cubic | Width = height > (DWORD)order |
?
Member of the
Describes an off-screen render target used by an instance of
This method is used to return the creation parameters used when creating an
Describes a render surface.
+Width of the render surface, in pixels.
Height of the render surface, in pixels.
Member of the
If TRUE, the render surface supports a depth-stencil surface; otherwise this member is set to
If DepthStencil is set to TRUE, this parameter is a member of the
Resource statistics gathered by the D3DDEVINFO_ResourceManager when using the asynchronous query mechanism.
+Describes a vector key for use in key frame animation. It specifies a vector at a given time. This is used for scale and translation keys.
+Key frame time stamp.
Offset from the beginning of this structure, in bytes, to the string that contains the constant information.
Register set. See
The register index.
Number of registers.
Reserved.
Offset from the beginning of this structure, in bytes, to the string that contains the D3DXSHADER_TYPEINFO information.
Offset from the beginning of this structure, in bytes, to the string that contains the default value.
Helper structure for managing a shader constant table. This can also be done using
Shader constant information is included in a tab-delimited table of comments. All offsets are measured in bytes from the beginning of the structure. Entries in the constant table are sorted by Creator in ascending order.
A shader constant table can be managed with the
This size member is often initialized using the following:
+constantTable; + constantTable.Size = sizeof( ) +
Semantics map a parameter to vertex or pixel shader registers. They can also be optional descriptive strings attached to non-register parameters.
+Semantics are required for vertex and pixel shader, input and output registers.
+Options that identify how resources are used. See
Options that modify how the usage is interpreted. The usage and usage index make up a vertex declaration. See Vertex Declaration (Direct3D 9).
Percent of time processing shader data.
+For best performance, a balanced load is recommended.
+Percent of time in shader spent on memory accesses.
Percent of time processing (moving data around in registers or doing mathematical operations).
Describes a surface.
+Member of the
Member of the
Either the
Member of the
Member of the
Quality level. The valid range is between zero and one less than the level returned by pQualityLevels used by CheckDeviceMultiSampleType. Passing a larger value returns the error,
Width of the surface, in pixels.
Height of the surface, in pixels.
Describes a technique used by an effect.
+Some video cards can render two textures in a single pass. However, if a card does not have this capability, it is often possible to render the same effect in two passes, using one texture for each pass.
+String that contains the technique name.
Number of rendering passes the technique requires. See Remarks.
The number of annotations. See Add Information to Effect Parameters with_Annotations.
Describes an animation track and specifies blending weight, speed, and position for the track at a given time.
+Tracks with the same priority are blended together, and the two resulting values are then blended using the priority blend factor. A track must have an animation set (stored separately) associated with it.
+Priority type, as defined in
Weight value. The weight determines the proportion of this track to blend with other tracks.
Speed value. This is used similarly to a multiplier to scale the period of the track.
Time position of the track, in the local timeframe of its current animation set.
Track enable/disable. To enable, set to TRUE. To disable, set to
Describes a triangular high-order patch.
+For example, the following diagram identifies the vertex order and segment numbers for a cubic Bézier triangle patch. The vertex order determines the segment numbers used by DrawTriPatch. The offset is the number of bytes to the first triangle patch vertex in the vertex buffer.
+Starting vertex offset, in number of vertices.
Number of vertices.
Member of the
Member of the
Value | Number of vertices |
---|---|
10 | |
3 | |
N/A | |
21 |
?
N/A - Not available. Not supported.
DirectX 8.1 and later versions only.
The
DirectX 8.1 versions only. The Direct3D runtime calls a driver's D3dGetDriverState function to obtain vertex-cache information from the driver. In this D3dGetDriverState call, the runtime specifies the D3DDEVINFOID_VCACHE flag in the dwFlags member of the DD_GETDRIVERSTATEDATA structure that the runtime passes. The driver specifies vertex-cache information in a
DirectX 9.0 and later versions only. The Direct3D runtime specifies D3DDP2OP_CREATEQUERY and D3DDP2OP_ISSUEQUERY commands in calls to the driver's D3dDrawPrimitives2 callback to create driver-side resources for the query and then to asynchronously query the driver for vertex-cache information. In the call with the D3DDP2OP_CREATEQUERY command, the runtime specifies the
When the driver completes a vertex-cache query, the driver sets the total size of the response buffer in the dwErrorOffset member of the D3DHAL_DRAWPRIMITIVES2DATA structure and sets the ddrval member of D3DHAL_DRAWPRIMITIVES2DATA to
Specifies the bit pattern. The driver must specify the bit pattern as the CACH four-character code (FOURCC) value. The driver can use the MAKEFOURCC macro as follows to specify the FOURCC value as CACH:
MAKEFOURCC('C', 'A', 'C', 'H');
Specifies the method of mesh optimization. The driver can use one of the following values to specify the mesh optimization that it uses:
Value | Meaning |
---|---|
| Longest strips optimization |
D3DXMESHOPT_VCACHE (1) | Vertex-cache based optimization |
?
Specifies the effective size, in entries, for which the driver optimizes the vertex cache. The actual cache size is not required to be the size specified in CacheSize because in most cases the actual cache size turns out to be larger. The driver only specifies an optimized size in CacheSize if it also specifies D3DXMESHOPT_VCACHE in the OptMethod member.
Specifies the number that should be used as part of a trial-and-error procedure when determining when to restart the strips list. This number can be set from 1 to the value in the CacheSize member. Typically, the best values are near CacheSize/2.
Describes a vertex buffer.
+Member of the
Member of the
Combination of one or more
Member of the
Size of the vertex buffer, in bytes.
Combination of
Defines the vertex data layout. Each vertex can contain one or more data types, and each data type is described by a vertex element.
+Vertex data is defined using an array of
Stream number.
Offset from the beginning of the vertex data to the data associated with the particular data type.
The data type, specified as a
The method specifies the tessellator processing, which determines how the tessellator interprets (or operates on) the vertex data. For more information, see
Defines what the data will be used for; that is, the interoperability between vertex data layouts and vertex shaders. Each usage acts to bind a vertex declaration to a vertex shader. In some cases, they have a special interpretation. For example, an element that specifies
Modifies the usage data to allow the user to specify multiple usage types.
Vertex shader caps.
+Instruction predication is supported if this value is nonzero. See setp_comp - vs.
Either 0 or 24, which represents the depth of the dynamic flow control instruction nesting. See
The number of temporary registers supported. See
The depth of nesting of the loop - vs/rep - vs and call - vs/callnz bool - vs instructions. See
Reports the number of triangles that have been processed and clipped by the runtime's software vertex processing.
+Use the debug runtime and software vertex processing to get the number of non-clipped and clipped primitives for a particular scene. Primitives will typically be clipped based on a guard band (if one is present). The clipping guard band is set with parameters such as GuardBandLeft in
Total number of triangles that are not clipped in this frame.
Number of new triangles generated by clipping.
Describes a volume.
+Member of the
Member of the
Currently not used. Always returned as 0.
Member of the
Width of the volume, in pixels.
Height of the volume, in pixels.
Depth of the volume, in pixels.
Specifies tolerance values for each vertex component when comparing vertices to determine if they are similar enough to be welded together.
+The LPD3DXWELDEPSILONS type is defined as a reference to the
typedef+*LPD3DXWELDEPSILONS; +
Position
Blend weight
Normal
Point size value
Specular lighting value
Diffuse lighting value
Tangent
Binormal
Tessellation factor
Eight texture coordinates
Identifies compressed key frame animation data.
+Total size, in bytes, of the compressed data in the compressed key frame animation data buffer.
Number of animation key frame ticks that occur per second.
Type of the animation set playback loop. See
Minimum buffer size, in bytes, required to hold compressed key frame animation data. Value is equal to ( ( CompressedBlockSize + 3 ) / 4 ).
Callback interface to notify the application when an asynchronous method completes.
+For more information about asynchronous methods in Microsoft Media Foundation, see Asynchronous Callback Methods.
This interface is also used to perform a work item in a Media Foundation work-queue. For more information, see Work Queues.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Called when an asynchronous operation is completed.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Within your implementation of Invoke, call the corresponding End... method.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
This value can specify one of the standard Media Foundation work queues, or a work queue created by the application. For list of standard Media Foundation work queues, see Work Queue Identifiers. To create a new work queue, call
If the work queue is not compatible with the value returned in pdwFlags, the Media Foundation platform returns
Represents a byte stream from some data source, which might be a local file, a network file, or some other source. The
The following functions return
A byte stream for a media source can be opened with read access. A byte stream for an archive media sink should be opened with both read and write access. (Read access may be required, because the archive sink might need to read portions of the file as it writes.)
Some implementations of this interface also expose one or more of the following interfaces:
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Represents a byte stream from some data source, which might be a local file, a network file, or some other source. The
The following functions return
A byte stream for a media source can be opened with read access. A byte stream for an archive media sink should be opened with both read and write access. (Read access may be required, because the archive sink might need to read portions of the file as it writes.)
Some implementations of this interface also expose one or more of the following interfaces:
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Reads data from the stream.
+Pointer to a buffer that receives the data. The caller must allocate the buffer.
Size of the buffer in bytes.
This method reads at most cb bytes from the current position in the stream and copies them into the buffer provided by the caller. The number of bytes that were read is returned in the pcbRead parameter. The method does not return an error code on reaching the end of the file, so the application should check the value in pcbRead after the method returns.
This method is synchronous. It blocks until the read operation completes.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Begins an asynchronous read operation from the stream.
+Pointer to a buffer that receives the data. The caller must allocate the buffer.
Size of the buffer in bytes.
Pointer to the
Pointer to the
If this method succeeds, it returns
When all of the data has been read into the buffer, the callback object's
Do not read from, write to, free, or reallocate the buffer while an asynchronous read is pending.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Completes an asynchronous read operation.
+ Pointer to the
Call this method after the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Writes data to the stream.
+Pointer to a buffer that contains the data to write.
Size of the buffer in bytes.
If this method succeeds, it returns
This method writes the contents of the pb buffer to the stream, starting at the current stream position. The number of bytes that were written is returned in the pcbWritten parameter.
This method is synchronous. It blocks until the write operation completes.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Begins an asynchronous write operation to the stream.
+Pointer to a buffer containing the data to write.
Size of the buffer in bytes.
Pointer to the
Pointer to the
If this method succeeds, it returns
When all of the data has been written to the stream, the callback object's
Do not reallocate, free, or write to the buffer while an asynchronous write is still pending.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Completes an asynchronous write operation.
+Pointer to the
Call this method when the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Moves the current position in the stream by a specified offset.
+ Specifies the origin of the seek as a member of the
Specifies the new position, as a byte offset from the seek origin.
Specifies zero or more flags. The following flags are defined.
Value | Meaning |
---|---|
| All pending I/O requests are canceled after the seek request completes successfully. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Clears any internal buffers used by the stream. If you are writing to the stream, the buffered data is written to the underlying file or device.
+If this method succeeds, it returns
If the byte stream is read-only, this method has no effect.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Closes the stream and releases any resources associated with the stream, such as sockets or file handles. This method also cancels any pending asynchronous I/O requests.
+If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Retrieves the characteristics of the byte stream.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Retrieves the length of the stream.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Retrieves the current read or write position in the stream.
+The methods that update the current position are Read, BeginRead, Write, BeginWrite, SetCurrentPosition, and Seek.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Queries whether the current position has reached the end of the stream.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Reads data from the stream.
+Pointer to a buffer that receives the data. The caller must allocate the buffer.
Size of the buffer in bytes.
This method reads at most cb bytes from the current position in the stream and copies them into the buffer provided by the caller. The number of bytes that were read is returned in the pcbRead parameter. The method does not return an error code on reaching the end of the file, so the application should check the value in pcbRead after the method returns.
This method is synchronous. It blocks until the read operation completes.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Begins an asynchronous read operation from the stream.
+Pointer to a buffer that receives the data. The caller must allocate the buffer.
Size of the buffer in bytes.
Pointer to the
Pointer to the
If this method succeeds, it returns
When all of the data has been read into the buffer, the callback object's
Do not read from, write to, free, or reallocate the buffer while an asynchronous read is pending.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Completes an asynchronous read operation.
+ Pointer to the
Call this method after the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Writes data to the stream.
+Pointer to a buffer that contains the data to write.
Size of the buffer in bytes.
This method writes the contents of the pb buffer to the stream, starting at the current stream position. The number of bytes that were written is returned in the pcbWritten parameter.
This method is synchronous. It blocks until the write operation completes.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Begins an asynchronous write operation to the stream.
+Pointer to a buffer containing the data to write.
Size of the buffer in bytes.
Pointer to the
Pointer to the
If this method succeeds, it returns
When all of the data has been written to the stream, the callback object's
Do not reallocate, free, or write to the buffer while an asynchronous write is still pending.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Completes an asynchronous write operation.
+Pointer to the
Call this method when the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Moves the current position in the stream by a specified offset.
+ Specifies the origin of the seek as a member of the
Specifies the new position, as a byte offset from the seek origin.
Specifies zero or more flags. The following flags are defined.
Value | Meaning |
---|---|
| All pending I/O requests are canceled after the seek request completes successfully. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Clears any internal buffers used by the stream. If you are writing to the stream, the buffered data is written to the underlying file or device.
+If this method succeeds, it returns
If the byte stream is read-only, this method has no effect.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Closes the stream and releases any resources associated with the stream, such as sockets or file handles. This method also cancels any pending asynchronous I/O requests.
+If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the characteristics of the byte stream.
+Receives a bitwise OR of zero or more flags. The following flags are defined.
Value | Meaning |
---|---|
| The byte stream can be read. |
| The byte stream can be written to. |
| The byte stream can be seeked. |
| The byte stream is from a remote source, such as a network. |
| The byte stream represents a file directory. |
| Seeking within this stream might be slow. For example, the byte stream might download from a network. |
| The byte stream is currently downloading data to a local cache. Read operations on the byte stream might take longer until the data is completely downloaded. This flag is cleared after all of the data has been downloaded. If the MFBYTESTREAM_HAS_SLOW_SEEK flag is also set, it means the byte stream must download the entire file sequentially. Otherwise, the byte stream can respond to seek requests by restarting the download from a new point in the stream. |
| Another thread or process can open this byte stream for writing. If this flag is present, the length of the byte stream could change while it is being read. This flag can affect the behavior of byte-stream handlers. For more information, see Note: Requires Windows 7 or later. |
| The byte stream is not currently using the network to receive the content. Networking hardware may enter a power saving state when this bit is set. Note: Requires Windows 8 or later. |
?
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the length of the stream.
+Receives the length of the stream, in bytes. If the length is unknown, this value is -1.
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets the length of the stream.
+Length of the stream in bytes.
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the current read or write position in the stream.
+Receives the current position, in bytes.
If this method succeeds, it returns
The methods that update the current position are Read, BeginRead, Write, BeginWrite, SetCurrentPosition, and Seek.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets the current read or write position.
+New position in the stream, as a byte offset from the start of the stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
?
If the new position is larger than the length of the stream, the method returns E_INVALIDARG.
Implementation notes: This method should update the current position in the stream by setting the current position to the value passed in to the qwPosition parameter. Other methods that can update the current position are Read, BeginRead, Write, BeginWrite, and Seek. +
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Queries whether the current position has reached the end of the stream.
+ Receives the value TRUE if the end of the stream has been reached, or
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Reads data from the stream.
+Pointer to a buffer that receives the data. The caller must allocate the buffer.
Size of the buffer in bytes.
Receives the number of bytes that are copied into the buffer. This parameter cannot be
If this method succeeds, it returns
This method reads at most cb bytes from the current position in the stream and copies them into the buffer provided by the caller. The number of bytes that were read is returned in the pcbRead parameter. The method does not return an error code on reaching the end of the file, so the application should check the value in pcbRead after the method returns.
This method is synchronous. It blocks until the read operation completes.
Implementation notes: This method should update the current position in the stream by adding the number of bytes that were read, which is specified by the value returned in the pcbRead parameter, to the current position. Other methods that can update the current position are Read, Write, BeginWrite, Seek, and SetCurrentPosition. +
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Begins an asynchronous read operation from the stream.
+Pointer to a buffer that receives the data. The caller must allocate the buffer.
Size of the buffer in bytes.
Pointer to the
Pointer to the
If this method succeeds, it returns
When all of the data has been read into the buffer, the callback object's
Do not read from, write to, free, or reallocate the buffer while an asynchronous read is pending.
Implementation notes: This method should update the current position in the stream by adding the number of bytes that will be read, which is specified by the value returned in the pcbRead parameter, to the current position. Other methods that can update the current position are BeginRead, Write, BeginWrite, Seek, and SetCurrentPosition. +
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Completes an asynchronous read operation.
+ Pointer to the
Receives the number of bytes that were read.
If this method succeeds, it returns
Call this method after the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Represents a byte stream from some data source, which might be a local file, a network file, or some other source. The
The following functions return
A byte stream for a media source can be opened with read access. A byte stream for an archive media sink should be opened with both read and write access. (Read access may be required, because the archive sink might need to read portions of the file as it writes.)
Some implementations of this interface also expose one or more of the following interfaces:
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Begins an asynchronous write operation to the stream.
+Pointer to a buffer containing the data to write.
Size of the buffer in bytes.
Pointer to the
Pointer to the
If this method succeeds, it returns
When all of the data has been written to the stream, the callback object's
Do not reallocate, free, or write to the buffer while an asynchronous write is still pending.
Implementation notes: This method should update the current position in the stream by adding the number of bytes that will be written to the stream, which is specified by the value returned in the pcbWritten, to the current position. Other methods that can update the current position are Read, BeginRead, Write, Seek, and SetCurrentPosition. +
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Completes an asynchronous write operation.
+Pointer to the
Receives the number of bytes that were written.
If this method succeeds, it returns
Call this method when the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Moves the current position in the stream by a specified offset.
+ Specifies the origin of the seek as a member of the
Specifies the new position, as a byte offset from the seek origin.
Specifies zero or more flags. The following flags are defined.
Value | Meaning |
---|---|
| All pending I/O requests are canceled after the seek request completes successfully. |
?
Receives the new position after the seek.
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Implementation notes: This method should update the current position in the stream by adding the qwSeekOffset to the position specified by the SeekOrigin parameter. This should be the same value passed back in the pqwCurrentPosition parameter. + Other methods that can update the current position are Read, BeginRead, Write, BeginWrite, and SetCurrentPosition. +
+Clears any internal buffers used by the stream. If you are writing to the stream, the buffered data is written to the underlying file or device.
+If this method succeeds, it returns
If the byte stream is read-only, this method has no effect.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Closes the stream and releases any resources associated with the stream, such as sockets or file handles. This method also cancels any pending asynchronous I/O requests.
+If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Retrieves the characteristics of the byte stream.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Retrieves the length of the stream.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Retrieves the current read or write position in the stream.
+The methods that update the current position are Read, BeginRead, Write, BeginWrite, SetCurrentPosition, and Seek.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Queries whether the current position has reached the end of the stream.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Receives state-change notifications from the presentation clock.
+To receive state-change notifications from the presentation clock, implement this interface and call
This interface must be implemented by:
Presentation time sources. The presentation clock uses this interface to request state changes from the time source.
Media sinks. Media sinks use this interface to get notifications when the presentation clock changes.
Other objects that need to be notified can implement this interface.
+Called when the sample-grabber sink receives a new media sample.
+If you use the sample-grabber sink in a playback topology, this method should return quickly, or it might interfere with playback. Do not block the thread, wait on events, or perform other lengthy operations inside this method.
+Callback interface to get media data from the sample-grabber sink.
+The sample-grabber sink enables an application to get data from the Media Foundation pipeline without implementing a custom media sink. To use the sample-grabber sink, the application must perform the following steps:
Implement the
Call
Create a topology that includes an output node with the sink's
Pass this topology to the Media Session.
During playback, the sample-grabber sink calls methods on the application's callback.
You cannot use the sample-grabber sink to get protected content.
+Applies to: desktop apps only
Gets the range of values for an image filter that the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device supports.
+To find out which image filters the device supports, check the FilterCaps member of the
Gets the range of values for an image filter that the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device supports.
+To find out which image filters the device supports, check the FilterCaps member of the
Applies to: desktop apps only
Creates a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+A reference to the
A reference to a
A member of the
Use the
Creates one or more Microsoft Direct3D video surfaces.
+The width of each surface, in pixels.
The height of each surface, in pixels.
The pixel format, specified as a
The memory pool in which the surface is created. This parameter must equal the InputPool member of the
Reserved. Set to 0.
The type of surface to create, specified as a member of the
The number of surfaces to create.
A reference to an array of
Reserved. Set to
If this method succeeds, it returns
Gets the capabilities of the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+A reference to a
If this method succeeds, it returns
Gets a list of the output formats supported by the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The number of formats to retrieve. This parameter must equal the OutputFormatCount member of the
A reference to an array of
If this method succeeds, it returns
The list of formats can include both
Gets a list of the input formats supported by the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The number of formats to retrieve. This parameter must equal the InputFormatCount member of the
A reference to an array of
If this method succeeds, it returns
The list of formats can include both
Gets the capabilities of one or more Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processors.
+The number of elements in the pCaps array. This parameter must equal the VideoProcessorCount member of the
A reference to an array of
If this method succeeds, it returns
Gets a list of custom rates that a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor supports. Custom rates are used for frame-rate conversion and inverse telecine (IVTC).
+A
The number of rates to retrieve. This parameter must equal the CustomRateCount member of the
A reference to an array of
If this method succeeds, it returns
Gets the range of values for an image filter that the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device supports.
+The type of image filter, specified as a member of the
A reference to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Filter parameter is invalid or the device does not support the specified filter. |
?
To find out which image filters the device supports, check the FilterCaps member of the
Creates a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor.
+A
Receives a reference to the
If this method succeeds, it returns
Gets the capabilities of the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+Applies to: desktop apps only
Enables two threads to share the same Direct3D 9 device, and provides access to the DirectX Video Acceleration (DXVA) features of the device.
+This interface is exposed by the Direct3D Device Manager. To create the Direct3D device manager, call
To get this interface from the Enhanced Video Renderer (EVR), call
The Direct3D Device Manager supports Direct3D 9 devices only. It does not support DXGI devices.
+Enables two threads to share the same Direct3D 9 device, and provides access to the DirectX Video Acceleration (DXVA) features of the device.
+This interface is exposed by the Direct3D Device Manager. To create the Direct3D device manager, call
To get this interface from the Enhanced Video Renderer (EVR), call
The Direct3D Device Manager supports Direct3D 9 devices only. It does not support DXGI devices.
Windows Store apps must use IMFDXGIDeviceManager and Direct3D 11 Video APIs.
+Applies to: desktop apps only
Creates an instance of the Direct3D Device Manager.
+If this function succeeds, it returns
Sets the Direct3D device or notifies the device manager that the Direct3D device was reset.
+Pointer to the
Token received in the pResetToken parameter of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid token |
| Direct3D device error. |
?
When you first create the Direct3D device manager, call this method with a reference to the Direct3D device. The device manager does not create the device; the caller must provide the device reference initially.
Also call this method if the Direct3D device becomes lost and you need to reset the device or create a new device. This occurs if
The resetToken parameter ensures that only the component which originally created the device manager can invalidate the current device.
If this method succeeds, all open device handles become invalid.
+Gets a handle to the Direct3D device.
+Receives the device handle.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Direct3D device manager was not initialized. The owner of the device must call |
?
To get the Direct3D device's
To test whether a device handle is still valid, call
Closes a Direct3D device handle. Call this method to release a device handle retrieved by the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid handle. |
?
Tests whether a Direct3D device handle is valid.
+Handle to a Direct3D device. To get a device handle, call
The method returns an
Return code | Description |
---|---|
| The device handle is valid. |
| The specified handle is not a Direct3D device handle. |
| The device handle is invalid. |
?
If the method returns DXVA2_E_NEW_VIDEO_DEVICE, call
Gives the caller exclusive access to the Direct3D device.
+A handle to the Direct3D device. To get the device handle, call
Receives a reference to the device's
Specifies whether to wait for the device lock. If the device is already locked and this parameter is TRUE, the method blocks until the device is unlocked. Otherwise, if the device is locked and this parameter is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The device handle is invalid. |
| The Direct3D device manager was not initialized. The owner of the device must call |
| The device is locked and fBlock is |
| The specified handle is not a Direct3D device handle. |
?
When you are done using the Direct3D device, call
If the method returns DXVA2_E_NEW_VIDEO_DEVICE, call
If fBlock is TRUE, this method can potentially deadlock. For example, it will deadlock if a thread calls LockDevice and then waits on another thread that calls LockDevice. It will also deadlock if a thread calls LockDevice twice without calling UnlockDevice in between.
+Unlocks the Direct3D device. Call this method to release the device after calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified device handle is not locked, or is not a valid handle. |
?
Gets a DirectX Video Acceleration (DXVA) service interface.
+ A handle to a Direct3D device. To get a device handle, call
The interface identifier (IID) of the requested interface. The Direct3D device might support the following DXVA service interfaces:
Receives a reference to the requested interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The device handle is invalid. |
| The Direct3D device does not support video acceleration. |
| The Direct3D device manager was not initialized. The owner of the device must call |
| The specified handle is not a Direct3D device handle. |
?
If the method returns DXVA2_E_NEW_VIDEO_DEVICE, call
Applies to: desktop apps only
Provides DirectX Video Acceleration (DXVA) services from a Direct3D device. To get a reference to this interface, call
This is the base interface for DXVA services. The Direct3D device can support any of the following DXVA services, which derive from
Provides DirectX Video Acceleration (DXVA) services from a Direct3D device. To get a reference to this interface, call
This is the base interface for DXVA services. The Direct3D device can support any of the following DXVA services, which derive from
Applies to: desktop apps only
Creates a DirectX Video Acceleration (DXVA) services object. Call this function if your application uses DXVA directly, without using DirectShow or Media Foundation.
+ A reference to the
If this function succeeds, it returns
Creates a DirectX Video Acceleration (DXVA) video processor or DXVA decoder render target.
+The width of the surface, in pixels.
The height of the surface, in pixels.
The number of back buffers. The method creates BackBuffers + 1 surfaces.
The pixel format, specified as a
The memory pool in which to create the surface, specified as a
Reserved. Set this value to zero.
The type of surface to create. Use one of the following values.
Value | Meaning |
---|---|
Video decoder render target. | |
Video processor render target. Used for | |
Software render target. This surface type is for use with software DXVA devices. |
?
The address of an array of
A reference to a handle that is used to share the surfaces between Direct3D devices. Set this parameter to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid parameter |
| The DirectX Video Acceleration Manager is not initialized. |
| |
?
If the method returns E_FAIL, try calling
Applies to: desktop apps only
Provides access to DirectX Video Acceleration (DXVA) decoder services. Use this interface to query which hardware-accelerated decoding operations are available and to create DXVA video decoder devices.
To get a reference to this interface, call
Provides access to DirectX Video Acceleration (DXVA) decoder services. Use this interface to query which hardware-accelerated decoding operations are available and to create DXVA video decoder devices.
To get a reference to this interface, call
Applies to: desktop apps only
Creates a DirectX Video Acceleration (DXVA) services object. Call this function if your application uses DXVA directly, without using DirectShow or Media Foundation.
+ A reference to the
If this function succeeds, it returns
Retrieves an array of GUIDs that identifies the decoder devices supported by the graphics hardware.
+Receives the number of GUIDs.
Receives an array of GUIDs. The size of the array is retrieved in the Count parameter. The method allocates the memory for the array. The caller must free the memory by calling CoTaskMemFree.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Error from the Direct3D device. |
| If the Microsoft Basic Display Adapter is being used or the Direct3D 11 device type is the reference rasterizer. These devices do not support video decoders. |
?
The following decoder GUIDs are defined. Some of these GUIDs have alternate names, shown in parentheses.
Description | |
---|---|
DXVA2_ModeH264_A (DXVA2_ModeH264_MoComp_NoFGT) | H.264 motion compensation (MoComp), no film grain technology (FGT). |
DXVA2_ModeH264_B (DXVA2_ModeH264_MoComp_FGT) | H.264 MoComp, FGT. |
DXVA2_ModeH264_C (DXVA2_ModeH264_IDCT_NoFGT) | H.264 inverse discrete cosine transform (IDCT), no FGT. |
DXVA2_ModeH264_D (DXVA2_ModeH264_IDCT_FGT) | H.264 IDCT, FGT. |
DXVA2_ModeH264_E (DXVA2_ModeH264_VLD_NoFGT) | H.264 VLD, no FGT. |
DXVA2_ModeH264_F (DXVA2_ModeH264_VLD_FGT) | H.264 variable-length decoder (VLD), FGT. |
DXVA2_ModeMPEG2_IDCT | MPEG-2 IDCT. |
DXVA2_ModeMPEG2_MoComp | MPEG-2 MoComp. |
DXVA2_ModeMPEG2_VLD | MPEG-2 VLD. |
DXVA2_ModeVC1_A (DXVA2_ModeVC1_PostProc) | VC-1 post processing. |
DXVA2_ModeVC1_B (DXVA2_ModeVC1_MoComp) | VC-1 MoComp. |
DXVA2_ModeVC1_C (DXVA2_ModeVC1_IDCT) | VC-1 IDCT. |
DXVA2_ModeVC1_D (DXVA2_ModeVC1_VLD) | VC-1 VLD. |
DXVA2_ModeWMV8_A (DXVA2_ModeWMV8_PostProc) | Windows Media Video 8 post processing. |
DXVA2_ModeWMV8_B (DXVA2_ModeWMV8_MoComp) | Windows Media Video 8 MoComp. |
DXVA2_ModeWMV9_A (DXVA2_ModeWMV9_PostProc) | Windows Media Video 9 post processing. |
DXVA2_ModeWMV9_B (DXVA2_ModeWMV9_MoComp) | Windows Media Video 9 MoComp. |
DXVA2_ModeWMV9_C (DXVA2_ModeWMV9_IDCT) | Windows Media Video 9 IDCT. |
?
+
Retrieves the supported render targets for a specified decoder device.
+Receives the number of formats.
Receives an array of formats, specified as
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Gets the configurations that are available for a decoder device.
+A
A reference to a
Reserved. Set to
Receives the number of configurations.
Receives an array of
If this method succeeds, it returns
Creates a video decoder device.
+Pointer to a
Pointer to a
Pointer to an array of
Size of the ppDecoderRenderTargets array. This value cannot be zero.
Receives a reference to the decoder's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Creates a video decoder device.
+Pointer to a
Pointer to a
Pointer to an array of
Size of the ppDecoderRenderTargets array. This value cannot be zero.
Receives a reference to the decoder's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Applies to: desktop apps only
Provides access to DirectX Video Acceleration (DXVA) video processing services.
Use this interface to query which hardware-accelerated video processing operations are available and to create DXVA video processor devices. To obtain a reference to this interface, call
Provides access to DirectX Video Acceleration (DXVA) video processing services.
Use this interface to query which hardware-accelerated video processing operations are available and to create DXVA video processor devices. To obtain a reference to this interface, call
Applies to: desktop apps only
Creates a DirectX Video Acceleration (DXVA) services object. Call this function if your application uses DXVA directly, without using DirectShow or Media Foundation.
+ A reference to the
If this function succeeds, it returns
Registers a software video processing device.
+Pointer to an initialization function.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Gets an array of GUIDs which identify the video processors supported by the graphics hardware.
+ Pointer to a
Receives the number of GUIDs.
Receives an array of GUIDs. The size of the array is retrieved in the pCount parameter. The method allocates the memory for the array. The caller must free the memory by calling CoTaskMemFree.
If this method succeeds, it returns
The following video processor GUIDs are predefined.
Description | |
---|---|
DXVA2_VideoProcBobDevice | Bob deinterlace device. This device uses a "bob" algorithm to deinterlace the video. Bob algorithms create missing field lines by interpolating the lines in a single field. |
DXVA2_VideoProcProgressiveDevice | Progressive video device. This device is available for progressive video, which does not require a deinterlace algorithm. |
DXVA2_VideoProcSoftwareDevice | Reference (software) device. |
?
The graphics device may define additional vendor-specific GUIDs. The driver provides the list of GUIDs in descending quality order. The mode with the highest quality is first in the list. To get the capabilities of each mode, call
Gets the render target formats that a video processor device supports. The list may include RGB and YUV formats.
+ A
A reference to a
Receives the number of formats.
Receives an array of formats, specified as
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Gets a list of substream formats supported by a specified video processor device.
+ A
A reference to a
The format of the render target surface, specified as a
Receives the number of elements returned in the ppFormats array.
Receives an array of
If this method succeeds, it returns
Gets the capabilities of a specified video processor device.
+ A
A reference to a
The format of the render target surface, specified as a
A reference to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Gets the range of values for a video processor (ProcAmp) setting.
+A
A reference to a
The format of the render target surface, specified as a
The ProcAmp setting to query. See ProcAmp Settings.
A reference to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the range of values for an image filter supported by a video processor device.
+ A
A reference to a
The format of the render target surface, specified as a
The filter setting to query. See DXVA Image Filter Settings.
A reference to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Creates a video processor device.
+A
A reference to a
The format of the render target surface, specified as a
The maximum number of substreams that will be used with this device.
Receives a reference to the video processor's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Provides information about the result of an asynchronous operation.
+Use this interface to complete an asynchronous operation. You get a reference to this interface when your callback object's
If you are implementing an asynchronous method, call
Any custom implementation of this interface must inherit the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Returns the state object specified by the caller in the asynchronous Begin method.
+Receives a reference to the state object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There is no state object associated with this asynchronous result. |
?
The caller of the asynchronous method specifies the state object, and can use it for any caller-defined purpose. The state object can be
If you are implementing an asynchronous method, set the state object through the punkState parameter of the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Returns the status of the asynchronous operation.
+The method returns an
Return code | Description |
---|---|
| The operation completed successfully. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets the status of the asynchronous operation.
+The status of the asynchronous operation.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If you implement an asynchronous method, call SetStatus to set the status code for the operation.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Returns an object associated with the asynchronous operation. The type of object, if any, depends on the asynchronous method that was called.
+Receives a reference to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There is no object associated with this asynchronous result. |
?
Typically, this object is used by the component that implements the asynchronous method. It provides a way for the function that invokes the callback to pass information to the asynchronous End... method that completes the operation.
If you are implementing an asynchronous method, you can set the object through the punkObject parameter of the
If the asynchronous result object's internal
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Returns the state object specified by the caller in the asynchronous Begin method, without incrementing the object's reference count.
+Returns a reference to the state object's
This method cannot be called remotely.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
The caller of the asynchronous method specifies the state object, and can use it for any caller-defined purpose. The state object can be
If you are implementing an asynchronous method, set the state object through the punkState parameter of the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Get or sets the status of the asynchronous operation.
+The method returns an
Return code | Description |
---|---|
| The operation completed successfully. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Returns an object associated with the asynchronous operation. The type of object, if any, depends on the asynchronous method that was called.
+Receives a reference to the object's
Typically, this object is used by the component that implements the asynchronous method. It provides a way for the function that invokes the callback to pass information to the asynchronous End... method that completes the operation.
If you are implementing an asynchronous method, you can set the object through the punkObject parameter of the
If the asynchronous result object's internal
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Specifies the processing mode for the voice capture DSP. This enumeration is used with the MFPKEY_WMAAECMA_SYSTEM_MODE property.
+In all modes, the DSP applies noise suppression and automatic gain control by default. To disable noise suppression, set the MFPKEY_WMAAECMA_FEATR_NS property. To disable automatic gain control, set the MFPKEY_WMAAECMA_FEATR_AGC property.
+Specifies the type of voice activity detection (VAD) for the voice capture DSP. This enumeration is used with the MFPKEY_WMAAECMA_FEATR_VAD property.
+
Specifies how to open or create a file.
+Open an existing file. Fail if the file does not exist.
Create a new file. Fail if the file already exists.
Open an existing file and truncate it, so that the size is zero bytes. Fail if the file does not already exist.
If the file does not exist, create a new file. If the file exists, open it.
Create a new file. If the file exists, overwrite the file.
Specifies how the voice capture DSP performs microphone array processing. This enumeration is used with the MFPKEY_WMAAECMA_FEATR_MICARR_MODE property.
+A byte offset relative to the origin parameter.
A value of type SeekOrigin indicating the reference point used to obtain the new position.
Exception type | Condition |
---|---|
IOException | An I/O error occurs. |
NotSupportedException | The stream does not support seeking, such as if the stream is constructed from a pipe or console output. |
ObjectDisposedException | Methods were called after the stream was closed. |
For an example of creating a file and writing text to a file, see Writing Text to a File. For an example of reading text from a file, see Reading Text from a File. For an example of reading from and writing to a binary file, see Reading and Writing to a Newly Created Data File.
Use the CanSeek property to determine whether the current instance supports seeking.
If offset is negative, the new position is required to precede the position specified by origin by the number of bytes specified by offset. If offset is zero (0), the new position is required to be the position specified by origin. If offset is positive, the new position is required to follow the position specified by origin by the number of bytes specified by offset.
Classes derived from Stream that support seeking must override this method to provide the functionality described above.
Seeking to any location beyond the length of the stream is supported.
Windows 98, Windows Server 2000 SP4, Windows CE, Windows Millennium Edition, Windows Mobile for Pocket PC, Windows Mobile for Smartphone, Windows Server 2003, Windows XP Media Center Edition, Windows XP Professional x64 Edition, Windows XP SP2, Windows XP Starter Edition
The Microsoft .NET Framework 3.0 is supported on Windows Vista, Microsoft Windows XP SP2, and Windows Server 2003 SP1. .NET FrameworkSupported in: 3.0, 2.0, 1.1, 1.0.NET Compact FrameworkSupported in: 2.0, 1.0XNA FrameworkSupported in: 1.0ReferenceStream ClassStream MembersSystem.IO NamespaceOther ResourcesFile and Stream I/OReading Text from a FileWriting Text to a File +A byte offset relative to the origin parameter.
The
Currently, only
Defines the data types used for the codec and DSP properties that are accessed by using the methods of the
Most properties are accessed by using the methods of the
Configures the "leaky bucket" parameters on a video encoder.
This interface is implemented by all of the encoder objects. You can get a reference to the
Sets the buffer size in bits. +
+The buffer size, in bits.
This method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is not implemented on the audio encoder objects. If you call this method from the
The buffer size is equal to the bit rate of the stream multiplied by the buffer window. For example, a stream with a bit rate of 28 kilobits per second with a buffer window of 3 seconds would have a buffer of 28000 bits per second x 3 seconds = 84000 bits.
This method is an alternative to setting the MFPKEY_VIDEOWINDOW property. Using this method does not alter the bit rate of the stream, but does alter the buffer window. Using the stream with a bit rate of 28000 bits per second from the previous example, setting the buffer size to 84000 using this method would have exactly the same effect as setting MFPKEY_VIDEOWINDOW to 3000 milliseconds (3 seconds).
+Retrieves the current size of the buffer in bits. +
+Pointer to a variable containing the buffer size, in bits.
This method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The buffer size is equal to the bit rate of the stream multiplied by the buffer window. For example, a stream with a bit rate of 28 kilobits per second with a buffer window of 3 seconds would have a buffer of 28000 bits per second x 3 seconds = 84000 bits.
+Not implemented in this release. +
+This method always returns E_NOTIMPL.
Not implemented in this release. +
+This method always returns E_NOTIMPL.
Gets the time stamp of the next video frame to be decoded.
This interface is implemented by the video decoders. You can obtain a reference to
Queries the decoder for the time stamp of the upcoming output sample. Use this method if you need to know the time of the sample before calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is important when decoding video using frame interpolation, because the rendering application cannot predict the time stamps of interpolated frames.
+Gets the private codec data that must be appended to the output media type. This codec data is required for properly decoding Windows Media Video content.
This interface is implemented by the video encoder object and the screen capture encoder object. You do not need codec private data to decode content of the subtype WMCMEDIASUBTYPE_WMV1 (Windows Media Video version 7). For any other output type, you must obtain a reference to the encoder's
Gives the codec the output media type without the codec data. This enables the codec to generate the private data. +
+Address of the partial output media type.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The
If you are setting properties on an encoder, you must finish that configuration before getting the private data. Changing properties invalidates any private data previously retrieved. If you change properties after getting the private data, retrieve it again and reset the output type.
You must call this method before calling
Retrieves the codec data for the video content based on the output type passed using the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If you are setting properties on the encoder object, you must finish that configuration before getting the private data. Changing properties invalidates any private data that was previously retrieved. If you change properties after getting the private data, retrieve it again and reset the output type.
You must call this method after providing the codec with the output media type (without the private data appended) by calling
After retrieving the private data, allocate a buffer the size of
Gives the codec the output media type without the codec data. This enables the codec to generate the private data. +
+The
If you are setting properties on an encoder, you must finish that configuration before getting the private data. Changing properties invalidates any private data previously retrieved. If you change properties after getting the private data, retrieve it again and reset the output type.
You must call this method before calling
Provides methods that retrieve format-specific codec properties.
This interface is implemented by the video encoder objects. You can obtain a reference to
This interface enables you to receive information about a specific media type that is supported by a video encoder.
+Retrieves a format property for an output media type. Use this method to get information about enumerated audio formats. +
+Pointer to the output media type.
Wide-character, null-terminated string containing the property name. The properties listed in the following table are supported only through the
Property name constant | Meaning |
---|---|
| Retrieves the speech modes available for the format (used only by the Windows Media Audio 9 Voice codec). Value contains flags identical to the values used to specify the mode for MFPKEY_WMAVOICE_ENC_MusicSpeechClassMode. |
?
The properties in the following list are also supported. They are used with
Address of a variable that receives the data type of the property value.
Address of the byte buffer that receives the property value.
Pointer to the size of the value buffer, in bytes. If pValue is
If this method succeeds, it returns
Retrieves a codec property specific to an output format. +
+The output format to which the property applies. Set this value to the FOURCC value of the desired video format.
Wide-character, null-terminated string containing the property name. The properties listed in the following table are supported only through the
Property name constant | Meaning |
---|---|
| Retrieves the name of the codec that is associated with the format (or FOURCC). This is an alternative to the |
| Retrieves the encoding modes supported by the codec. The value returned contains one or more of the following flags:
|
?
Address of a variable that receives the data type of the property value.
Address of the byte buffer that receives the property value.
Pointer to the size of the value buffer, in bytes. If pValue is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves names and descriptive strings for codecs and formats.
This interface is implemented by all of the codec encoder objects. You can retrieve a reference to the
Retrieves the name of a codec. +
+Pointer to the output media type. If
Size of szName buffer in wide characters.
Address of the wide-character buffer that receives the name. If
Pointer to the required buffer length in wide characters, including the null terminating character.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the description of an output format. +
+Pointer to the output media type. If
Size of szDescription buffer, in wide characters.
Address of the wide-character buffer that receives the description. If
Pointer to the required buffer length in wide characters, including the null terminating character.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets properties on the color converter DSP.
+Specifies whether the input video stream is interlaced. +
+Specifies one of the following values.
Value | Meaning |
---|---|
| Progressive video |
| Interlaced video |
?
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
By default, the color converter uses the input media type to determine whether the video is interlaced. You can call this method to override the media type setting.
This method is equivalent to setting the MFPKEY_COLORCONV_MODE property.
+Sets the source and destination rectangles. +
+Specifies the left edge of the source rectangle, in pixels.
Specifies the top edge of the source rectangle, in pixels.
Specifies the left edge of the destination rectangle, in pixels.
Specifies the top edge of the destination rectangle, in pixels.
Specifies the width of the source and destination rectangles, in pixels.
Specifies the height of the source and destination rectangles, in pixels.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
By default, the color converter copies the entire video frame. When you call this method, the color converter crops the video to the source rectangle and copies that portion to the destination rectangle.
This method is equivalent to setting the following properties:
Specifies whether the input video stream is interlaced. +
+By default, the color converter uses the input media type to determine whether the video is interlaced. You can call this method to override the media type setting.
This method is equivalent to setting the MFPKEY_COLORCONV_MODE property.
+The following code examples show how to read text from a text file. The second example notifies you when the end of the file is detected. This functionality can also be achieved by using the ReadAllLines or ReadAllText methods.
Example Imports System + Imports System.IO Class Test Public Shared Sub Main() Try ' Create an instance of StreamReader to read from a file. Using sr As StreamReader = New StreamReader("TestFile.txt") Dim line As String ' Read and display the lines from the file until the end ' of the file is reached. Do line = sr.ReadLine() Console.WriteLine(Line) Loop Until line Is Nothing sr.Close() End Using Catch E As Exception ' Let the user know what went wrong. Console.WriteLine("The file could not be read:") Console.WriteLine(E.Message) End Try End Sub + End Class using System; + using System.IO; class Test + { public static void Main() { try { // Create an instance of StreamReader to read from a file. // The using statement also closes the StreamReader. using (StreamReader sr = new StreamReader("TestFile.txt")) { String line; // Read and display lines from the file until the end of // the file is reached. while ((line = sr.ReadLine()) != null) { Console.WriteLine(line); } } } catch (Exception e) { // Let the user know what went wrong. 
Console.WriteLine("The file could not be read:"); Console.WriteLine(e.Message); } } + } Option Explicit On + Option Strict On + Imports System + Imports System.IO + Public Class TextFromFile Private Const FILE_NAME As String = "MyFile.txt" Public Shared Sub Main() If Not File.Exists(FILE_NAME) Then Console.WriteLine("{0} does not exist.", FILE_NAME) Return End If Using sr As StreamReader = File.OpenText(FILE_NAME) Dim input As String input = sr.ReadLine() While Not input Is Nothing Console.WriteLine(input) input = sr.ReadLine() End While Console.WriteLine("The end of the stream has been reached.") sr.Close() End Using End Sub + End Class using System; + using System.IO; + public class TextFromFile + { private const string FILE_NAME = "MyFile.txt"; public static void Main(String[] args) { if (!File.Exists(FILE_NAME)) { Console.WriteLine("{0} does not exist.", FILE_NAME); return; } using (StreamReader sr = File.OpenText(FILE_NAME)) { String input; while ((input=sr.ReadLine())!=null) { Console.WriteLine(input); } Console.WriteLine ("The end of the stream has been reached."); sr.Close(); } } Robust ProgrammingThis code creates a StreamReader that points to MyFile.txt through a call to File.OpenText. StreamReader.ReadLine returns each line as a string. When there are no more characters to read, a message is displayed to that effect, and the stream is closed.
Note |
---|
Visual Basic users may choose to use the methods and properties provided by the My.Computer.FileSystem object for file I/O. For more information, see My.Computer.FileSystem Object. |
The following code examples show how to write text to a text file.
The first example shows how to add text to an existing file. The second example shows how to create a new text file and write a string to it. Similar functionality can be provided by the WriteAllText methods.
Note |
---|
Visual Basic users may choose to use the methods and properties provided by the My.Computer.FileSystem object for file I/O. For more information, see My.Computer.FileSystem Object. |
The point relative to origin from which to begin seeking.
Specifies the beginning, the end, or the current position as a reference point for origin, using a value of type SeekOrigin.
Exception type | Condition |
---|---|
IOException | An I/O error occurs. |
NotSupportedException | The stream does not support seeking, such as if the FileStream is constructed from a pipe or console output. |
ArgumentException | Attempted seeking before the beginning of the stream. |
ObjectDisposedException | Methods were called after the stream was closed. |
This method overrides Seek.
Note: |
---|
Use the CanSeek property to determine whether the current instance supports seeking. For additional information, see CanSeek. |
Seeking to any location beyond the length of the stream is supported. When you seek beyond the length of the file, the file size grows. In Microsoft Windows NT and greater, any data added to the end of the file is set to zero. In Microsoft Windows 98 or earlier, any data added to the end of the file is not set to zero, which means that previously deleted data is visible to the stream.
For a list of common I/O tasks, see Common I/O Tasks.
The following code example shows how to write data to a file, byte by byte, and then verify that the data was written correctly.
Imports Microsoft.VisualBasic + Imports System + Imports System.IO + Imports System.Text Class FStream Shared Sub Main() Const fileName As String = "Test#@@#.dat" ' Create random data to write to the file. Dim dataArray(100000) As Byte Dim randomGenerator As New Random() randomGenerator.NextBytes(dataArray) Dim fileStream As FileStream = _ new FileStream(fileName, FileMode.Create) Try ' Write the data to the file, byte by byte. For i As Integer = 0 To dataArray.Length - 1 fileStream.WriteByte(dataArray(i)) Next i ' Set the stream position to the beginning of the stream. fileStream.Seek(0, SeekOrigin.Begin) ' Read and verify the data. For i As Integer = 0 To _ CType(fileStream.Length, Integer) - 1 If dataArray(i) <> fileStream.ReadByte() Then Console.WriteLine("Error writing data.") Return End If Next i Console.WriteLine("The data was written to {0} " & _ "and verified.", fileStream.Name) Finally fileStream.Close() End Try End Sub + End Class + using System; + using System.IO; class FStream + { static void Main() { const string fileName = "Test#@@#.dat"; // Create random data to write to the file. byte[] dataArray = new byte[100000]; new Random().NextBytes(dataArray); using(FileStream fileStream = new FileStream(fileName, FileMode.Create)) { // Write the data to the file, byte by byte. for(int i = 0; i < dataArray.Length; i++) { fileStream.WriteByte(dataArray[i]); } // Set the stream position to the beginning of the file. fileStream.Seek(0, SeekOrigin.Begin); // Read and verify the data. for(int i = 0; i < fileStream.Length; i++) { if(dataArray[i] != fileStream.ReadByte()) { Console.WriteLine("Error writing data."); return; } } Console.WriteLine("The data was written to {0} " + "and verified.", fileStream.Name); } } + } + using namespace System; + using namespace System::IO; + int main() + { String^ fileName = "Test@##@.dat"; // Create random data to write to the file. 
array<Byte>^dataArray = gcnew array<Byte>(100000); (gcnew Random)->NextBytes( dataArray ); FileStream^ fileStream = gcnew FileStream( fileName,FileMode::Create ); try { // Write the data to the file, byte by byte. for ( int i = 0; i < dataArray->Length; i++ ) { fileStream->WriteByte( dataArray[ i ] ); } // Set the stream position to the beginning of the file. fileStream->Seek( 0, SeekOrigin::Begin ); // Read and verify the data. for ( int i = 0; i < fileStream->Length; i++ ) { if ( dataArray[ i ] != fileStream->ReadByte() ) { Console::WriteLine( "Error writing data." ); return -1; } } Console::WriteLine( "The data was written to {0} " "and verified.", fileStream->Name ); } finally { fileStream->Close(); } } import System.*; + import System.IO.*; class FStream + { public static void main(String[] args) { final String fileName = "Test#@@#.dat"; // Create random data to write to the file. ubyte dataArray[] = new ubyte[100000]; new Random().NextBytes(dataArray); FileStream fileStream = new FileStream(fileName, FileMode.Create); try { // Write the data to the file, byte by byte. for(int i=0;i < dataArray.length;i++) { fileStream.WriteByte(dataArray[i]); } // Set the stream position to the beginning of the file. fileStream.Seek(0, SeekOrigin.Begin); // Read and verify the data. for(int i=0;i < fileStream.get_Length();i++) { if ( dataArray[i] != fileStream.ReadByte() ) { Console.WriteLine("Error writing data."); return; } } Console.WriteLine("The data was written to {0} " + "and verified.", fileStream.get_Name()); } finally { fileStream.Dispose(); } } //main + } //FStream +Windows 98, Windows Server 2000 SP4, Windows CE, Windows Millennium Edition, Windows Mobile for Pocket PC, Windows Mobile for Smartphone, Windows Server 2003, Windows XP Media Center Edition, Windows XP Professional x64 Edition, Windows XP SP2, Windows XP Starter Edition
The Microsoft .NET Framework 3.0 is supported on Windows Vista, Microsoft Windows XP SP2, and Windows Server 2003 SP1. .NET FrameworkSupported in: 3.0, 2.0, 1.1, 1.0.NET Compact FrameworkSupported in: 2.0, 1.0XNA FrameworkSupported in: 1.0ReferenceFileStream ClassFileStream MembersSystem.IO NamespaceOther ResourcesFile and Stream I/OReading Text from a FileWriting Text to a File +The point relative to origin from which to begin seeking.
Use the File class for typical operations such as copying, moving, renaming, creating, opening, deleting, and appending to files. You can also use the File class to get and set file attributes or DateTime information related to the creation, access, and writing of a file.
Many of the File methods return other I/O types when you create or open files. You can use these other types to further manipulate a file. For more information, see specific File members such as OpenText, CreateText, or Create.
Because all File methods are static, it might be more efficient to use a File method rather than a corresponding FileInfo instance method if you want to perform only one action. All File methods require the path to the file that you are manipulating.
The static methods of the File class perform security checks on all methods. If you are going to reuse an object several times, consider using the corresponding instance method of FileInfo instead, because the security check will not always be necessary.
By default, full read/write access to new files is granted to all users.
The following table describes the enumerations that are used to customize the behavior of various File methods.
Enumeration | Description |
---|---|
FileAccess | Specifies read and write access to a file. |
FileShare | Specifies the level of access permitted for a file that is already in use. |
FileMode | Specifies whether the contents of an existing file are preserved or overwritten, and whether requests to create an existing file cause an exception. |
Note: |
---|
In members that accept a path as an input string, that path must be well-formed or an exception is raised. For example, if a path is fully qualified but begins with a space, the path is not trimmed in methods of the class. Therefore, the path is malformed and an exception is raised. Similarly, a path or a combination of paths cannot be fully qualified twice. For example, "c:\temp c:\windows" also raises an exception in most cases. Ensure that your paths are well-formed when using methods that accept a path string. |
In members that accept a path, the path can refer to a file or just a directory. The specified path can also refer to a relative path or a Universal Naming Convention (UNC) path for a server and share name. For example, all the following are acceptable paths:
"c:\\MyDir\\MyFile.txt" in C#, or "c:\MyDir\MyFile.txt" in Visual Basic.
"c:\\MyDir" in C#, or "c:\MyDir" in Visual Basic.
"MyDir\\MySubdir" in C#, or "MyDir\MySubDir" in Visual Basic.
"\\\\MyServer\\MyShare" in C#, or "\\MyServer\MyShare" in Visual Basic.
For a list of common I/O tasks, see Common I/O Tasks.
The following example demonstrates some of the main members of the File class.
Imports System + Imports System.IO Public Class Test Public Shared Sub Main() Dim path As String = "c:\temp\MyTest.txt" If File.Exists(path) = False Then ' Create a file to write to. Dim sw As StreamWriter = File.CreateText(path) sw.WriteLine("Hello") sw.WriteLine("And") sw.WriteLine("Welcome") sw.Flush() sw.Close() End If Try ' Open the file to read from. Dim sr As StreamReader = File.OpenText(path) Do While sr.Peek() >= 0 Console.WriteLine(sr.ReadLine()) Loop sr.Close() Dim path2 As String = path + "temp" ' Ensure that the target does not exist. File.Delete(path2) ' Copy the file. File.Copy(path, path2) Console.WriteLine("{0} was copied to {1}.", path, path2) ' Delete the newly created file. File.Delete(path2) Console.WriteLine("{0} was successfully deleted.", path2) Catch e As Exception Console.WriteLine("The process failed: {0}", e.ToString()) End Try End Sub + End Class + using System; + using System.IO; class Test + { public static void Main() { string path = @"c:\temp\MyTest.txt"; if (!File.Exists(path)) { // Create a file to write to. using (StreamWriter sw = File.CreateText(path)) { sw.WriteLine("Hello"); sw.WriteLine("And"); sw.WriteLine("Welcome"); } } // Open the file to read from. using (StreamReader sr = File.OpenText(path)) { string s = ""; while ((s = sr.ReadLine()) != null) { Console.WriteLine(s); } } try { string path2 = path + "temp"; // Ensure that the target does not exist. File.Delete(path2); // Copy the file. File.Copy(path, path2); Console.WriteLine("{0} was copied to {1}.", path, path2); // Delete the newly created file. File.Delete(path2); Console.WriteLine("{0} was successfully deleted.", path2); } catch (Exception e) { Console.WriteLine("The process failed: {0}", e.ToString()); } } + } + using namespace System; + using namespace System::IO; + int main() + { String^ path = "c:\\temp\\MyTest.txt"; if ( !File::Exists( path ) ) { // Create a file to write to. 
StreamWriter^ sw = File::CreateText( path ); try { sw->WriteLine( "Hello" ); sw->WriteLine( "And" ); sw->WriteLine( "Welcome" ); } finally { if ( sw ) delete (IDisposable^)(sw); } } // Open the file to read from. StreamReader^ sr = File::OpenText( path ); try { String^ s = ""; while ( s = sr->ReadLine() ) { Console::WriteLine( s ); } } finally { if ( sr ) delete (IDisposable^)(sr); } try { String^ path2 = String::Concat( path, "temp" ); // Ensure that the target does not exist. File::Delete( path2 ); // Copy the file. File::Copy( path, path2 ); Console::WriteLine( "{0} was copied to {1}.", path, path2 ); // Delete the newly created file. File::Delete( path2 ); Console::WriteLine( "{0} was successfully deleted.", path2 ); } catch ( Exception^ e ) { Console::WriteLine( "The process failed: {0}", e ); } + } + import System.*; + import System.IO.*; class Test + { public static void main(String[] args) { String path = "c:\\temp\\MyTest.txt"; if (!(File.Exists(path))) { // Create a file to write to. StreamWriter sw = File.CreateText(path); try { sw.WriteLine("Hello"); sw.WriteLine("And"); sw.WriteLine("Welcome"); } finally { sw.Dispose(); } } // Open the file to read from. StreamReader sr = File.OpenText(path); try { String s = ""; while ((s = sr.ReadLine()) != null) { Console.WriteLine(s); } } finally { sr.Dispose(); } try { String path2 = path + "temp"; // Ensure that the target does not exist. File.Delete(path2); // Copy the file. File.Copy(path, path2); Console.WriteLine("{0} was copied to {1}.", path, path2); // Delete the newly created file. File.Delete(path2); Console.WriteLine("{0} was successfully deleted.", path2); } catch (System.Exception e) { Console.WriteLine("The process failed: {0}", e.ToString()); } } //main + } //Test + System.Object ??System.IO.FileAny public static (Shared in Visual Basic) members of this type are thread safe. 
Any instance members are not guaranteed to be thread safe.Windows 98, Windows Server 2000 SP4, Windows CE, Windows Millennium Edition, Windows Mobile for Pocket PC, Windows Mobile for Smartphone, Windows Server 2003, Windows XP Media Center Edition, Windows XP Professional x64 Edition, Windows XP SP2, Windows XP Starter Edition
The Microsoft .NET Framework 3.0 is supported on Windows Vista, Microsoft Windows XP SP2, and Windows Server 2003 SP1. .NET FrameworkSupported in: 3.0, 2.0, 1.1, 1.0.NET Compact FrameworkSupported in: 2.0, 1.0XNA FrameworkSupported in: 1.0ReferenceFile MembersSystem.IO NamespaceDriveInfoOther ResourcesFile and Stream I/OReading Text from a FileWriting Text to a FileBasic File I/OReading and Writing to a Newly Created Data File +Sets properties on the audio resampler DSP.
+Specifies the quality of the output. +
+Specifies the quality of the output. The valid range is 1 to 60, inclusive.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is equivalent to setting the MFPKEY_WMRESAMP_FILTERQUALITY property.
+Specifies the channel matrix. +
+Pointer to an array of floating-point values that represents a channel conversion matrix.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is equivalent to setting the MFPKEY_WMRESAMP_CHANNELMTX property, except that the matrix is represented differently:
To convert from the integer values given in the MFPKEY_WMRESAMP_CHANNELMTX property to floating-point values, use the following formula:
(float)pow(10.0,((double)Coeff)/(65536.0*20.0))
where Coeff is an integer coefficient.
+Specifies the quality of the output. +
+This method is equivalent to setting the MFPKEY_WMRESAMP_FILTERQUALITY property.
+Specifies the channel matrix. +
+This method is equivalent to setting the MFPKEY_WMRESAMP_CHANNELMTX property, except that the matrix is represented differently:
To convert from the integer values given in the MFPKEY_WMRESAMP_CHANNELMTX property to floating-point values, use the following formula:
(float)pow(10.0,((double)Coeff)/(65536.0*20.0))
where Coeff is an integer coefficient.
+Sets properties on the video resizer DSP.
+Specifies whether to use an algorithm that produces higher-quality video, or a faster algorithm. +
+Boolean value. If TRUE, the video resizer uses an algorithm that produces higher-quality video. If FALSE, the video resizer uses a faster algorithm.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is equivalent to setting the MFPKEY_RESIZE_QUALITY property.
+The SetInterlaceMode method specifies whether the input video stream is interlaced. +
+Boolean value. If TRUE, the video is interlaced. If FALSE, the video is progressive.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is equivalent to setting the MFPKEY_RESIZE_INTERLACE property.
+Sets the source rectangle. +
+Specifies the left edge of the source rectangle, in pixels.
Specifies the top edge of the source rectangle, in pixels.
Specifies the width of the source rectangle, in pixels.
Specifies the height of the source rectangle, in pixels.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
By default, the video resizer copies the entire video frame. When you call this method, the video resizer crops the video to the source rectangle and copies that portion to the output buffer.
This method is equivalent to setting the following properties:
Sets the source and destination rectangles. +
+Specifies the left edge of the source rectangle, in pixels.
Specifies the top edge of the source rectangle, in pixels.
Specifies the width of the source rectangle, in pixels.
Specifies the height of the source rectangle, in pixels.
Specifies the left edge of the destination rectangle, in pixels.
Specifies the top edge of the destination rectangle, in pixels.
Specifies the width of the destination rectangle, in pixels.
Specifies the height of the destination rectangle, in pixels.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
By default, the video resizer copies the entire video frame. When you call this method, the video resizer crops the video to the source rectangle and copies that portion to the destination rectangle.
This method is equivalent to setting the following properties:
Retrieves the source and destination rectangles. +
+Receives the left edge of the source rectangle, in pixels.
Receives the top edge of the source rectangle, in pixels.
Receives the width of the source rectangle, in pixels.
Receives the height of the source rectangle, in pixels.
Receives the left edge of the destination rectangle, in pixels.
Receives the top edge of the destination rectangle, in pixels.
Receives the width of the destination rectangle, in pixels.
Receives the height of the destination rectangle, in pixels.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Specifies whether to use an algorithm that produces higher-quality video, or a faster algorithm. +
+This method is equivalent to setting the MFPKEY_RESIZE_QUALITY property.
+The SetInterlaceMode method specifies whether the input video stream is interlaced. +
+This method is equivalent to setting the MFPKEY_RESIZE_INTERLACE property.
+Configures codec support for sample extensions.
+Configures whether the codec supports sample extensions. +
+Flag; a value of true indicates that sample extensions should be used.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Configures whether the codec supports sample extensions. +
+The
The SetDescriptor method associates a descriptor with the table of contents.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetDescriptor method retrieves the descriptor, previously set by SetDescriptor, of the table of contents.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The SetDescription method associates a description with the table of contents.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can use this method to associate any description with the table of contents. TOC parser does not inspect or interpret the description.
+The GetDescription method retrieves the description, set by a previous call to SetDescription, of the table of contents.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method returns this error code if pwszDescription is not |
?
The SetContext method associates a caller-supplied context block with the table of contents.
+The size, in bytes, of the context block.
Pointer to the first byte of the context block.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can use this method to associate any information with the table of contents. The type of information you store in the context block is completely up to you. TOC Parser does not inspect or interpret the context block.
+The GetContext method retrieves a block of bytes that was previously associated with the table of contents by a call to SetContext.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method returns this error code if pbtContext is not |
?
The GetEntryListCount method retrieves the number of entry lists in the table of contents.
+Pointer to a WORD that receives the number of entry lists.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetEntryListByIndex method retrieves an entry list, specified by an index, from the table of contents.
+The index of the entry list to retrieve.
Pointer to a variable that receives a reference to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The AddEntryList method adds an entry list to the table of contents and assigns an index to the entry list.
+Pointer to an
Receives the index of the added entry list.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The AddEntryListByIndex method adds an entry list to the table of contents and associates a caller-supplied index with the entry list.
+The index, specified by the caller, to be associated with the entry list.
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The RemoveEntryListByIndex method removes an entry list, specified by an index, from the table of contents.
+The index of the entry list to be removed.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The
The GetEntryCount method retrieves the number of tables of contents in the collection.
+Pointer to a DWORD that receives the number of tables of contents.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In the context of an
The GetEntryByIndex method retrieves a table of contents, specified by an index, from the collection.
+Specifies the index of the table of contents to retrieve.
Pointer to a variable that receives a reference to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In the context of an
The AddEntry method adds an individual table of contents to the collection and assigns an index to the added table of contents.
+Pointer to an
Pointer to a DWORD that receives the index of the added table of contents.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In the context of an
The AddEntryByIndex adds an individual table of contents to the collection and associates a caller-supplied index with the table of contents.
+The index, specified by the caller, to be associated with the table of contents.
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In the context of an
The RemoveEntryByIndex method removes a table of contents, specified by an index, from the collection.
+Specifies the index of the table of contents to be removed.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In the context of an
The
The SetTitle method sets the title of the entry.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetTitle method retrieves the title, set by a previous call to SetTitle, of the entry.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method returns this error code if pwszTitle is not |
?
The SetDescriptor method associates a descriptor with the entry.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetDescriptor method retrieves the descriptor, previously set by a call to SetDescriptor, of the entry.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The SetSubEntries identifies a set of entries as being subentries of this entry.
+The number of indices in the array pointed to by pwSubEntryIndices.
Pointer to an array of WORDs. Each WORD in the array specifies the index of an entry that is to be considered a subentry of this entry.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetSubEntries method gets an array of subentry indices that were set by a previous call to SetSubEntries.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method returns this error code if pwSubEntryIndices is not |
?
The SetDescriptionData method associates a caller-supplied data block with the entry.
+The size, in bytes, of the data block.
Pointer to the first byte of the data block.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can use this method to associate any information of your choice with the entry. The nature of the information you store in the description data block is completely up to you. TOC Parser does not inspect or interpret the description data block.
You can associate only one description data block with a given entry at a given time. However, you might want to design different types of description data blocks and identify each type of block with a globally unique identifier (
The GetDescriptionData method gets a description data block that was previously associated with the entry by a call to SetDescriptionData.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method returns this error code if pbtDescriptionData is not |
?
You can associate only one description data block with a given entry at a given time. However, you might want to design different types of description data blocks and identify each type of block with a globally unique identifier (
The
The GetEntryCount method retrieves the number of entries in the list.
+Pointer to a DWORD that receives the number of entries.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetEntryByIndex method retrieves an entry, specified by an index, from the list.
+The index of the entry to retrieve.
Pointer to a variable that receives a reference to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The AddEntry method adds an individual entry to the list and assigns an index to the entry.
+Pointer to an
Pointer to a DWORD that receives the index of the added entry.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The AddEntryByIndex method adds an individual entry to the list and associates a caller-supplied index with the entry.
+The index of the entry to be added.
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The RemoveEntryByIndex method removes an entry, specified by an index, from the list.
+The index of the entry to be removed.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The
The Init method initializes the TOC Parser object and associates it with a media file.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The path that you pass in pwszFileName must be a long Universal Naming Convention (UNC) file path. A long UNC file path begins with "\\?\". The following line of code shows how to set the path for the file c:\experiment\seattle.wmv.
pTocParser->Init(L"\\\\?\\c:\\experiment\\seattle.wmv");
The GetTocCount method retrieves the number of tables of contents, of a specified position type, in the TOC Parser object.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetTocByIndex method retrieves a table of contents, specified by an index, from the TOC Parser object.
+A member of the
The index of the table of contents to be retrieved.
Pointer to a variable that receives a reference to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The GetTocByType retrieves all tables of contents of a specified type from the TOC Parser object.
+A member of the
A globally unique identifier (
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You might want to design several different types of tables of contents. In that case, you can distinguish between types by creating a
The AddToc method adds a table of contents to the TOC Parser object and assigns an index to the added table of contents.
+A member of the
Pointer to an
Pointer to a DWORD that receives the index of the added table of contents.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The RemoveTocByIndex method removes a table of contents, specified by an index, from the TOC Parser object.
+A member of the
The index of the table of contents to be removed.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The RemoveTocByType method removes all tables of contents of a specified type from the TOC Parser object.
+A member of the
A globally unique identifier (
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You might want to design several different types of tables of contents. In that case, you can distinguish between types by creating a
The Commit method stores the current state of the TOC Parser object in its associated media file.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can associate a TOC Parser object with a media file by calling
Calling the Invalidate method does not force a synchronous paint; to force a synchronous paint, call the Update method after calling the Invalidate method. When this method is called with no parameters, the entire client area is added to the update region.
The following code example enables the user to drag an image or image file onto the form, and have it be displayed at the point on the form where it is dropped. The OnPaint method is overridden to repaint the image each time the form is painted; otherwise the image would only persist until the next repainting. The DragEnter event-handling method determines the type of data being dragged into the form and provides the appropriate feedback. The DragDrop event-handling method displays the image on the form, if an Image can be created from the data. Because the DragEventArgs.X and DragEventArgs.Y values are screen coordinates, the example uses the PointToClient method to convert them to client coordinates.
Private picture As Image + Private pictureLocation As Point Public Sub New() ' Enable drag-and-drop operations. Me.AllowDrop = True + End Sub Protected Overrides Sub OnPaint(ByVal e As PaintEventArgs) MyBase.OnPaint(e) ' If there is an image and it has a location, ' paint it when the Form is repainted. If (Me.picture IsNot Nothing) And _ Not (Me.pictureLocation.Equals(Point.Empty)) Then e.Graphics.DrawImage(Me.picture, Me.pictureLocation) End If + End Sub Private Sub Form1_DragDrop(ByVal sender As Object, _ ByVal e As DragEventArgs) Handles MyBase.DragDrop ' Handle FileDrop data. If e.Data.GetDataPresent(DataFormats.FileDrop) Then ' Assign the file names to a string array, in ' case the user has selected multiple files. Dim files As String() = CType(e.Data.GetData(DataFormats.FileDrop), String()) Try ' Assign the first image to the 'picture' variable. Me.picture = Image.FromFile(files(0)) ' Set the picture location equal to the drop point. Me.pictureLocation = Me.PointToClient(New Point(e.X, e.Y)) Catch ex As Exception MessageBox.Show(ex.Message) Return End Try End If ' Handle Bitmap data. If e.Data.GetDataPresent(DataFormats.Bitmap) Then Try ' Create an Image and assign it to the picture variable. Me.picture = CType(e.Data.GetData(DataFormats.Bitmap), Image) ' Set the picture location equal to the drop point. Me.pictureLocation = Me.PointToClient(New Point(e.X, e.Y)) Catch ex As Exception MessageBox.Show(ex.Message) Return End Try End If ' Force the form to be redrawn with the image. Me.Invalidate() + End Sub Private Sub Form1_DragEnter(ByVal sender As Object, _ ByVal e As DragEventArgs) Handles MyBase.DragEnter ' If the data is a file or a bitmap, display the copy cursor. 
If e.Data.GetDataPresent(DataFormats.Bitmap) _ Or e.Data.GetDataPresent(DataFormats.FileDrop) Then e.Effect = DragDropEffects.Copy Else e.Effect = DragDropEffects.None End If + End Sub + private Image picture; + private Point pictureLocation; public Form1() + { // Enable drag-and-drop operations and // add handlers for DragEnter and DragDrop. this.AllowDrop = true; this.DragDrop += new DragEventHandler(this.Form1_DragDrop); this.DragEnter += new DragEventHandler(this.Form1_DragEnter); + } protected override void OnPaint(PaintEventArgs e) + { // If there is an image and it has a location, // paint it when the Form is repainted. base.OnPaint(e); if(this.picture != null && this.pictureLocation != Point.Empty) { e.Graphics.DrawImage(this.picture, this.pictureLocation); } + } private void Form1_DragDrop(object sender, DragEventArgs e) + { // Handle FileDrop data. if(e.Data.GetDataPresent(DataFormats.FileDrop) ) { // Assign the file names to a string array, in // case the user has selected multiple files. string[] files = (string[])e.Data.GetData(DataFormats.FileDrop); try { // Assign the first image to the picture variable. this.picture = Image.FromFile(files[0]); // Set the picture location equal to the drop point. this.pictureLocation = this.PointToClient(new Point(e.X, e.Y) ); } catch(Exception ex) { MessageBox.Show(ex.Message); return; } } // Handle Bitmap data. if(e.Data.GetDataPresent(DataFormats.Bitmap) ) { try { // Create an Image and assign it to the picture variable. this.picture = (Image)e.Data.GetData(DataFormats.Bitmap); // Set the picture location equal to the drop point. this.pictureLocation = this.PointToClient(new Point(e.X, e.Y) ); } catch(Exception ex) { MessageBox.Show(ex.Message); return; } } // Force the form to be redrawn with the image. this.Invalidate(); + } private void Form1_DragEnter(object sender, DragEventArgs e) + { // If the data is a file or a bitmap, display the copy cursor. 
if (e.Data.GetDataPresent(DataFormats.Bitmap) || e.Data.GetDataPresent(DataFormats.FileDrop) ) { e.Effect = DragDropEffects.Copy; } else { e.Effect = DragDropEffects.None; } + } + private: Image^ picture; Point pictureLocation; public: Form1() { // Enable drag-and-drop operations and // add handlers for DragEnter and DragDrop. this->AllowDrop = true; this->DragDrop += gcnew DragEventHandler( this, &Form1::Form1_DragDrop ); this->DragEnter += gcnew DragEventHandler( this, &Form1::Form1_DragEnter ); } protected: virtual void OnPaint( PaintEventArgs^ e ) override { // If there is an image and it has a location, // paint it when the Form is repainted. Form::OnPaint( e ); if ( this->picture != nullptr && this->pictureLocation != Point::Empty ) { e->Graphics->DrawImage( this->picture, this->pictureLocation ); } } private: void Form1_DragDrop( Object^ /*sender*/, DragEventArgs^ e ) { // Handle FileDrop data. if ( e->Data->GetDataPresent( DataFormats::FileDrop ) ) { // Assign the file names to a String* array, in // case the user has selected multiple files. array<String^>^files = (array<String^>^)e->Data->GetData( DataFormats::FileDrop ); try { // Assign the first image to the picture variable. this->picture = Image::FromFile( files[ 0 ] ); // Set the picture location equal to the drop point. this->pictureLocation = this->PointToClient( Point(e->X,e->Y) ); } catch ( Exception^ ex ) { MessageBox::Show( ex->Message ); return; } } // Handle Bitmap data. if ( e->Data->GetDataPresent( DataFormats::Bitmap ) ) { try { // Create an Image and assign it to the picture variable. this->picture = dynamic_cast<Image^>(e->Data->GetData( DataFormats::Bitmap )); // Set the picture location equal to the drop point. this->pictureLocation = this->PointToClient( Point(e->X,e->Y) ); } catch ( Exception^ ex ) { MessageBox::Show( ex->Message ); return; } } // Force the form to be redrawn with the image. 
this->Invalidate(); } void Form1_DragEnter( Object^ /*sender*/, DragEventArgs^ e ) { // If the data is a file or a bitmap, display the copy cursor. if ( e->Data->GetDataPresent( DataFormats::Bitmap ) || e->Data->GetDataPresent( DataFormats::FileDrop ) ) { e->Effect = DragDropEffects::Copy; } else { e->Effect = DragDropEffects::None; } } + private Image picture; + private Point pictureLocation; public Form1() + { // Enable drag-and-drop operations and // add handlers for DragEnter and DragDrop. this.set_AllowDrop(true); this.add_DragDrop(new DragEventHandler(this.Form1_DragDrop)); this.add_DragEnter(new DragEventHandler(this.Form1_DragEnter)); + } //Form1 protected void OnPaint(PaintEventArgs e) + { // If there is an image and it has a location, // paint it when the Form is repainted. super.OnPaint(e); if (this.picture != null && !this.pictureLocation.Equals(Point.Empty)) { e.get_Graphics().DrawImage(this.picture, this.pictureLocation); } + } //OnPaint private void Form1_DragDrop(Object sender, DragEventArgs e) + { // Handle FileDrop data. if (e.get_Data().GetDataPresent(DataFormats.FileDrop)) { // Assign the file names to a string array, in // case the user has selected multiple files. String files[] = (String[])(e.get_Data().GetData( DataFormats.FileDrop)); try { // Assign the first image to the picture variable. this.picture = Image.FromFile(files.toString()); // Set the picture location equal to the drop point. this.pictureLocation = this.PointToClient(new Point(e.get_X(), e.get_Y())); } catch (System.Exception ex) { MessageBox.Show(ex.get_Message()); return; } } // Handle Bitmap data. if (e.get_Data().GetDataPresent(DataFormats.Bitmap)) { try { // Create an Image and assign it to the picture variable. this.picture = (Image)e.get_Data().GetData(DataFormats.Bitmap); // Set the picture location equal to the drop point. 
this.pictureLocation = this.PointToClient(new Point(e.get_X(), e.get_Y())); } catch (System.Exception ex) { MessageBox.Show(ex.get_Message()); return; } } // Force the form to be redrawn with the image. this.Invalidate(); + } //Form1_DragDrop private void Form1_DragEnter(Object sender, DragEventArgs e) + { // If the data is a file or a bitmap, display the copy cursor. if (e.get_Data().GetDataPresent(DataFormats.Bitmap) || e.get_Data(). GetDataPresent(DataFormats.FileDrop)) { e.set_Effect(DragDropEffects.Copy); } else { e.set_Effect(DragDropEffects.None); } + } //Form1_DragEnter +Windows 98, Windows Server 2000 SP4, Windows CE, Windows Millennium Edition, Windows Mobile for Pocket PC, Windows Mobile for Smartphone, Windows Server 2003, Windows XP Media Center Edition, Windows XP Professional x64 Edition, Windows XP SP2, Windows XP Starter Edition
The Microsoft .NET Framework 3.0 is supported on Windows Vista, Microsoft Windows XP SP2, and Windows Server 2003 SP1. .NET FrameworkSupported in: 3.0, 2.0, 1.1, 1.0.NET Compact FrameworkSupported in: 2.0, 1.0ReferenceControl ClassControl MembersSystem.Windows.Forms NamespaceRefreshUpdate +
Controls the speed of the video decoder.
This interface is implemented by the video decoder objects. You can obtain a reference to
Sets the speed mode of the video decoder. +
+The speed mode of the video decoder.
Value | Meaning |
---|---|
| The decoder will determine the decoding speed. |
| The decoder will decode in real time. |
| The decoder will decode faster than real time. The higher the value, the faster the decoding. |
?
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the current speed mode of the video decoder. +
+Address of a variable that receives the decoder speed mode.
Value | Meaning |
---|---|
| The decoder will determine the decoding speed. |
| The decoder will decode in real time. |
| The decoder will decode faster than real time. |
?
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Note: This interface is obsolete and should not be used.
Manages reconstructed video frames.
+
Note: This method is obsolete and should not be used.
Retrieves the size of the current reconstructed video frame.
+Address of a variable that receives the frame size in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Note: This method is obsolete and should not be used.
Retrieves the current reconstructed video frame.
+Address of a media buffer that receives the reconstructed video frame.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Note: This method is obsolete and should not be used.
Restores the current reconstructed video frame.
+Address of an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Note: This method is obsolete and should not be used.
Restores the current reconstructed video frame.
+Forces the encoder to encode the current frame as a key frame.
+Specifies that the current frame will be encoded as a key frame.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To force the encoder to make the current frame a key frame, call this method before calling
Each call to this method applies to a single frame. After processing this frame, the encoder resumes automatically assigning key frames (bounded by the maximum key frame distance).
+Contains quality metrics for acoustic echo cancellation (AEC). This structure is used with the MFPKEY_WMAAECMA_RETRIEVE_TS_STATS property. +
+The
You might want to design several different types of tables of contents. In that case, you can distinguish between types by creating a
A globally unique identifier (
Not used.
A globally unique identifier (
An integer that identifies the language of a table of contents. This index has meaning only to you, the developer. TOC Parser does not inspect or interpret this index.
The
The start time, in 100-nanosecond units, of the portion of a media file represented by an entry in a table of contents.
The end time, in 100-nanosecond units, of the portion of a media file represented by an entry in a table of contents.
Not used.
Not used.
The presentation time, in 100-nanosecond units, of a frame that is a good representation of the entry. This frame could be used for a thumbnail image that represents the entry.
Specifies how the output alpha values are calculated for Microsoft DirectX Video Acceleration High Definition (DXVA-HD) blit operations.
+The Mode member of the
To find out which modes the device supports, call the
Alpha values inside the target rectangle are set to opaque.
Alpha values inside the target rectangle are set to the alpha value specified in the background color. See
Existing alpha values remain unchanged in the output surface.
Alpha values from the input stream are scaled and copied to the corresponding destination rectangle for that stream. If the input stream does not have alpha data, the DXVA-HD device sets the alpha values in the target rectangle to an opaque value. If the input stream is disabled or the source rectangle is empty, the alpha values in the target rectangle are not modified.
Specifies state parameters for blit operations when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
To set a state parameter, call the
Defines video processing capabilities for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The device can blend video content in linear color space. Most video content is gamma corrected, resulting in nonlinear values. If the DXVA-HD device sets this flag, it means the device converts colors to linear space before blending, which produces better results. +
The device supports the xvYCC color space for YCbCr data.
The device can perform range conversion when the input and output are both RGB but use different color ranges (0-255 or 16-235, for 8-bit RGB).
The device can apply a matrix conversion to YCbCr values when the input and output are both YCbCr. For example, the driver can convert colors from BT.601 to BT.709.
Specifies the type of Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+Hardware device. Video processing is performed in the GPU by the driver.
Software device. Video processing is performed in the CPU by a software plug-in.
Reference device. Video processing is performed in the CPU by a software plug-in.
Other. The device is neither a hardware device nor a software plug-in.
Specifies the intended use for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The graphics driver uses one of these enumeration constants as a hint when it creates the DXVA-HD device.
+Normal video playback. The graphics driver should expose a set of capabilities that are appropriate for real-time video playback.
Optimal speed. The graphics driver should expose a minimal set of capabilities that are optimized for performance.
Use this setting if you want better performance and can accept some reduction in video quality. For example, you might use this setting in power-saving mode or to play video thumbnails.
Optimal quality. The graphics driver should expose its maximum set of capabilities.
Specify this setting to get the best video quality possible. It is appropriate for tasks such as video editing, when quality is more important than speed. It is not appropriate for real-time playback.
Defines features that a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device can support.
+The device can set the alpha values on the video output. See
The device can downsample the video output. See
The device can perform luma keying. See
The device can apply alpha values from color palette entries. See
Defines the range of supported values for an image filter.
+The multiplier enables the filter range to have a fractional step value.
For example, a hue filter might have an actual range of [-180.0 ... +180.0] with a step size of 0.25. The device would report the following range and multiplier:
In this case, a filter value of 2 would be interpreted by the device as 0.50 (or 2 × 0.25).
The device should use a multiplier that can be represented exactly as a base-2 fraction.
+The minimum value of the filter.
The maximum value of the filter.
The default value of the filter.
A multiplier. Use the following formula to translate the filter setting into the actual filter value: Actual Value = Set Value × Multiplier.
Defines capabilities related to image adjustment and filtering for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The device can adjust the brightness level.
The device can adjust the contrast level.
The device can adjust hue.
The device can adjust the saturation level.
The device can perform noise reduction.
The device can perform edge enhancement.
The device can perform anamorphic scaling. Anamorphic scaling can be used to stretch 4:3 content to a widescreen 16:9 aspect ratio.
Describes how a video stream is interlaced.
+Frames are progressive.
Frames are interlaced. The top field of each frame is displayed first.
Frames are interlaced. The bottom field of each frame is displayed first.
Defines capabilities related to input formats for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+These flags define video processing capabilities that are usually not needed, and therefore are not required for DXVA-HD devices to support.
The first three flags relate to RGB support for functions that are normally applied to YCbCr video: deinterlacing, color adjustment, and luma keying. A DXVA-HD device that supports these functions for YCbCr is not required to support them for RGB input. Supporting RGB input for these functions is an additional capability, reflected by these constants. The driver might convert the input to another color space, perform the indicated function, and then convert the result back to RGB.
Similarly, a device that supports de-interlacing is not required to support deinterlacing of palettized formats. This capability is indicated by the
The device can deinterlace an input stream that contains interlaced RGB video.
The device can perform color adjustment on RGB video.
The device can perform luma keying on RGB video.
The device can deinterlace input streams with palettized color formats.
Specifies the inverse telecine (IVTC) capabilities of a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor.
+The video processor can reverse 3:2 pulldown.
The video processor can reverse 2:2 pulldown.
The video processor can reverse 2:2:2:4 pulldown.
The video processor can reverse 2:3:3:2 pulldown.
The video processor can reverse 3:2:3:2:2 pulldown.
The video processor can reverse 5:5 pulldown.
The video processor can reverse 6:4 pulldown.
The video processor can reverse 8:7 pulldown.
The video processor can reverse 2:2:2:2:2:2:2:2:2:2:2:3 pulldown.
The video processor can reverse other telecine modes not listed here.
Describes how to map color data to a normalized [0...1] range.
These flags are used in the
For YUV colors, these flags specify how to convert between Y'CbCr and Y'PbPr. The Y'PbPr color space has a range of [0..1] for Y' (luma) and [-0.5...0.5] for Pb/Pr (chroma).
Value | Description |
---|---|
Should not be used for YUV data. | |
For 8-bit Y'CbCr components:
For samples with n bits of precision, the general equations are:
The inverse equations to convert from Y'CbCr to Y'PbPr are:
| |
For 8-bit Y'CbCr values, Y' range of [0..1] maps to [48...208]. |
?
For RGB colors, the flags differentiate various RGB spaces.
Value | Description |
---|---|
sRGB | |
Studio RGB; ITU-R BT.709 | |
ITU-R BT.1361 RGB |
?
Video data might contain values above or below the nominal range.
Note: The values named
This enumeration is equivalent to the DXVA_NominalRange enumeration used in DXVA 1.0, although it defines additional values.
If you are using the
Specifies the output frame rates for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
This enumeration type is used in the
Specifies the processing capabilities of a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor.
+The video processor can perform blend deinterlacing.
In blend deinterlacing, the two fields from an interlaced frame are blended into a single progressive frame. A video processor uses blend deinterlacing when it deinterlaces at half rate, as when converting 60i to 30p. Blend deinterlacing does not require reference frames.
The video processor can perform bob deinterlacing.
In bob deinterlacing, missing field lines are interpolated from the lines above and below. Bob deinterlacing does not require reference frames.
The video processor can perform adaptive deinterlacing.
Adaptive deinterlacing uses spatial or temporal interpolation, and switches between the two on a field-by-field basis, depending on the amount of motion. If the video processor does not receive enough reference frames to perform adaptive deinterlacing, it falls back to bob deinterlacing.
The video processor can perform motion-compensated deinterlacing.
Motion-compensated deinterlacing uses motion vectors to recreate missing lines. If the video processor does not receive enough reference frames to perform motion-compensated deinterlacing, it falls back to bob deinterlacing.
The video processor can perform inverse telecine (IVTC).
If the video processor supports this capability, the ITelecineCaps member of the
The video processor can convert the frame rate by interpolating frames.
Describes the content of a video sample. These flags are used in the
This enumeration is equivalent to the DXVA_SampleFormat enumeration used in DXVA 1.0.
The following table shows the mapping from
No exact match. Use |
?
With the exception of
The value
Specifies the luma key for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+To use this state, the device must support luma keying, indicated by the
If the device does not support luma keying, the
If the input format is RGB, the device must also support the
The values of Lower and Upper give the lower and upper bounds of the luma key, using a nominal range of [0...1]. Given a format with n bits per channel, these values are converted to luma values as follows:
val = f * ((1 << n)-1)
Any pixel whose luma value falls within the upper and lower bounds (inclusive) is treated as transparent.
For example, if the pixel format uses 8-bit luma, the upper bound is calculated as follows:
BYTE Y = BYTE(max(min(1.0, Upper), 0.0) * 255.0)
Note that the value is clamped to the range [0...1] before multiplying by 255.
+ If TRUE, luma keying is enabled. Otherwise, luma keying is disabled. The default value is
The lower bound for the luma key. The range is [0...1]. The default state value is 0.0.
The upper bound for the luma key. The range is [0...1]. The default state value is 0.0.
Describes a DirectX surface type for DirectX Video Acceleration (DXVA).
+The surface is a decoder render target.
The surface is a video processor render target.
The surface is a Direct3D texture render target.
Specifies the type of video surface created by a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+If the DXVA-HD device is a software plug-in and the surface type is
A surface for an input stream. This surface type is equivalent to an off-screen plain surface in Microsoft Direct3D. The application can use the surface in Direct3D calls.
A private surface for an input stream. This surface type is equivalent to an off-screen plain surface, except that the application cannot use the surface in Direct3D calls.
A surface for an output stream. This surface type is equivalent to an off-screen plain surface in Direct3D. The application can use the surface in Direct3D calls.
This surface type is recommended for video processing applications that need to lock the surface and access the surface memory. For video playback with optimal performance, a render-target surface or swap chain is recommended instead.
Describes how chroma values are positioned relative to the luma samples in a YUV video frame. These flags are used in the
The following diagrams show the most common arrangements.
+Describes the intended lighting conditions for viewing video content. These flags are used in the
This enumeration is equivalent to the DXVA_VideoLighting enumeration used in DXVA 1.0.
If you are using the
Specifies the color primaries of a video source. These flags are used in the
Color primaries define how to convert RGB colors into the CIE XYZ color space, and can be used to translate colors between different RGB color spaces. An RGB color space is defined by the chromaticity coordinates (x,y) of the RGB primaries plus the white point, as listed in the following table.
Color space | (Rx, Ry) | (Gx, Gy) | (Bx, By) | White point (Wx, Wy) |
---|---|---|---|---|
BT.709 | (0.64, 0.33) | (0.30, 0.60) | (0.15, 0.06) | D65 (0.3127, 0.3290) |
BT.470-2 System M; EBU 3212 | (0.64, 0.33) | (0.29, 0.60) | (0.15, 0.06) | D65 (0.3127, 0.3290) |
BT.470-4 System B,G | (0.67, 0.33) | (0.21, 0.71) | (0.14, 0.08) | CIE III.C (0.310, 0.316) |
SMPTE 170M; SMPTE 240M; SMPTE C | (0.63, 0.34) | (0.31, 0.595) | (0.155, 0.07) | D65 (0.3127, 0.3290) |
?
The z coordinates can be derived from x and y as follows: z = 1 - x - y. To convert between RGB colors to CIE XYZ tristimulus values, compute a matrix T as follows:
Given T, you can use the following formulas to convert between an RGB color value and a CIE XYZ tristimulus value. These formulas assume that the RGB components are linear (not gamma corrected) and are normalized to the range [0...1].
To convert colors directly from one RGB color space to another, use the following formula, where T1 is the matrix for color space RGB1, and T2 is the matrix for color space RGB2.
For a derivation of these formulas, refer to Charles Poynton, Digital Video and HDTV Algorithms and Interfaces (Morgan Kaufmann, 2003).
This enumeration is equivalent to the DXVA_VideoPrimaries enumeration used in DXVA 1.0.
If you are using the
Specifies the conversion function from linear RGB to non-linear RGB (R'G'B'). These flags are used in the
The following table shows the formulas for the most common transfer functions. In these formulas, L is the linear value and L' is the non-linear (gamma corrected) value. These values are relative to a normalized range [0...1].
Color space | Transfer function |
---|---|
sRGB (8-bit) | L' = 12.92L, for L < 0.031308 L' = 1.055L^1/2.4 − 0.055, for L >= 0.031308 |
BT.470-2 System B, G | L' = L^0.36 |
BT.470-2 System M | L' = L^0.45 |
BT.709 | L' = 4.50L, for L < 0.018 L' = 1.099L^0.45 − 0.099, for L >= 0.018 |
scRGB | L' = L |
SMPTE 240M | L' = 4.0L, for L < 0.0228 L' = 1.1115L^0.45 − 0.1115, for L >= 0.0228 |
?
The following table shows the inverse formulas to obtain the original gamma-corrected values:
Color space | Transfer function |
---|---|
sRGB (8-bit) | L = L'/12.92, for L' < 0.03928 L = ((L' + 0.055)/1.055)^2.4, for L' >= 0.03928 |
BT.470-2 System B, G | L = L'^1/0.36 |
BT.470-2 System M | L = L'^1/0.45 |
BT.709 | L = L'/4.50, for L' < 0.081 L = ((L' + 0.099) / 1.099)^1/0.45, for L' >= 0.081 |
scRGB | L = L' |
SMPTE 240M | L = L'/4.0, for L' < 0.0913 L = ((L' + 0.1115)/1.1115)^1/0.45, for L' >= 0.0913 |
?
This enumeration is equivalent to the DXVA_VideoTransferFunction enumeration used in DXVA 1.0.
If you are using the
Bitmask to validate flag values. This value is not a valid flag.
Unknown. Treat as
Linear RGB (gamma = 1.0).
True 1.8 gamma, L' = L^1/1.8.
True 2.0 gamma, L' = L^1/2.0.
True 2.2 gamma, L' = L^1/2.2. This transfer function is used in ITU-R BT.470-2 System M (NTSC).
ITU-R BT.709 transfer function. Gamma 2.2 curve with a linear segment in the lower range. This transfer function is used in BT.709, BT.601, SMPTE 296M, SMPTE 170M, BT.470, and SMPTE 274M. In addition BT-1361 uses this function within the range [0...1].
SMPTE 240M transfer function. Gamma 2.2 curve with a linear segment in the lower range.
sRGB transfer function. Gamma 2.4 curve with a linear segment in the lower range.
True 2.8 gamma. L' = L^1/2.8. This transfer function is used in ITU-R BT.470-2 System B, G (PAL).
Describes the conversion matrices between Y'PbPr (component video) and studio R'G'B'. These flags are used in the
The transfer matrices are defined as follows.
BT.709 transfer matrices:
Y' 0.212600 0.715200 0.072200 R'
+ Pb = -0.114572 -0.385428 0.500000 x G'
+ Pr 0.500000 -0.454153 -0.045847 B' R' 1.000000 0.000000 1.574800 Y'
+ G' = 1.000000 -0.187324 -0.468124 x Pb
+ B' 1.000000 1.855600 0.000000 Pr
+
BT.601 transfer matrices:
Y' 0.299000 0.587000 0.114000 R'
+ Pb = -0.168736 -0.331264 0.500000 x G'
+ Pr 0.500000 -0.418688 -0.081312 B' R' 1.000000 0.000000 1.402000 Y'
+ G' = 1.000000 -0.344136 -0.714136 x Pb
+ B' 1.000000 1.772000 0.000000 Pr
+
SMPTE 240M (SMPTE RP 145) transfer matrices:
Y' 0.212000 0.701000 0.087000 R'
+ Pb = -0.116000 -0.384000 0.500000 x G'
+ Pr 0.500000 -0.445000 -0.055000 B' R' 1.000000 -0.000000 1.576000 Y'
+ G' = 1.000000 -0.227000 -0.477000 x Pb
+ B' 1.000000 1.826000 0.000000 Pr
+
This enumeration is equivalent to the DXVA_VideoTransferMatrix enumeration used in DXVA 1.0.
If you are using the
Creates an instance of the Direct3D Device Manager.
+If this function succeeds, it returns
Windows Store apps must use IMFDXGIDeviceManager and Direct3D 11 Video APIs.
+Creates a DirectX Video Acceleration (DXVA) services object. Call this function if your application uses DXVA directly, without using DirectShow or Media Foundation.
+ A reference to the
The interface identifier (IID) of the requested interface. Any of the following interfaces might be supported by the Direct3D device:
Receives a reference to the interface. The caller must release the interface.
If this function succeeds, it returns
Creates a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+A reference to the
A reference to a
A member of the
A reference to an initialization function for a software device. Set this reference if you are using a software plug-in device. Otherwise, set this parameter to
The function reference type is PDXVAHDSW_Plugin.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Direct3D device does not support DXVA-HD. |
?
Use the
Represents a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor.
To get a reference to this interface, call the
Sets a state parameter for a blit operation by a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The state parameter to set, specified as a member of the
The size, in bytes, of the buffer pointed to by pData.
A reference to a buffer that contains the state data. The meaning of the data depends on the State parameter. Each state has a corresponding data structure; for more information, see
If this method succeeds, it returns
Gets the value of a state parameter for blit operations performed by a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The state parameter to query, specified as a member of the
The size, in bytes, of the buffer pointed to by pData.
A reference to a buffer allocated by the caller. The method copies the state data into the buffer. The buffer must be large enough to hold the data structure that corresponds to the state parameter. For more information, see
If this method succeeds, it returns
Sets a state parameter for an input stream on a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The zero-based index of the input stream. To get the maximum number of streams, call
The state parameter to set, specified as a member of the
The size, in bytes, of the buffer pointed to by pData.
A reference to a buffer that contains the state data. The meaning of the data depends on the State parameter. Each state has a corresponding data structure; for more information, see
If this method succeeds, it returns
Call this method to set state parameters that apply to individual input streams.
+Gets the value of a state parameter for an input stream on a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+The zero-based index of the input stream. To get the maximum number of streams, call
The state parameter to query, specified as a member of the
The size, in bytes, of the buffer pointed to by pData.
A reference to a buffer allocated by the caller. The method copies the state data into the buffer. The buffer must be large enough to hold the data structure that corresponds to the state parameter. For more information, see
If this method succeeds, it returns
Performs a video processing blit on one or more input samples and writes the result to a Microsoft Direct3D surface.
+A reference to the
Frame number of the output video frame, indexed from zero.
Number of input streams to process.
Pointer to an array of
If this method succeeds, it returns
The maximum value of StreamCount is given in the MaxStreamStates member of the
Represents a DirectX Video Acceleration (DXVA) video decoder device.
To get a reference to this interface, call
The
Retrieves the DirectX Video Acceleration (DXVA) decoder service that created this decoder device.
+Receives a reference to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the parameters that were used to create this device.
+Receives the device
Pointer to a
Pointer to a
Receives an array of
Receives the number of elements in the pppDecoderRenderTargets array. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. At least one parameter must be non- |
?
You can set any parameter to
If you specify a non-
Retrieves a reference to a DirectX Video Acceleration (DXVA) decoder buffer.
+Type of buffer to retrieve. Use one of the following values.
Value | Meaning |
---|---|
Picture decoding parameter buffer. | |
Macroblock control command buffer. | |
Residual difference block data buffer. | |
Deblocking filter control command buffer. | |
Inverse quantization matrix buffer. | |
Slice-control buffer. | |
Bitstream data buffer. | |
Motion vector buffer. | |
Film grain synthesis data buffer. |
?
Receives a reference to the start of the memory buffer.
Receives the size of the buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The method locks the Direct3D surface that contains the buffer. When you are done using the buffer, call
This method might block if too many operations have been queued on the GPU. The method unblocks when a free buffer becomes available.
+ Releases a buffer that was obtained by calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Starts the decoding operation.
+Pointer to the
Reserved; set to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid surface type. See Remarks. |
?
After this method is called, call
Each call to BeginFrame must have a matching call to EndFrame, and BeginFrame calls cannot be nested.
DXVA 1.0 migration note: Unlike the IAMVideoAccelerator::BeginFrame method, which specifies the buffer as an index, this method takes a reference directly to the uncompressed buffer.
The surface pointed to by pRenderTarget must be created by calling
Signals the end of the decoding operation.
+Reserved.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Executes a decoding operation on the current frame.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You must call
Retrieves the DirectX Video Acceleration (DXVA) decoder service that created this decoder device.
+Sets the type of video memory for uncompressed video surfaces. This interface is used by video decoders and transforms.
The DirectShow enhanced video renderer (EVR) filter exposes this interface as a service on the filter's input pins. To obtain a reference to this interface, call
A video decoder can use this interface to enumerate the EVR filter's preferred surface types and then select the surface type. The decoder should then create surfaces of that type to hold the results of the decoding operation.
This interface does not define a way to clear the surface type. In the case of DirectShow, disconnecting two filters invalidates the surface type.
+
Retrieves a supported video surface type.
+Zero-based index of the surface type to retrieve. Surface types are indexed in order of preference, starting with the most preferred type.
Receives a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The index was out of range. |
?
Sets the video surface type that a decoder will use for DirectX Video Acceleration (DXVA) 2.0.
+Member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The renderer does not support the specified surface type. |
?
By calling this method, the caller agrees to create surfaces of the type specified in the dwType parameter.
In DirectShow, during pin connection, a video decoder that supports DXVA 2.0 should call SetSurface with the value
The only way to undo the setting is to break the pin connection.
+
Sets the video surface type that a decoder will use for DirectX Video Acceleration (DXVA) 2.0.
+By calling this method, the caller agrees to create surfaces of the type specified in the dwType parameter.
In DirectShow, during pin connection, a video decoder that supports DXVA 2.0 should call SetSurface with the value
The only way to undo the setting is to break the pin connection.
+
Retrieves the parameters that were used to create this device.
+You can set any parameter to
Retrieves the DirectX Video Acceleration (DXVA) video processor service that created this video processor device.
+Receives a reference to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the parameters that were used to create this device.
+Receives the device
Pointer to a
Receives the render target format, specified as a
Receives the maximum number of streams supported by the device. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. At least one parameter must be non- |
?
You can set any parameter to
Retrieves the capabilities of the video processor device.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the range of values for a video processor (ProcAmp) setting on this video processor device.
+The ProcAmp setting to query. See ProcAmp Settings.
Pointer to a
If this method succeeds, it returns
Retrieves the range of values for an image filter supported by this device.
+Filter setting to query. For more information, see DXVA Image Filter Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Performs a video process operation on one or more input samples and writes the result to a Direct3D9 surface.
+ A reference to the
A reference to a
A reference to an array of
The maximum number of input samples is given by the constant MAX_DEINTERLACE_SURFACES, defined in the header file dxva2api.h.
The number of elements in the pSamples array.
Reserved; set to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Internal driver error. |
| Invalid arguments. |
?
When the method returns, the operation might not be complete.
If the method returns E_INVALIDARG, check for the following:
Retrieves the DirectX Video Acceleration (DXVA) video processor service that created this video processor device.
+
Retrieves the capabilities of the video processor device.
+Contains an initialization vector (IV) for 128-bit Advanced Encryption Standard CTR mode (AES-CTR) block cipher encryption.
+For AES-CTR encryption, the pvPVPState member of the
The D3DAES_CTR_IV structure and the
The IV, in big-endian format.
The block count, in big-endian format.
Defines a 16-bit AYUV pixel value.
+Contains the Cr chroma value (also called V).
Contains the Cb chroma value (also called U).
Contains the luma value.
Contains the alpha value.
Defines an 8-bit AYUV pixel value.
+Contains the Cr chroma value (also called V).
Contains the Cb chroma value (also called U).
Contains the luma value.
Contains the alpha value.
Specifies how the output alpha values are calculated for blit operations when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+Specifies the alpha fill mode, as a member of the
If the FeatureCaps member of the
The default state value is
Zero-based index of the input stream to use for the alpha values. This member is used when the alpha fill mode is
To get the maximum number of streams, call
Specifies the background color for blit operations, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+The background color is used to fill the target rectangle wherever no video image appears. Areas outside the target rectangle are not affected. See
The color space of the background color is determined by the color space of the output. See
The alpha value of the background color is used only when the alpha fill mode is
The default background color is full-range RGB black, with opaque alpha.
+ If TRUE, the BackgroundColor member specifies a YCbCr color. Otherwise, it specifies an RGB color. The default device state is
A
Specifies whether the output is downsampled in a blit operation, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+If the Enable member is TRUE, the device downsamples the composed target rectangle to the size given in the Size member, and then scales it back to the size of the target rectangle.
The width and height of Size must be greater than zero. If the size is larger than the target rectangle, downsampling does not occur.
To use this state, the device must support downsampling, indicated by the
If the device does not support downsampling, the
Downsampling is sometimes used to reduce the quality of premium content when other forms of content protection are not available.
+If TRUE, downsampling is enabled. Otherwise, downsampling is disabled and the Size member is ignored. The default state value is
The sampling size. The default value is (1,1).
Specifies the output color space for blit operations, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+The RGB_Range member applies to RGB output, while the YCbCr_Matrix and YCbCr_xvYCC members apply to YCbCr (YUV) output. If the device performs color-space conversion on the background color, it uses the values that apply to both color spaces.
Extended YCbCr can be used with either transfer matrix. Extended YCbCr does not change the black point or white point — the black point is still 16 and the white point is still 235. However, extended YCbCr explicitly allows blacker-than-black values in the range 1–15, and whiter-than-white values in the range 236–254. When extended YCbCr is used, the driver should not clip the luma values to the nominal 16–235 range.
If the device supports extended YCbCr, it sets the
If the output format is a wide-gamut RGB format, output might fall outside the nominal [0...1] range of sRGB. This is particularly true if one or more input streams use extended YCbCr.
+Specifies whether the output is intended for playback or video processing (such as editing or authoring). The device can optimize the processing based on the type. The default state value is 0 (playback).
Value | Meaning |
---|---|
| Playback. |
| Video processing. |
?
Specifies the RGB color range. The default state value is 0 (full range).
Value | Meaning |
---|---|
| Full range (0-255). |
| Limited range (16-235). |
?
Specifies the YCbCr transfer matrix. The default state value is 0 (BT.601).
Value | Meaning |
---|---|
| ITU-R BT.601. |
| ITU-R BT.709. |
?
Specifies whether the output uses conventional YCbCr or extended YCbCr (xvYCC). The default state value is zero (conventional YCbCr).
Value | Meaning |
---|---|
| Conventional YCbCr. |
| Extended YCbCr (xvYCC). |
?
Contains data for a private blit state for Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+Use this structure for proprietary or device-specific state parameters.
The caller allocates the pData array. Set the DataSize member to the size of the array in bytes. When retrieving the state data, you can set pData to
A
The size, in bytes, of the buffer pointed to by the pData member.
A reference to a buffer that contains the private state data. The DXVA-HD runtime passes this buffer directly to the device without validation.
Specifies the target rectangle for blitting, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+Specifies whether to use the target rectangle. The default state value is
Value | Meaning |
---|---|
| Use the target rectangle specified by the TargetRect member. |
Use the entire destination surface as the target rectangle. Ignore the TargetRect member. |
?
Specifies the target rectangle. The target rectangle is the area within the destination surface where the output will be drawn. The target rectangle is given in pixel coordinates, relative to the destination surface. The default state value is an empty rectangle, (0, 0, 0, 0).
If the Enable member is
Defines a color value for Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+This union can represent both RGB and YCbCr colors. The interpretation of the union depends on the context.
+A
A
Specifies an RGB color value.
+The RGB values have a nominal range of [0...1]. For an RGB format with n bits per channel, the value of each color component is calculated as follows:
val = f * ((1 << n)-1)
For example, for RGB-32 (8 bits per channel), val = BYTE(f * 255.0)
.
For full-range RGB, reference black is (0.0, 0.0, 0.0), which corresponds to (0, 0, 0) in an 8-bit representation. For limited-range RGB, reference black is (0.0625, 0.0625, 0.0625), which corresponds to (16, 16, 16) in an 8-bit representation. For wide-gamut formats, the values might fall outside of the [0...1] range.
+The red value.
The green value.
The blue value.
The alpha value. Values range from 0 (transparent) to 1 (opaque).
Defines a color value for Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+This union can represent both RGB and YCbCr colors. The interpretation of the union depends on the context.
+A
A
Describes the configuration of a DXVA decoder device.
+Defines the encryption protocol type for bit-stream data buffers. If no encryption is applied, the value is DXVA_NoEncrypt. If ConfigBitstreamRaw is 0, the value must be DXVA_NoEncrypt.
Defines the encryption protocol type for macroblock control data buffers. If no encryption is applied, the value is DXVA_NoEncrypt. If ConfigBitstreamRaw is 1, the value must be DXVA_NoEncrypt.
Defines the encryption protocol type for residual difference decoding data buffers (buffers containing spatial-domain data or sets of transform-domain coefficients for accelerator-based IDCT). If no encryption is applied, the value is DXVA_NoEncrypt. If ConfigBitstreamRaw is 1, the value must be DXVA_NoEncrypt.
Indicates whether the host-decoder sends raw bit-stream data. If the value is 1, the data for the pictures will be sent in bit-stream buffers as raw bit-stream content. If the value is 0, picture data will be sent using macroblock control command buffers. If either ConfigResidDiffHost or ConfigResidDiffAccelerator is 1, the value must be 0.
Specifies whether macroblock control commands are in raster scan order or in arbitrary order. If the value is 1, the macroblock control commands within each macroblock control command buffer are in raster-scan order. If the value is 0, the order is arbitrary. For some types of bit streams, forcing raster order either greatly increases the number of required macroblock control buffers that must be processed, or requires host reordering of the control information. Therefore, supporting arbitrary order can be more efficient.
Contains the host residual difference configuration. If the value is 1, some residual difference decoding data may be sent as blocks in the spatial domain from the host. If the value is 0, spatial domain data will not be sent.
Indicates the word size used to represent residual difference spatial-domain blocks for predicted (non-intra) pictures when using host-based residual difference decoding.
If ConfigResidDiffHost is 1 and ConfigSpatialResid8 is 1, the host will send residual difference spatial-domain blocks for non-intra macroblocks using 8-bit signed samples and for intra macroblocks in predicted (non-intra) pictures in a format that depends on the value of ConfigIntraResidUnsigned:
If ConfigResidDiffHost is 1 and ConfigSpatialResid8 is 0, the host will send residual difference spatial-domain blocks of data for non-intra macroblocks using 16- bit signed samples and for intra macroblocks in predicted (non-intra) pictures in a format that depends on the value of ConfigIntraResidUnsigned:
If ConfigResidDiffHost is 0, ConfigSpatialResid8 must be 0.
For intra pictures, spatial-domain blocks must be sent using 8-bit samples if bits-per-pixel (BPP) is 8, and using 16-bit samples if BPP > 8. If ConfigIntraResidUnsigned is 0, these samples are sent as signed integer values relative to a constant reference value of 2^(BPP−1), and if ConfigIntraResidUnsigned is 1, these samples are sent as unsigned integer values relative to a constant reference value of 0.
If the value is 1, 8-bit difference overflow blocks are subtracted rather than added. The value must be 0 unless ConfigSpatialResid8 is 1.
The ability to subtract differences rather than add them enables 8-bit difference decoding to be fully compliant with the full ±255 range of values required in video decoder specifications, because +255 cannot be represented as the addition of two signed 8-bit numbers, but any number in the range ±255 can be represented as the difference between two signed 8-bit numbers (+255 = +127 minus −128).
If the value is 1, spatial-domain blocks for intra macroblocks must be clipped to an 8-bit range on the host and spatial-domain blocks for non-intra macroblocks must be clipped to a 9-bit range on the host. If the value is 0, no such clipping is necessary by the host.
The value must be 0 unless ConfigSpatialResid8 is 0 and ConfigResidDiffHost is 1.
If the value is 1, any spatial-domain residual difference data must be sent in a chrominance-interleaved form matching the YUV format chrominance interleaving pattern. The value must be 0 unless ConfigResidDiffHost is 1 and the YUV format is NV12 or NV21.
Indicates the method of representation of spatial-domain blocks of residual difference data for intra blocks when using host-based difference decoding.
If ConfigResidDiffHost is 1 and ConfigIntraResidUnsigned is 0, spatial-domain residual difference data blocks for intra macroblocks must be sent as follows:
If ConfigResidDiffHost is 1 and ConfigIntraResidUnsigned is 1, spatial-domain residual difference data blocks for intra macroblocks must be sent as follows:
The value of the member must be 0 unless ConfigResidDiffHost is 1.
If the value is 1, transform-domain blocks of coefficient data may be sent from the host for accelerator-based IDCT. If the value is 0, accelerator-based IDCT will not be used. If both ConfigResidDiffHost and ConfigResidDiffAccelerator are 1, this indicates that some residual difference decoding will be done on the host and some on the accelerator, as indicated by macroblock-level control commands.
The value must be 0 if ConfigBitstreamRaw is 1.
If the value is 1, the inverse scan for transform-domain block processing will be performed on the host, and absolute indices will be sent instead for any transform coefficients. If the value is 0, the inverse scan will be performed on the accelerator.
The value must be 0 if ConfigResidDiffAccelerator is 0 or if Config4GroupedCoefs is 1.
If the value is 1, the IDCT specified in Annex W of ITU-T Recommendation H.263 is used. If the value is 0, any compliant IDCT can be used for off-host IDCT.
The H.263 annex does not comply with the IDCT requirements of MPEG-2 corrigendum 2, so the value must not be 1 for use with MPEG-2 video.
The value must be 0 if ConfigResidDiffAccelerator is 0, indicating purely host-based residual difference decoding.
If the value is 1, transform coefficients for off-host IDCT will be sent using the DXVA_TCoef4Group structure. If the value is 0, the DXVA_TCoefSingle structure is used. The value must be 0 if ConfigResidDiffAccelerator is 0 or if ConfigHostInverseScan is 1.
Specifies how many frames the decoder device processes at any one time.
Contains decoder-specific configuration information.
Describes a video stream for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor.
The display driver can use the information in this structure to optimize the capabilities of the video processor. For example, some capabilities might not be exposed for high-definition (HD) content, for performance reasons.
+Frame rates are expressed as ratios. For example, 30 frames per second (fps) is expressed as 30:1, and 29.97 fps is expressed as 30000/1001. For interlaced content, a frame consists of two fields, so that the frame rate is half the field rate.
If the application will composite two or more input streams, use the largest stream for the values of InputWidth and InputHeight.
+A member of the
The frame rate of the input video stream, specified as a
The width of the input frames, in pixels.
The height of the input frames, in pixels.
The frame rate of the output video stream, specified as a
The width of the output frames, in pixels.
The height of the output frames, in pixels.
Specifies a custom rate for frame-rate conversion or inverse telecine (IVTC).
+The CustomRate member gives the rate conversion factor, while the remaining members define the pattern of input and output samples.
Here are some example uses for this structure:
Frame rate conversion from 60p to 120p (doubling the frame rate).
Reverse 2:3 pulldown (IVTC) from 60i to 24p.
(Ten interlaced fields are converted into four progressive frames.)
The ratio of the output frame rate to the input frame rate, expressed as a
The number of output frames that will be generated for every N input samples, where N = InputFramesOrFields.
If TRUE, the input stream must be interlaced. Otherwise, the input stream must be progressive.
The number of input fields or frames for every N output frames that will be generated, where N = OutputFrames.
Describes a buffer sent from a decoder to a DirectX Video Acceleration (DXVA) device.
+This structure corresponds closely to the DXVA_BufferDescription structure in DXVA 1, but some of the fields are no longer used in DXVA 2.
+Identifies the type of buffer passed to the accelerator. Must be one of the following values.
Value | Meaning |
---|---|
Picture decoding parameter buffer. | |
Macroblock control command buffer. | |
Residual difference block data buffer. | |
Deblocking filter control command buffer. | |
Inverse quantization matrix buffer. | |
Slice-control buffer. | |
Bitstream data buffer. | |
Motion vector buffer. | |
Film grain synthesis data buffer. |
?
Reserved. Set to zero.
Specifies the offset of the relevant data from the beginning of the buffer, in bytes. Currently this value must be zero.
Specifies the amount of relevant data in the buffer, in bytes. The location of the last byte of content in the buffer is DataOffset + DataSize − 1.
Specifies the macroblock address of the first macroblock in the buffer. The macroblock address is given in raster scan order.
Specifies the number of macroblocks of data in the buffer. This count includes skipped macroblocks. This value must be zero if the data buffer type is one of the following: picture decoding parameters, inverse-quantization matrix, AYUV, IA44/AI44, DPXD, Highlight, or DCCMD.
Reserved. Set to zero.
Reserved. Set to zero.
Reserved. Set to zero.
Reserved. Set to zero.
Pointer to a byte array that contains an initialization vector (IV) for encrypted data. If the decode buffer does not contain encrypted data, set this member to
Contains parameters for the
Contains private data for the
This structure corresponds to parameters of the IAMVideoAccelerator::Execute method in DirectX Video Acceleration (DXVA) version 1.
+Describes the format of a video stream.
+Most of the values in this structure can be translated directly to and from
Use this member to access all of the bits in the union.
Describes the interlacing of the video frames. Contains a value from the
Describes the chroma siting. Contains a value from the
Describes the nominal range of the Y'CbCr or RGB color data. Contains a value from the
Describes the transform from Y'PbPr (component video) to studio R'G'B'. Contains a value from the
Describes the intended viewing conditions. Contains a value from the
Describes the color primaries. Contains a value from the
Describes the gamma correction transfer function. Contains a value from the
Defines the range of supported values for an image filter.
+The multiplier enables the filter range to have a fractional step value.
For example, a hue filter might have an actual range of [-180.0 ... +180.0] with a step size of 0.25. The device would report the following range and multiplier:
In this case, a filter value of 2 would be interpreted by the device as 0.50 (or 2 × 0.25).
The device should use a multiplier that can be represented exactly as a base-2 fraction.
+The minimum value of the filter.
The maximum value of the filter.
The default value of the filter.
A multiplier. Use the following formula to translate the filter setting into the actual filter value: Actual Value = Set Value × Multiplier.
Contains parameters for a DirectX Video Acceleration (DXVA) image filter.
+Filter level.
Filter threshold.
Filter radius.
Returns a
You can use this function for DirectX Video Acceleration (DXVA) operations that require alpha values expressed as fixed-point numbers.
+
Defines a video frequency.
+The value 0/0 indicates an unknown frequency. Values of the form n/0, where n is not zero, are invalid. Values of the form 0/n, where n is not zero, indicate a frequency of zero.
+Numerator of the frequency.
Denominator of the frequency.
Contains values for DirectX Video Acceleration (DXVA) video processing operations.
+Brightness value.
Contrast value.
Hue value.
Saturation value.
Contains a rational number (ratio).
+Values of the form 0/n are interpreted as zero. The value 0/0 is interpreted as zero. However, these values are not necessarily valid in all contexts.
Values of the form n/0, where n is nonzero, are invalid.
+The numerator of the ratio.
The denominator of the ratio.
Contains per-stream data for the
Specifies the planar alpha value for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+For each pixel, the destination color value is computed as follows:
Cd = Cs * (As * Ap * Ae) + Cd * (1.0 - As * Ap * Ae)
where
Cd
= Color value of the destination pixel.Cs
= Color value of source pixel.As
= Per-pixel source alpha.Ap
= Planar alpha value.Ae
= Palette-entry alpha value, or 1.0 (see Note). Note: Palette-entry alpha values apply only to palettized color formats, and only when the device supports the
The destination alpha value is computed according to the
To get the device capabilities, call
If TRUE, alpha blending is enabled. Otherwise, alpha blending is disabled. The default state value is
Specifies the planar alpha value as a floating-point number from 0.0 (transparent) to 1.0 (opaque).
If the Enable member is
Specifies the pixel aspect ratio (PAR) for the source and destination rectangles.
+Pixel aspect ratios of the form 0/n and n/0 are not valid.
If the Enable member is
If TRUE, the SourceAspectRatio and DestinationAspectRatio members contain valid values. Otherwise, the pixel aspect ratios are unspecified.
A
A
Specifies the format for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+The surface format, specified as a
The default state value is
Specifies the destination rectangle for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+Specifies whether to use the destination rectangle, or use the entire output surface. The default state value is
Value | Meaning |
---|---|
| Use the destination rectangle given in the DestinationRect member. |
Use the entire output surface as the destination rectangle. |
?
The destination rectangle, which defines the portion of the output surface where the source rectangle is blitted. The destination rectangle is given in pixel coordinates, relative to the output surface. The default value is an empty rectangle, (0, 0, 0, 0).
If the Enable member is
Specifies the level for a filtering operation on a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) input stream.
+For a list of image filters that are defined for DXVA-HD, see
If TRUE, the filter is enabled. Otherwise, the filter is disabled.
The level for the filter. The meaning of this value depends on the implementation. To get the range and default value of a particular filter, call the
If the Enable member is
Specifies how a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) input stream is interlaced.
+Some devices do not support interlaced RGB. Interlaced RGB support is indicated by the
Some devices do not support interlaced formats with palettized color. This support is indicated by the
To get the device's capabilities, call
The video interlacing, specified as a
The default state value is
Specifies the color space for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) input stream.
+The RGB_Range member applies to RGB input, while the YCbCr_Matrix and YCbCr_xvYCC members apply to YCbCr (YUV) input.
In some situations, the device might perform an intermediate color conversion on the input stream. If so, it uses the flags that apply to both color spaces. For example, suppose the device converts from RGB to YCbCr. If the RGB_Range member is 0 and the YCbCr_Matrix member is 1, the device will convert from full-range RGB to BT.709 YCbCr.
If the device supports xvYCC, it returns the
Specifies whether the input stream contains video or graphics. The device can optimize the processing based on the type. The default state value is 0 (video).
Value | Meaning |
---|---|
| Video. |
| Graphics. |
?
Specifies the RGB color range. The default state value is 0 (full range).
Value | Meaning |
---|---|
| Full range (0-255). |
| Limited range (16-235). |
?
Specifies the YCbCr transfer matrix. The default state value is 0 (BT.601).
Value | Meaning |
---|---|
| ITU-R BT.601. |
| ITU-R BT.709. |
?
Specifies whether the input stream uses conventional YCbCr or extended YCbCr (xvYCC). The default state value is 0 (conventional YCbCr).
Value | Meaning |
---|---|
| Conventional YCbCr. |
| Extended YCbCr (xvYCC). |
?
Specifies the luma key for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+To use this state, the device must support luma keying, indicated by the
If the device does not support luma keying, the
If the input format is RGB, the device must also support the
The values of Lower and Upper give the lower and upper bounds of the luma key, using a nominal range of [0...1]. Given a format with n bits per channel, these values are converted to luma values as follows:
val = f * ((1 << n)-1)
Any pixel whose luma value falls within the upper and lower bounds (inclusive) is treated as transparent.
For example, if the pixel format uses 8-bit luma, the upper bound is calculated as follows:
BYTE Y = BYTE(max(min(1.0, Upper), 0.0) * 255.0)
Note that the value is clamped to the range [0...1] before multiplying by 255.
+ If TRUE, luma keying is enabled. Otherwise, luma keying is disabled. The default value is
The lower bound for the luma key. The range is [0...1]. The default state value is 0.0.
The upper bound for the luma key. The range is [0...1]. The default state value is 0.0.
Specifies the output frame rate for an input stream when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+The output rate might require the device to convert the frame rate of the input stream. If so, the value of RepeatFrame controls whether the device creates interpolated frames or simply repeats input frames.
+Specifies how the device performs frame-rate conversion, if required. The default state value is
Value | Meaning |
---|---|
| The device repeats frames. |
The device interpolates frames. |
?
Specifies the output rate, as a member of the
Specifies a custom output rate, as a
To get the list of custom rates supported by the video processor, call
Contains the color palette entries for an input stream, when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+This stream state is used for input streams that have a palettized color format. Palettized formats with 4 bits per pixel (bpp) use the first 16 entries in the list. Formats with 8 bpp use the first 256 entries.
If a pixel has a palette index greater than the number of entries, the device treats the pixel as being white with opaque alpha. For full-range RGB, this value will be (255, 255, 255, 255); for YCbCr the value will be (255, 235, 128, 128).
The caller allocates the pEntries array. Set the Count member to the number of elements in the array. When retrieving the state data, you can set the pEntries member to
If the DXVA-HD device does not have the
To get the device capabilities, call
The number of palette entries. The default state value is 0.
A reference to an array of
Contains data for a private stream state, for a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) input stream.
+Use this structure for proprietary or device-specific state parameters.
The caller allocates the pData array. Set the DataSize member to the size of the array in bytes. When retrieving the state data, you can set the pData member to
A
Value | Meaning |
---|---|
| Retrieves statistics about inverse telecine. The state data (pData) is a |
?
A device can define additional GUIDs for use with custom stream states. The interpretation of the data is then defined by the device.
The size, in bytes, of the buffer pointed to by the pData member.
A reference to a buffer that contains the private state data. The DXVA-HD runtime passes this buffer directly to the device, without validation.
Contains inverse telecine (IVTC) statistics from a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+If the DXVA-HD device supports IVTC statistics, it can detect when the input video contains telecined frames. You can use this information to enable IVTC in the device.
To enable IVTC statistics, do the following:
sizeof( )
.To get the most recent IVTC statistics from the device, call the
Typically, an application would use this feature as follows:
Specifies whether IVTC statistics are enabled. The default state value is
If the driver detects that the frames are telecined, and is able to perform inverse telecine, this field contains a member of the
The number of consecutive telecined frames that the device has detected.
The index of the most recent input field. The value of this member equals the most recent value of the InputFrameOrField member of the
Specifies the source rectangle for an input stream when using Microsoft DirectX Video Acceleration High Definition (DXVA-HD)
+Specifies whether to blit the entire input surface or just the source rectangle. The default state value is
Value | Meaning |
---|---|
| Use the source rectangle specified in the SourceRect member. |
Blit the entire input surface. Ignore the SourceRect member. |
?
The source rectangle, which defines the portion of the input sample that is blitted to the destination surface. The source rectangle is given in pixel coordinates, relative to the input surface. The default state value is an empty rectangle, (0, 0, 0, 0).
If the Enable member is
Contains references to functions implemented by a software plug-in for Microsoft DirectX Video Acceleration High Definition (DXVA-HD).
+If you provide a software plug-in for DXVA-HD, the plug-in must implement a set of functions that are defined by the function reference types in this structure.
At initialization, the DXVA-HD runtime calls the plug-in device's PDXVAHDSW_Plugin function. This function fills in a
Function reference of type PDXVAHDSW_CreateDevice.
Function reference of type PDXVAHDSW_ProposeVideoPrivateFormat.
Function reference of type PDXVAHDSW_GetVideoProcessorDeviceCaps.
Function reference of type PDXVAHDSW_GetVideoProcessorOutputFormats.
Function reference of type PDXVAHDSW_GetVideoProcessorInputFormats.
Function reference of type PDXVAHDSW_GetVideoProcessorCaps.
Function reference of type PDXVAHDSW_GetVideoProcessorCustomRates.
Function reference of type PDXVAHDSW_GetVideoProcessorFilterRange.
Function reference of type PDXVAHDSW_DestroyDevice.
Function reference of type PDXVAHDSW_CreateVideoProcessor.
Function reference of type PDXVAHDSW_SetVideoProcessBltState.
Function reference of type PDXVAHDSW_GetVideoProcessBltStatePrivate.
Function reference of type PDXVAHDSW_SetVideoProcessStreamState.
Function reference of type PDXVAHDSW_GetVideoProcessStreamStatePrivate.
Function reference of type PDXVAHDSW_VideoProcessBltHD.
Function reference of type PDXVAHDSW_DestroyVideoProcessor.
Defines the range of supported values for a DirectX Video Acceleration (DXVA) operation.
+All values in this structure are specified as
Minimum supported value.
Maximum supported value.
Default value.
Minimum increment between values.
Describes a video stream for a DXVA decoder device or video processor device.
+The InputSampleFreq member gives the frame rate of the decoded video stream, as received by the video renderer. The OutputFrameFreq member gives the frame rate of the video that is displayed after deinterlacing. If the input video is interlaced and the samples contain interleaved fields, the output frame rate is twice the input frame rate. If the input video is progressive or contains single fields, the output frame rate is the same as the input frame rate.
Decoders should set the values of InputSampleFreq and OutputFrameFreq if the frame rate is known. Otherwise, set these members to 0/0 to indicate an unknown frame rate.
+Width of the video frame, in pixels.
Height of the video frame, in pixels.
Additional details about the video format, specified as a
Surface format, specified as a
Frame rate of the input video stream, specified as a
Frame rate of the output video, specified as a
Level of data protection required when the user accessible bus (UAB) is present. If TRUE, the video must be protected when a UAB is present. If
Reserved. Must be zero.
Contains parameters for the
Describes the capabilities of a DirectX Video Acceleration (DXVA) video processor mode.
+Identifies the type of device. The following values are defined.
Value | Meaning |
---|---|
DXVA 2.0 video processing is emulated by using DXVA 1.0. An emulated device may be missing significant processing capabilities and have lower image quality and performance. | |
Hardware device. | |
Software device. |
?
The Direct3D memory pool used by the device.
Number of forward reference samples the device needs to perform deinterlacing. For the bob, progressive scan, and software devices, the value is zero.
Number of backward reference samples the device needs to perform deinterlacing. For the bob, progressive scan, and software devices, the value is zero.
Reserved. Must be zero.
Identifies the deinterlacing technique used by the device. This value is a bitwise OR of one or more of the following flags.
Value | Meaning |
---|---|
The algorithm is unknown or proprietary. | |
The algorithm creates missing lines by repeating the line either above or below the missing line. This algorithm produces a jagged image and is not recommended. | |
The algorithm creates missing lines by averaging two lines. Slight vertical adjustments are made so that the resulting image does not bob up and down. | |
The algorithm creates missing lines by applying a [−1, 9, 9, −1]/16 filter across four lines. Slight vertical adjustments are made so that the resulting image does not bob up and down. | |
The algorithm uses median filtering to recreate the pixels in the missing lines. | |
The algorithm uses an edge filter to create the missing lines. In this process, spatial directional filters are applied to determine the orientation of edges in the picture content. Missing pixels are created by filtering along (rather than across) the detected edges. | |
The algorithm uses spatial or temporal interpolation, switching between the two on a field-by-field basis, depending on the amount of motion. | |
The algorithm uses spatial or temporal interpolation, switching between the two on a pixel-by-pixel basis, depending on the amount of motion. | |
The algorithm identifies objects within a sequence of video fields. Before it recreates the missing pixels, it aligns the movement axes of the individual objects in the scene to make them parallel with the time axis. | |
The device can undo the 3:2 pulldown process used in telecine. |
?
Specifies the available video processor (ProcAmp) operations. The value is a bitwise OR of ProcAmp Settings constants.
Specifies operations that the device can perform concurrently with the
Value | Meaning |
---|---|
The device can convert the video from YUV color space to RGB color space, with at least 8 bits of precision for each RGB component. | |
The device can stretch or shrink the video horizontally. If this capability is present, aspect ratio correction can be performed at the same time as deinterlacing. | |
The device can stretch or shrink the video vertically. If this capability is present, image resizing and aspect ratio correction can be performed at the same time. | |
The device can alpha blend the video. | |
The device can operate on a subrectangle of the video frame. If this capability is present, source images can be cropped before further processing occurs. | |
The device can accept substreams in addition to the primary video stream, and can composite them. | |
The device can perform color adjustments on the primary video stream and substreams, at the same time that it deinterlaces the video and composites the substreams. The destination color space is defined in the DestFormat member of the | |
The device can convert the video from YUV to RGB color space when it writes the deinterlaced and composited pixels to the destination surface. An RGB destination surface could be an off-screen surface, texture, Direct3D render target, or combined texture/render target surface. An RGB destination surface must use at least 8 bits for each color channel. | |
The device can perform an alpha blend operation with the destination surface when it writes the deinterlaced and composited pixels to the destination surface. | |
The device can downsample the output frame, as specified by the ConstrictionSize member of the | |
The device can perform noise filtering. | |
The device can perform detail filtering. | |
The device can perform a constant alpha blend to the entire video stream when it composites the video stream and substreams. | |
The device can perform accurate linear RGB scaling, rather than performing it in nonlinear gamma space. | |
The device can correct the image to compensate for artifacts introduced when performing scaling in nonlinear gamma space. | |
The deinterlacing algorithm preserves the original field lines from the interlaced field picture, unless scaling is also applied. For example, in deinterlacing algorithms such as bob and median filtering, the device copies the original field into every other scan line and then applies a filter to reconstruct the missing scan lines. As a result, the original field can be recovered by discarding the scan lines that were interpolated. If the image is scaled vertically, however, the original field lines cannot be recovered. If the image is scaled horizontally (but not vertically), the resulting field lines will be equivalent to scaling the original field picture. (In other words, discarding the interpolated scan lines will yield the same result as stretching the original picture without deinterlacing.) |
?
Specifies the supported noise filters. The value is a bitwise OR of the following flags.
Value | Meaning |
---|---|
Noise filtering is not supported. | |
Unknown or proprietary filter. | |
Median filter. | |
Temporal filter. | |
Block noise filter. | |
Mosquito noise filter. |
?
Specifies the supported detail filters. The value is a bitwise OR of the following flags.
Value | Meaning |
---|---|
Detail filtering is not supported. | |
Unknown or proprietary filter. | |
Edge filter. | |
Sharpen filter. |
?
Specifies an input sample for the
Specifies the capabilities of the Microsoft DirectX Video Acceleration High Definition (DXVA-HD) video processor.
+A
The number of past reference frames required to perform the optimal video processing.
The number of future reference frames required to perform the optimal video processing.
A bitwise OR of zero or more flags from the
A bitwise OR of zero or more flags from the
The number of custom output frame rates. To get the list of custom frame rates, call the
Specifies the capabilities of a Microsoft DirectX Video Acceleration High Definition (DXVA-HD) device.
+In DXVA-HD, the device stores state information for each input stream. These states persist between blits. With each blit, the application selects which streams to enable or disable. Disabling a stream does not affect the state information for that stream.
The MaxStreamStates member gives the maximum number of stream states that can be set by the application. The MaxInputStreams member gives the maximum number of streams that can be enabled during a blit. These two values can differ.
To set the state data for a stream, call
Specifies the device type, as a member of the
A bitwise OR of zero or more flags from the
A bitwise OR of zero or more flags from the
A bitwise OR of zero or more flags from the
A bitwise OR of zero or more flags from the
The memory pool that is required for the input video surfaces.
The number of supported output formats. To get the list of output formats, call the
The number of supported input formats. To get the list of input formats, call the
The number of video processors. Each video processor represents a distinct set of processing capabilities. To get the capabilities of each video processor, call the
The maximum number of input streams that can be enabled at the same time.
The maximum number of input streams for which the device can store state data.
Defines the ASF indexer options.
+The indexer creates a new index object.
The indexer returns values for reverse playback.
The indexer creates an index object for a live ASF stream.
Defines the ASF multiplexer options.
+The multiplexer automatically adjusts the bit rate of the ASF content in response to the characteristics of the streams being multiplexed.
Defines the selection options for an ASF stream.
+No samples from the stream are delivered.
Only samples from the stream that are clean points are delivered.
All samples from the stream are delivered.
Defines the ASF splitter options.
+The splitter delivers samples for the ASF content in reverse order to accommodate reverse playback.
The splitter delivers samples for streams that are protected with Windows Media Digital Rights Management.
Defines status conditions for the
Defines the ASF stream selector options.
+The stream selector will not set thinning. Thinning is the process of removing samples from a stream to reduce the bit rate.
The stream selector will use the average bit rate of streams when selecting streams.
Creates a work queue that is guaranteed to serialize work items. The serial work queue wraps an existing multithreaded work queue. The serial work queue enforces a first-in, first-out (FIFO) execution order.
+When you are done using the work queue, call
Multithreaded queues use a thread pool, which can reduce the total number of threads in the pipeline. However, they do not serialize work items. A serial work queue enables the application to get the benefits of the thread pool, without needing to perform manual serialization of its own work items.
+The identifier of an existing work queue. This must be either a multithreaded queue or another serial work queue. Any of the following can be used:
Receives an identifier for the new serial work queue. Use this identifier when queuing work items.
Specifies the type of work queue for the
Defines flags for serializing and deserializing attribute stores.
+If this flag is set,
Specifies how to compare the attributes on two objects.
+Check whether all the attributes in pThis exist in pTheirs and have the same data, where pThis is the object whose Compare method is being called and pTheirs is the object given in the pTheirs parameter.
Check whether all the attributes in pTheirs exist in pThis and have the same data, where pThis is the object whose Compare method is being called and pTheirs is the object given in the pTheirs parameter.
Check whether both objects have identical attributes with the same data.
Check whether the attributes that exist in both objects have the same data.
Find the object with the fewest number of attributes, and check if those attributes exist in the other object and have the same data.
Defines the data type for a key/value pair.
+Unsigned 32-bit integer.
Unsigned 64-bit integer.
Floating-point number.
Byte array.
Specifies the origin for a seek request.
+The seek position is specified relative to the start of the stream.
The seek position is specified relative to the current read/write position in the stream.
Contains flags that describe the characteristics of a clock. These flags are returned by the
Defines properties of a clock.
+Jitter values are always negative. In other words, the time returned by
Defines the state of a clock.
+The clock is invalid. A clock might be invalid for several reasons. Some clocks return this state before the first start. This state can also occur if the underlying device is lost.
The clock is running. While the clock is running, the time advances at the clock's frequency and current rate.
The clock is stopped. While stopped, the clock reports a time of 0.
The clock is paused. While paused, the clock reports the time it was paused.
Specifies how the topology loader connects a topology node. This enumeration is used with the
Media Foundation transforms (MFTs) are an evolution of the transform model first introduced with DirectX Media Objects (DMOs). This topic summarizes the main ways in which MFTs differ from DMOs. Read this topic if you are already familiar with the DMO interfaces, or if you want to convert an existing DMO into an MFT.
This topic contains the following sections:
The DMO_INPUT_STREAM_INFO_FLAGS
enumeration defines flags that describe an input stream.
The stream requires whole samples. Samples must not span multiple buffers, and buffers must not contain partial samples.
Each buffer must contain exactly one sample.
All the samples in this stream must be the same size.
The DMO performs lookahead on the incoming data, and may hold multiple input buffers for this stream.
The
interface supports quality control on a Microsoft DirectX Media Object (DMO).
A DMO exposes this interface if it can respond to late samples. When quality control is enabled, the DMO attempts to process samples on time, discarding late samples if necessary. When quality control is disabled, the DMO processes every sample. By default, quality control is disabled.
Applications use this interface to enable or disable quality control. Using quality control is appropriate when you are viewing media data in real time. If you are capturing data to a file, do not enable quality control, because the DMO might discard samples. It does not matter in file capture whether samples arrive late, and you do not want to lose the data.
To use quality control, perform the following steps:
To disable quality control, call SetStatus with no flag.
+
The DMO_SET_TYPE_FLAGS
enumeration defines flags for setting the media type on a stream.
The
Test the media type but do not set it.
Clear the media type that was set for the stream.
The DMO_VIDEO_OUTPUT_STREAM_FLAGS
enumeration defines flags that describe requested features, for video optimizations.
Requests that every output buffer passed to the DMO contain the previous data that was generated.
Contains flags that are used to configure the Microsoft DirectShow enhanced video renderer (EVR) filter.
+Enables dynamic adjustments to video quality during playback.
Specifies the requested access mode for opening a file.
+Read mode.
Write mode.
Read and write mode.
Specifies the behavior when opening a file.
+Use the default behavior.
Open the file with no system caching.
Subsequent open operations can have write access to the file.
Note: Requires Windows 7 or later.
Specifies how to open or create a file.
+Open an existing file. Fail if the file does not exist.
Create a new file. Fail if the file already exists.
Open an existing file and truncate it, so that the size is zero bytes. Fail if the file does not already exist.
If the file does not exist, create a new file. If the file exists, open it.
Create a new file. If the file exists, overwrite the file.
Defines the characteristics of a media source. These flags are retrieved by the
To skip forward or backward in a playlist, call
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Specifies options for the
The following typedef is defined for combining flags from this enumeration.
typedef UINT32 MFP_CREATION_OPTIONS;
+
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Contains flags for the
Some of these flags, marked [out], convey information back to the MFPlay player object. The application should set or clear these flags as appropriate, before returning from the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Contains flags that describe a media item.
+The following typedef is defined for combining flags from this enumeration.
typedef UINT32 MFP_MEDIAITEM_CHARACTERISTICS;
+
Not supported.
Note: Earlier versions of this documentation described the _MFT_DRAIN_TYPE enumeration incorrectly. The enumeration is not supported. For more information, see
Defines flags for the
The values in this enumeration are not bit flags, so they should not be combined with a bitwise OR. Also, the caller should test for these flags with the equality operator, not a bitwise AND:
// Correct.
+ if (Buffer.dwStatus == )
+ { ...
+ } // Incorrect.
+ if ((Buffer.dwStatus & ) != 0)
+ { ...
+ }
+
+
Indicates the status of an input stream on a Media Foundation transform (MFT).
+The input stream can receive more data at this time. To deliver more input data, call
Describes an input stream on a Media Foundation transform (MFT).
+Before the client sets the media types on the transform, the only flags guaranteed to be accurate are the
In the default processing model, an MFT holds a reference count on the sample that it receives in ProcessInput. It does not process the sample immediately inside ProcessInput. When ProcessOutput is called, the MFT produces output data and then discards the input sample. The following variations on this model are defined:
If an MFT never holds onto input samples between ProcessInput and ProcessOutput, it can set the
If an MFT holds some input samples beyond the next call to ProcessOutput, it can set the
Each media sample (
For uncompressed audio formats, this flag is always implied. (It is valid to set the flag, but not required.) An uncompressed audio frame should never span more than one media sample.
Each media sample that the client provides as input must contain exactly one unit of data, as defined for the
If this flag is present, the
An MFT that processes uncompressed audio should not set this flag. The MFT should accept buffers that contain more than a single audio frame, for efficiency.
All input samples must be the same size. The size is given in the cbSize member of the
The MFT might hold one or more input samples after
The MFT does not hold input samples after the
If this flag is absent, the MFT might hold a reference count on the samples that are passed to the ProcessInput method. The client must not re-use or delete the buffer memory until the MFT releases the sample's
If this flag is absent, it does not guarantee that the MFT holds a reference count on the input samples. It is valid for an MFT to release input samples in ProcessInput even if the MFT does not set this flag. However, setting this flag might enable the client to optimize how it re-uses buffers.
An MFT should not set this flag if it ever holds onto an input sample after returning from ProcessInput.
This input stream can be removed by calling
This input stream is optional. The transform can produce output without receiving input from this stream. The caller can deselect the stream by not setting a media type or by setting a
The MFT can perform in-place processing. In this mode, the MFT directly modifies the input buffer. When the client calls ProcessOutput, the same sample that was delivered to this stream is returned in the output stream that has a matching stream identifier. This flag implies that the MFT holds onto the input buffer, so this flag cannot be combined with the
If this flag is present, the MFT must set the
Defines flags for the
The values in this enumeration are not bit flags, so they should not be combined with a bitwise OR. Also, the caller should test for these flags with the equality operator, not a bitwise AND:
// Correct.
+ if (Buffer.dwStatus == )
+ { ...
+ } // Incorrect.
+ if ((Buffer.dwStatus & ) != 0)
+ { ...
+ }
+
+
Indicates whether a Media Foundation transform (MFT) can produce output data.
+There is a sample available for at least one output stream. To retrieve the available output samples, call
Describes an output stream on a Media Foundation transform (MFT).
+Before the client sets the media types on the MFT, the only flag guaranteed to be accurate is the
The
MFT_OUTPUT_STREAM_DISCARDABLE: The MFT discards output data only if the client calls ProcessOutput with the
MFT_OUTPUT_STREAM_LAZY_READ: If the client continues to call ProcessInput without collecting the output from this stream, the MFT eventually discards the output. If all output streams have the
If neither of these flags is set, the MFT never discards output data.
+Each media sample (
For uncompressed audio formats, this flag is always implied. (It is valid to set the flag, but not required.) An uncompressed audio frame should never span more than one media sample.
Each output sample contains exactly one unit of data, as defined for the
If this flag is present, the
An MFT that outputs uncompressed audio should not set this flag. For efficiency, it should output more than one audio frame at a time.
All output samples are the same size.
The MFT can discard the output data from this output stream, if requested by the client. To discard the output, set the
This output stream is optional. The client can deselect the stream by not setting a media type or by setting a
The MFT provides the output samples for this stream, either by allocating them internally or by operating directly on the input samples. The MFT cannot use output samples provided by the client for this stream.
If this flag is not set, the MFT must set cbSize to a nonzero value in the
The MFT can either provide output samples for this stream or it can use samples that the client allocates. This flag cannot be combined with the
If the MFT does not set this flag or the
The MFT does not require the client to process the output for this stream. If the client continues to send input data without getting the output from this stream, the MFT simply discards the previous input.
The MFT might remove this output stream during streaming. This flag typically applies to demultiplexers, where the input data contains multiple streams that can start and stop during streaming. For more information, see
Defines flags for setting or testing the media type on a Media Foundation transform (MFT).
+Test the proposed media type, but do not set it.
Specifies how the user's credentials will be used.
+The credentials will be used to authenticate with a proxy.
The credentials will be sent over the network unencrypted.
The credentials must be from a user who is currently logged on.
Describes options for the caching network credentials.
+Allow the credential cache object to save credentials in persistent storage.
Do not allow the credential cache object to cache the credentials in memory. This flag cannot be combined with the
The user allows credentials to be sent over the network in clear text.
By default,
Do not set this flag without notifying the user that credentials might be sent in clear text.
Specifies how the credential manager should obtain user credentials.
+The application implements the credential manager, which must expose the
The credential cache object sets the
The credential manager should prompt the user to provide the credentials.
Note: Requires Windows 7 or later.
The credentials are saved to persistent storage. This flag acts as a hint for the application's UI. If the application prompts the user for credentials, the UI can indicate that the credentials have already been saved.
Specifies how the default proxy locator will specify the connection settings to a proxy server. The application must set these values in the MFNETSOURCE_PROXYSETTINGS property.
+Defines statistics collected by the network source. The values in this enumeration define property identifiers (PIDs) for the MFNETSOURCE_STATISTICS property.
To retrieve statistics from the network source, call
In the descriptions that follow, the data type and value-type tag for the
Indicates the type of control protocol that is used in streaming or downloading.
+The protocol type has not yet been determined.
The protocol type is HTTP. This includes HTTPv9, WMSP, and HTTP download.
The protocol type is Real Time Streaming Protocol (RTSP).
The content is read from a file. The file might be local or on a remote share.
The protocol type is multicast.
Note: Requires Windows 7 or later.
Defines statistics collected by the network source. The values in this enumeration define property identifiers (PIDs) for the MFNETSOURCE_STATISTICS property.
To retrieve statistics from the network source, call
In the descriptions that follow, the data type and value-type tag for the
Describes the type of transport used in streaming or downloading data (TCP or UDP).
+The data transport type is UDP.
The data transport type is TCP.
Specifies whether color data includes headroom and toeroom. Headroom allows for values beyond 1.0 white ("whiter than white"), and toeroom allows for values below reference 0.0 black ("blacker than black").
+ This enumeration is used with the
For more information about these values, see the remarks for the
Unknown nominal range.
Equivalent to
Equivalent to
The normalized range [0...1] maps to [0...255] for 8-bit samples or [0...1023] for 10-bit samples.
The normalized range [0...1] maps to [16...235] for 8-bit samples or [64...940] for 10-bit samples.
The normalized range [0..1] maps to [48...208] for 8-bit samples or [64...940] for 10-bit samples.
The normalized range [0..1] maps to [64...127] for 8-bit samples or [256...508] for 10-bit samples. This range is used in the xRGB color space.
Note: Requires Windows 7 or later.
Defines the object types that are created by the source resolver.
+Media source. You can query the object for the
Byte stream. You can query the object for the
Invalid type.
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Defines event types for the
For each event type, the
In your implementation of OnMediaPlayerEvent, you must cast the pEventHeader parameter to the correct structure type. A set of macros is defined for this purpose. These macros check the value of the event type and return
Event type | Event structure Pointer cast macro |
MFP_GET_PLAY_EVENT | |
MFP_GET_PAUSE_EVENT | |
MFP_GET_STOP_EVENT | |
MFP_GET_POSITION_SET_EVENT | |
MFP_GET_RATE_SET_EVENT | |
MFP_GET_MEDIAITEM_CREATED_EVENT | |
MFP_GET_MEDIAITEM_SET_EVENT | |
MFP_GET_FRAME_STEP_EVENT | |
MFP_GET_MEDIAITEM_CLEARED_EVENT | |
MFP_GET_MF_EVENT | |
MFP_GET_ERROR_EVENT | |
MFP_GET_PLAYBACK_ENDED_EVENT | |
MFP_GET_ACQUIRE_USER_CREDENTIAL_EVENT |
?
+Specifies the object type for the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Specifies the current playback state.
+ Contains flags that define the behavior of the
Defines actions that can be performed on a stream.
+No action.
Play the stream.
Copy the stream.
Export the stream to another format.
Extract the data from the stream and pass it to the application. For example, acoustic echo cancellation requires this action.
Reserved.
Reserved.
Reserved.
Last member of the enumeration.
Contains flags for the
If the decoder sets the
Specifies how aggressively a pipeline component should drop samples.
+In drop mode, a component drops samples, more or less aggressively depending on the level of the drop mode. The specific algorithm used depends on the component. Mode 1 is the least aggressive mode, and mode 5 is the most aggressive. A component is not required to implement all five levels.
For example, suppose an encoded video stream has three B-frames between each pair of P-frames. A decoder might implement the following drop modes:
Mode 1: Drop one out of every three B frames.
Mode 2: Drop one out of every two B frames.
Mode 3: Drop all delta frames.
Modes 4 and 5: Unsupported.
The enhanced video renderer (EVR) can drop video frames before sending them to the EVR mixer.
+Normal processing of samples. Drop mode is disabled.
First drop mode (least aggressive).
Second drop mode.
Third drop mode.
Fourth drop mode.
Fifth drop mode (most aggressive, if it is supported; see Remarks).
Maximum number of drop modes. This value is not a valid flag.
Specifies the quality level for a pipeline component. The quality level determines how the component consumes or produces samples.
+Each successive quality level decreases the amount of processing that is needed, while also reducing the resulting quality of the audio or video. The specific algorithm used to reduce quality depends on the component. Mode 1 is the least aggressive mode, and mode 5 is the most aggressive. A component is not required to implement all five levels. Also, the same quality level might not be comparable between two different components.
Video decoders can often reduce quality by leaving out certain post-processing steps. The enhanced video renderer (EVR) can sometimes reduce quality by switching to a different deinterlacing mode.
+Normal quality.
One level below normal quality.
Two levels below normal quality.
Three levels below normal quality.
Four levels below normal quality.
Five levels below normal quality.
Maximum number of quality levels. This value is not a valid flag.
Specifies the direction of playback (forward or reverse).
+Forward playback.
Reverse playback.
Defines the version number for sample protection.
+No sample protection.
Version 1.
Version 2.
Version 3.
Contains flags for adding a topology to the sequencer source, or updating a topology already in the queue.
+This topology is the last topology in the sequence.
Retrieves an interface from the enhanced video renderer (EVR), or from the video mixer or video presenter.
+This method can be called only from inside the
The presenter can use this method to query the EVR and the mixer. The mixer can use it to query the EVR and the presenter. Which objects are queried depends on the caller and the service
Caller | Service | Objects queried |
---|---|---|
Presenter | MR_VIDEO_RENDER_SERVICE | EVR |
Presenter | MR_VIDEO_MIXER_SERVICE | Mixer |
Mixer | MR_VIDEO_RENDER_SERVICE | Presenter and EVR |
?
The following interfaces are available from the EVR:
IMediaEventSink. This interface is documented in the DirectShow SDK documentation.
The following interfaces are available from the mixer:
Specifies the scope of the search. Currently this parameter is ignored. Use the value
Reserved, must be zero.
Service
Interface identifier of the requested interface.
Array of interface references. If the method succeeds, each member of the array contains either a valid interface reference or
Pointer to a value that specifies the size of the ppvObjects array. The value must be at least 1. In the current implementation, there is no reason to specify an array size larger than one element. The value is not changed on output.
Defines flags for the
Defines the behavior of the
These flags are optional, and are not mutually exclusive. If no flags are set, the Media Session resolves the topology and then adds it to the queue of pending presentations.
+ Describes the current status of a call to the
Specifies how the ASF file sink should apply Windows Media DRM.
+Undefined action.
Encode the content using Windows Media DRM. Use this flag if the source content does not have DRM protection.
Transcode the content using Windows Media DRM. Use this flag if the source content has Windows Media DRM protection and you want to change the encoding parameters but not the DRM protection.
Transcrypt the content. Use this flag if the source content has DRM protection and you want to change the DRM protection; for example, if you want to convert from Windows Media DRM version 1 to Windows Media DRM version 7 or later.
Reserved. Do not use.
Contains flags for the
Contains flags that indicate the status of the
Contains values that specify common video formats.
+Reserved; do not use.
NTSC television (720 x 480i).
PAL television (720 x 576i).
DVD, NTSC standard (720 x 480).
DVD, PAL standard (720 x 576).
DV video, PAL standard.
DV video, NTSC standard.
ATSC digital television, SD (480i).
ATSC digital television, HD interlaced (1080i)
ATSC digital television, HD progressive (720p)
Defines stream marker information for the
If the Streaming Audio Renderer receives an
Contains flags for the
Defines messages for a Media Foundation transform (MFT). To send a message to an MFT, call
Some messages require specific actions from the MFT. These events have "MESSAGE" in the message name. Other messages are informational; they notify the MFT of some action by the client, and do not require any particular response from the MFT. These messages have "NOTIFY" in the message name. Except where noted, an MFT should not rely on the client sending notification messages.
+Specifies whether the topology loader enables Microsoft DirectX Video Acceleration (DXVA) in the topology.
+The value of this attribute is an
This attribute controls which MFTs receive a reference to the Direct3D device manager. To enable full video acceleration, set the value to
The
Specifies whether to load hardware-based Microsoft Media Foundation transforms (MFTs) in the topology.
+This attribute is optional. Set the attribute before resolving the topology.
Value | Description |
---|---|
The Topology Loader will load hardware-based MFTs, such as hardware decoders, when available. The Topology Loader automatically falls back to software decoding if no hardware decoder is found, or if a hardware decoder fails to connect for some reason. | |
The Topology Loader will load only software MFTs, including software decoders. |
?
The default value is
If the Topology Loader inserts a hardware MFT into the topology, it sets the
The
Defines status flags for the
Specifies the status of a topology during playback.
+ This enumeration is used with the
For a single topology, the Media Session sends these status flags in numerical order, starting with
This value is not used.
The topology is ready to start. After this status flag is received, you can use the Media Session's
The Media Session has started to read data from the media sources in the topology.
The Media Session modified the topology, because the format of a stream changed.
The media sinks have switched from the previous topology to this topology. This status value is not sent for the first topology that is played. For the first topology, the
Playback of this topology is complete. The Media Session might still use the topology internally. The Media Session does not completely release the topology until it sends the next
Defines the type of a topology node.
+Output node. Represents a media sink in the topology.
Source node. Represents a media stream in the topology.
Transform node. Represents a Media Foundation Transform (MFT) in the topology.
Tee node. A tee node does not hold a reference to an object. Instead, it represents a fork in the stream. A tee node has one input and multiple outputs, and samples from the upstream node are delivered to all of the downstream nodes.
Reserved.
Defines at what times a transform in a topology is drained.
+The transform is drained when the end of a stream is reached. It is not drained when markout is reached at the end of a segment.
The transform is drained whenever a topology ends.
The transform is never drained.
Defines when a transform in a topology is flushed.
+The transform is flushed whenever the stream changes, including seeks and new segments.
The transform is flushed when seeking is performed on the stream.
The transform is never flushed during streaming. It is flushed only when the object is released.
Defines the profile flags that are set in the
These flags are checked by
For more information about the stream settings that an application can specify, see Using the Transcode API.
+If the
The
For the video stream, the required attributes are as follows:
If these attributes are not set,
Use the
For example, assume that your input source is an MP3 file. You set the container to be MFTranscodeContainerType_ASF, you do not set any stream attributes, and you set the
Defines flags for the
Contains flags for registering and enumerating Media Foundation transforms (MFTs).
These flags are used in the following functions:
For registration, these flags describe the MFT that is being registered. Some flags do not apply in that context. For enumeration, these flags control which MFTs are selected in the enumeration. For more details about the precise meaning of these flags, see the reference topics for
For registration, the
Defines flags for processing output samples in a Media Foundation transform (MFT).
+Do not produce output for streams in which the pSample member of the
Indicates the status of a call to
If the MFT sets this flag, the ProcessOutput method returns
Call
Call
Call
Until these steps are completed, all further calls to ProcessOutput return
Indicates whether the URL is from a trusted source.
+The validity of the URL cannot be guaranteed because it is not signed. The application should warn the user.
The URL is the original one provided with the content.
The URL was originally signed and has been tampered with. The file should be considered corrupted, and the application should not navigate to the URL without issuing a strong warning to the user.
Defines flags for the
Specifies the aspect-ratio mode.
+Do not maintain the aspect ratio of the video. Stretch the video to fit the output rectangle.
Preserve the aspect ratio of the video by letterboxing or within the output rectangle.
Note: Currently the EVR ignores this flag.
Correct the aspect ratio if the physical size of the display device does not match the display resolution. For example, if the native resolution of the monitor is 1600 by 1200 (4:3) but the display resolution is 1280 by 1024 (5:4), the monitor will display non-square pixels.
If this flag is set, you must also set the
Apply a non-linear horizontal stretch if the aspect ratio of the destination rectangle does not match the aspect ratio of the source rectangle.
The non-linear stretch algorithm preserves the aspect ratio in the middle of the picture and stretches (or shrinks) the image progressively more toward the left and right. This mode is useful when viewing 4:3 content full-screen on a 16:9 display, instead of pillar-boxing. Non-linear vertical stretch is not supported, because the visual results are generally poor.
This mode may cause performance degradation.
If this flag is set, you must also set the
Contains flags that define the chroma encoding scheme for Y'Cb'Cr' data.
+These flags are used with the
For more information about these values, see the remarks for the
Unknown encoding scheme.
Chroma should be reconstructed as if the underlying video was progressive content, rather than skipping fields or applying chroma filtering to minimize artifacts from reconstructing 4:2:0 interlaced chroma.
Chroma samples are aligned horizontally with the luma samples, or with multiples of the luma samples. If this flag is not set, chroma samples are located 1/2 pixel to the right of the corresponding luma sample.
Chroma samples are aligned vertically with the luma samples, or with multiples of the luma samples. If this flag is not set, chroma samples are located 1/2 pixel down from the corresponding luma sample.
The U and V planes are aligned vertically. If this flag is not set, the chroma planes are assumed to be out of phase by 1/2 chroma sample, alternating between a line of U followed by a line of V.
Specifies the chroma encoding scheme for MPEG-2 video. Chroma samples are aligned horizontally with the luma samples, but are not aligned vertically. The U and V planes are aligned vertically.
Specifies the chroma encoding scheme for MPEG-1 video.
Specifies the chroma encoding scheme for PAL DV video.
Chroma samples are aligned vertically and horizontally with the luma samples. YUV formats such as 4:4:4, 4:2:2, and 4:1:1 are always cosited in both directions and should use this flag.
Reserved.
Reserved. This member forces the enumeration type to compile as a DWORD value.
Specifies the type of copy protection required for a video stream.
+Use these flags with the
No copy protection is required.
Analog copy protection should be applied.
Digital copy protection should be applied.
Contains flags that describe a video stream.
These flags are used in the
Developers are encouraged to use media type attributes instead of using the
Flags | Media Type Attribute |
---|---|
| |
| |
| |
| |
Use the |
?
The following flags were defined to describe per-sample interlacing information, but are obsolete:
Instead, components should use sample attributes to describe per-sample interlacing information, as described in the topic Video Interlacing.
+Specifies how a video stream is interlaced.
In the descriptions that follow, upper field refers to the field that contains the leading half scan line. Lower field refers to the field that contains the first full scan line.
+Scan lines in the lower field are 0.5 scan line lower than those in the upper field. In NTSC television, a frame consists of a lower field followed by an upper field. In PAL television, a frame consists of an upper field followed by a lower field.
The upper field is also called the even field, the top field, or field 2. The lower field is also called the odd field, the bottom field, or field 1.
If the interlace mode is
The type of interlacing is not known.
Progressive frames.
Interlaced frames. Each frame contains two fields. The field lines are interleaved, with the upper field appearing on the first line.
Interlaced frames. Each frame contains two fields. The field lines are interleaved, with the lower field appearing on the first line.
Interlaced frames. Each frame contains one field, with the upper field appearing first.
Interlaced frames. Each frame contains one field, with the lower field appearing first.
The stream contains a mix of interlaced and progressive modes.
Reserved.
Reserved. This member forces the enumeration type to compile as a DWORD value.
Describes the optimal lighting for viewing a particular set of video content.
+This enumeration is used with the
The optimal lighting is unknown.
Bright lighting; for example, outdoors.
Medium brightness; for example, normal office lighting.
Dim; for example, a living room with a television and additional low lighting.
Dark; for example, a movie theater.
Reserved.
Reserved. This member forces the enumeration type to compile as a DWORD value.
Contains flags that are used to configure how the enhanced video renderer (EVR) performs deinterlacing.
+To set these flags, call the
These flags control some trade-offs between video quality and rendering speed. The constants named "MFVideoMixPrefs_Allow..." enable lower-quality settings, but only when the quality manager requests a drop in quality. The constants named "MFVideoMixPrefs_Force..." force the EVR to use lower-quality settings regardless of what the quality manager requests. (For more information about the quality manager, see
Currently two lower-quality modes are supported, as described in the following table. Either is preferable to dropping an entire frame.
Mode | Description |
---|---|
Half interlace | The EVR's video mixer skips the second field (relative to temporal order) of each interlaced frame. The video mixer still deinterlaces the first field, and this operation typically interpolates data from the second field. The overall frame rate is unaffected. |
Bob deinterlacing | The video mixer uses bob deinterlacing, even if the driver supports a higher-quality deinterlacing algorithm. |
?
+Force the EVR to skip the second field (in temporal order) of every interlaced frame.
If the EVR is falling behind, allow it to skip the second field (in temporal order) of every interlaced frame.
If the EVR is falling behind, allow it to use bob deinterlacing, even if the driver supports a higher-quality deinterlacing mode.
Force the EVR to use bob deinterlacing, even if the driver supports a higher-quality mode.
Specifies whether to pad a video image so that it fits within a specified aspect ratio.
+Use these flags with the
Do not pad the image.
Pad the image so that it can be displayed in a 4:3 area.
Pad the image so that it can be displayed in a 16:9 area.
Specifies the color primaries of a video source. The color primaries define how to convert colors from RGB color space to CIE XYZ color space.
+This enumeration is used with the
For more information about these values, see the remarks for the
The color primaries are unknown.
Reserved.
ITU-R BT.709. Also used for sRGB and scRGB.
ITU-R BT.470-4 System M (NTSC).
ITU-R BT.470-4 System B,G (NTSC).
SMPTE 170M.
SMPTE 240M.
EBU 3213.
SMPTE C (SMPTE RP 145).
Reserved.
Reserved. This member forces the enumeration type to compile as a DWORD value.
Contains flags that define how the enhanced video renderer (EVR) displays the video.
+To set these flags, call
The flags named "MFVideoRenderPrefs_Allow..." cause the EVR to use lower-quality settings only when requested by the quality manager. (For more information, see
If this flag is set, the EVR does not draw the border color. By default, the EVR draws a border on areas of the destination rectangle that have no video. See
If this flag is set, the EVR does not clip the video when the video window straddles two monitors. By default, if the video window straddles two monitors, the EVR clips the video to the monitor that contains the largest area of video.
Note: Requires Windows 7 or later.
Allow the EVR to limit its output to match GPU bandwidth.
Note: Requires Windows 7 or later.
Force the EVR to limit its output to match GPU bandwidth.
Note: Requires Windows 7 or later.
Force the EVR to batch Direct3D Present calls. This optimization enables the system to enter to idle states more frequently, which can reduce power consumption.
Note: Requires Windows 7 or later.
Allow the EVR to batch Direct3D Present calls.
Note: Requires Windows 7 or later.
Force the EVR to mix the video inside a rectangle that is smaller than the output rectangle. The EVR will then scale the result to the correct output size. The effective resolution will be lower if this setting is applied.
Note: Requires Windows 7 or later.
Allow the EVR to mix the video inside a rectangle that is smaller than the output rectangle.
Note: Requires Windows 7 or later.
Prevent the EVR from repainting the video window after a stop command. By default, the EVR repaints the video window black after a stop command.
Describes the intended aspect ratio for a video stream.
+Use these flags with the
The aspect ratio is unknown.
The source is 16:9 content encoded within a 4:3 area.
The source is 2.35:1 content encoded within a 16:9 or 4:3 area.
Specifies the conversion function from linear RGB to non-linear RGB (R'G'B').
+ These flags are used with the
For more information about these values, see the remarks for the
Unknown. Treat as
Linear RGB (gamma = 1.0).
True 1.8 gamma, L' = L^1/1.8.
True 2.0 gamma, L' = L^1/2.0.
True 2.2 gamma, L' = L^1/2.2. This transfer function is used in ITU-R BT.470-2 System M (NTSC).
ITU-R BT.709 transfer function. Gamma 2.2 curve with a linear segment in the lower range. This transfer function is used in BT.709, BT.601, SMPTE 296M, SMPTE 170M, BT.470, and SMPTE 274M. In addition BT-1361 uses this function within the range [0...1].
SMPTE 240M transfer function. Gamma 2.2 curve with a linear segment in the lower range.
sRGB transfer function. Gamma 2.4 curve with a linear segment in the lower range.
True 2.8 gamma. L' = L^1/2.8. This transfer function is used in ITU-R BT.470-2 System B, G (PAL).
Logarithmic transfer (100:1 range); for example, as used in H.264 video.
Note: Requires Windows 7 or later.
Logarithmic transfer (316.22777:1 range); for example, as used in H.264 video.
Note: Requires Windows 7 or later.
Symmetric ITU-R BT.709.
Note: Requires Windows 7 or later.
Reserved.
Reserved. This member forces the enumeration type to compile as a DWORD value.
Describes the conversion matrices between Y'PbPr (component video) and studio R'G'B'.
+This enumeration is used with the
For more information about these values, see the remarks for the
Unknown transfer matrix. Treat as
ITU-R BT.709 transfer matrix.
ITU-R BT.601 transfer matrix. Also used for SMPTE 170 and ITU-R BT.470-2 System B,G.
SMPTE 240M transfer matrix.
Reserved.
Reserved. This member forces the enumeration type to compile as a DWORD value.
Defines messages for an enhanced video renderer (EVR) presenter. This enumeration is used with the
Contains flags that specify how to convert an audio media type.
+Convert the media type to a
Convert the media type to a
The following constants identify the standard Media Foundation work queues.
Applications should use MFASYNC_CALLBACK_QUEUE_MULTITHREADED or use a work queue obtained from MFLockSharedWorkQueue if they want to control the execution priority. Note that the default platform work queue priorities can dynamically change when an application calls RegisterPlatformWithMMCSS. For more information about work queues, see Work Queues.
+
Creates an instance of the enhanced video renderer (EVR) media sink.
+Interface identifier (IID) of the requested interface on the EVR.
Receives a reference to the requested interface. The caller must release the interface.
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This function creates the Media Foundation version of the EVR. To create the DirectShow EVR filter, call CoCreateInstance with the class identifier CLSID_EnhancedVideoRenderer.
+Creates a media sample that manages a Direct3D surface.
+ A reference to the
Receives a reference to the sample's
If this function succeeds, it returns
The media sample created by this function exposes the following interfaces in addition to
If pUnkSurface is non-
Alternatively, you can set pUnkSurface to
Creates the default video presenter for the enhanced video renderer (EVR).
+Pointer to the owner of the object. If the object is aggregated, pass a reference to the aggregating object's
Interface identifier (IID) of the video device interface that will be used for processing the video. Currently the only supported value is IID_IDirect3DDevice9.
IID of the requested interface on the video presenter. The video presenter exposes the
Receives a reference to the requested interface on the video presenter. The caller must release the interface.
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Creates the default video mixer for the enhanced video renderer (EVR).
+Pointer to the owner of this object. If the object is aggregated, pass a reference to the aggregating object's
Interface identifier (IID) of the video device interface that will be used for processing the video. Currently the only supported value is IID_IDirect3DDevice9.
IID of the requested interface on the video mixer. The video mixer exposes the
Receives a reference to the requested interface. The caller must release the interface.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates the default video mixer and video presenter for the enhanced video renderer (EVR).
+Pointer to the owner of the video mixer. If the mixer is aggregated, pass a reference to the aggregating object's
Pointer to the owner of the video presenter. If the presenter is aggregated, pass a reference to the aggregating object's
Interface identifier (IID) of the requested interface on the video mixer. The video mixer exposes the
Receives a reference to the requested interface on the video mixer. The caller must release the interface.
IID of the requested interface on the video presenter. The video presenter exposes the
Receives a reference to the requested interface on the video presenter. The caller must release the interface.
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Creates a new instance of the MFPlay player object.
+If this function succeeds, it returns
Before calling this function, call CoInitialize(Ex) from the same thread to initialize the COM library.
Internally,
Note: If you use other Media Foundation APIs outside the lifetime of the player object, then your application should call
Creates the ASF profile object.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates the ASF Splitter.
+The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an activation object that can be used to create the ASF media sink.
+Null-terminated wide-character string that contains the output file name.
A reference to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an activation object for the ASF streaming sink.
The ASF streaming sink enables an application to write streaming Advanced Systems Format (ASF) packets to an HTTP byte stream.
+A reference to a byte stream object in which the ASF media sink writes the streamed content.
Receives a reference to the
If this function succeeds, it returns
To create the ASF streaming sink in another process, call
An application can get a reference to the ASF ContentInfo Object by calling IUnknown::QueryInterface on the media sink object received in the ppIMediaSink parameter. The ContentInfo object is used to set the encoder configuration settings, provide stream properties supplied by an ASF profile, and add metadata information. These configuration settings populate the various ASF header objects of the encoded ASF file. For more information, see Setting Properties in the ContentInfo Object.
+
Creates a presentation descriptor from an ASF profile object.
+Pointer to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an ASF profile object from a presentation descriptor.
+Pointer to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an activation object that can be used to create a Windows Media Video (WMV) encoder.
+A reference to the
A reference to the
Receives a reference to the
If this function succeeds, it returns
Creates the ASF Multiplexer.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates a byte stream to access the index in an ASF stream.
+Pointer to the
Byte offset of the index within the ASF stream. To get this value, call
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The call succeeded. |
| The offset specified in cbIndexStartOffset is invalid. |
?
Creates the ASF stream selector.
+Pointer to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an activation object for the ASF streaming sink.
The ASF streaming sink enables an application to write streaming Advanced Systems Format (ASF) packets to an HTTP byte stream. The activation object can be used to create the ASF streaming sink in another process.
+A reference to the
A reference to an ASF ContentInfo Object that contains the properties that describe the ASF content. These settings can contain stream settings, encoding properties, and metadata. For more information about these properties, see Setting Properties in the ContentInfo Object.
Receives a reference to the
If this function succeeds, it returns
Starting in Windows 7, Media Foundation provides an ASF streaming sink that writes the content in a live streaming scenario. This function should be used in secure transcode scenarios where this media sink needs to be created and configured in the remote process. Like the ASF file sink, the new media sink performs ASF related tasks such as writing the ASF header, generating data packets (muxing). The content is written to a caller-implemented byte stream such as an HTTP byte stream. The caller must also provide an activation object that media sink can use to create the byte stream remotely.
In addition, it performs transcryption for streaming protected content. It hosts the Windows Media Digital Rights Management (DRM) for Network Devices Output Trust Authority (OTA) that handles the license request and response. For more information, see
The new media sink does not perform any time adjustments. If the clock seeks, the timestamps are not changed.
+Creates an activation object that can be used to create a Windows Media Audio (WMA) encoder.
+ A reference to the
A reference to the
Receives a reference to the
If this function succeeds, it returns
Creates the ASF Indexer object.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates the ASF Header Object object.
+The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates the ASF media sink.
+Pointer to a byte stream that will be used to write the ASF stream.
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Initializes Microsoft Media Foundation.
+Version number. Use the value
This parameter is optional when using C++ but required in C. The value must be one of the following flags:
Value | Meaning |
---|---|
| Do not initialize the sockets library. |
| Equivalent to MFSTARTUP_NOSOCKET. |
| Initialize the entire Media Foundation platform. This is the default value when dwFlags is not specified. |
?
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Version parameter requires a newer version of Media Foundation than the version that is running. |
| The Media Foundation platform is disabled because the system was started in "Safe Mode" (fail-safe boot). |
| Media Foundation is not implemented on the system. This error can occur if the media components are not present (See KB2703761 for more info). |
?
An application must call this function before using Media Foundation. Before your application quits, call
Do not call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Unregisters a Media Foundation transform (MFT) from the caller's process.
+The class identifier (CLSID) of the MFT.
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT specified by the clsidMFT parameter was not registered in this process. |
?
Use this function to unregister a local MFT that was previously registered through the
[This API is not supported and may be altered or unavailable in the future.]
Creates an audio media type from a
Pointer to a
Receives a reference to the
If this function succeeds, it returns
The
Alternatively, you can call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Cancels an asynchronous request to create a byte stream from a file.
+A reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
You can use this function to cancel a previous call to
Converts a video frame rate into a frame duration.
+The numerator of the frame rate.
The denominator of the frame rate.
Receives the average duration of a video frame, in 100-nanosecond units.
If this function succeeds, it returns
This function is useful for calculating time stamps on a sample, given the frame rate.
Also, average time per frame is used in the older
For certain common frame rates, the function gets the frame duration from a look-up table:
Frames per second (floating point) | Frames per second (fractional) | Average time per frame |
---|---|---|
59.94 | 60000/1001 | 166833 |
29.97 | 30000/1001 | 333667 |
23.976 | 24000/1001 | 417188 |
60 | 60/1 | 166667 |
30 | 30/1 | 333333 |
50 | 50/1 | 200000 |
25 | 25/1 | 400000 |
24 | 24/1 | 416667 |
?
Most video content uses one of the frame rates listed here. For other frame rates, the function calculates the duration.
+This function is not implemented.
+Reserved.
Reserved.
Reserved.
Reserved.
Reserved.
Reserved.
Reserved.
Reserved.
Reserved.
Returns E_FAIL.
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+ Attempts to cancel an asynchronous operation that was scheduled with
If this function succeeds, it returns
Because work items are asynchronous, the work-item callback might still be invoked after
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+Converts an array of 16-bit floating-point numbers into an array of 32-bit floating-point numbers.
+Pointer to an array of float values. The array must contain at least dwCount elements.
Pointer to an array of 16-bit floating-point values, typed as WORD values. The array must contain at least dwCount elements.
Number of elements in the pSrc array to convert.
If this function succeeds, it returns
The function converts dwCount values in the pSrc array and writes them into the pDest array.
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Adds information about a Media Foundation transform (MFT) to the registry.
Applications can enumerate the MFT by calling the
If this function succeeds, it returns
The registry entries created by this function are read by the following functions:
Function | Description |
---|---|
| Enumerates MFTs by media type and category. |
| Extended version of |
| Looks up an MFT by CLSID and retrieves the registry information. |
?
This function does not register the CLSID of the MFT for the CoCreateInstance or CoGetClassObject functions.
To remove the entries from the registry, call
The formats given in the pInputTypes and pOutputTypes parameters are intended to help applications search for MFTs by format. Applications can use the
It is recommended to specify at least one input type in pInputTypes and one output type in the pOutputTypes parameter. Otherwise, the MFT might be skipped in the enumeration.
On 64-bit Windows, the 32-bit version of this function registers the MFT in the 32-bit node of the registry. For more information, see 32-bit and 64-bit Application Data in the Registry.
+Creates an empty attribute store.
+Receives a reference to the
The initial number of elements allocated for the attribute store. The attribute store grows as needed.
If this function succeeds, it returns
Attributes are used throughout Microsoft Media Foundation to configure objects, describe media formats, query object properties, and other purposes. For more information, see Attributes in Media Foundation.
For a complete list of all the defined attribute GUIDs in Media Foundation, see Media Foundation Attributes.
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Converts a Media Foundation audio media type to a
Pointer to the
Receives a reference to the
Receives the size of the
Contains a flag from the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
If the wFormatTag member of the returned structure is
Windows Phone 8.1: This API is supported.
+[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Returns the FOURCC or
Returns a FOURCC or
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Puts an asynchronous operation on a work queue.
+The identifier for the work queue. This value can specify one of the standard Media Foundation work queues, or a work queue created by the application. For list of standard Media Foundation work queues, see Work Queue Identifiers. To create a new work queue, call
A reference to the
Returns an
Return code | Description |
---|---|
| Success. |
| Invalid work queue identifier. For more information, see |
| The |
?
To invoke the work-item, this function passes pResult to the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Creates an
If this function succeeds, it returns
Enumerates Media Foundation transforms (MFTs) in the registry.
Starting in Windows 7, applications should use the
If this function succeeds, it returns
This function returns a list of all the MFTs in the specified category that match the search criteria given by the pInputType, pOutputType, and pAttributes parameters. Any of those parameters can be
If no MFTs match the criteria, the method succeeds but returns the value zero in pcMFTs.
+
Completes an asynchronous request to associate a work queue with a Multimedia Class Scheduler Service (MMCSS) task.
+Pointer to the
The unique task identifier.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Call this function when the
To unregister the work queue from the MMCSS class, call
Retrieves a media type that was wrapped in another media type by the
If this function succeeds, it returns
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+
Compares a full media type to a partial media type.
+Pointer to the
Pointer to the
If the full media type is compatible with the partial media type, the function returns TRUE. Otherwise, the function returns
A pipeline component can return a partial media type to describe a range of possible formats the component might accept. A partial media type has at least a major type
This function returns TRUE if the following conditions are both true:
Otherwise, the function returns
Converts the contents of an attribute store to a byte array.
+Pointer to the
Pointer to an array that receives the attribute data.
Size of the pBuf array, in bytes. To get the required size of the buffer, call
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The buffer given in pBuf is too small. |
?
The function skips any attributes with
To convert the byte array back into an attribute store, call
To write an attribute store to a stream, call the
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Copies an image or image plane from one buffer to another.
+Pointer to the start of the first row of pixels in the destination buffer.
Stride of the destination buffer, in bytes.
Pointer to the start of the first row of pixels in the source image.
Stride of the source image, in bytes.
Width of the image, in bytes.
Number of rows of pixels to copy.
If this function succeeds, it returns
This function copies a single plane of the image. For planar YUV formats, you must call the function once for each plane. In this case, pDest and pSrc must point to the start of each plane.
This function is optimized if the MMX, SSE, or SSE2 instruction sets are available on the processor. The function performs a non-temporal store (the data is written to memory directly without polluting the cache).
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
Windows Phone 8.1: This API is supported.
+Registers a Media Foundation transform (MFT) in the caller's process.
+A reference to the
A
A wide-character null-terminated string that contains the friendly name of the MFT.
A bitwise OR of zero or more flags from the _MFT_ENUM_FLAG enumeration.
The number of elements in the pInputTypes array.
A reference to an array of
The number of elements in the pOutputTypes array.
A reference to an array of
If this function succeeds, it returns
The primary purpose of this function is to make an MFT available for automatic topology resolution without making the MFT available to other processes or applications.
After you call this function, the MFT can be enumerated by calling the
The pClassFactory parameter specifies a class factory object that creates the MFT. The class factory's IClassFactory::CreateInstance method must return an object that supports the
Note: The
To unregister the MFT from the current process, call
If you need to register an MFT in the Protected Media Path (PMP) process, use the
[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Creates a video media type from an
If this function succeeds, it returns
Instead of using the
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Gets information from the registry about a Media Foundation transform (MFT).
+The CLSID of the MFT.
Receives a reference to a wide-character string containing the friendly name of the MFT. The caller must free the string by calling CoTaskMemFree. This parameter can be
Receives a reference to an array of
Receives the number of elements in the ppInputTypes array. If ppInputTypes is
Receives a reference to an array of
Receives the number of elements in the ppOutputType array. If ppOutputTypes is
Receives a reference to the
This parameter can be
If this function succeeds, it returns
Completes an asynchronous request to unregister a work queue from a Multimedia Class Scheduler Service (MMCSS) task.
+Pointer to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Call this function when the
Unregisters a work queue from a Multimedia Class Scheduler Service (MMCSS) task.
+The identifier of the work queue. For private work queues, the identifier is returned by the
Pointer to the
Pointer to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function unregisters a work queue that was associated with an MMCSS class through the
This function is asynchronous. When the operation completes, the callback object's
Begins an asynchronous request to create a byte stream from a file.
+The requested access mode, specified as a member of the
The behavior of the function if the file already exists or does not exist, specified as a member of the
Bitwise OR of values from the
Pointer to a null-terminated string containing the file name.
Pointer to the
Pointer to the
Receives an
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
When the request is completed, the callback object's
Queries whether a FOURCC code or
FOURCC code or
The function returns one of the following values.
Return code | Description |
---|---|
| The value specifies a YUV format. |
| The value does not specify a recognized YUV format. |
?
This function checks whether Format specifies a YUV format. Not every YUV format is recognized by this function. However, if a YUV format is not recognized by this function, it is probably not supported for video rendering or DirectX video acceleration (DXVA).
+[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Sets the extended color information in a
If this function succeeds, it returns
This function sets the following fields in the
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+
Calculates the frame rate, in frames per second, from the average duration of a video frame.
+The average duration of a video frame, in 100-nanosecond units.
Receives the numerator of the frame rate.
Receives the denominator of the frame rate.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Average time per frame is used in the older
This function uses a look-up table for certain common durations. The table is listed in the Remarks section for the
Completes an asynchronous request to create a byte stream from a file.
+Pointer to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Call this function when the
Locks a work queue.
+The identifier for the work queue. The identifier is returned by the
If this function succeeds, it returns
This function prevents the
Call
Note: The
Windows Phone 8.1: This API is supported.
+Creates a video media type from a
If the function succeeds, it returns
Creates an asynchronous result object. Use this function if you are implementing an asynchronous method.
+Pointer to the object stored in the asynchronous result. This reference is returned by the
Pointer to the
Pointer to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
To invoke the callback specified in pCallback, call the
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+ Initializes a media type from a
Pointer to the
Pointer to a
Size of the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Windows Phone 8.1: This API is supported.
+[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Converts the extended color information from an
If this function succeeds, it returns
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Creates a byte stream that is backed by a temporary local file.
+ The requested access mode, specified as a member of the
The behavior of the function if the file already exists or does not exist, specified as a member of the
Bitwise OR of values from the
Receives a reference to the
If this function succeeds, it returns
This function creates a file in the system temporary folder, and then returns a byte stream object for that file. The full path name of the file is stored in the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the size of the buffer needed for the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Use this function to find the size of the array that is needed for the
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Gets a reference to the Microsoft Media Foundation plug-in manager.
+Receives a reference to the
If this function succeeds, it returns
Retrieves the timer interval for the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates a new work queue.
+Receives an identifier for the work queue.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The application exceeded the maximum number of work queues. |
| The application did not call |
?
When you are done using the work queue, call
[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Initializes an
If this function succeeds, it returns
This function fills in some reasonable default values for the specified RGB format.
Developers are encouraged to use media type attributes instead of using the
In general, you should avoid calling this function. If you know all of the format details, you can fill in the
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Converts an array of 32-bit floating-point numbers into an array of 16-bit floating-point numbers.
+Pointer to an array of 16-bit floating-point values, typed as WORD values. The array must contain at least dwCount elements.
Pointer to an array of float values. The array must contain at least dwCount elements.
Number of elements in the pSrc array to convert.
If this function succeeds, it returns
The function converts the values in the pSrc array and writes them into the pDest array.
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+
Cancels a callback function that was set by the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
The callback is dispatched on another thread, and this function does not attempt to synchronize with the callback thread. Therefore, it is possible for the callback to be invoked after this function returns.
+
Sets a callback function to be called at a fixed interval.
+Pointer to the callback function, of type MFPERIODICCALLBACK.
Pointer to a caller-provided object that implements
Receives a key that can be used to cancel the callback. To cancel the callback, call
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
To get the timer interval for the periodic callback, call
Initializes the contents of an attribute store from a byte array.
+Pointer to the
Pointer to the array that contains the initialization data.
Size of the pBuf array, in bytes.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The buffer is not valid. |
?
Use this function to deserialize an attribute store that was serialized with the
This function deletes any attributes that were previously stored in pAttributes.
Windows Phone 8.1: This API is supported.
+Puts an asynchronous operation on a work queue.
+ The identifier for the work queue. This value can specify one of the standard Media Foundation work queues, or a work queue created by the application. For list of standard Media Foundation work queues, see Work Queue Identifiers. To create a new work queue, call
A reference to the
A reference to the
Returns an
Return code | Description |
---|---|
| Success. |
| Invalid work queue. For more information, see |
| The |
?
This function creates an asynchronous result object and puts the result object on the work queue. The work queue calls the
Unregisters a Media Foundation transform (MFT).
+The CLSID of the MFT.
If this function succeeds, it returns
This function removes the registry entries created by the
It is safe to call
Retrieves the Multimedia Class Scheduler Service (MMCSS) task identifier currently associated with this work queue.
+Identifier for the work queue. The identifier is retrieved by the
Receives the task identifier.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
To associate a work queue with an MMCSS task, call
Converts a Media Foundation media buffer into a buffer that is compatible with DirectX Media Objects (DMOs).
+Pointer to the
Pointer to the
Offset in bytes from the start of the Media Foundation buffer. This offset defines where the DMO buffer starts. If this parameter is zero, the DMO buffer starts at the beginning of the Media Foundation buffer.
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| Invalid argument. The pIMFMediaBuffer parameter must not be |
?
The DMO buffer created by this function also exposes the
If the Media Foundation buffer specified by pIMFMediaBuffer exposes the
Creates a media buffer that wraps an existing media buffer. The new media buffer points to the same memory as the original media buffer, or to an offset from the start of the memory.
+A reference to the
The start of the new buffer, as an offset in bytes from the start of the original buffer.
The size of the new buffer. The value of cbOffset + dwLength must be less than or equal to the size of valid data in the original buffer. (The size of the valid data is returned by the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The requested offset or the requested length is not valid. |
?
The maximum size of the wrapper buffer is limited to the size of the valid data in the original buffer. This might be less than the allocated size of the original buffer. To set the size of the valid data, call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Unlocks the Media Foundation platform after it was locked by a call to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
The application must call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Retrieves the image size for a video format. Given a
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The |
?
Before calling this function, you must set at least the following members of the
Also, if biCompression is BI_BITFIELDS, the
This function fails if the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Creates an event queue.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function creates a helper object that you can use to implement the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+Creates a media buffer object that manages a Direct3D 9 surface.
+Identifies the type of Direct3D 9 surface. Currently this value must be IID_IDirect3DSurface9.
A reference to the
If TRUE, the buffer's
For more information about top-down versus bottom-up images, see Image Stride.
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
?
This function creates a media buffer object that holds a reference to the Direct3D surface specified in punkSurface. Locking the buffer gives the caller access to the surface memory. When the buffer object is destroyed, it releases the surface. For more information about media buffers, see Media Buffers.
Note: This function does not allocate the Direct3D surface itself.
The buffer object created by this function also exposes the
This function does not support DXGI surfaces.
+Associates a work queue with a Multimedia Class Scheduler Service (MMCSS) task.
+The identifier of the work queue. For private work queues, the identifier is returned by the
The name of the MMCSS task.For more information, see Multimedia Class Scheduler Service.
The unique task identifier. To obtain a new task identifier, set this value to zero.
A reference to the
A reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function is asynchronous. When the operation completes, the callback object's
To unregister the work queue from the MMCSS task, call
Creates a partial video media type with a specified subtype.
+ Pointer to a
Receives a reference to the
If this function succeeds, it returns
This function creates a media type and sets the major type equal to
You can get the same result with the following steps:
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+
Creates an empty media sample.
+Receives a reference to the
Initially the sample does not contain any media buffers.
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Creates a Media Foundation media type from another format representation.
+Description | |
---|---|
AM_MEDIA_TYPE_REPRESENTATION | Convert a DirectShow |
?
Pointer to a buffer that contains the format representation to convert. The layout of the buffer depends on the value of guidRepresentation.
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The |
?
If the original format is a DirectShow audio media type, and the format type is not recognized, the function sets the following attributes on the converted media type.
Attribute | Description |
---|---|
| Contains the format type |
| Contains the format block. |
?
+Calculates the minimum surface stride for a video format.
+FOURCC code or
Width of the image, in pixels.
Receives the minimum surface stride, in bytes.
If this function succeeds, it returns
This function calculates the minimum stride needed to hold the image in memory. Use this function if you are allocating buffers in system memory. Surfaces allocated in video memory might require a larger stride, depending on the graphics card.
If you are working with a DirectX surface buffer, use the
For planar YUV formats, this function returns the stride for the Y plane. Depending on the format, the chroma planes might have a different stride.
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Registers a Media Foundation transform (MFT) in the caller's process.
+The class identifier (CLSID) of the MFT.
A
A wide-character null-terminated string that contains the friendly name of the MFT.
A bitwise OR of zero or more flags from the _MFT_ENUM_FLAG enumeration.
The number of elements in the pInputTypes array.
A reference to an array of
The number of elements in the pOutputTypes array.
A reference to an array of
If this function succeeds, it returns
The primary purpose of this function is to make an MFT available for automatic topology resolution without making the MFT available to other processes or applications.
After you call this function, the MFT can be enumerated by calling the
To unregister the MFT from the current process, call
If you need to register an MFT in the Protected Media Path (PMP) process, use the
Frees a block of memory that was allocated by calling the
Allocates a block of memory.
+Number of bytes to allocate.
Zero or more flags. For a list of valid flags, see HeapAlloc in the Windows SDK documentation.
Reserved. Set to
Reserved. Set to zero.
Reserved. Set to eAllocationTypeIgnore.
If the function succeeds, it returns a reference to the allocated memory block. If the function fails, it returns
In the current version of Media Foundation, this function is equivalent to calling the HeapAlloc function and specifying the heap of the calling process.
To free the allocated memory, call
Creates an empty collection object.
+Receives a reference to the collection object's
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Schedules an asynchronous operation to be completed after a specified interval.
+Pointer to the
Pointer to the
Time-out interval, in milliseconds. Set this parameter to a negative value. The callback is invoked after -Timeout milliseconds. For example, if Timeout is -5000, the callback is invoked after 5000 milliseconds.
Receives a key that can be used to cancel the timer. To cancel the timer, call
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function creates an asynchronous result object. When the timer interval elapses, the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the Multimedia Class Scheduler Service (MMCSS) class currently associated with this work queue.
+Identifier for the work queue. The identifier is retrieved by the
Pointer to a buffer that receives the name of the MMCSS class. This parameter can be
On input, specifies the size of the pwszClass buffer, in characters. On output, receives the required size of the buffer, in characters. The size includes the terminating null character.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The pwszClass buffer is too small to receive the task name. |
?
If the work queue is not associated with an MMCSS task, the function retrieves an empty string.
To associate a work queue with an MMCSS task, call
Retrieves the image size, in bytes, for an uncompressed video format.
+FOURCC code or
Width of the image, in pixels.
Height of the image, in pixels.
Receives the size of one frame, in bytes. If the format is compressed or is not recognized, this value is zero.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function is equivalent to the
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll.
+Invokes a callback method to complete an asynchronous operation.
+Pointer to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| Invalid work queue. For more information, see |
| The |
?
If you are implementing an asynchronous method, use this function to invoke the caller's
The callback is invoked from a Media Foundation work queue. For more information, see Writing an Asynchronous Method.
The
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Shuts down the Microsoft Media Foundation platform. Call this function once for every call to
If this function succeeds, it returns
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Retrieves the image size, in bytes, for an uncompressed video format.
+Media subtype for the video format. For a list of subtypes, see Media Type GUIDs.
Width of the image, in pixels.
Height of the image, in pixels.
Receives the size of each frame, in bytes. If the format is compressed or is not recognized, the value is zero.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Gets the merit value of a hardware codec.
+A reference to the
The size, in bytes, of the verifier array.
The address of a buffer that contains one of the following:
Receives the merit value.
If this function succeeds, it returns
The function fails if the MFT does not represent a hardware device with a valid Output Protection Manager (OPM) certificate.
+Creates a media type that wraps another media type.
+ A reference to the
A
A
Applications can define custom subtype GUIDs.
Receives a reference to the
If this function succeeds, it returns
The original media type (pOrig) is stored in the new media type under the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+Validates the size of a buffer for a video format block.
+Pointer to a buffer that contains the format block.
Size of the pBlock buffer, in bytes.
The function returns an
Return code | Description |
---|---|
| The buffer that contains the format block is large enough. |
| The buffer that contains the format block is too small, or the format block is not valid. |
| This function does not support the specified format type. |
?
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Schedules an asynchronous operation to be completed after a specified interval.
+Pointer to the
Time-out interval, in milliseconds. Set this parameter to a negative value. The callback is invoked after -Timeout milliseconds. For example, if Timeout is -5000, the callback is invoked after 5000 milliseconds.
Receives a key that can be used to cancel the timer. To cancel the timer, call
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
When the timer interval elapses, the timer calls
Allocates system memory with a specified byte alignment and creates a media buffer to manage the memory.
+Size of the buffer, in bytes.
Specifies the memory alignment for the buffer. Use one of the following constants.
Value | Meaning |
---|---|
| Align to 1 byte. |
| Align to 2 bytes. |
| Align to 4 bytes. |
| Align to 8 bytes. |
| Align to 16 bytes. |
| Align to 32 bytes. |
| Align to 64 bytes. |
| Align to 128 bytes. |
| Align to 256 bytes. |
| Align to 512 bytes. |
?
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
When the media buffer object is destroyed, it releases the allocated memory.
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Unlocks a work queue.
+Identifier for the work queue to be unlocked. The identifier is returned by the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
The application must call
Windows Phone 8.1: This API is supported.
+
Blocks the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function prevents work queue threads from being shut down when
This function holds a lock on the Media Foundation platform. To unlock the platform, call
The
The default implementation of the
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Allocates system memory and creates a media buffer to manage it.
+Size of the buffer, in bytes.
Receives a reference to the
The function allocates a buffer with a 1-byte memory alignment. To allocate a buffer that is aligned to a larger memory boundary, call
When the media buffer object is destroyed, it releases the allocated memory.
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Unregisters one or more Media Foundation transforms (MFTs) from the caller's process.
+A reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT specified by the pClassFactory parameter was not registered in this process. |
?
Use this function to unregister a local MFT that was previously registered through the
If the pClassFactory parameter is
Creates a media event object.
+The event type. See
The extended type. See
The event status. See
The value associated with the event, if any. See
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Initializes an
If this function succeeds, it returns
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Calculates ((a * b) + d) / c, where each term is a 64-bit signed value.
+A multiplier.
Another multiplier.
The divisor.
The rounding factor.
Returns the result of the calculation. If numeric overflow occurs, the function returns _I64_MAX (positive overflow) or _I64_MIN (negative overflow). If Mfplat.dll cannot be loaded, the function returns _I64_MAX.
Note: A previous version of this topic described the parameters incorrectly. The divisor is c and the rounding factor is d.
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Creates a byte stream from a file.
+ The requested access mode, specified as a member of the
The behavior of the function if the file already exists or does not exist, specified as a member of the
Bitwise OR of values from the
Pointer to a null-terminated string that contains the file name.
Receives a reference to the
If this function succeeds, it returns
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
[This API is not supported and may be altered or unavailable in the future. Applications should avoid using the
Initializes a media type from an
If this function succeeds, it returns
Creates an empty media type.
+ Receives a reference to the
If this function succeeds, it returns
The media type is created without any attributes.
Windows Phone 8.1: This API is supported.
+Creates a new work queue. This function extends the capabilities of the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| The application exceeded the maximum number of work queues. |
| Invalid argument. |
| The application did not call |
?
When you are done using the work queue, call
The
This function is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
+Gets a list of Microsoft Media Foundation transforms (MFTs) that match specified search criteria. This function extends the
If this function succeeds, it returns
The Flags parameter controls which MFTs are enumerated, and the order in which they are returned. The flags for this parameter fall into several groups.
The first set of flags specifies how an MFT processes data.
Flag | Description |
---|---|
| The MFT performs synchronous data processing in software. This is the original MFT processing model, and is compatible with Windows Vista. |
| The MFT performs asynchronous data processing in software. This processing model requires Windows 7. For more information, see Asynchronous MFTs. |
| The MFT performs hardware-based data processing, using either the AVStream driver or a GPU-based proxy MFT. MFTs in this category always process data asynchronously. For more information, see Hardware MFTs. |
?
Every MFT falls into exactly one of these categories. To enumerate a category, set the corresponding flag in the Flags parameter. You can combine these flags to enumerate more than one category. If none of these flags is specified, the default category is synchronous MFTs (
Next, the following flags include MFTs that are otherwise excluded from the results. By default, MFTs that match these criteria are excluded from the results. Use any of these flags to include them.
Flag | Description |
---|---|
| Include MFTs that must be unlocked by the application. |
| Include MFTs that are registered in the caller's process through either the |
| Include MFTs that are optimized for transcoding rather than playback. |
?
The last flag is used to sort and filter the results:
Flag | Description |
---|---|
| Sort and filter the results. |
?
If the
If you do not set the
Setting the Flags parameter to zero is equivalent to using the value
Setting Flags to
If no MFTs match the search criteria, the function returns
Note: There is no way to enumerate just local MFTs and nothing else. Setting Flags equal to
Creates the presentation clock. The presentation clock is used to schedule the time at which samples are rendered and to synchronize multiple streams. +
+Receives a reference to the clock's
If this function succeeds, it returns
The caller must shut down the presentation clock by calling
Typically applications do not create the presentation clock. The Media Session automatically creates the presentation clock. To get a reference to the presentation clock from the Media Session, call
Creates a presentation time source that is based on the system time.
+Receives a reference to the object's
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Enumerates a list of audio or video capture devices.
+Pointer to an attribute store that contains search criteria. To create the attribute store, call
Value | Meaning |
---|---|
Specifies whether to enumerate audio or video devices. (Required.) | |
For audio capture devices, specifies the device role. (Optional.) | |
For video capture devices, specifies the device category. (Optional.) |
?
Receives an array of
Receives the number of elements in the pppSourceActivate array. If no capture devices match the search criteria, this parameter receives the value 0.
If this function succeeds, it returns
Each returned
Attribute | Description |
---|---|
| The display name of the device. |
| The major type and subtype GUIDs that describe the device's output format. |
| The type of capture device (audio or video). |
| The audio endpoint ID string. (Audio devices only.) |
| The device category. (Video devices only.) |
| Whether a device is a hardware or software device. (Video devices only.) |
| The symbolic link for the device driver. (Video devices only.) |
?
To create a media source from an
Creates an empty transcode profile object.
The transcode profile stores configuration settings for the output file. These configuration settings are specified by the caller, and include audio and video stream properties, encoder settings, and container settings. To set these properties, the caller must call the appropriate
The configured transcode profile is passed to the
If this function succeeds, it returns
The
For example code that uses this function, see the following topics:
Creates a credential cache object. An application can use this object to implement a custom credential manager.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates the sequencer source.
+Reserved. Must be
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates a Microsoft Media Foundation byte stream that wraps an
A reference to the
Receives a reference to the
Returns an
This function enables applications to pass an
Creates an activation object for the enhanced video renderer (EVR) media sink.
+Handle to the window where the video will be displayed.
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To create the EVR, call
To configure the EVR, set any of the following attributes on the
Attribute | Description |
---|---|
| Activation object for a custom mixer. |
| CLSID for a custom mixer. |
| Flags for creating a custom mixer. |
| Activation object for a custom presenter. |
| CLSID for a custom presenter. |
| Flags for creating a custom presenter. |
?
When
Creates a default proxy locator.
+The name of the protocol.
Note: In this release of Media Foundation, the default proxy locator does not support RTSP.
Pointer to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an activation object for the sample grabber media sink.
+ Pointer to the
Pointer to the
Receives a reference to the
If this function succeeds, it returns
To create the sample grabber sink, call
Before calling ActivateObject, you can configure the sample grabber by setting any of the following attributes on the ppIActivate reference:
Creates a media sink for authoring MP4 files.
+A reference to the
A reference to the
This parameter can be
A reference to the
This parameter can be
Receives a reference to the MP4 media sink's
If this function succeeds, it returns
The MP4 media sink supports a maximum of one video stream and one audio stream. The initial stream formats are given in the pVideoMediaType and pAudioMediaType parameters. To create an MP4 file with one stream, set the other stream type to
The number of streams is fixed when you create the media sink. The sink does not support the
To author 3GP files, use the
Creates the transcode sink activation object.
The transcode sink activation object can be used to create any of the following file sinks:
The transcode sink activation object exposes the
If this function succeeds, it returns
Queries an object for a specified service interface.
This function is a helper function that wraps the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The service requested cannot be found in the object represented by punkObject. |
?
Windows Phone 8.1: This API is supported.
+
Retrieves the MIME types that are registered for the source resolver.
+Pointer to a
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates a media sink for authoring 3GP files.
+A reference to the
A reference to the
This parameter can be
A reference to the
This parameter can be
Receives a reference to the 3GP media sink's
If this function succeeds, it returns
The 3GP media sink supports a maximum of one video stream and one audio stream. The initial stream formats are given in the pVideoMediaType and pAudioMediaType parameters. To create a 3GP file with one stream, set the other stream type to
The number of streams is fixed when you create the media sink. The sink does not support the
To author MP4 files, use the
Creates a partial transcode topology.
The underlying topology builder creates a partial topology by connecting the required pipeline objects: + source, encoder, and sink. The encoder and the sink are configured according to the settings specified by the caller in the transcode profile.
To create the transcode profile object, call the
The configured transcode profile is passed to the
The function returns an
Return code | Description |
---|---|
| The function call succeeded, and ppTranscodeTopo receives a reference to the transcode topology. |
| pwszOutputFilePath contains invalid characters. |
| No streams are selected in the media source. |
| The profile does not contain the |
| For one or more streams, cannot find an encoder that accepts the media type given in the profile. |
| The profile does not specify a media type for any of the selected streams on the media source. |
?
For example code that uses this function, see the following topics:
Creates the scheme handler for the network source.
+Interface identifier (IID) of the interface to retrieve.
Receives a reference to the requested interface. The caller must release the interface. The scheme handler exposes the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates an instance of the Media Session inside a Protected Media Path (PMP) process.
+ The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
You can use the pConfiguration parameter to set any of the following attributes:
If this function cannot create the PMP Media Session because a trusted binary was revoked, the ppEnablerActivate parameter receives an
If the function successfully creates the PMP Media Session, the ppEnablerActivate parameter receives the value
Do not make calls to the PMP Media Session from a thread that is processing a window message sent from another thread. To test whether the current thread falls into this category, call InSendMessage.
+[This API is not supported and may be altered or unavailable in the future. Instead, applications should use the PSCreateMemoryPropertyStore function to create property stores.]
Creates an empty property store object.
+ Receives a reference to the
If this function succeeds, it returns
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Creates an activation object for the Streaming Audio Renderer.
+If this function succeeds, it returns
To create the audio renderer, call
Note: To avoid a memory leak, call
To configure the audio renderer, set any of the following attributes on the
Attribute | Description |
---|---|
| The audio endpoint device identifier. |
| The audio endpoint role. |
| Miscellaneous configuration flags. |
| The audio policy class. |
MF_AUDIO_RENDERER_ATTRIBUTE_STREAM_CATEGORY | The audio stream category. |
MF_LOW_LATENCY | Enables low-latency audio streaming. |
?
+Creates a media source for a hardware capture device.
+Pointer to the
Receives a reference to the media source's
If this function succeeds, it returns
The pAttributes parameter specifies an attribute store. To create the attribute store, call the
For audio capture devices, optionally set one of the following attributes:
Attribute | Description |
---|---|
| Specifies the audio endpoint ID of the audio capture device. |
| Specifies the device role. If this attribute is set, the function uses the default audio capture device for that device role. Do not combine this attribute with the |
?
If neither attribute is specified, the function selects the default audio capture device for the eCommunications role.
For video capture devices, you must set the following attribute:
Attribute | Description |
---|---|
| Specifies the symbolic link to the device. |
?
+
Queries whether a media presentation requires the Protected Media Path (PMP).
+Pointer to the
The function returns an
Return code | Description |
---|---|
| This presentation requires a protected environment. |
| This presentation does not require a protected environment. |
?
If this function returns
If the function returns S_FALSE, you can use the unprotected pipeline. Call
Internally, this function checks whether any of the stream descriptors in the presentation have the
Creates a presentation descriptor.
+Number of elements in the apStreamDescriptors array.
Array of
Receives a reference to an
If this function succeeds, it returns
If you are writing a custom media source, you can use this function to create the source presentation descriptor. The presentation descriptor is created with no streams selected. Generally, a media source should select at least one stream by default. To select a stream, call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+
Retrieves the URL schemes that are registered for the source resolver.
+Pointer to a
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Gets a list of output formats from an audio encoder.
+Specifies the subtype of the output media. The encoder uses this value as a filter when it is enumerating the available output types. For information about the audio subtypes, see Audio Subtype GUIDs.
Bitwise OR of zero or more flags from the _MFT_ENUM_FLAG enumeration.
A reference to the
Value | Meaning |
---|---|
Set this attribute to unlock an encoder that has field-of-use descriptions. | |
Specifies a device conformance profile for a Windows Media encoder. | |
Sets the tradeoff between encoding quality and encoding speed. |
?
Receives a reference to the
This function assumes the encoder will be used in its default encoding mode, which is typically constant bit-rate (CBR) encoding. Therefore, the types returned by the function might not work with other modes, such as variable bit-rate (VBR) encoding.
Internally, this function works by calling
Creates an instance of the sample copier transform.
+Receives a reference to the
If this function succeeds, it returns
The sample copier is a Media Foundation transform (MFT) that copies data from input samples to output samples without modifying the data. The following data is copied from the sample:
This MFT is useful in the following situation:
The following diagram shows this situation with a media source and a media sink.
In order for the media sink to receive data from the media source, the data must be copied into the media samples owned by the media sink. The sample copier can be used for this purpose.
A specific example of such a media sink is the Enhanced Video Renderer (EVR). The EVR allocates samples that contain Direct3D surface buffers, so it cannot receive video samples directly from a media source. Starting in Windows 7, the topology loader automatically handles this case by inserting the sample copier between the media source and the EVR.
+Creates a topology node.
+ The type of node to create, specified as a member of the
Receives a reference to the node's
If this function succeeds, it returns
Creates the source resolver, which is used to create a media source from a URL or byte stream.
+Receives a reference to the source resolver's
If this function succeeds, it returns
Note: Prior to Windows 7, this function was exported from mf.dll. Starting in Windows 7, this function is exported from mfplat.dll, and mf.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
Windows Phone 8.1: This API is supported.
+
Creates a topology object.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Gets the media type for a stream associated with a topology node.
+A reference to the
The identifier of the stream to query. This parameter is interpreted as follows:
If TRUE, the function gets an output type. If
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The stream index is invalid. |
?
This function gets the actual media type from the object that is associated with the topology node. The pNode parameter should specify a node that belongs to a fully resolved topology. If the node belongs to a partial topology, the function will probably fail.
Tee nodes do not have an associated object to query. For tee nodes, the function gets the node's input type, if available. Otherwise, if no input type is available, the function gets the media type of the node's primary output stream. The primary output stream is identified by the
Creates a media-type handler that supports a single media type at a time.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The media-type handler created by this function supports one media type at a time. Set the media type by calling
Creates a
Sequencer element identifier. This value specifies the segment in which to begin playback. The element identifier is returned in the
Starting position within the segment, in 100-nanosecond units.
Pointer to a
If this function succeeds, it returns
The
Shuts down a Media Foundation object and releases all resources associated with the object.
This function is a helper function that wraps the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
This function is not related to the
Creates the remote desktop plug-in object. Use this object if the application is running in a Terminal Services client session.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
| Remote desktop connections are not allowed by the current session policy. |
?
Creates a new instance of the topology loader.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates a stream descriptor.
+Stream identifier.
Number of elements in the apMediaTypes array.
Pointer to an array of
Receives a reference to the
If this function succeeds, it returns
If you are writing a custom media source, you can use this function to create stream descriptors for the source. This function automatically creates the stream descriptor media type handler and initializes it with the list of types given in apMediaTypes. The function does not set the current media type on the handler, however. To set the type, call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Windows Phone 8.1: This API is supported.
+Returns the system time.
+Returns the system time, in 100-nanosecond units.
Windows Phone 8.1: This API is supported.
+Creates an activation object that represents a hardware capture device.
+Pointer to the
Receives a reference to the
This function creates an activation object that can be used to create a media source for a hardware device. To create the media source itself, call
The pAttributes parameter specifies an attribute store. To create the attribute store, call the
For audio capture devices, optionally set one of the following attributes:
Attribute | Description |
---|---|
| Specifies the audio endpoint ID of the audio capture device. |
| Specifies the device role. If this attribute is set, the function uses the default audio capture device for that device role. Do not combine this attribute with the |
?
If neither attribute is specified, the function selects the default audio capture device for the eCommunications role.
For video capture devices, you must set the following attribute:
Attribute | Description |
---|---|
| Specifies the symbolic link to the device. |
?
+
Deserializes a presentation descriptor from a byte array.
+Size of the pbData array, in bytes.
Pointer to an array of bytes that contains the serialized presentation descriptor.
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
[This API is not supported and may be altered or unavailable in the future. Instead, applications should use the PSCreateMemoryPropertyStore function to create named property stores.]
Creates an empty property store to hold name/value pairs.
+Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Creates the Media Session in the application's process.
+The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
If your application does not play protected content, you can use this function to create the Media Session in the application's process. To use the Media Session for protected content, you must call
You can use the pConfiguration parameter to specify any of the following attributes:
Creates a media source that aggregates a collection of media sources.
+A reference to the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The pSourceCollection collection does not contain any elements. |
?
The aggregated media source is useful for combining streams from separate media sources. For example, you can use it to combine a video capture source and an audio capture source.
Windows Phone 8.1: This API is supported.
+
Creates the protected media path (PMP) server object.
+A member of the
Receives a reference to the
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
Serializes a presentation descriptor to a byte array.
+Pointer to the
Receives the size of the ppbData array, in bytes.
Receives a reference to an array of bytes containing the serialized presentation descriptor. The caller must free the memory for the array by calling CoTaskMemFree.
The function returns an
Return code | Description |
---|---|
| The function succeeded. |
?
To deserialize the presentation descriptor, pass the byte array to the
Creates the default implementation of the quality manager.
+Receives a reference to the quality manager's
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Creates the Streaming Audio Renderer.
+If this function succeeds, it returns
To configure the audio renderer, set any of the following attributes on the
Attribute | Description |
---|---|
| The audio endpoint device identifier. |
| The audio endpoint role. |
| Miscellaneous configuration flags. |
| The audio policy class. |
MF_AUDIO_RENDERER_ATTRIBUTE_STREAM_CATEGORY | The audio stream category. |
MF_LOW_LATENCY | Enables low-latency audio streaming. |
?
+Creates the MP3 media sink.
+A reference to the
Receives a reference to the
If this function succeeds, it returns
The MP3 media sink takes compressed MP3 audio samples as input, and writes an MP3 file with ID3 headers as output. The MP3 media sink does not perform MP3 audio encoding.
+Creates the source reader from a byte stream.
+A reference to the
Pointer to the
Receives a reference to the
If this function succeeds, it returns
Call CoInitialize(Ex) and
Internally, the source reader calls the
This function is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
Windows Phone 8.1: This API is supported.
+Creates the source reader from a URL.
+The URL of a media file to open.
Pointer to the
Receives a reference to the
If this function succeeds, it returns
Call CoInitialize(Ex) and
Internally, the source reader calls the
This function is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
Windows Phone 8.1: This API is supported.
+Creates the source reader from a media source.
+A reference to the
Pointer to the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The source contains protected content. |
?
Call CoInitialize(Ex) and
By default, when the application releases the source reader, the source reader shuts down the media source by calling
To change this default behavior, set the
When using the Source Reader, do not call any of the following methods on the media source:
This function is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
Windows Phone 8.1: This API is supported.
+Creates the sink writer from a URL or byte stream.
+A null-terminated string that contains the URL of the output file. This parameter can be
Pointer to the
If this parameter is a valid reference, the sink writer writes to the provided byte stream. (The byte stream must be writable.) Otherwise, if pByteStream is
Pointer to the
Receives a reference to the
Call CoInitialize(Ex) and
The first three parameters to this function can be
Description | pwszOutputURL | pByteStream | pAttributes |
---|---|---|---|
Specify a byte stream, with no URL. | non- | Required (must not be | |
Specify a URL, with no byte stream. | not | Optional (may be | |
Specify both a URL and a byte stream. | non- | non- | Optional (may be |
?
The pAttributes parameter is required in the first case and optional in the others.
This function is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
Windows Phone 8.1: This API is supported.
+Creates the sink writer from a media sink.
+Pointer to the
Pointer to the
Receives a reference to the
If this function succeeds, it returns
Call CoInitialize(Ex) and
When you are done using the media sink, call the media sink's
This function is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
Windows Phone 8.1: This API is supported.
+
Loads attributes from a stream into an attribute store.
+Pointer to the
Bitwise OR of zero or more flags from the
Pointer to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Use this function to deserialize an attribute store that was serialized with the
If dwOptions contains the
If the
Otherwise, the function calls CoUnmarshalInterface to deserialize a proxy for the object.
This function deletes any attributes that were previously stored in pAttr.
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+
Writes the contents of an attribute store to a stream.
+Pointer to the
Bitwise OR of zero or more flags from the
Pointer to the
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If dwOptions contains the
If the
Otherwise, the function calls CoMarshalInterface to serialize a proxy for the object.
If dwOptions does not include the
To load the attributes from the stream, call
The main purpose of this function is to marshal attributes across process boundaries.
Windows Phone 8: This API is supported.
Windows Phone 8.1: This API is supported.
+Creates a generic activation object for Media Foundation transforms (MFTs).
+Receives a reference to the
If this function succeeds, it returns
Most applications will not use this function; it is used internally by the
An activation object is a helper object that creates another object, somewhat similar to a class factory. The
Attribute | Description |
---|---|
| Required. Contains the CLSID of the MFT. The activation object creates the MFT by passing this CLSID to the CoCreateInstance function. |
| Optional. Specifies the category of the MFT. |
| Contains various flags that describe the MFT. For hardware-based MFTs, set the |
| Optional. Contains the merit value of a hardware codec. If this attribute is set and its value is greater than zero, the activation object calls |
| Required for hardware-based MFTs. Specifies the symbolic link for the hardware device. The device proxy uses this value to configure the MFT. |
| Optional. Contains an If this attribute is set and the |
| Optional. Contains the encoding profile for an encoder. The value of this attribute is an If this attribute is set and the value of the |
| Optional. Specifies the preferred output format for an encoder. If this attribute is set and the value of the |
?
For more information about activation objects, see Activation Objects.
+Enumerates a list of audio or video capture devices.
+Pointer to an attribute store that contains search criteria. To create the attribute store, call
Value | Meaning |
---|---|
Specifies whether to enumerate audio or video devices. (Required.) | |
For audio capture devices, specifies the device role. (Optional.) | |
For video capture devices, specifies the device category. (Optional.) |
?
Receives an array of
Receives the number of elements in the pppSourceActivate array. If no capture devices match the search criteria, this parameter receives the value 0.
If this function succeeds, it returns
Each returned
Attribute | Description |
---|---|
| The display name of the device. |
| The major type and subtype GUIDs that describe the device's output format. |
| The type of capture device (audio or video). |
| The audio endpoint ID string. (Audio devices only.) |
| The device category. (Video devices only.) |
| Whether a device is a hardware or software device. (Video devices only.) |
| The symbolic link for the device driver. (Video devices only.) |
?
To create a media source from an
Applies to: desktop apps only
Creates an activation object for the sample grabber media sink.
+ Pointer to the
Pointer to the
Receives a reference to the
If this function succeeds, it returns
To create the sample grabber sink, call
Before calling ActivateObject, you can configure the sample grabber by setting any of the following attributes on the ppIActivate reference:
Applies to: desktop apps | Metro style apps
Copies an image or image plane from one buffer to another.
+Pointer to the start of the first row of pixels in the destination buffer.
Stride of the destination buffer, in bytes.
Pointer to the start of the first row of pixels in the source image.
Stride of the source image, in bytes.
Width of the image, in bytes.
Number of rows of pixels to copy.
If this function succeeds, it returns
This function copies a single plane of the image. For planar YUV formats, you must call the function once for each plane. In this case, pDest and pSrc must point to the start of each plane.
This function is optimized if the MMX, SSE, or SSE2 instruction sets are available on the processor. The function performs a non-temporal store (the data is written to memory directly without polluting the cache).
Note: Prior to Windows 7, this function was exported from evr.dll. Starting in Windows 7, this function is exported from mfplat.dll, and evr.dll exports a stub function that calls into mfplat.dll. For more information, see Library Changes in Windows 7.
+Enables the application to defer the creation of an object. This interface is exposed by activation objects.
+Typically, the application calls some function that returns an
Provides a generic way to store key/value pairs on an object. The keys are
For a list of predefined attribute
To create an empty attribute store, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the value associated with a key.
+ A
A reference to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified key was not found. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the data type of the value associated with a key.
+Receives a member of the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Queries whether a stored attribute value equals a specified
Receives a Boolean value indicating whether the attribute matches the value given in Value. See Remarks. This parameter must not be
The method sets pbResult to
No attribute is found whose key matches the one given in guidKey.
The attribute's
The attribute value does not match the value given in Value.
The method fails.
Otherwise, the method sets pbResult to TRUE.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Compares the attributes on this object with the attributes on another object.
+Pointer to the
Member of the
Receives a Boolean value. The value is TRUE if the two sets of attributes match in the way specified by the MatchType parameter. Otherwise, the value is
If pThis is the object whose Compare method is called, and pTheirs is the object passed in as the pTheirs parameter, the following comparisons are defined by MatchType.
Match type | Returns TRUE if and only if |
---|---|
For every attribute in pThis, an attribute with the same key and value exists in pTheirs. | |
For every attribute in pTheirs, an attribute with the same key and value exists in pThis. | |
The key/value pairs are identical in both objects. | |
Take the intersection of the keys in pThis and the keys in pTheirs. The values associated with those keys are identical in both pThis and pTheirs. | |
Take the object with the smallest number of attributes. For every attribute in that object, an attribute with the same key and value exists in the other object. |
?
The pTheirs and pbResult parameters must not be
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a UINT32 value associated with a key.
+Receives a UINT32 value. If the key is found and the data type is UINT32, the method copies the value into this parameter. Otherwise, the original value of this parameter is not changed.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a UINT64 value associated with a key.
+Receives a UINT64 value. If the key is found and the data type is UINT64, the method copies the value into this parameter. Otherwise, the original value of this parameter is not changed.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a double value associated with a key.
+Receives a double value. If the key is found and the data type is double, the method copies the value into this parameter. Otherwise, the original value of this parameter is not changed.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a
Receives a
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the length of a string value associated with a key.
+If the key is found and the value is a string type, this parameter receives the number of characters in the string, not including the terminating
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a wide-character string associated with a key.
+Pointer to a wide-character array allocated by the caller. The array must be large enough to hold the string, including the terminating
The size of the pwszValue array, in characters. This value includes the terminating
Receives the number of characters in the string, excluding the terminating
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The length of the string is too large to fit in a UINT32 value. |
| The buffer is not large enough to hold the string. |
| The specified key was not found. |
| The attribute value is not a string. |
?
You can also use the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Gets a wide-character string associated with a key. This method allocates the memory for the string.
+A
If the key is found and the value is a string type, this parameter receives a copy of the string. The caller must free the memory for the string by calling CoTaskMemFree.
Receives the number of characters in the string, excluding the terminating
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified key was not found. |
| The attribute value is not a string. |
?
To copy a string value into a caller-allocated buffer, use the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Note??An earlier version of the documentation incorrectly stated that the pcchLength parameter can be
Retrieves the length of a byte array associated with a key.
+If the key is found and the value is a byte array, this parameter receives the size of the array, in bytes.
To get the byte array, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a byte array associated with a key. This method copies the array into a caller-allocated buffer.
+Pointer to a buffer allocated by the caller. If the key is found and the value is a byte array, the method copies the array into this buffer. To find the required size of the buffer, call
The size of the pBuf buffer, in bytes.
Receives the size of the byte array. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The buffer is not large enough to hold the array. |
| The specified key was not found. |
| The attribute value is not a byte array. |
?
You can also use the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a byte array associated with a key. This method allocates the memory for the array.
+If the key is found and the value is a byte array, this parameter receives a copy of the array. The caller must free the memory for the array by calling CoTaskMemFree.
Receives the size of the array, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified key was not found. |
| The attribute value is not a byte array. |
?
To copy a byte array value into a caller-allocated buffer, use the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an interface reference associated with a key.
+Interface identifier (IID) of the interface to retrieve.
Receives a reference to the requested interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The attribute value is an |
| The specified key was not found. |
| The attribute value is not an |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Adds an attribute value with a specified key.
+ A
A
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Insufficient memory. |
| Invalid attribute type. |
?
This method checks whether the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Removes a key/value pair from the object's attribute list.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the specified key does not exist, the method returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Removes all key/value pairs from the object's attribute list.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates a UINT32 value with a key.
+New value for this key.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To retrieve the UINT32 value, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates a UINT64 value with a key.
+New value for this key.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To retrieve the UINT64 value, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates a double value with a key.
+New value for this key.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To retrieve the double value, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates a
New value for this key.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Insufficient memory. |
?
To retrieve the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates a wide-character string with a key.
+Null-terminated wide-character string to associate with this key. The method stores a copy of the string.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To retrieve the string, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates a byte array with a key.
+Pointer to a byte array to associate with this key. The method stores a copy of the array.
Size of the array, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To retrieve the byte array, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Associates an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To retrieve the
It is not an error to call SetUnknown with pUnknown equal to
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Locks the attribute store so that no other thread can access it. If the attribute store is already locked by another thread, this method blocks until the other thread unlocks the object. After calling this method, call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method can cause a deadlock if a thread that calls LockStore waits on a thread that calls any other
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Unlocks the attribute store after a call to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of attributes that are set on this object.
+Receives the number of attributes. This parameter must not be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To enumerate all of the attributes, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an attribute at the specified index.
+Index of the attribute to retrieve. To get the number of attributes, call
Receives the
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid index. |
?
To enumerate all of an object's attributes in a thread-safe way, do the following:
Call
Call
Call GetItemByIndex to get each attribute by index.
Call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Copies all of the attributes from this object into another attribute store.
+ A reference to the
If this method succeeds, it returns
This method deletes all of the attributes originally stored in pDest.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Attributes are used throughout Microsoft Media Foundation to configure objects, describe media formats, query object properties, and other purposes. For more information, see Attributes in Media Foundation.
For a complete list of all the defined attribute GUIDs in Media Foundation, see Media Foundation Attributes.
+Applies to: desktop apps | Metro style apps
Retrieves an attribute at the specified index.
+Index of the attribute to retrieve. To get the number of attributes, call
Receives the
To enumerate all of an object's attributes in a thread-safe way, do the following:
Call
Call
Call GetItemByIndex to get each attribute by index.
Call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Adds an attribute value with a specified key.
+ A
A
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Insufficient memory. |
| Invalid attribute type. |
?
This method checks whether the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Adds an attribute value with a specified key.
+ A
A
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Insufficient memory. |
| Invalid attribute type. |
?
This method checks whether the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of attributes that are set on this object.
+To enumerate all of the attributes, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Creates the object associated with this activation object.
+Interface identifier (IID) of the requested interface.
Receives a reference to the requested interface. The caller must release the interface.
If this method succeeds, it returns
Some Microsoft Media Foundation objects must be shut down before being released. If so, the caller is responsible for shutting down the object that is returned in ppv. To shut down the object, do one of the following:
The
After the first call to ActivateObject, subsequent calls return a reference to the same instance, until the client calls either ShutdownObject or
Shuts down the created object.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If you create an object by calling
The component that calls ActivateObject — not the component that creates the activation object — is responsible for calling ShutdownObject. For example, in a typical playback application, the application creates activation objects for the media sinks, but the Media Session calls ActivateObject. Therefore the Media Session, not the application, calls ShutdownObject.
After ShutdownObject is called, the activation object releases all of its internal references to the created object. If you call ActivateObject again, the activation object will create a new instance of the other object.
+
Detaches the created object from the activation object.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. |
?
The activation object releases all of its internal references to the created object. If you call ActivateObject again, the activation object will create a new instance of the other object.
The DetachObject method does not shut down the created object. If the DetachObject method succeeds, the client must shut down the created object. This rule applies only to objects that have a shutdown method or that support the
Implementation of this method is optional. If the activation object does not support this method, the method returns E_NOTIMPL.
+Provides methods to work with the header section of files conforming to the Advanced Systems Format (ASF) specification.
The ASF ContentInfo Object exposes this interface. To get a reference to the
Retrieves the size of the header section of an Advanced Systems Format (ASF) file.
+The
Receives the size, in bytes, of the header section of the content. The value includes the size of the ASF Header Object plus the size of the header section of the Data Object. Therefore, the resulting value is the offset to the start of the data packets in the ASF Data Object.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The buffer does not contain valid ASF data. |
| The buffer does not contain enough valid data. |
?
The header of an ASF file or stream can be passed to the
Parses the information in an ASF header and uses that information to set values in the ContentInfo object. You can pass the entire header in a single buffer or send it in several pieces.
+Pointer to the
Offset, in bytes, of the first byte in the buffer relative to the beginning of the header.
The method returns an
Return code | Description |
---|---|
| The header is completely parsed and validated. |
| The input buffer does not contain valid ASF data. |
| The input buffer is too small. |
| The method succeeded, but the header passed was incomplete. This is the successful return code for all calls but the last one when passing the header in pieces. |
?
If you pass the header in pieces, the ContentInfo object will keep references to the buffer objects until the entire header is parsed. Therefore, do not write over the buffers passed into this method.
The start of the Header object has the following layout in memory:
Field Name | Size in bytes |
---|---|
Object ID | 16 |
Object Size | 8 |
Number of Header Objects | 4 |
Reserved1 | 1 |
Reserved2 | 1 |
?
The first call to ParseHeader reads everything up to and including Reserved2, so it requires a minimum of 30 bytes. (Note that the
Encodes the data in the MFASFContentInfo object into a binary Advanced Systems Format (ASF) header.
+ A reference to the
Size of the encoded ASF header in bytes. If pIHeader is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The ASF Header Objects do not exist for the media that the ContentInfo object holds reference to. |
| The ASF Header Object size exceeds 10 MB. |
| The buffer passed in pIHeader is not large enough to hold the ASF Header Object information. |
?
The size received in the pcbHeader parameter includes the padding size. The content information shrinks or expands the padding data depending on the size of the ASF Header Objects.
During this call, the stream properties are set based on the encoding properties of the profile. These properties are available through the
Retrieves an Advanced Systems Format (ASF) profile that describes the ASF content.
+Receives an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The profile is set by calling either
The ASF profile object returned by this method does not include any of the MF_PD_ASF_xxx attributes (see Presentation Descriptor Attributes). To get these attributes, do the following:
Call
(Optional.) Call
An ASF profile is a template for file encoding, and is intended mainly for creating ASF content. If you are reading an existing ASF file, it is recommended that you use the presentation descriptor to get information about the file. One exception is that the profile contains the mutual exclusion and stream prioritization objects, which are not exposed directly from the presentation descriptor.
+
Uses profile data from a profile object to configure settings in the ContentInfo object.
+The
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If there is already information in the ContentInfo object when this method is called, it is replaced by the information from the profile object.
+
Creates a presentation descriptor for ASF content.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a property store that can be used to set encoding properties.
+Stream number to configure. Set to zero to configure file-level encoding properties.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves an Advanced Systems Format (ASF) profile that describes the ASF content.
+The profile is set by calling either
The ASF profile object returned by this method does not include any of the MF_PD_ASF_xxx attributes (see Presentation Descriptor Attributes). To get these attributes, do the following:
Call
(Optional.) Call
An ASF profile is a template for file encoding, and is intended mainly for creating ASF content. If you are reading an existing ASF file, it is recommended that you use the presentation descriptor to get information about the file. One exception is that the profile contains the mutual exclusion and stream prioritization objects, which are not exposed directly from the presentation descriptor.
+
Retrieves the index settings for a specified stream and index type.
+To read an existing ASF index, call
If an index exists for the stream and the value passed into pcbIndexDescriptor is smaller than the required size of the pbIndexDescriptor buffer, the method returns
If there is no index for the specified stream, the method returns
Sets indexer options.
+Bitwise OR of zero or more flags from the MFASF_INDEXER_FLAGS enumeration specifying the indexer options to use.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The indexer object was initialized before setting flags for it. For more information, see Remarks. |
?
Retrieves the flags that indicate the selected indexer options.
+Receives a bitwise OR of zero or more flags from the MFASF_INDEXER_FLAGS enumeration.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pdwFlags is |
?
You must call this method before initializing the indexer object with
Initializes the indexer object. This method reads information in a ContentInfo object about the configuration of the content and the properties of the existing index, if present. Use this method before using the indexer for either writing or reading an index. You must make this call before using any of the other methods of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid ASF data. |
| Unexpected error. |
?
The indexer needs to examine the data in the ContentInfo object to properly write or read the index for the content. The indexer will not make changes to the content information and will not hold any references to the
In the ASF header, the maximum data-packet size must equal the minimum data-packet size. Otherwise, the method returns
Retrieves the offset of the index object from the start of the content.
+Pointer to the
Receives the offset of the index relative to the beginning of the content described by the ContentInfo object. This is the position relative to the beginning of the ASF file.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pIContentInfo is |
?
The index continues from the offset retrieved by this method to the end of the file.
You must call
If the index is retrieved by using more than one call to
Adds byte streams to be indexed.
+An array of
The number of references in the ppIByteStreams array.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The indexer object has already been initialized and it has packets which have been indexed. |
?
For a reading scenario, only one byte stream should be used by the indexer object. For an index generating scenario, it depends on how many index objects need to be generated.
+
Retrieves the number of byte streams that are in use by the indexer object.
+Receives the number of byte streams that are in use by the indexer object.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pcByteStreams is |
?
Retrieves the index settings for a specified stream and index type.
+Pointer to an
A variable that retrieves a Boolean value specifying whether the index described by pIndexIdentifier has been created.
A buffer that receives the index descriptor. The index descriptor consists of an
On input, specifies the size, in bytes, of the buffer that pbIndexDescriptor points to. The value can be zero if pbIndexDescriptor is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The buffer size specified in pcbIndexDescriptor is too small. |
?
To read an existing ASF index, call
If an index exists for the stream and the value passed into pcbIndexDescriptor is smaller than the required size of the pbIndexDescriptor buffer, the method returns
If there is no index for the specified stream, the method returns
Configures the index for a stream.
+The index descriptor to set. The index descriptor is an
The size, in bytes, of the index descriptor.
A Boolean value. Set to TRUE to have the indexer create an index of the type specified for the stream specified in the index descriptor.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| An attempt was made to change the index status in a seek-only scenario. For more information, see Remarks. |
?
You must make all calls to SetIndexStatus before making any calls to
The indexer object is configured to create temporal indexes for each stream by default. Call this method only if you want to override the default settings.
You cannot use this method in an index reading scenario. You can only use this method when writing indexes.
+Given a desired seek time, gets the offset from which the client should start reading data.
+The value of the index entry for which to get the position. The format of this value varies depending on the type of index, which is specified in the index identifier. For time-based indexing, the variant type is VT_I8 and the value is the desired seek time, in 100-nanosecond units.
Pointer to an
Receives the offset within the data segment of the ASF Data Object. The offset is in bytes, and is relative to the start of packet 0. The offset gives the starting location from which the client should begin reading from the stream. This location might not correspond exactly to the requested seek time.
For reverse playback, if no key frame exists after the desired seek position, this parameter receives the value MFASFINDEXER_READ_FOR_REVERSEPLAYBACK_OUTOFDATASEGMENT. In that case, the seek position should be 1 byte past the end of the data segment.
Receives the approximate time stamp of the data that is located at the offset returned in the pcbOffsetWithinData parameter. The accuracy of this value is equal to the indexing interval of the ASF index, typically about 1 second.
If the approximate time stamp cannot be determined, this parameter receives the value MFASFINDEXER_APPROX_SEEK_TIME_UNKNOWN.
Receives the payload number of the payload that contains the information for the specified stream. Packets can contain multiple payloads, each containing data for a different stream. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The requested seek time is out of range. |
| No index exists of the specified type for the specified stream. |
?
Accepts an ASF packet for the file and creates index entries for them.
+ Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The argument passed in is |
| The indexer is not initialized. |
?
The ASF indexer creates indexes for a file internally. You can get the completed index for all data packets sent to the indexer by committing the index with
When this method creates index entries, they are immediately available for use by
The media sample specified in pIASFPacketSample must hold a buffer that contains a single ASF packet. Get the sample from the ASF multiplexer by calling the
You cannot use this method while reading an index, only when writing an index.
+
Adds information about a new index to the ContentInfo object associated with ASF content. You must call this method before copying the index to the content so that the index will be readable by the indexer later.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The caller made an invalid request. For more information, see Remarks. |
?
For the index to function properly, you must call this method after all ASF packets in the file have been passed to the indexer by using the
An application must use the CommitIndex method only when writing a new index; otherwise, CommitIndex may return
You cannot use this method in an index reading scenario. You can only use this method when writing indexes.
+
Retrieves the size, in bytes, of the buffer required to store the completed index.
+Receives the size of the index, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The index has not been committed. For more information, see Remarks. |
?
Use this method to get the size of the index and then allocate a buffer big enough to hold it.
The index must be committed with a call to
Call
You cannot use this method in a reading scenario. You can only use this method when writing indexes.
+
Retrieves the completed index from the ASF indexer object.
+Pointer to the
The offset of the data to be retrieved, in bytes from the start of the index data. Set to 0 for the first call. If subsequent calls are needed (the buffer is not large enough to hold the entire index), set to the byte following the last one retrieved.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The index was not committed before attempting to get the completed index. For more information, see Remarks. |
?
This method uses as much of the buffer as possible, and updates the length of the buffer appropriately.
If pIIndexBuffer is large enough to contain the entire buffer, cbOffsetWithinIndex should be 0, and the call needs to be made only once. Otherwise, there should be no gaps between successive buffers.
The user must write this data to the content at cbOffsetFromIndexStart bytes after the end of the ASF data object. You can call
This call will not succeed unless
You cannot use this method in an index reading scenario. You can only use this method when writing indexes.
+
Retrieves the next output ASF packet from the multiplexer.
+ The client needs to call this method, ideally after every call to
If no packets are ready, the method returns
Initializes the multiplexer with the data from an ASF ContentInfo object.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This call must be made once at the beginning of encoding, with pIContentInfo pointing to the ASF ContentInfo object that describes the content to be encoded. This enables the ASF multiplexer to see, among other things, which streams will be present in the encoding session. This call typically does not affect the data in the ASF ContentInfo object.
+
Sets multiplexer options.
+Bitwise OR of zero or more members of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves flags indicating the configured multiplexer options.
+Receives a bitwise OR of zero or more values from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Delivers input samples to the multiplexer.
+The stream number of the stream to which the sample belongs.
Pointer to the
The adjustment to apply to the time stamp of the sample. This parameter is used if the caller wants to shift the sample time on pISample. This value should be positive if the time stamp should be pushed ahead and negative if the time stamp should be pushed back. This time stamp is added to sample time on pISample, and the resulting time is used by the multiplexer instead of the original sample time. If no adjustment is needed, set this value to 0.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There are too many packets waiting to be retrieved from the multiplexer. Call |
| The sample that was processed violates the bandwidth limitations specified for the stream in the ASF ContentInfo object. When this error is generated, the sample is dropped. |
| The value passed in wStreamNumber is invalid. |
| The presentation time of the input media sample is earlier than the send time. |
?
The application passes samples to ProcessSample, and the ASF multiplexer queues them internally until they are ready to be placed into ASF packets. Call
After each call to ProcessSample, call GetNextPacket in a loop to get all of the available data packets. For a code example, see Generating New ASF Data Packets.
+
Retrieves the next output ASF packet from the multiplexer.
+ Receives zero or more status flags. If more than one packet is waiting, the method sets the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The client needs to call this method, ideally after every call to
If no packets are ready, the method returns
Signals the multiplexer to process all queued output media samples. Call this method after passing the last sample to the multiplexer.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You must call Flush after the last sample has been passed into the ASF multiplexer and before you call
Collects data from the multiplexer and updates the ASF ContentInfo object to include that information in the ASF Header Object.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There are pending output media samples waiting in the multiplexer. Call |
?
For non-live encoding scenarios (such as encoding to a file), the user should call End to update the specified ContentInfo object, adding data that the multiplexer has collected during the packet generation process. The user should then call
During live encoding, it is usually not possible to rewrite the header, so this call is not required for live encoding. (The header in those cases will simply lack some of the information that was not available until the end of the encoding session.)
+
Retrieves multiplexer statistics.
+The stream number for which to obtain statistics.
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the maximum time by which samples from various streams can be out of synchronization. The multiplexer will not accept a sample with a time stamp that is out of synchronization with the latest samples from any other stream by an amount that exceeds the synchronization tolerance.
+Synchronization tolerance in milliseconds.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The synchronization tolerance is the maximum difference in presentation times at any given point between samples of different streams that the ASF multiplexer can accommodate. That is, if the synchronization tolerance is 3 seconds, no stream can be more than 3 seconds behind any other stream in the time stamps passed to the multiplexer. The multiplexer determines a default synchronization tolerance to use, but this method overrides it (usually to increase it). More tolerance means the potential for greater latency in the multiplexer. If the time stamps are synchronized among the streams, actual latency will be much lower than msSyncTolerance.
+
Sets the maximum time by which samples from various streams can be out of synchronization. The multiplexer will not accept a sample with a time stamp that is out of synchronization with the latest samples from any other stream by an amount that exceeds the synchronization tolerance.
+The synchronization tolerance is the maximum difference in presentation times at any given point between samples of different streams that the ASF multiplexer can accommodate. That is, if the synchronization tolerance is 3 seconds, no stream can be more than 3 seconds behind any other stream in the time stamps passed to the multiplexer. The multiplexer determines a default synchronization tolerance to use, but this method overrides it (usually to increase it). More tolerance means the potential for greater latency in the multiplexer. If the time stamps are synchronized among the streams, actual latency will be much lower than msSyncTolerance.
+Configures an Advanced Systems Format (ASF) mutual exclusion object, which manages information about a group of streams in an ASF profile that are mutually exclusive. When streams or groups of streams are mutually exclusive, only one of them is read at a time; they are not read concurrently.
A common example of mutual exclusion is a set of streams that each include the same content encoded at a different bit rate. The stream that is used is determined by the available bandwidth to the reader.
An
An ASF profile object can support multiple mutual exclusions. Each must be configured using a separate ASF mutual exclusion object.
+
Retrieves the type of mutual exclusion represented by the Advanced Systems Format (ASF) mutual exclusion object.
+A variable that receives the type identifier. For a list of predefined mutual exclusion type constants, see ASF Mutual Exclusion Type GUIDs.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sometimes, content must be made mutually exclusive in more than one way. For example, a video file might contain audio streams of several bit rates for each of several languages. To handle this type of complex mutual exclusion, you must configure more than one ASF mutual exclusion object. For more information, see
Sets the type of mutual exclusion that is represented by the Advanced Systems Format (ASF) mutual exclusion object.
+The type of mutual exclusion that is represented by the ASF mutual exclusion object. For a list of predefined mutual exclusion type constants, see ASF Mutual Exclusion Type GUIDs.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sometimes, content must be made mutually exclusive in more than one way. For example, a video file might contain audio streams in several bit rates for each of several languages. To handle this type of complex mutual exclusion, you must configure more than one ASF mutual exclusion object. For more information, see
Retrieves the number of records in the Advanced Systems Format mutual exclusion object.
+Receives the count of records.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Each record includes one or more streams. Every stream in a record is mutually exclusive of streams in every other record.
Use this method in conjunction with
Retrieves the stream numbers contained in a record in the Advanced Systems Format mutual exclusion object.
+The number of the record for which to retrieve the stream numbers.
An array that receives the stream numbers. Set to
On input, the number of elements in the array referenced by pwStreamNumArray. On output, the method sets this value to the count of stream numbers in the record. You can call GetStreamsForRecord with pwStreamNumArray set to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Adds a stream number to a record in the Advanced Systems Format mutual exclusion object.
+The record number to which the stream is added. A record number is set by the
The stream number to add to the record.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified stream number is already associated with the record. |
?
Each record includes one or more streams. Every stream in a record is mutually exclusive of all streams in every other record.
+
Removes a stream number from a record in the Advanced Systems Format mutual exclusion object.
+The record number from which to remove the stream number.
The stream number to remove from the record.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The stream number is not listed for the specified record. |
?
Removes a record from the Advanced Systems Format (ASF) mutual exclusion object.
+The index of the record to remove.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When a record is removed, the ASF mutual exclusion object indexes the remaining records so that they are sequential starting with zero. You should enumerate the records to ensure that you have the correct index for each record. If the record removed is the one with the highest index, removing it has no effect on the other indexes.
+
Adds a record to the mutual exclusion object. A record specifies streams that are mutually exclusive with the streams in all other records.
+Receives the index assigned to the new record. Record indexes are zero-based and sequential.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
A record can include one or more stream numbers. All of the streams in a record are mutually exclusive with all the streams in all other records in the ASF mutual exclusion object.
You can use records to create complex mutual exclusion scenarios by using multiple ASF mutual exclusion objects.
+
Creates a copy of the Advanced Systems Format mutual exclusion object.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The cloned object is a new object, completely independent of the object from which it was cloned.
+
Retrieves a stream from the profile by stream index, and/or retrieves the stream number for a stream index.
+This method does not create a copy of the stream configuration object. The reference that is retrieved points to the object within the profile object. You must not make any changes to the stream configuration object using this reference, because doing so can affect the profile object in unexpected ways.
To change the configuration of the stream configuration object in the profile, you must first clone the stream configuration object by calling
Retrieves the number of streams in the profile.
+Receives the number of streams in the profile.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a stream from the profile by stream index, and/or retrieves the stream number for a stream index.
+The index of the stream to retrieve. Stream indexes are sequential and zero-based. You can get the number of streams that are in the profile by calling the
Receives the stream number of the requested stream. Stream numbers are one-based and are not necessarily sequential. This parameter can be set to
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method does not create a copy of the stream configuration object. The reference that is retrieved points to the object within the profile object. You must not make any changes to the stream configuration object using this reference, because doing so can affect the profile object in unexpected ways.
To change the configuration of the stream configuration object in the profile, you must first clone the stream configuration object by calling
Retrieves an Advanced Systems Format (ASF) stream configuration object for a stream in the profile. This method references the stream by stream number instead of stream index.
+The stream number for which to obtain the interface reference.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method does not create a copy of the stream configuration object. The reference that is retrieved points to the object within the profile object. You must not make any changes to the stream configuration object using this reference, because doing so can affect the profile object in unexpected ways.
To change the configuration of the stream configuration object in the profile, you must first clone the stream configuration object by calling
Adds a stream to the profile or reconfigures an existing stream.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the stream number in the ASF stream configuration object is already included in the profile, the information in the new object replaces the old one. If the profile does not contain a stream for the stream number, the ASF stream configuration object is added as a new stream.
+
Removes a stream from the Advanced Systems Format (ASF) profile object.
+Stream number of the stream to remove.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
After a stream is removed, the ASF profile object reassigns stream indexes so that the index values are sequential starting from zero. Any previously stored stream index numbers are no longer valid after deleting a stream.
+
Creates an Advanced Systems Format (ASF) stream configuration object.
+Pointer to the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| ppIStream is |
| stream configuration object could not be created due to insufficient memory. |
?
The ASF stream configuration object created by this method is not included in the profile. To include the stream, you must first configure the stream configuration and then call
Retrieves the number of Advanced Systems Format (ASF) mutual exclusion objects that are associated with the profile.
+Receives the number of mutual exclusion objects.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Multiple mutual exclusion objects may be required for streams that are mutually exclusive in more than one way. For more information, see
Retrieves an Advanced Systems Format (ASF) mutual exclusion object from the profile.
+Index of the mutual exclusion object in the profile.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method does not create a copy of the mutual exclusion object. The returned reference refers to the mutual exclusion contained in the profile object. You must not make any changes to the mutual exclusion object using this reference, because doing so can affect the profile object in unexpected ways.
To change the configuration of the mutual exclusion object in the profile, you must first clone the mutual exclusion object by calling
Adds a configured Advanced Systems Format (ASF) mutual exclusion object to the profile.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can create a mutual exclusion object by calling the
Removes an Advanced Systems Format (ASF) mutual exclusion object from the profile.
+The index of the mutual exclusion object to remove from the profile.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When a mutual exclusion object is removed from the profile, the ASF profile object reassigns the mutual exclusion indexes so that they are sequential starting with zero. Any previously stored indexes are no longer valid after calling this method.
+
Creates a new Advanced Systems Format (ASF) mutual exclusion object. Mutual exclusion objects can be added to a profile by calling the AddMutualExclusion method.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The ASF mutual exclusion object created by this method is not associated with the profile. Call
Note: This method is not supported.
+Reserved.
If this method succeeds, it returns
Note: This method is not supported.
+Reserved.
If this method succeeds, it returns
Note: This method is not supported.
+If this method succeeds, it returns
Note: This method is not implemented.
+Reserved.
Returns E_NOTIMPL.
Creates a copy of the Advanced Systems Format profile object.
+Receives a reference to the
If this method succeeds, it returns
The cloned object is completely independent of the original.
+
Adds a stream to the profile or reconfigures an existing stream.
+If the stream number in the ASF stream configuration object is already included in the profile, the information in the new object replaces the old one. If the profile does not contain a stream for the stream number, the ASF stream configuration object is added as a new stream.
+Note: This method is not supported.
+
Sends packetized Advanced Systems Format (ASF) data to the ASF splitter for processing.
+After using this method to parse data, you must call
If your ASF data contains variable-sized packets, you must set the
If the method returns ME_E_NOTACCEPTING, call GetNextSample to get the output samples, or call
The splitter might hold a reference count on the input buffer. Therefore, do not write over the valid data in the buffer after calling this method.
+
Resets the Advanced Systems Format (ASF) splitter and configures it to parse data from an ASF data section.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The pIContentInfo parameter is |
?
Sets option flags on the Advanced Systems Format (ASF) splitter.
+A bitwise combination of zero or more members of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The splitter is not initialized. |
| The dwFlags parameter does not contain a valid flag. |
| The |
?
This method can only be called after the splitter is initialized.
+
Retrieves the option flags that are set on the ASF splitter.
+Receives the option flags. This value is a bitwise OR of zero or more members of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pdwFlags is |
?
Sets the streams to be parsed by the Advanced Systems Format (ASF) splitter.
+An array of variables containing the list of stream numbers to select.
The number of valid elements in the stream number array.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pwStreamNumbers is |
| Invalid stream number was passed in the array. |
?
Calling this method supersedes any previous stream selections; only the streams specified in the pwStreamNumbers array will be selected.
By default, no streams are selected by the splitter.
You can obtain a list of the currently selected streams by calling the
Gets a list of currently selected streams.
+ The address of an array of WORDs. This array receives the stream numbers of the selected streams. This parameter can be
On input, points to a variable that contains the number of elements in the pwStreamNumbers array. Set the variable to zero if pwStreamNumbers is
On output, receives the number of elements that were copied into pwStreamNumbers. Each element is the identifier of a selected stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The pwStreamNumbers array is smaller than the number of selected streams. See Remarks. |
?
To get the number of selected streams, set pwStreamNumbers to null. The method sets *pwNumStreams
equal to the number of selected streams. Then allocate an array of that size and call the method again, passing the array in the pwStreamNumbers parameter.
The following code shows these steps:
DisplaySelectedStreams(*pSplitter)
{
    WORD count = 0;
    hr = pSplitter->GetSelectedStreams(null, &count);
    if (hr ==)
    {
        WORD *pStreamIds = new (std::nothrow) WORD[count];
        if (pStreamIds)
        {
            hr = pSplitter->GetSelectedStreams(pStreamIds, &count);
            if (SUCCEEDED(hr))
            {
                for (WORD i = 0; i < count; i++)
                {
                    printf("Selected stream ID: %d\n", pStreamIds[i]);
                }
            }
            delete [] pStreamIds;
        }
        else
        {
            hr = E_OUTOFMEMORY;
        }
    }
    return hr;
}
Alternatively, you can allocate an array that is equal to the total number of streams and pass that to pwStreamNumbers.
Before calling this method, initialize *pwNumStreams
to the number of elements in pwStreamNumbers. If pwStreamNumbers is null, set *pwNumStreams
to zero.
By default, no streams are selected by the splitter. Select streams by calling the
Sends packetized Advanced Systems Format (ASF) data to the ASF splitter for processing.
+Pointer to the
The offset into the data buffer where the splitter should begin parsing. This value is typically set to 0.
The length, in bytes, of the data to parse. This value is measured from the offset specified by cbBufferOffset. Set to 0 to process to the end of the buffer.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The pIBuffer parameter is The specified offset value in cbBufferOffset is greater than the length of the buffer. The total value of cbBufferOffset and cbLength is greater than the length of the buffer. |
| The |
| The splitter cannot process more input at this time. |
?
After using this method to parse data, you must call
If your ASF data contains variable-sized packets, you must set the
If the method returns ME_E_NOTACCEPTING, call GetNextSample to get the output samples, or call
The splitter might hold a reference count on the input buffer. Therefore, do not write over the valid data in the buffer after calling this method.
+
Retrieves a sample from the Advanced Systems Format (ASF) splitter after the data has been parsed.
+Receives one of the following values.
Value | Meaning |
---|---|
More samples are ready to be retrieved. Call GetNextSample in a loop until the pdwStatusFlags parameter receives the value zero. | |
| No additional samples are ready. Call |
?
If the method returns a sample in the ppISample parameter, this parameter receives the number of the stream to which the sample belongs.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The ASF data in the buffer is invalid. |
| There is a gap in the ASF data. |
?
Before calling this method, call
The ASF splitter skips samples for unselected streams. To select streams, call
Resets the Advanced Systems Format (ASF) splitter and releases all pending samples.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Any samples waiting to be retrieved when Flush is called are lost.
+
Retrieves the send time of the last sample received.
+Receives the send time of the last sample received.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pdwLastSendTime is |
?
Retrieves information about an existing payload extension.
+Gets the major media type of the stream.
+Receives the major media type for the stream. For a list of possible values, see Major Media Types.
If this method succeeds, it returns
Retrieves the stream number of the stream.
+The method returns the stream number.
Assigns a stream number to the stream.
+The number to assign to the stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Stream numbers start from 1 and do not need to be sequential.
+
Retrieves the media type of the stream.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To reduce unnecessary copying, the method returns a reference to the media type that is stored internally by the object. Do not modify the returned media type, as the results are not defined.
+
Sets the media type for the Advanced Systems Format (ASF) stream configuration object.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Some validation of the media type is performed by this method. However, a media type can be successfully set, but cause an error when the stream is added to the profile.
+
Retrieves the number of payload extensions that are configured for the stream.
+Receives the number of payload extensions.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves information about an existing payload extension.
+The payload extension index. Valid indexes range from 0, to one less than the number of extensions obtained by calling
Receives a
Receives the number of bytes added to each sample for the extension.
Pointer to a buffer that receives information about this extension system. This information is the same for all samples and is stored in the content header (not in each sample). This parameter can be
On input, specifies the size of the buffer pointed to by pbExtensionSystemInfo. On output, receives the required size of the pbExtensionSystemInfo buffer in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The buffer specified in pbExtensionSystemInfo is too small. |
| The wPayloadExtensionNumber parameter is out of range. |
?
Configures a payload extension for the stream.
+Pointer to a
Number of bytes added to each sample for the extension.
A reference to a buffer that contains information about this extension system. This information is the same for all samples and is stored in the content header (not with each sample). This parameter can be
Amount of data, in bytes, that describes this extension system. If this value is 0, then pbExtensionSystemInfo can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Removes all payload extensions that are configured for the stream.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
None.
+
Creates a copy of the Advanced Systems Format (ASF) stream configuration object.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The cloned object is completely independent of the original.
+
Retrieves the stream number of the stream.
+
Retrieves the media type of the stream.
+To reduce unnecessary copying, the method returns a reference to the media type that is stored internally by the object. Do not modify the returned media type, as the results are not defined.
+Note: This interface is not implemented.
Manages information about the relative priorities of a group of streams in an Advanced Systems Format (ASF) profile. This interface manages information about the relative priorities of a group of streams in an ASF profile. Priority is used in streaming to determine which streams should be dropped first when available bandwidth decreases.
The ASF stream prioritization object exposes this interface. The stream prioritization object maintains a list of stream numbers in priority order. The methods of this interface manipulate and interrogate that list. To obtain a reference to this interface, call the
Note: This interface is not implemented in this version of Media Foundation.
Retrieves the number of entries in the stream priority list.
+Receives the number of streams in the stream priority list.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Note: This interface is not implemented in this version of Media Foundation.
Retrieves the stream number of a stream in the stream priority list.
+Zero-based index of the entry to retrieve from the stream priority list. To get the number of entries in the priority list, call
Receives the stream number of the stream priority entry.
Receives a Boolean value. If TRUE, the stream is mandatory.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| |
?
Note: This interface is not implemented in this version of Media Foundation.
Adds a stream to the stream priority list.
+Stream number of the stream to add.
If TRUE, the stream is mandatory.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream number. |
?
The stream priority list is built by appending entries to the list with each call to AddStream. The list is evaluated in descending order of importance. The most important stream should be added first, and the least important should be added last.
+
Note: This interface is not implemented in this version of Media Foundation.
Removes a stream from the stream priority list.
+Index of the entry in the stream priority list to remove. Values range from zero, to one less than the stream count retrieved by calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When a stream is removed from the stream priority list, the index values of all streams that follow it in the list are decremented.
+
Note: This interface is not implemented in this version of Media Foundation.
Creates a copy of the ASF stream prioritization object.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The new object is completely independent of the original.
+
Sets the selection status of an output, overriding other selection criteria.
+
Retrieves the number of streams that are in the Advanced Systems Format (ASF) content.
+Receives the number of streams in the content.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the number of outputs for the Advanced Systems Format (ASF) content.
+Receives the number of outputs.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Outputs are streams in the ASF data section that will be parsed.
+
Retrieves the number of streams associated with an output.
+The output number for which to retrieve the stream count.
Receives the number of streams associated with the output.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid output number. |
?
An output is a stream in an ASF data section that will be parsed. If mutual exclusion is used, mutually exclusive streams share the same output.
+
Retrieves the stream numbers for all of the streams that are associated with an output.
+The output number for which to retrieve stream numbers.
Address of an array that receives the stream numbers associated with the output. The caller allocates the array. The array size must be at least as large as the value returned by the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid output number. |
?
An output is a stream in an ASF data section that will be parsed. If mutual exclusion is used, mutually exclusive streams share the same output.
+
Retrieves the output number associated with a stream.
+The stream number for which to retrieve an output number.
Receives the output number.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream number. |
?
Outputs are streams in the ASF data section that will be parsed.
+
Retrieves the manual output override selection that is set for a stream.
+Stream number for which to retrieve the output override selection.
Receives the output override selection. The value is a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the selection status of an output, overriding other selection criteria.
+Output number for which to set selection.
Member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the number of mutual exclusion objects associated with an output.
+Output number for which to retrieve the count of mutually exclusive relationships.
Receives the number of mutual exclusions.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a mutual exclusion object for an output.
+Output number for which to retrieve a mutual exclusion object.
Mutual exclusion number. This is an index of mutually exclusive relationships associated with the output. Set to a number between 0, and 1 less than the number of mutual exclusion objects retrieved by calling
Receives a reference to the mutual exclusion object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Outputs are streams in the ASF data section that will be parsed.
+
Selects a mutual exclusion record to use for a mutual exclusion object associated with an output.
+The output number for which to set a stream.
Index of the mutual exclusion for which to select.
Record of the specified mutual exclusion to select.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
An output is a stream in an Advanced Systems Format (ASF) data section that will be parsed. If mutual exclusion is used, mutually exclusive streams share the same output.
An ASF file can contain multiple mutually exclusive relationships, such as a file with both language based and bit-rate based mutual exclusion. If an output is involved in multiple mutually exclusive relationships, a record from each must be selected.
+
Retrieves the number of bandwidth steps that exist for the content. This method is used for multiple bit rate (MBR) content.
+Receives the number of bandwidth steps.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Bandwidth steps are bandwidth levels used for multiple bit rate (MBR) content. If you stream MBR content, you can choose the bandwidth step that matches the network conditions to avoid interruptions during playback.
+
Retrieves the stream numbers that apply to a bandwidth step. This method is used for multiple bit rate (MBR) content.
+Bandwidth step number for which to retrieve information. Set this value to a number between 0, and 1 less than the number of bandwidth steps returned by
Receives the bit rate associated with the bandwidth step.
Address of an array that receives the stream numbers. The caller allocates the array. The array size must be at least as large as the value returned by the
Address of an array that receives the selection status of each stream, as an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Bandwidth steps are bandwidth levels used for MBR content. If you stream MBR content, you can choose the bandwidth step that matches the network conditions to avoid interruptions during playback.
+
Retrieves the index of a bandwidth step that is appropriate for a specified bit rate. This method is used for multiple bit rate (MBR) content.
+The bit rate to find a bandwidth step for.
Receives the step number. Use this number to retrieve information about the step by calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In a streaming multiple bit rate (MBR) scenario, call this method with the current data rate of the network connection to determine the correct step to use. You can also call this method periodically throughout streaming to ensure that the best step is used.
+
Sets options for the stream selector.
+Bitwise OR of zero or more members of the MFASF_STREAMSELECTOR_FLAGS enumeration specifying the options to use.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets options for the stream selector.
+[This API is not supported and may be altered or unavailable in the future.]
Creates an audio media type from a
The
Alternatively, you can call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Represents a description of a media format.
+ To create a new media type, call
All of the information in a media type is stored as attributes. To clone a media type, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Gets the major type of the format.
+Receives the major type
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The major type is not set. |
?
This method is equivalent to getting the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Queries whether the media type is a temporally compressed format. Temporal compression uses information from previously decoded samples when decompressing the current sample.
+Receives a Boolean value. The value is TRUE if the format uses temporal compression, or
If this method succeeds, it returns
This method returns
If the method returns TRUE in pfCompressed, it is a hint that the format has temporal compression applied to it. If the method returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Compares two media types and determines whether they are identical. If they are not identical, the method indicates how the two formats differ.
+Pointer to the
Receives a bitwise OR of zero or more flags, indicating the degree of similarity between the two media types. The following flags are defined.
Value | Meaning |
---|---|
| The major types are the same. The major type is specified by the |
| The subtypes are the same, or neither media type has a subtype. The subtype is specified by the |
| The attributes in one of the media types are a subset of the attributes in the other, and the values of these attributes match, excluding the value of the Specifically, the method takes the media type with the smaller number of attributes and checks whether each attribute from that type is present in the other media type and has the same value (not including To perform other comparisons, use the |
| The user data is identical, or neither media type contains user data. User data is specified by the |
?
The method returns an
Return code | Description |
---|---|
| The types are not equal. Examine the pdwFlags parameter to determine how the types differ. |
| The types are equal. |
| One or both media types are invalid. |
?
Both of the media types must have a major type, or the method returns E_INVALIDARG.
If the method succeeds and all of the comparison flags are set in pdwFlags, the return value is
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an alternative representation of the media type. Currently only the DirectShow
Value | Meaning |
---|---|
| Convert the media type to a DirectShow |
| Convert the media type to a DirectShow |
| Convert the media type to a DirectShow |
| Convert the media type to a DirectShow |
?
Receives a reference to a structure that contains the representation. The method allocates the memory for the structure. The caller must release the memory by calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The details of the media type do not match the requested representation. |
| The media type is not valid. |
| The media type does not support the requested representation. |
?
If you request a specific format structure in the guidRepresentation parameter, such as
You can also use the MFInitAMMediaTypeFromMFMediaType function to convert a Media Foundation media type into a DirectShow media type.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Frees memory that was allocated by the
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
The media type is created without any attributes.
+Applies to: desktop apps | Metro style apps
Converts a Media Foundation audio media type to a
Receives the size of the
Contains a flag from the
If the wFormatTag member of the returned structure is
Gets the major type of the format.
+ This method is equivalent to getting the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Queries whether the media type is a temporally compressed format. Temporal compression uses information from previously decoded samples when decompressing the current sample.
+ This method returns
If the method returns TRUE in pfCompressed, it is a hint that the format has temporal compression applied to it. If the method returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Configures the audio session that is associated with the streaming audio renderer (SAR). Use this interface to change how the audio session appears in the Windows volume control.
The SAR exposes this interface as a service. To get a reference to the interface, call
Assigns the audio session to a group of sessions.
+A
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If two or more audio sessions share the same group, the Windows volume control displays one slider control for the entire group. Otherwise, it displays a slider for each session. For more information, see IAudioSessionControl::SetGroupingParam in the core audio API documentation.
+
Retrieves the group of sessions to which this audio session belongs.
+Receives a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If two or more audio sessions share the same group, the Windows volume control displays one slider control for the entire group. Otherwise, it displays a slider for each session. For more information, see IAudioSessionControl::SetGroupingParam in the core audio API documentation.
+
Sets the display name of the audio session. The Windows volume control displays this name.
+A null-terminated wide-character string that contains the display name.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the application does not set a display name, Windows creates one.
+
Retrieves the display name of the audio session. The Windows volume control displays this name.
+Receives a reference to the display name string. The caller must free the memory allocated for the string by calling CoTaskMemFree.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the application does not set a display name, Windows creates one.
+Sets the icon resource for the audio session. The Windows volume control displays this icon.
+A wide-character string that specifies the icon. See Remarks.
If this method succeeds, it returns
The icon path has the format "path,index" or "path,-id", where path is the fully qualified path to a DLL, executable file, or icon file; index is the zero-based index of the icon within the file; and id is a resource identifier. Note that resource identifiers are preceded by a minus sign (-) to distinguish them from indexes. The path can contain environment variables, such as "%windir%". For more information, see IAudioSessionControl::SetIconPath in the Windows SDK.
+
Retrieves the icon resource for the audio session. The Windows volume control displays this icon.
+Receives a reference to a wide-character string that specifies a shell resource. The format of the string is described in the topic
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the application did not set an icon path, the method returns an empty string ("").
For more information, see IAudioSessionControl::GetIconPath in the core audio API documentation.
+
Retrieves the group of sessions to which this audio session belongs.
+If two or more audio sessions share the same group, the Windows volume control displays one slider control for the entire group. Otherwise, it displays a slider for each session. For more information, see IAudioSessionControl::SetGroupingParam in the core audio API documentation.
+Controls the volume levels of individual audio channels.
The streaming audio renderer (SAR) exposes this interface as a service. To get a reference to the interface, call
If your application does not require channel-level volume control, you can use the
Volume is expressed as an attenuation level, where 0.0 indicates silence and 1.0 indicates full volume (no attenuation). For each channel, the attenuation level is the product of:
For example, if the master volume is 0.8 and the channel volume is 0.5, the attenuation for that channel is 0.8 × 0.5 = 0.4. Volume levels can exceed 1.0 (positive gain), but the audio engine clips any audio samples that exceed zero decibels.
Use the following formula to convert the volume level to the decibel (dB) scale:
Attenuation (dB) = 20 * log10(Level)
For example, a volume level of 0.50 represents 6.02 dB of attenuation.
+
Retrieves the number of channels in the audio stream.
+Receives the number of channels in the audio stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the volume level for a specified channel in the audio stream.
+Zero-based index of the audio channel. To get the number of channels, call
Volume level for the channel.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the volume level for a specified channel in the audio stream.
+Zero-based index of the audio channel. To get the number of channels, call
Receives the volume level for the channel.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the individual volume levels for all of the channels in the audio stream.
+Number of elements in the pfVolumes array. The value must equal the number of channels. To get the number of channels, call
Address of an array of size dwCount, allocated by the caller. The array specifies the volume levels for all of the channels. Before calling the method, set each element of the array to the desired volume level for the channel.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the volume levels for all of the channels in the audio stream.
+Number of elements in the pfVolumes array. The value must equal the number of channels. To get the number of channels, call
Address of an array of size dwCount, allocated by the caller. The method fills the array with the volume level for each channel in the stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the number of channels in the audio stream.
+Represents a buffer that contains a two-dimensional surface, such as a video frame.
+To get a reference to this interface, call QueryInterface on the media buffer.
To use a 2-D buffer, it is important to know the stride, which is the number of bytes needed to go from one row of pixels to the next. The stride may be larger than the image width, because the surface may contain padding bytes after each row of pixels. Stride can also be negative, if the pixels are oriented bottom-up in memory. For more information, see Image Stride.
Every video format defines a contiguous or packed representation. This representation is compatible with the standard layout of a DirectX surface in system memory, with no additional padding. For RGB video, the contiguous representation has a pitch equal to the image width in bytes, rounded up to the nearest DWORD boundary. For YUV video, the layout of the contiguous representation depends on the YUV format. For planar YUV formats, the Y plane might have a different pitch than the U and V planes.
If a media buffer supports the
Call the Lock2D method to access the 2-D buffer in its native format. The native format might not be contiguous. The buffer's
For uncompressed images, the amount of valid data in the buffer is determined by the width, height, and pixel layout of the image. For this reason, if you call Lock2D to access the buffer, do not rely on the values returned by
Gives the caller access to the memory in the buffer.
+Receives a reference to the first byte of the top row of pixels in the image. The top row is defined as the top row when the image is presented to the viewer, and might not be the first row in memory.
Receives the surface stride, in bytes. The stride might be negative, indicating that the image is oriented from the bottom up in memory.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Cannot lock the Direct3D surface. |
| The buffer cannot be locked at this time. |
?
If p is a reference to the first byte in a row of pixels, p + (*plPitch) points to the first byte in the next row of pixels. A buffer might contain padding after each row of pixels, so the stride might be wider than the width of the image in bytes. Do not access the memory that is reserved for padding bytes, because it might not be read-accessible or write-accessible. For more information, see Image Stride.
The reference returned in pbScanline0 remains valid as long as the caller holds the lock. When you are done accessing the memory, call
The values returned by the
The
When the underlying buffer is a Direct3D surface, the method fails if the surface is not lockable.
+
Unlocks a buffer that was previously locked. Call this method once for each call to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a reference to the buffer memory and the surface stride.
+Receives a reference to the first byte of the top row of pixels in the image.
Receives the stride, in bytes. For more information, see Image Stride.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| You must lock the buffer before calling this method. |
?
Before calling this method, you must lock the buffer by calling
Queries whether the buffer is contiguous in its native format.
+Receives a Boolean value. The value is TRUE if the buffer is contiguous, and
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
For a definition of contiguous as it applies to 2-D buffers, see the Remarks section in
Retrieves the number of bytes needed to store the contents of the buffer in contiguous format.
+Receives the number of bytes needed to store the contents of the buffer in contiguous format.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
For a definition of contiguous as it applies to 2-D buffers, see the Remarks section in
Copies this buffer into the caller's buffer, converting the data to contiguous format.
+Pointer to the destination buffer where the data will be copied. The caller allocates the buffer.
Size of the destination buffer, in bytes. To get the required size, call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid size specified in pbDestBuffer. |
?
If the original buffer is not contiguous, this method converts the contents into contiguous format during the copy. For a definition of contiguous as it applies to 2-D buffers, see the Remarks section in
Copies data to this buffer from a buffer that has a contiguous format.
+Pointer to the source buffer. The caller allocates the buffer.
Size of the source buffer, in bytes. To get the maximum size of the buffer, call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method copies the contents of the source buffer into the buffer that is managed by this
For a definition of contiguous as it applies to 2-D buffers, see the Remarks section in the
Queries whether the buffer is contiguous in its native format.
+For a definition of contiguous as it applies to 2-D buffers, see the Remarks section in
Retrieves the number of bytes needed to store the contents of the buffer in contiguous format.
+For a definition of contiguous as it applies to 2-D buffers, see the Remarks section in
Controls how a byte stream buffers data from a network.
To get a reference to this interface, call QueryInterface on the byte stream object.
+If a byte stream implements this interface, a media source can use it to control how the byte stream buffers data. This interface is designed for byte streams that read data from a network.
A byte stream that implements this interface should also implement the
The byte stream must send a matching
After the byte stream sends an
The byte stream should not send any more buffering events after it reaches the end of the file.
If buffering is disabled, the byte stream does not send any buffering events. Internally, however, it might still buffer data while it waits for I/O requests to complete. Therefore,
If the byte stream is buffering data internally and the media source calls EnableBuffering with the value TRUE, the byte stream can send
After the presentation has started, the media source should forward and
Sets the buffering parameters.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Enables or disables buffering.
+Specifies whether the byte stream buffers data. If TRUE, buffering is enabled. If
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Before calling this method, call
Stops any buffering that is in progress.
+The method returns an
Return code | Description |
---|---|
| The byte stream successfully stopped buffering. |
| No buffering was in progress. |
?
If the byte stream is currently buffering data, it stops and sends an
Sets the buffering parameters.
+Stops the background transfer of data to the local cache.
+The byte stream resumes transferring data to the cache if the application does one of the following:
Stops the background transfer of data to the local cache.
+If this method succeeds, it returns
The byte stream resumes transferring data to the cache if the application does one of the following:
Creates a media source from a byte stream.
+Applications do not use this interface directly. This interface is exposed by byte-stream handlers, which are used by the source resolver. When the byte-stream handler is given a byte stream, it parses the stream and creates a media source. Byte-stream handlers are registered by file name extension or MIME type.
+
Begins an asynchronous request to create a media source from a byte stream.
+Pointer to the byte stream's
String that contains the original URL of the byte stream. This parameter can be
Bitwise OR of zero or more flags. See Source Resolver Flags.
Pointer to the
Receives an
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Unable to parse the byte stream. |
?
The dwFlags parameter must contain the
The byte-stream handler is responsible for parsing the stream and validating the contents. If the stream is not valid or the byte stream handler cannot parse the stream, the handler should return a failure code. The byte stream is not guaranteed to match the type of stream that the byte handler is designed to parse.
If the pwszURL parameter is not
When the operation completes, the byte-stream handler calls the
Completes an asynchronous request to create a media source.
+Pointer to the
Receives a member of the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The operation was canceled. See |
| Unable to parse the byte stream. |
?
Call this method from inside the
Cancels the current request to create a media source.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can use this method to cancel a previous call to BeginCreateObject. Because that method is asynchronous, however, it might be completed before the operation can be canceled. Therefore, your callback might still be invoked after you call this method.
+
Retrieves the maximum number of bytes needed to create the media source or determine that the byte stream handler cannot parse this stream.
+Receives the maximum number of bytes that are required.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the maximum number of bytes needed to create the media source or determine that the byte stream handler cannot parse this stream.
+Retrieves the last clock time that was correlated with system time.
+At some fixed interval, a clock correlates its internal clock ticks with the system time. (The system time is the time returned by the high-resolution performance counter.) This method returns:
The clock time is returned in the pllClockTime parameter and is expressed in units of the clock's frequency. If the clock's
The system time is returned in the phnsSystemTime parameter, and is always expressed in 100-nanosecond units.
To find out how often the clock correlates its clock time with the system time, call GetProperties. The correlation interval is given in the qwCorrelationRate member of the
Some clocks support rate changes through the
For the presentation clock, the clock time is the presentation time, and is always relative to the starting time specified in
Retrieves the characteristics of the clock.
+Receives a bitwise OR of values from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the last clock time that was correlated with system time.
+Reserved, must be zero.
Receives the last known clock time, in units of the clock's frequency.
Receives the system time that corresponds to the clock time returned in pllClockTime, in 100-nanosecond units.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The clock does not have a time source. |
?
At some fixed interval, a clock correlates its internal clock ticks with the system time. (The system time is the time returned by the high-resolution performance counter.) This method returns:
The clock time is returned in the pllClockTime parameter and is expressed in units of the clock's frequency. If the clock's
The system time is returned in the phnsSystemTime parameter, and is always expressed in 100-nanosecond units.
To find out how often the clock correlates its clock time with the system time, call GetProperties. The correlation interval is given in the qwCorrelationRate member of the
Some clocks support rate changes through the
For the presentation clock, the clock time is the presentation time, and is always relative to the starting time specified in
Retrieves the clock's continuity key. (Not supported.)
+Receives the continuity key.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Continuity keys are currently not supported in Media Foundation. Clocks must return the value zero in the pdwContinuityKey parameter.
+
Retrieves the current state of the clock.
+Reserved, must be zero.
Receives the clock state, as a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the properties of the clock.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the characteristics of the clock.
+
Retrieves the clock's continuity key. (Not supported.)
+Continuity keys are currently not supported in Media Foundation. Clocks must return the value zero in the pdwContinuityKey parameter.
+
Retrieves the properties of the clock.
+
Retrieves an object in the collection.
+ This method does not remove the object from the collection. To remove an object, call
Retrieves the number of objects in the collection.
+Receives the number of objects in the collection.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves an object in the collection.
+Zero-based index of the object to retrieve. Objects are indexed in the order in which they were added to the collection.
Receives a reference to the object's
This method does not remove the object from the collection. To remove an object, call
Adds an object to the collection.
+Pointer to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If pUnkElement is
Removes an object from the collection.
+Zero-based index of the object to remove. Objects are indexed in the order in which they were added to the collection.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Adds an object at the specified index in the collection.
+The zero-based index where the object will be added to the collection.
The object to insert.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Removes all items from the collection.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the number of objects in the collection.
+Implements one step that must be performed for the user to access media content. For example, the steps might be individualization followed by license acquisition. Each of these steps would be encapsulated by a content enabler object that exposes the
Retrieves the type of operation that this content enabler performs.
+Receives a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The following GUIDs are defined for the pType parameter.
Value | Description |
---|---|
MFENABLETYPE_MF_RebootRequired | The user must reboot his or her computer. |
MFENABLETYPE_MF_UpdateRevocationInformation | Update revocation information. |
MFENABLETYPE_MF_UpdateUntrustedComponent | Update untrusted components. |
MFENABLETYPE_WMDRMV1_LicenseAcquisition | License acquisition for Windows Media Digital Rights Management (DRM) version 1. |
MFENABLETYPE_WMDRMV7_Individualization | Individualization. |
MFENABLETYPE_WMDRMV7_LicenseAcquisition | License acquisition for Windows Media DRM version 7 or later. |
?
+
Retrieves a URL for performing a manual content enabling action.
+Receives a reference to a buffer that contains the URL. The caller must release the memory for the buffer by calling CoTaskMemFree.
Receives the number of characters returned in ppwszURL, including the terminating
Receives a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No URL is available. |
?
If the enabling action can be performed by navigating to a URL, this method returns the URL. If no such URL exists, the method returns a failure code.
The purpose of the URL depends on the content enabler type, which is obtained by calling
Enable type | Purpose of URL |
---|---|
Individualization | Not applicable. |
License acquisition | URL to obtain the license. Call |
Revocation | URL to a webpage where the user can download and install an updated component. |
?
+
Retrieves the data for a manual content enabling action.
+Receives a reference to a buffer that contains the data. The caller must free the buffer by calling CoTaskMemFree.
Receives the size of the ppbData buffer.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No data is available. |
?
The purpose of the data depends on the content enabler type, which is obtained by calling
Enable type | Purpose of data |
---|---|
Individualization | Not applicable. |
License acquisition | HTTP POST data. |
Revocation | |
?
+
Queries whether the content enabler can perform all of its actions automatically.
+Receives a Boolean value. If TRUE, the content enabler can perform the enabling action automatically.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If this method returns TRUE in the pfAutomatic parameter, call the
If this method returns
Performs a content enabling action without any user interaction.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is asynchronous. When the operation is complete, the content enabler sends an
To find out whether the content enabler supports this method, call
Requests notification when the enabling action is completed.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method succeeded and no action was required. |
?
If you use a manual enabling action, call this method to be notified when the operation completes. If this method returns
You do not have to call MonitorEnable when you use automatic enabling by calling
Cancels a pending content enabling action.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The content enabler sends an
Retrieves the type of operation that this content enabler performs.
+The following GUIDs are defined for the pType parameter.
Value | Description |
---|---|
MFENABLETYPE_MF_RebootRequired | The user must reboot his or her computer. |
MFENABLETYPE_MF_UpdateRevocationInformation | Update revocation information. |
MFENABLETYPE_MF_UpdateUntrustedComponent | Update untrusted components. |
MFENABLETYPE_WMDRMV1_LicenseAcquisition | License acquisition for Windows Media Digital Rights Management (DRM) version 1. |
MFENABLETYPE_WMDRMV7_Individualization | Individualization. |
MFENABLETYPE_WMDRMV7_LicenseAcquisition | License acquisition for Windows Media DRM version 7 or later. |
?
+
Queries whether the content enabler can perform all of its actions automatically.
+If this method returns TRUE in the pfAutomatic parameter, call the
If this method returns
Enables playback of protected content by providing the application with a reference to a content enabler object.
Applications that play protected content should implement this interface.
+A content enabler is an object that performs some action that is required to play a piece of protected content. For example, the action might be obtaining a DRM license. Content enablers expose the
To use this interface, do the following:
Implement the interface in your application.
Create an attribute store by calling
Set the
Call
If the content requires a content enabler, the application's BeginEnableContent method is called. Usually this method called during the
Many content enablers send machine-specific data to the network, which can have privacy implications. One of the purposes of the
Begins an asynchronous request to perform a content enabling action.
This method requests the application to perform a specific step needed to acquire rights to the content, using a content enabler object.
+ Pointer to the
Pointer to the
Pointer to the
Reserved. Currently this parameter is always
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Do not block within this callback method. Instead, perform the content enabling action asynchronously on another thread. When the operation is finished, notify the protected media path (PMP) through the pCallback parameter.
If you return a success code from this method, you must call Invoke on the callback. Conversely, if you return an error code from this method, you must not call Invoke. If the operation fails after the method returns a success code, use status code on the
After the callback is invoked, the PMP will call the application's
This method is not necessarily called every time the application plays protected content. Generally, the method will not be called if the user has a valid, up-to-date license for the content. Internally, the input trust authority (ITA) determines whether BeginEnableContent is called, based on the content provider's DRM policy. For more information, see Protected Media Path.
+
Ends an asynchronous request to perform a content enabling action. This method is called by the protected media path (PMP) to complete an asynchronous call to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When the BeginEnableContent method completes asynchronously, the application notifies the PMP by invoking the asynchronous callback. The PMP calls EndEnableContent on the application to get the result code. This method is called on the application's thread from inside the callback method. Therefore, it must not block the thread that invoked the callback.
The application must return the success or failure code of the asynchronous processing that followed the call to BeginEnableContent.
+Enables the presenter for the enhanced video renderer (EVR) to request a specific frame from the video mixer.
The sample objects created by the
Called by the mixer to get the time and duration of the sample requested by the presenter.
+Receives the desired sample time that should be mixed.
Receives the sample duration that should be mixed.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No time stamp was set for this sample. See |
?
Called by the presenter to set the time and duration of the sample that it requests from the mixer.
+The time of the requested sample.
The duration of the requested sample.
This value should be set prior to passing the buffer to the mixer for a Mix operation. The mixer sets the actual start and duration times on the sample before sending it back.
+
Clears the time stamps previously set by a call to
After this method is called, the
This method also clears the time stamp and duration and removes all attributes from the sample.
+Initializes the Digital Living Network Alliance (DLNA) media sink.
The DLNA media sink exposes this interface. To get a reference to this interface, call CoCreateInstance. The CLSID is CLSID_MPEG2DLNASink.
+Initializes the Digital Living Network Alliance (DLNA) media sink.
+Pointer to a byte stream. The DLNA media sink writes data to this byte stream. The byte stream must be writable.
If TRUE, the DLNA media sink accepts PAL video formats. Otherwise, it accepts NTSC video formats.
This method can return one of these values.
Return code | Description |
---|---|
| The method succeeded. |
| The method was already called. |
| The media sink's |
?
Configures Windows Media Digital Rights Management (DRM) for Network Devices on a network sink.
The Advanced Systems Format (ASF) streaming media sink exposes this interface. To get a reference to the
For more information, see Remarks.
+To stream protected content over a network, the ASF streaming media sink provides an output trust authority (OTA) that supports Windows Media DRM for Network Devices and implements the
The application gets a reference to
To stream the content, the application does the following:
To stream DRM-protected content over a network from a server to a client, an application must use the Microsoft Media Foundation Protected Media Path (PMP). The media sink and the application-provided HTTP byte stream exist in mfpmp.exe. Therefore, the byte stream must expose the
Note: This might affect how the code is packaged. The DLL that contains the HTTP byte stream and other dependent DLLs must be signed for the Protected Environment (PE-signed).
When the clock starts for the first time or restarts, the encrypter that is used for encrypting samples is retrieved, and the license response is cached.
Gets the license response for the specified request.
+Pointer to a byte array that contains the license request.
Size, in bytes, of the license request.
Receives a reference to a byte array that contains the license response. The caller must free the array by calling CoTaskMemFree.
Receives the size, in bytes, of the license response.
Receives the key identifier. The caller must release the string by calling SysFreeString.
The function returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink was shut down. |
?
Not implemented in this release.
+Receives a reference to a byte array that contains the license response. The caller must free the array by calling CoTaskMemFree.
Receives the size, in bytes, of the license response.
The method returns E_NOTIMPL.
Enables an application to use a Media Foundation transform (MFT) that has restrictions on its use.
+If you register an MFT that requires unlocking, include the
Unlocks a Media Foundation transform (MFT) so that the application can use it.
+A reference to the
If this method succeeds, it returns
This method authenticates the caller, using a private communication channel between the MFT and the object that implements the
Sets the number of input pins on the DirectShow Enhanced Video Renderer (EVR) filter. To get a reference to this interface, call QueryInterface on the DirectShow EVR filter.
+The DirectShow EVR filter starts with one input pin, which corresponds to the reference stream. To create additional pins for video substreams, call SetNumberOfStreams.
The EVR media sink for Media Foundation does not support this interface. To add new streams to the EVR media sink, call
Sets the number of input pins on the EVR filter.
+Specifies the total number of input pins on the EVR filter. This value includes the input pin for the reference stream, which is created by default. For example, to mix one substream plus the reference stream, set this parameter to 2.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid number of streams. The minimum is one, and the maximum is 16. |
| This method has already been called, or at least one pin is already connected. |
?
After this method has been called, it cannot be called a second time on the same instance of the EVR filter. Also, the method fails if any input pins are connected.
+
Retrieves the number of input pins on the EVR filter. The EVR filter always has at least one input pin, which corresponds to the reference stream.
+Receives the number of streams.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the number of input pins on the EVR filter. The EVR filter always has at least one input pin, which corresponds to the reference stream.
+Gets the configuration parameters for the Microsoft DirectShow Enhanced Video Renderer Filter filter.
+Sets the configuration parameters for the Microsoft DirectShow Enhanced Video Renderer Filter (EVR).
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
?
Gets the configuration parameters for the Microsoft DirectShow Enhanced Video Renderer Filter filter.
+If this method succeeds, it returns
Gets or sets the configuration parameters for the Microsoft DirectShow Enhanced Video Renderer Filter filter.
+Optionally supported by media sinks to perform required tasks before shutdown. This interface is typically exposed by archive sinks—that is, media sinks that write to a file. It is used to perform tasks such as flushing data to disk or updating a file header.
To get a reference to this interface, call QueryInterface on the media sink.
+If a media sink exposes this interface, the Media Session will call BeginFinalize on the sink before the session closes.
+Implemented by media sink objects. This interface is the base interface for all Media Foundation media sinks. Stream sinks handle the actual processing of data on each stream.
+
Gets the characteristics of the media sink.
+Receives a bitwise OR of zero or more flags. The following flags are defined:
Value | Meaning |
---|---|
| The media sink has a fixed number of streams. It does not support the |
| The media sink cannot match rates with an external clock. For best results, this media sink should be used as the time source for the presentation clock. If any other time source is used, the media sink cannot match rates with the clock, with poor results (for example, glitching). This flag should be used sparingly, because it limits how the pipeline can be configured. For more information about the presentation clock, see Presentation Clock. |
| The media sink is rateless. It consumes samples as quickly as possible, and does not synchronize itself to a presentation clock. Most archiving sinks are rateless. |
| The media sink requires a presentation clock. The presentation clock is set by calling the media sink's This flag is obsolete, because all media sinks must support the SetPresentationClock method, even if the media sink ignores the clock (as in a rateless media sink). |
| The media sink can accept preroll samples before the presentation clock starts. The media sink exposes the |
| The first stream sink (index 0) is a reference stream. The reference stream must have a media type before the media types can be set on the other stream sinks. |
?
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink's Shutdown method has been called. |
?
The characteristics of a media sink are fixed throughout the life time of the sink.
+
Adds a new stream sink to the media sink.
+Identifier for the new stream. The value is arbitrary but must be unique.
Pointer to the
Receives a reference to the new stream sink's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified stream identifier is not valid. |
| The media sink's Shutdown method has been called. |
| There is already a stream sink with the same stream identifier. |
| This media sink has a fixed set of stream sinks. New stream sinks cannot be added. |
?
Not all media sinks support this method. If the media sink does not support this method, the
If pMediaType is
Removes a stream sink from the media sink.
+Identifier of the stream to remove. The stream identifier is defined when you call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This particular stream sink cannot be removed. |
| The stream number is not valid. |
| The media sink has not been initialized. |
| The media sink's Shutdown method has been called. |
| This media sink has a fixed set of stream sinks. Stream sinks cannot be removed. |
?
After this method is called, the corresponding stream sink object is no longer valid. The
Not all media sinks support this method. If the media sink does not support this method, the
In some cases, the media sink supports this method but does not allow every stream sink to be removed. (For example, it might not allow stream 0 to be removed.)
+
Gets the number of stream sinks on this media sink.
+Receives the number of stream sinks.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink's Shutdown method has been called. |
?
Gets a stream sink, specified by index.
+Zero-based index of the stream. To get the number of streams, call
Receives a reference to the stream's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid index. |
| The media sink's Shutdown method has been called. |
?
Enumerating stream sinks is not a thread-safe operation, because stream sinks can be added or removed between calls to this method.
+
Gets a stream sink, specified by stream identifier.
+Stream identifier of the stream sink.
Receives a reference to the stream's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The stream identifier is not valid. |
| The media sink's Shutdown method has been called. |
?
If you add a stream sink by calling the
To enumerate the streams by index number instead of stream identifier, call
Sets the presentation clock on the media sink.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The presentation clock does not have a time source. Call SetTimeSource on the presentation clock. |
| The media sink's Shutdown method has been called. |
?
During streaming, the media sink attempts to match rates with the presentation clock. Ideally, the media sink presents samples at the correct time according to the presentation clock and does not fall behind. Rateless media sinks are an exception to this rule, as they consume samples as quickly as possible and ignore the clock. If the sink is rateless, the
The presentation clock must have a time source. Before calling this method, call
If pPresentationClock is non-
All media sinks must support this method.
+
Gets the presentation clock that was set on the media sink.
+Receives a reference to the presentation clock's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No clock has been set. To set the presentation clock, call |
| The media sink's Shutdown method has been called. |
?
Shuts down the media sink and releases the resources it is using.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink was shut down. |
?
If the application creates the media sink, it is responsible for calling Shutdown to avoid memory or resource leaks. In most applications, however, the application creates an activation object for the media sink, and the Media Session uses that object to create the media sink. In that case, the Media Session — not the application — shuts down the media sink. (For more information, see Activation Objects.)
After this method returns, all methods on the media sink return
Gets the characteristics of the media sink.
+The characteristics of a media sink are fixed throughout the life time of the sink.
+
Gets the number of stream sinks on this media sink.
+
Gets the presentation clock that was set on the media sink.
+
Notifies the media sink to asynchronously take any steps it needs to finish its tasks.
+Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Many archive media sinks have steps they need to do at the end of archiving to complete their file operations, such as updating the header (for some formats) or flushing all pending writes to disk. In some cases, this may include expensive operations such as indexing the content. BeginFinalize is an asynchronous way to initiate final tasks.
When the finalize operation is complete, the callback object's
Completes an asynchronous finalize operation.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this method after the
The SetNow
method specifies the earliest time stamp that the DMO will deliver.
Reference time specifying the earliest time stamp to deliver.
Returns an
Return code | Description |
---|---|
| Failure |
| Success |
?
If quality control is enabled, the DMO discards any samples whose time stamp is less than rtNow. Samples whose time stamp is rtNow or later are processed as efficiently as possible. Depending on the implementation, the DMO might drop some samples to keep pace.
If quality control is disabled, this method has no immediate effect. However, the DMO stores the specified reference time. It uses this value if quality control is enabled at a later time. To enable quality control, call the
If incoming samples are not time-stamped, the DMO never performs quality control. The application sets the time stamp in the
The SetStatus
method enables or disables quality control.
Value that specifies whether to enable or disable quality control. Use
Returns an
Return code | Description |
---|---|
| Invalid argument |
| Success |
?
With quality control enabled, the DMO attempts to deliver samples on time. It can skip late samples if necessary. With quality control disabled, the DMO delivers every sample. If you enable quality control, call the SetStatus
succeeds but the DMO does not perform quality control.
By default, quality control is disabled.
+
The SetNow
method specifies the earliest time stamp that the DMO will deliver.
If quality control is enabled, the DMO discards any samples whose time stamp is less than rtNow. Samples whose time stamp is rtNow or later are processed as efficiently as possible. Depending on the implementation, the DMO might drop some samples to keep pace.
If quality control is disabled, this method has no immediate effect. However, the DMO stores the specified reference time. It uses this value if quality control is enabled at a later time. To enable quality control, call the
If incoming samples are not time-stamped, the DMO never performs quality control. The application sets the time stamp in the
The QueryOperationModePreferences
method retrieves the DMO's preferred optimization features.
Zero-based index of an output stream on the DMO.
Pointer to a variable that receives the DMO's requested features. The returned value is a bitwise combination of zero or more flags from the DMO_VIDEO_OUTPUT_STREAM_FLAGS enumeration.
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| |
| Success |
?
The GetCurrentOperationMode
method retrieves the optimization features in effect.
Zero-based index of an output stream on the DMO.
Pointer to a variable that receives the current features. The returned value is a bitwise combination of zero or more flags from the DMO_VIDEO_OUTPUT_STREAM_FLAGS enumeration.
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| |
| Success |
?
The Next
method retrieves a specified number of items in the enumeration sequence.
Number of items to retrieve.
Array of size cItemsToFetch that is filled with the CLSIDs of the enumerated DMOs.
Array of size cItemsToFetch that is filled with the friendly names of the enumerated DMOs.
Pointer to a variable that receives the actual number of items retrieved. Can be
Returns an
Return code | Description |
---|---|
| Invalid argument. |
| Insufficient memory. |
| |
| Retrieved fewer items than requested. |
| Retrieved the requested number of items. |
?
If the method succeeds, the arrays given by the pCLSID and Names parameters are filled with CLSIDs and wide-character strings. The value of *pcItemsFetched specifies the number of items returned in these arrays.
The method returns
The caller must free the memory allocated for each string returned in the Names parameter, using the CoTaskMemFree function.
+
The Skip
method skips over a specified number of items in the enumeration sequence.
Number of items to skip.
Returns
The Reset
method resets the enumeration sequence to the beginning.
Returns
This method is not implemented.
+Reserved.
Returns E_NOTIMPL.
The GetBufferAndLength
method retrieves the buffer and the size of the valid data in the buffer.
Either parameter can be
The value returned in the pcbLength parameter is the size of the valid data in the buffer, not the buffer's allocated size. To obtain the buffer's allocated size, call the
The SetLength
method specifies the length of the data currently in the buffer.
Size of the data, in bytes. The value must not exceed the buffer's maximum size. Call the
Returns
This method sets the size of the valid data currently in the buffer, not the buffer's allocated size.
+
The GetBufferAndLength
method retrieves the buffer and the size of the valid data in the buffer.
Address of a reference that receives the buffer array. Can be
Pointer to a variable that receives the size of the valid data, in bytes. Can be
Returns an
Return code | Description |
---|---|
| |
| Success |
?
Either parameter can be
The value returned in the pcbLength parameter is the size of the valid data in the buffer, not the buffer's allocated size. To obtain the buffer's allocated size, call the
The SetLength
method specifies the length of the data currently in the buffer.
This method sets the size of the valid data currently in the buffer, not the buffer's allocated size.
+
The
interface provides methods for manipulating a Microsoft DirectX Media Object (DMO).
The GetStreamCount
method retrieves the number of input and output streams.
Pointer to a variable that receives the number of input streams. Cannot be
Pointer to a variable that receives the number of output streams. Cannot be
Returns an
Return code | Description |
---|---|
| |
| Success |
?
The DMO might have zero input streams or zero output streams. The number of streams does not change; a DMO cannot dynamically add or remove streams.
+
The GetInputStreamInfo
method retrieves information about an input stream, such as any restrictions on the number of samples per buffer, and whether the stream performs lookahead on the input data. This information never changes.
Zero-based index of an input stream on the DMO.
Pointer to a variable that receives a bitwise combination of zero or more DMO_INPUT_STREAM_INFO_FLAGS flags.
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| |
| Success |
?
The
The application must be sure to allocate sufficient buffers for the DMO to process the input. Call the
The GetInputType
method retrieves a preferred media type for a specified input stream.
Zero-based index of an input stream on the DMO.
Zero-based index on the set of acceptable media types.
Pointer to a
Returns an
Return code | Description |
---|---|
| Invalid stream index. |
| Type index is out of range. |
| Insufficient memory. |
| |
| Success. |
?
Call this method to enumerate an input stream's preferred media types. The DMO assigns each media type an index value in order of preference. The most preferred type has an index of zero. To enumerate all the types, make successive calls while incrementing the type index until the method returns DMO_E_NO_MORE_ITEMS. The DMO is not guaranteed to enumerate every media type that it supports.
The format block in the returned type might be
If the method succeeds, call MoFreeMediaType to free the format block. (This function is also safe to call when the format block is
To set the media type, call the
To test whether a particular media type is acceptable, call SetInputType with the
To test whether the dwTypeIndex parameter is in range, set pmt to
The SetInputType
method sets the media type on an input stream, or tests whether a media type is acceptable.
Zero-based index of an input stream on the DMO.
Pointer to a
Bitwise combination of zero or more flags from the DMO_SET_TYPE_FLAGS enumeration.
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| Media type was not accepted |
| Media type is not acceptable |
| Media type was set successfully, or is acceptable |
?
Call this method to test, set, or clear the media type on an input stream:
The media types that are currently set on other streams can affect whether the media type is acceptable.
+
The SetOutputType
method sets the media type on an output stream, or tests whether a media type is acceptable.
Zero-based index of an output stream on the DMO.
Pointer to a
Bitwise combination of zero or more flags from the DMO_SET_TYPE_FLAGS enumeration.
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| Media type was not accepted |
| Media type is not acceptable |
| Media type was set successfully, or is acceptable |
?
Call this method to test, set, or clear the media type on an output stream:
The media types that are currently set on other streams can affect whether the media type is acceptable.
+
The GetInputCurrentType
method retrieves the media type that was set for an input stream, if any.
Zero-based index of an input stream on the DMO.
Pointer to a
Returns an
Return code | Description |
---|---|
| Invalid stream index. |
| Media type was not set. |
| Insufficient memory. |
| Success. |
?
The caller must set the media type for the stream before calling this method. To set the media type, call the
If the method succeeds, call MoFreeMediaType to free the format block.
+
The GetOutputCurrentType
method retrieves the media type that was set for an output stream, if any.
Zero-based index of an output stream on the DMO.
Pointer to a
Returns an
Return code | Description |
---|---|
| Invalid stream index. |
| Media type was not set. |
| Insufficient memory. |
| Success. |
?
The caller must set the media type for the stream before calling this method. To set the media type, call the
If the method succeeds, call MoFreeMediaType to free the format block.
+
The Discontinuity
method signals a discontinuity on the specified input stream.
Zero-based index of an input stream on the DMO.
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| The DMO is not accepting input. |
| The input and output types have not been set. |
| Success |
?
A discontinuity represents a break in the input. A discontinuity might occur because no more data is expected, the format is changing, or there is a gap in the data. After a discontinuity, the DMO does not accept further input on that stream until all pending data has been processed. The application should call the
This method might fail if it is called before the client sets the input and output types on the DMO.
+
The AllocateStreamingResources
method allocates any resources needed by the DMO. Calling this method is always optional.
Returns
An application can call this method as a streaming optimization. It gives the DMO an opportunity to perform any time-consuming initializations before streaming begins. If you call this method, do so after you set the media types on the DMO, but before you make the first calls to ProcessInput or ProcessOutput.
This method is optional in the following sense:
If the DMO supports this method, it should also support the
The FreeStreamingResources
method frees resources allocated by the DMO. Calling this method is always optional.
Returns
This method releases any resources that the
If the DMO does not support this method, the method returns
Regardless of whether the method fails or succeeds, the application can continue to call other methods on the DMO. The DMO might need to re-initialize resources that were previously freed.
+
The GetInputStatus
method queries whether an input stream can accept more input data.
Zero-based index of an input stream on the DMO.
Pointer to a variable that receives either zero or
Returns an
Return code | Description |
---|---|
| Invalid stream index |
| Success |
?
If the input stream will accept more data, the method returns the
The status of an input stream can change only as the result of one of the following method calls.
Method | Description |
---|---|
| Signals a discontinuity on the specified input stream. |
| Flushes all internally buffered data. |
| Delivers a buffer to the specified input stream. |
| Generates output from the current input data. |
?
+
The ProcessOutput
method generates output from the current input data.
Bitwise combination of zero or more flags from the DMO_PROCESS_OUTPUT_FLAGS enumeration.
Number of output buffers.
Pointer to an array of
Pointer to a variable that receives a reserved value (zero). The application should ignore this value.
Returns an
Return code | Description |
---|---|
| Failure |
| Invalid argument |
| |
| No output was generated |
| Success |
?
The pOutputBuffers parameter points to an array of
Each
When the application calls ProcessOutput
, the DMO processes as much input data as possible. It writes the output data to the output buffers, starting from the end of the data in each buffer. (To find the end of the data, call the
If the DMO fills an entire output buffer and still has input data to process, the DMO returns the
If the method returns S_FALSE, no output was generated. However, a DMO is not required to return S_FALSE in this situation; it might return
Discarding data:
You can discard data from a stream by setting the
For each stream in which pBuffer is
To check whether a stream is discardable or optional, call the
The Lock
method acquires or releases a lock on the DMO. Call this method to keep the DMO serialized when performing multiple operations.
Value that specifies whether to acquire or release the lock. If the value is non-zero, a lock is acquired. If the value is zero, the lock is released.
Returns an
Return code | Description |
---|---|
| Failure |
| Success |
?
This method prevents other threads from calling methods on the DMO. If another thread calls a method on the DMO, the thread blocks until the lock is released.
If you are using the Active Template Library (ATL) to implement a DMO, the name of the Lock method conflicts with the CComObjectRootEx::Lock method. To work around this problem, define the preprocessor symbol FIX_LOCK_NAME before including the header file Dmo.h:
#define FIX_LOCK_NAME
#include <dmo.h>
This directive causes the preprocessor to rename the
The Process
method processes a block of data. The application supplies a reference to a block of input data. The DMO processes the data in place.
Size of the data, in bytes.
Pointer to a buffer of size ulSize. On input, the buffer holds the input data. If the method returns successfully, the buffer contains the output data.
Start time of the data.
Either
Returns an
Return code | Description |
---|---|
| Failure |
| Success. There is still data to process. |
| Success. There is no remaining data to process. |
?
If the method fails, the buffer might contain garbage. The application should not use the contents of the buffer.
The DMO might produce output data beyond the length of the input data. This is called an effect tail. For example, a reverb effect continues after the input reaches silence. If the DMO has an effect tail, this method returns S_FALSE.
While the application has input data for processing, call the Process
method with the dwFlags parameter set to Process
again, this time with a zeroed input buffer and the Process
in this way until the return value is
If the DMO has no effect tail, this method returns S_TRUE or an error code.
+
The Clone
method creates a copy of the DMO in its current state.
Address of a reference to receive the new DMO's
Returns
If the method succeeds, the
The GetLatency
method retrieves the latency introduced by this DMO.
Pointer to a variable that receives the latency, in 100-nanosecond units.
Returns
This method returns the average time required to process each buffer. This value usually depends on factors in the run-time environment, such as the processor speed and the CPU load. One possible way to implement this method is for the DMO to keep a running average based on historical data.
+
The GetLatency
method retrieves the latency introduced by this DMO.
This method returns the average time required to process each buffer. This value usually depends on factors in the run-time environment, such as the processor speed and the CPU load. One possible way to implement this method is for the DMO to keep a running average based on historical data.
+Enables other components in the protected media path (PMP) to use the input protection system provided by an input trust authorities (ITA). An ITA is a component that implements an input protection system for media content. ITAs expose the
An ITA translates policy from the content's native format into a common format that is used by other PMP components. It also provides a decrypter, if one is needed to decrypt the stream.
The topology contains one ITA instance for every protected stream in the media source. The ITA is obtained from the media source by calling
Retrieves a decrypter transform.
+Interface identifier (IID) of the interface being requested. Currently this value must be IID_IMFTransform, which requests the
Receives a reference to the interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The decrypter does not support the requested interface. |
| This input trust authority (ITA) does not provide a decrypter. |
?
The decrypter should be created in a disabled state, where any calls to
An ITA is not required to provide a decrypter. If the source content is not encrypted, the method should return
The ITA must create a new instance of its decrypter for each call to GetDecrypter. Do not return multiple references to the same decrypter. They must be separate instances because the Media Session might place them in two different branches of the topology.
+
Requests permission to perform a specified action on the stream.
+The requested action, specified as a member of the
Receives the value
The method returns an
Return code | Description |
---|---|
| The user has permission to perform this action. |
| The user must individualize the application. |
| The user must obtain a license. |
?
This method verifies whether the user has permission to perform a specified action on the stream. The ITA does any work needed to verify the user's right to perform the action, such as checking licenses.
To verify the user's rights, the ITA might need to perform additional steps that require interaction with the user or consent from the user. For example, it might need to acquire a new license or individualize a DRM component. In that case, the ITA creates an activation object for a content enabler and returns the activation object's
The Media Session returns the
The application calls
The application calls
The Media Session calls RequestAccess again.
The return value signals whether the user has permission to perform the action:
If the user already has permission to perform the action, the method returns
If the user does not have permission, the method returns a failure code and sets *ppContentEnablerActivate to
If the ITA must perform additional steps that require interaction with the user, the method returns a failure code and returns the content enabler's
The Media Session will not allow the action unless this method returns
A stream can go to multiple outputs, so this method might be called multiple times with different actions, once for every output.
+
Retrieves the policy that defines which output protection systems are allowed for this stream, and the configuration data for each protection system.
+The action that will be performed on this stream, specified as a member of the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Notifies the input trust authority (ITA) that a requested action is about to be performed.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Before calling this method, the Media Session calls
Notifies the input trust authority (ITA) when the number of output trust authorities (OTAs) that will perform a specified action has changed.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The ITA can update its internal state if needed. If the method returns a failure code, the Media Session cancels the action.
+
Resets the input trust authority (ITA) to its initial state.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When this method is called, the ITA should disable any decrypter that was returned in the
Registers one or more Media Foundation transforms (MFTs) in the caller's process.
+This method is similar to the
Unlike
Registers one or more Media Foundation transforms (MFTs) in the caller's process.
+A reference to an array of
The number of elements in the pMFTs array.
If this method succeeds, it returns
This method is similar to the
Unlike
Represents a block of memory that contains media data. Use this interface to access the data in the buffer.
+If the buffer contains 2-D image data (such as an uncompressed video frame), you should query the buffer for the
To get a buffer from a media sample, call one of the following
To create a new buffer object, use one of the following functions.
Function | Description |
---|---|
| Creates a buffer and allocates system memory. |
| Creates a media buffer that wraps an existing media buffer. |
| Creates a buffer that manages a DirectX surface. |
| Creates a buffer and allocates system memory with a specified alignment. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Gives the caller access to the memory in the buffer, for reading or writing
+Receives the maximum amount of data that can be written to the buffer. This parameter can be
Receives the length of the valid data in the buffer, in bytes. This parameter can be
Receives a reference to the start of the buffer.
This method gives the caller access to the entire buffer, up to the maximum size returned in the pcbMaxLength parameter. The value returned in pcbCurrentLength is the size of any valid data already in the buffer, which might be less than the total buffer size.
The reference returned in ppbBuffer is guaranteed to be valid, and can safely be accessed across the entire buffer for as long as the lock is held. When you are done accessing the buffer, call
Locking the buffer does not prevent other threads from calling Lock, so you should not rely on this method to synchronize threads.
This method does not allocate any memory, or transfer ownership of the memory to the caller. Do not release or free the memory; the media buffer will free the memory when the media buffer is destroyed.
If you modify the contents of the buffer, update the current length by calling
If the buffer supports the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Unlocks a buffer that was previously locked. Call this method once for every call to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| For Direct3D surface buffers, an error occurred when unlocking the surface. |
?
It is an error to call Unlock if you did not call Lock previously.
After calling this method, do not use the reference returned by the Lock method. It is no longer guaranteed to be valid.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the length of the valid data in the buffer.
+Receives the length of the valid data, in bytes. If the buffer does not contain any valid data, the value is zero.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets the length of the valid data in the buffer.
+Length of the valid data, in bytes. This value cannot be greater than the allocated size of the buffer, which is returned by the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified length is greater than the maximum size of the buffer. |
?
Call this method if you write data into the buffer.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the allocated size of the buffer.
+Receives the allocated size of the buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The buffer might or might not contain any valid data, and if there is valid data in the buffer, it might be smaller than the buffer's allocated size. To get the length of the valid data, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the length of the valid data in the buffer.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the allocated size of the buffer.
+The buffer might or might not contain any valid data, and if there is valid data in the buffer, it might be smaller than the buffer's allocated size. To get the length of the valid data, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Represents an event generated by a Media Foundation object. Use this interface to get information about the event.
To get a reference to this interface, call
If you are implementing an object that generates events, call the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the event type. The event type indicates what happened to trigger the event. It also defines the meaning of the event value.
+Receives the event type. For a list of event types, see Media Foundation Events.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the extended type of the event.
+Receives a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
To define a custom event, create a new extended-type
Some standard Media Foundation events also use the extended type to differentiate between types of event data.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an
Receives the event status. If the operation that generated the event was successful, the value is a success code. A failure code means that an error condition triggered the event.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the value associated with the event, if any. The value is retrieved as a
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Before calling this method, call PropVariantInit to initialize the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the event type. The event type indicates what happened to trigger the event. It also defines the meaning of the event value.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the extended type of the event.
+To define a custom event, create a new extended-type
Some standard Media Foundation events also use the extended type to differentiate between types of event data.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the value associated with the event, if any. The value is retrieved as a
Before calling this method, call PropVariantInit to initialize the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves events from any Media Foundation object that generates events.
+An object that supports this interface maintains a queue of events. The client of the object can retrieve the events either synchronously or asynchronously. The synchronous method is GetEvent. The asynchronous methods are BeginGetEvent and EndGetEvent.
+
Retrieves the next event in the queue. This method is synchronous.
+Specifies one of the following values.
Value | Meaning |
---|---|
| The method blocks until the event generator queues an event. |
| The method returns immediately. |
?
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| |
| There is a pending request. |
| There are no events in the queue. |
| The object was shut down. |
?
This method executes synchronously.
If the queue already contains an event, the method returns
If dwFlags is 0, the method blocks indefinitely until a new event is queued, or until the event generator is shut down.
If dwFlags is MF_EVENT_FLAG_NO_WAIT, the method fails immediately with the return code
This method returns
Begins an asynchronous request for the next event in the queue.
+Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| |
| There is a pending request with the same callback reference and a different state object. |
| There is a pending request with a different callback reference. |
| The object was shut down. |
| There is a pending request with the same callback reference and state object. |
?
When a new event is available, the event generator calls the
Do not call BeginGetEvent a second time before calling EndGetEvent. While the first call is still pending, additional calls to the same object will fail. Also, the
Completes an asynchronous request for the next event in the queue.
+Pointer to the
Receives a reference to the
Call this method from inside your application's
Puts a new event in the object's queue.
+Specifies the event type. The event type is returned by the event's
The extended type. If the event does not have an extended type, use the value GUID_NULL. The extended type is returned by the event's
A success or failure code indicating the status of the event. This value is returned by the event's
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object was shut down. |
?
Applies to: desktop apps | Metro style apps
Retrieves the next event in the queue. This method is synchronous.
+This method executes synchronously.
If the queue already contains an event, the method returns
If dwFlags is 0, the method blocks indefinitely until a new event is queued, or until the event generator is shut down.
If dwFlags is MF_EVENT_FLAG_NO_WAIT, the method fails immediately with the return code
This method returns
Applies to: desktop apps | Metro style apps
Begins an asynchronous request for the next event in the queue.
+Pointer to the
When a new event is available, the event generator calls the
Do not call BeginGetEvent a second time before calling EndGetEvent. While the first call is still pending, additional calls to the same object will fail. Also, the
Shuts down the event queue.
+Call this method when your component shuts down. After this method is called, all
This method removes all of the events from the queue.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Begins an asynchronous request for the next event in the queue.
Call this method inside your implementation of
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Shutdown method was called. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Completes an asynchronous request for the next event in the queue.
Call this method inside your implementation of
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Shutdown method was called. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Puts an event in the queue.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Shutdown method was called. |
?
Call this method when your component needs to raise an event that contains attributes. To create the event object, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Creates an event, sets a
Call this method inside your implementation of
You can also call this method when your component needs to raise an event that does not contain attributes. If the event data is an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Shutdown method was called. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Creates an event, sets an
Specifies the event type of the event to be added to the queue. The event type is returned by the event's
The extended type of the event. If the event does not have an extended type, use the value GUID_NULL. The extended type is returned by the event's
A success or failure code indicating the status of the event. This value is returned by the event's
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Shutdown method was called. |
?
Call this method when your component needs to raise an event that contains an
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Shuts down the event queue.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this method when your component shuts down. After this method is called, all
This method removes all of the events from the queue.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Provides playback controls for protected and unprotected content. The Media Session and the protected media path (PMP) session objects expose this interface. This interface is the primary interface that applications use to control the Media Foundation pipeline.
To obtain a reference to this interface, call
Sets a topology on the Media Session.
+ Bitwise OR of zero or more flags from the
Pointer to the topology object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The operation cannot be performed in the Media Session's current state. |
| The Media Session has been shut down. |
| The topology has invalid values for one or more of the following attributes: |
| Protected content cannot be played while debugging. |
?
If pTopology is a full topology, set the
If the Media Session is currently paused or stopped, the SetTopology method does not take effect until the next call to
If the Media Session is currently running, or on the next call to Start, the SetTopology method does the following:
This method is asynchronous. If the method returns
Clears all of the presentations that are queued for playback in the Media Session.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The operation cannot be performed in the Media Session's current state. |
| The Media Session has been shut down. |
?
This method is asynchronous. When the operation completes, the Media Session sends an
This method does not clear the current topology; it only removes topologies that are placed in the queue, waiting for playback. To remove the current topology, call
Starts the Media Session.
+Pointer to a
The following time format GUIDs are defined:
Value | Meaning |
---|---|
| Presentation time. The pvarStartPosition parameter must have one of the following
All media sources support this time format. |
| Segment offset. This time format is supported by the Sequencer Source. The starting time is an offset within a segment. Call the |
| Note: Requires Windows 7 or later. Skip to a playlist entry. The pvarStartPosition parameter specifies the index of the playlist entry, relative to the current entry. For example, the value 2 skips forward two entries. To skip backward, pass a negative value. The If a media source supports this time format, the |
?
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The operation cannot be performed in the Media Session's current state. |
| The Media Session has been shut down. |
?
When this method is called, the Media Session starts the presentation clock and begins to process media samples.
This method is asynchronous. When the method completes, the Media Session sends an
Provides playback controls for protected and unprotected content. The Media Session and the protected media path (PMP) session objects expose this interface. This interface is the primary interface that applications use to control the Media Foundation pipeline.
To obtain a reference to this interface, call
Provides playback controls for protected and unprotected content. The Media Session and the protected media path (PMP) session objects expose this interface. This interface is the primary interface that applications use to control the Media Foundation pipeline.
To obtain a reference to this interface, call
Closes the Media Session and releases all of the resources it is using.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Media Session has been shut down. |
?
This method is asynchronous. When the operation completes, the Media Session sends an
After the Close method is called, the only valid methods on the Media Session are the following:
All other methods return
Shuts down the Media Session and releases all the resources used by the Media Session.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this method when you are done using the Media Session, before the final call to IUnknown::Release. Otherwise, your application will leak memory.
After this method is called, other
Retrieves the Media Session's presentation clock.
+Receives a reference to the presentation clock's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Media Session does not have a presentation clock. |
| The Media Session has been shut down. |
?
The application can query the returned
Retrieves the capabilities of the Media Session, based on the current presentation.
+Receives a bitwise OR of zero or more of the following flags.
Value | Meaning |
---|---|
| The Media Session can be paused. |
| The Media Session supports forward playback at rates faster than 1.0. |
| The Media Session supports reverse playback. |
| The Media Session can be seeked. |
| The Media Session can be started. |
?
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| |
| The Media Session has been shut down. |
?
Gets a topology from the Media Session.
This method can get the current topology or a queued topology.
+ Bitwise OR of zero or more flags from the
The identifier of the topology. This parameter is ignored if the dwGetFullTopologyFlags parameter contains the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The Media Session has been shut down. |
?
If the
This method can be used to retrieve the topology for the current presentation or any pending presentations. It cannot be used to retrieve a topology that has already ended.
The topology returned in ppFullTopo is a full topology, not a partial topology.
+
Retrieves the Media Session's presentation clock.
+The application can query the returned
Retrieves the capabilities of the Media Session, based on the current presentation.
+Enables a media sink to receive samples before the presentation clock is started.
To get a reference to this interface, call QueryInterface on the media sink.
+Media sinks can implement this interface to support seamless playback and transitions. If a media sink exposes this interface, it can receive samples before the presentation clock starts. It can then pre-process the samples, so that rendering can begin immediately when the clock starts. Prerolling helps to avoid glitches during playback.
If a media sink supports preroll, the media sink's
Notifies the media sink that the presentation clock is about to start.
+ The upcoming start time for the presentation clock, in 100-nanosecond units. This time is the same value that will be given to the
If this method succeeds, it returns
After this method is called, the media sink sends any number of
During preroll, the media sink can prepare the samples that it receives, so that they are ready to be rendered. It does not actually render any samples until the clock starts.
+Implemented by media source objects.
Media sources are objects that generate media data. For example, the data might come from a video file, a network stream, or a hardware device, such as a camera. Each media source contains one or more streams, and each stream delivers data of one type, such as audio or video.
+In Windows 8, this interface is extended with IMFMediaSourceEx.
+
Retrieves the characteristics of the media source.
+Receives a bitwise OR of zero or more flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media source's Shutdown method has been called. |
?
The characteristics of a media source can change at any time. If this happens, the source sends an
Retrieves a copy of the media source's presentation descriptor. Applications use the presentation descriptor to select streams and to get information about the source content.
+Receives a reference to the presentation descriptor's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media source's Shutdown method has been called. |
?
The presentation descriptor contains the media source's default settings for the presentation. The application can change these settings by selecting or deselecting streams, or by changing the media type on a stream. Do not modify the presentation descriptor unless the source is stopped. The changes take effect when the source's
Starts, seeks, or restarts the media source by specifying where to start playback.
+ Pointer to the
Pointer to a
Specifies where to start playback. The units of this parameter are indicated by the time format given in pguidTimeFormat. If the time format is GUID_NULL, the variant type must be VT_I8 or VT_EMPTY. Use VT_I8 to specify a new starting position, in 100-nanosecond units. Use VT_EMPTY to start from the current position. Other time formats might use other
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The start position is past the end of the presentation (ASF media source). |
| A hardware device was unable to start streaming. This error code can be returned by a media source that represents a hardware device, such as a camera. For example, if the camera is already being used by another application, the method might return this error code. |
| The start request is not valid. For example, the start position is past the end of the presentation. |
| The media source's Shutdown method has been called. |
| The media source does not support the time format specified in pguidTimeFormat. |
?
This method is asynchronous. If the operation succeeds, the media source sends the following events:
If the start operation fails asynchronously (after the method returns
A call to Start results in a seek if the previous state was started or paused, and the new starting position is not VT_EMPTY. Not every media source can seek. If a media source can seek, the
Events from the media source are not synchronized with events from the media streams. If you seek a media source, therefore, you can still receive samples from the earlier position after getting the
Stops all active streams in the media source.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media source's Shutdown method has been called. |
?
This method is asynchronous. When the operation completes, the media source sends an
When a media source is stopped, its current position reverts to zero. After that, if the Start method is called with VT_EMPTY for the starting position, playback starts from the beginning of the presentation.
While the source is stopped, no streams produce data.
+
Pauses all active streams in the media source.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid state transition. The media source must be in the started state. |
| The media source's Shutdown method has been called. |
?
This method is asynchronous. When the operation completes, the media source sends an
The media source must be in the started state. The method fails if the media source is paused or stopped.
While the source is paused, calls to
Not every media source can pause. If a media source can pause, the
Shuts down the media source and releases the resources it is using.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the application creates the media source, either directly or through the source resolver, the application is responsible for calling Shutdown to avoid memory or resource leaks.
After this method is called, methods on the media source and all of its media streams return
Retrieves the characteristics of the media source.
+The characteristics of a media source can change at any time. If this happens, the source sends an
Notifies the source when playback has reached the end of a segment. For timelines, this corresponds to reaching a mark-out point.
+
Notifies the source when playback has reached the end of a segment. For timelines, this corresponds to reaching a mark-out point.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Enables an application to get a topology from the sequencer source. This interface is exposed by the sequencer source object.
+
Returns a topology for a media source that builds an internal topology.
+A reference to the
Receives a reference to the topology's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. For example, a |
?
Represents one stream in a media source.
+Streams are created when a media source is started. For each stream, the media source sends an
Retrieves a reference to the media source that created this media stream.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media source's Shutdown method has been called. |
?
Retrieves a stream descriptor for this media stream.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media source's Shutdown method has been called. |
?
Do not modify the stream descriptor. To change the presentation, call
Requests a sample from the media source.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The end of the stream was reached. |
| The media source is stopped. |
| The source's Shutdown method has been called. |
?
If pToken is not
When the next sample is available, the media stream does the following:
If the media stream cannot fulfill the caller's request for a sample, it simply releases the token object and skips steps 2 and 3.
The caller should monitor the reference count on the request token. If the media stream sends an
Because the Media Foundation pipeline is multithreaded, the source's RequestSample method might get called after the source has stopped. If the media source is stopped, the method should return
Note: Earlier versions of the documentation listed the wrong error code for this case.
If the media source is paused, the method succeeds, but the stream does not deliver the sample until the source is started again.
If a media source encounters an error asynchronously while processing data, it should signal the error in one of the following ways (but not both):
Retrieves a reference to the media source that created this media stream.
+
Retrieves a stream descriptor for this media stream.
+Do not modify the stream descriptor. To change the presentation, call
Sets the object's media type.
+For media sources, setting the media type means the source will generate data that conforms to that media type. For media sinks, setting the media type means the sink can receive data that conforms to that media type.
Any implementation of this method should check whether pMediaType differs from the object's current media type. If the types are identical, the method should return
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Queries whether the object supports a specified media type.
+ Pointer to the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object does not support this media type. |
?
If the object supports the media type given in pMediaType, the method returns
The ppMediaType parameter is optional. If the method fails, the object might use ppMediaType to return a media type that the object does support, and which closely matches the one given in pMediaType. The method is not guaranteed to return a media type in ppMediaType. If no type is returned, this parameter receives a
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of media types in the object's list of supported media types.
+Receives the number of media types in the list.
If this method succeeds, it returns
To get the supported media types, call
For a media source, the media type handler for each stream must contain at least one supported media type. For media sinks, the media type handler for each stream might contain zero media types. In that case, the application must provide the media type. To test whether a particular media type is supported, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a media type from the object's list of supported media types.
+ Zero-based index of the media type to retrieve. To get the number of media types in the list, call
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwIndex parameter is out of range. |
?
Media types are returned in the approximate order of preference. The list of supported types is not guaranteed to be complete. To test whether a particular media type is supported, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets the object's media type.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid request. |
?
For media sources, setting the media type means the source will generate data that conforms to that media type. For media sinks, setting the media type means the sink can receive data that conforms to that media type.
Any implementation of this method should check whether pMediaType differs from the object's current media type. If the types are identical, the method should return
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the current media type of the object.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No media type is set. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Gets the major media type of the object.
+Receives a
If this method succeeds, it returns
The major type identifies what kind of data is in the stream, such as audio or video. To get the specific details of the format, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of media types in the object's list of supported media types.
+ To get the supported media types, call
For a media source, the media type handler for each stream must contain at least one supported media type. For media sinks, the media type handler for each stream might contain zero media types. In that case, the application must provide the media type. To test whether a particular media type is supported, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the current media type of the object.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Gets the major media type of the object.
+The major type identifies what kind of data is in the stream, such as audio or video. To get the specific details of the format, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Manages metadata for an object. Metadata is information that describes a media file, stream, or other content. Metadata consists of individual properties, where each property contains a descriptive name and a value. A property may be associated with a particular language.
To get this interface from a media source, use the
Sets the language for setting and retrieving metadata.
+Pointer to a null-terminated string containing an RFC 1766-compliant language tag.
If this method succeeds, it returns
For more information about language tags, see RFC 1766, "Tags for the Identification of Languages".
+Gets the current language setting.
+Receives a reference to a null-terminated string containing an RFC 1766-compliant language tag. The caller must release the string by calling CoTaskMemFree.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The metadata provider does not support multiple languages. |
| No language was set. |
?
For more information about language tags, see RFC 1766, "Tags for the Identification of Languages."
The
Gets a list of the languages in which metadata is available.
+ A reference to a
The returned
If this method succeeds, it returns
For more information about language tags, see RFC 1766, "Tags for the Identification of Languages".
To set the current language, call
Sets the value of a metadata property.
+Pointer to a null-terminated string containing the name of the property.
Pointer to a
If this method succeeds, it returns
Gets the value of a metadata property.
+ A reference to a null-terminated string that contains the name of the property. To get the list of property names, call
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The requested property was not found. |
?
Deletes a metadata property.
+Pointer to a null-terminated string containing the name of the property.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The property was not found. |
?
For a media source, deleting a property from the metadata collection does not change the original content.
+Gets a list of all the metadata property names on this object.
+Pointer to a
If this method succeeds, it returns
Gets a list of the languages in which metadata is available.
+For more information about language tags, see RFC 1766, "Tags for the Identification of Languages".
To set the current language, call
Gets a list of all the metadata property names on this object.
+Gets metadata from a media source or other object.
If a media source supports this interface, it must expose the interface as a service. To get a reference to this interface from a media source, call
Use this interface to get a reference to the
Gets a collection of metadata, either for an entire presentation, or for one stream in the presentation.
+ Pointer to the
If this parameter is zero, the method retrieves metadata that applies to the entire presentation. Otherwise, this parameter specifies a stream identifier, and the method retrieves metadata for that stream. To get the stream identifier for a stream, call
Reserved. Must be zero.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No metadata is available for the requested stream or presentation. |
?
Contains data that is needed to implement the
Any custom implementation of the
Sets and retrieves user-name and password information for authentication purposes.
+
Sets the user name.
+Pointer to a buffer that contains the user name. If fDataIsEncrypted is
Size of pbData, in bytes. If fDataIsEncrypted is
If TRUE, the user name is encrypted. Otherwise, the user name is not encrypted.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the password.
+Pointer to a buffer that contains the password. If fDataIsEncrypted is
Size of pbData, in bytes. If fDataIsEncrypted is
If TRUE, the password is encrypted. Otherwise, the password is not encrypted.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the user name.
+Pointer to a buffer that receives the user name. To find the required buffer size, set this parameter to
On input, specifies the size of the pbData buffer, in bytes. On output, receives the required buffer size. If fEncryptData is
If TRUE, the method returns an encrypted string. Otherwise, the method returns an unencrypted string.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the user name is not available, the method might succeed and set *pcbData to zero.
+
Retrieves the password.
+Pointer to a buffer that receives the password. To find the required buffer size, set this parameter to
On input, specifies the size of the pbData buffer, in bytes. On output, receives the required buffer size. If fEncryptData is
If TRUE, the method returns an encrypted string. Otherwise, the method returns an unencrypted string.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the password is not available, the method might succeed and set *pcbData to zero.
+
Queries whether logged-on credentials should be used.
+Receives a Boolean value. If logged-on credentials should be used, the value is TRUE. Otherwise, the value is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Gets credentials from the credential cache.
This interface is implemented by the credential cache object. Applications that implement the
Retrieves the credential object for the specified URL.
+A null-terminated wide-character string containing the URL for which the credential is needed.
A null-terminated wide-character string containing the realm for the authentication.
Bitwise OR of zero or more flags from the
Receives a reference to the
Receives a bitwise OR of zero or more flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Reports whether the credential object provided successfully passed the authentication challenge.
+Pointer to the
TRUE if the credential object succeeded in the authentication challenge; otherwise,
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is called by the network source into the credential manager.
+
Specifies how user credentials are stored.
+Pointer to the
Bitwise OR of zero or more flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If no flags are specified, the credentials are cached in memory. This method can be implemented by the credential manager and called by the network source.
+Implemented by applications to provide user credentials for a network source.
To use this interface, implement it in your application. Then create a property store object and set the MFNETSOURCE_CREDENTIAL_MANAGER property. The value of the property is a reference to your application's
Media Foundation does not provide a default implementation of this interface. Applications that support authentication must implement this interface.
+
Begins an asynchronous request to retrieve the user's credentials.
+Pointer to an
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Completes an asynchronous request to retrieve the user's credentials.
+Pointer to an
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Specifies whether the user's credentials succeeded in the authentication challenge. The network source calls this method to inform the application whether the user's credentials were authenticated.
+Pointer to the
Boolean value. The value is TRUE if the credentials succeeded in the authentication challenge. Otherwise, the value is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Determines the proxy to use when connecting to a server. The network source uses this interface.
Applications can create the proxy locator configured by the application by implementing the
To create the default proxy locator, call
Initializes the proxy locator object.
+Null-terminated wide-character string containing the hostname of the destination server.
Null-terminated wide-character string containing the destination URL.
Reserved. Set to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Determines the next proxy to use.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There are no more proxy objects. |
?
Keeps a record of the success or failure of using the current proxy.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the current proxy information including hostname and port.
+Pointer to a buffer that receives a null-terminated string containing the proxy hostname and port. This parameter can be
On input, specifies the number of elements in the pszStr array. On output, receives the required size of the buffer.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The buffer specified in pszStr is too small. |
?
Creates a new instance of the default proxy locator.
+Receives a reference to the new proxy locator object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Creates a proxy locator object, which determines the proxy to use.
The network source uses this interface to create the proxy locator object. Applications can provide their own implementation of this interface by setting the MFNETSOURCE_PROXYLOCATORFACTORY property on the source resolver. If the application does not set this property, the network source uses the default proxy locator provided by Media Foundation.
+
Creates an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a supported protocol by index
+
Retrieves the number of protocols supported by the network scheme plug-in.
+Receives the number of protocols.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a supported protocol by index
+Zero-based index of the protocol to retrieve. To get the number of supported protocols, call
Receives a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The value passed in the nProtocolIndex parameter was greater than the total number of supported protocols, returned by GetNumberOfSupportedProtocols. |
?
Not implemented in this release.
+This method returns
Retrieves the number of protocols supported by the network scheme plug-in.
+
Stores the data needed to marshal an interface across a process boundary.
+
Stores the data needed to marshal an interface across a process boundary.
+Interface identifier of the interface to marshal.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Marshals an interface from data stored in the stream.
+Interface identifier of the interface to marshal.
Receives a reference to the requested interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Encapsulates a usage policy from an input trust authority (ITA). Output trust authorities (OTAs) use this interface to query which protection systems they are required to enforce by the ITA.
+Retrieves a list of the output protection systems that the output trust authority (OTA) must enforce, along with configuration data for each protection system.
+Describes the output that is represented by the OTA calling this method. This value is a bitwise OR of zero or more of the following flags.
Value | Meaning |
---|---|
| Hardware bus. |
| The output sends compressed data. If this flag is absent, the output sends uncompressed data. |
| Reserved. Do not use. |
| The output sends a digital signal. If this flag is absent, the output sends an analog signal. |
| Reserved. Do not use. |
| Reserved. Do not use. |
| The output sends video data. If this flag is absent, the output sends audio data. |
?
Indicates a specific family of output connectors that is represented by the OTA calling this method. Possible values include the following.
Value | Meaning |
---|---|
| AGP bus. |
| Component video. |
| Composite video. |
| Japanese D connector. (Connector conforming to the EIAJ RC-5237 standard.) |
| Embedded DisplayPort connector. |
| External DisplayPort connector. |
| Digital video interface (DVI) connector. |
| High-definition multimedia interface (HDMI) connector. |
| Low voltage differential signaling (LVDS) connector. A connector using the LVDS interface to connect internally to a display device. The connection between the graphics adapter and the display device is permanent and not accessible to the user. Applications should not enable High-Bandwidth Digital Content Protection (HDCP) for this connector. |
| PCI bus. |
| PCI Express bus. |
| PCI-X bus. |
| Audio data sent over a connector via S/PDIF. |
| Serial digital interface connector. |
| S-Video connector. |
| Embedded Unified Display Interface (UDI). |
| External UDI. |
| Unknown connector type. See Remarks. |
| VGA connector. |
| Miracast wireless connector. Supported in Windows 8.1 and later. |
?
Pointer to an array of
Number of elements in the rgGuidProtectionSchemasSupported array.
Receives a reference to the
If this method succeeds, it returns
The video OTA returns the MFCONNECTOR_UNKNOWN connector type unless the Direct3D device is in full-screen mode. (Direct3D windowed mode is not generally a secure video mode.) You can override this behavior by implementing a custom EVR presenter that implements the
Retrieves a
Receives a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
All of the policy objects and output schemas from the same ITA should return the same originator identifier (including dynamic policy changes). This value enables the OTA to distinguish policies that originate from different ITAs, so that the OTA can update dynamic policies correctly.
+
Retrieves the minimum version of the global revocation list (GRL) that must be enforced by the protected environment for this policy.
+Receives the minimum GRL version.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a
All of the policy objects and output schemas from the same ITA should return the same originator identifier (including dynamic policy changes). This value enables the OTA to distinguish policies that originate from different ITAs, so that the OTA can update dynamic policies correctly.
+
Retrieves the minimum version of the global revocation list (GRL) that must be enforced by the protected environment for this policy.
+Encapsulates information about an output protection system and its corresponding configuration data.
+If the configuration information for the output protection system does not require more than a DWORD of space, the configuration information is retrieved in the GetConfigurationData method. If more than a DWORD of configuration information is needed, it is stored using the
Retrieves the output protection system that is represented by this object. Output protection systems are identified by
Receives the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Returns configuration data for the output protection system. The configuration data is used to enable or disable the protection system, and to set the protection levels.
+Receives the configuration data. The meaning of this data depends on the output protection system.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a
Receives a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
All of the policy objects and output schemas from the same ITA should return the same originator identifier (including dynamic policy changes). This value enables the OTA to distinguish policies that originate from different ITAs, so that the OTA can update dynamic policies correctly.
+
Retrieves the output protection system that is represented by this object. Output protection systems are identified by
Returns configuration data for the output protection system. The configuration data is used to enable or disable the protection system, and to set the protection levels.
+
Retrieves a
All of the policy objects and output schemas from the same ITA should return the same originator identifier (including dynamic policy changes). This value enables the OTA to distinguish policies that originate from different ITAs, so that the OTA can update dynamic policies correctly.
+Encapsulates the functionality of one or more output protection systems that a trusted output supports. This interface is exposed by output trust authority (OTA) objects. Each OTA represents a single action that the trusted output can perform, such as play, copy, or transcode. An OTA can represent more than one physical output if each output performs the same action.
+
Retrieves the action that is performed by this output trust authority (OTA).
+Receives a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets one or more policy objects on the output trust authority (OTA).
+The address of an array of
The number of elements in the ppPolicy array.
Receives either a reference to a buffer allocated by the OTA, or the value
Note: Currently this parameter is reserved. An OTA should set the reference to
Receives the size of the ppbTicket buffer, in bytes. If ppbTicket receives the value
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The policy was negotiated successfully, but the OTA will enforce it asynchronously. |
| The OTA does not support the requirements of this policy. |
?
If the method returns MF_S_WAIT_FOR_POLICY_SET, the OTA sends an
Sets one or more policy objects on the output trust authority (OTA).
+The address of an array of
The number of elements in the ppPolicy array.
Receives either a reference to a buffer allocated by the OTA, or the value
Note: Currently this parameter is reserved. An OTA should set the reference to
Receives the size of the ppbTicket buffer, in bytes. If ppbTicket receives the value
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The policy was negotiated successfully, but the OTA will enforce it asynchronously. |
| The OTA does not support the requirements of this policy. |
?
If the method returns MF_S_WAIT_FOR_POLICY_SET, the OTA sends an
Retrieves the action that is performed by this output trust authority (OTA).
+Controls how media sources and transforms are enumerated in Microsoft Media Foundation.
To get a reference to this interface, call
Media Foundation provides a set of built-in media sources and decoders. Applications can enumerate them as follows:
Applications might also enumerate these objects indirectly. For example, if an application uses the topology loader to resolve a partial topology, the topology loader calls
Third parties can implement their own custom media sources and decoders, and register them for enumeration so that other applications can use them.
To control the enumeration order, Media Foundation maintains two process-wide lists of CLSIDs: a preferred list and a blocked list. An object whose CLSID appears in the preferred list appears first in the enumeration order. An object whose CLSID appears on the blocked list is not enumerated.
The lists are initially populated from the registry. Applications can use the
The preferred list contains a set of key/value pairs, where the keys are strings and the values are CLSIDs. These key/value pairs are defined as follows:
The following examples show the various types of key:
To search the preferred list by key name, call the
The blocked list contains a list of CLSIDs. To enumerate the entire list, call the
Searches the preferred list for a class identifier (CLSID) that matches a specified key name.
+Member of the
The key name to match. For more information about the format of key names, see the Remarks section of
Receives a CLSID from the preferred list.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| No CLSID matching this key was found. |
?
Gets a class identifier (CLSID) from the preferred list, specified by index value.
+Member of the
The zero-based index of the CLSID to retrieve.
Receives the key name associated with the CLSID. The caller must free the memory for the returned string by calling the CoTaskMemFree function. For more information about the format of key names, see the Remarks section of
Receives the CLSID at the specified index.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The index parameter is out of range. |
?
Adds a class identifier (CLSID) to the preferred list or removes a CLSID from the list.
+Member of the
The key name for the CLSID. For more information about the format of key names, see the Remarks section of
The CLSID to add to the list. If this parameter is
If this method succeeds, it returns
The preferred list is global to the caller's process. Calling this method does not affect the list in other processes.
+Queries whether a class identifier (CLSID) appears in the blocked list.
+Member of the
The CLSID to search for.
The method returns an
Return code | Description |
---|---|
| The specified CLSID appears in the blocked list. |
| Invalid argument. |
| The specified CLSID is not in the blocked list. |
?
Gets a class identifier (CLSID) from the blocked list.
+Member of the
The zero-based index of the CLSID to retrieve.
Receives the CLSID at the specified index.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The index parameter is out of range. |
?
Adds a class identifier (CLSID) to the blocked list, or removes a CLSID from the list.
+Member of the
The CLSID to add or remove.
Specifies whether to add or remove the CSLID. If the value is TRUE, the method adds the CLSID to the blocked list. Otherwise, the method removes it from the list.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
?
The blocked list is global to the caller's process. Calling this method does not affect the list in other processes.
+
Note??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Represents a media item. A media item is an abstraction for a source of media data, such as a video file. Use this interface to get information about the source, or to change certain playback settings, such as the start and stop times. To get a reference to this interface, call one of the following methods:
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets a reference to the MFPlay player object that created the media item.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the URL that was used to create the media item.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No URL is associated with this media item. |
| The |
?
This method applies when the application calls
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the object that was used to create the media item.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media item was created from a URL, not from an object. |
| The |
?
The object reference is set if the application uses
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the application-defined value stored in the media item.
+If this method succeeds, it returns
You can assign this value when you first create the media item, by specifying it in the dwUserData parameter of the
This method can be called after the player object is shut down.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Stores an application-defined value in the media item.
+This method can return one of these values.
This method can be called after the player object is shut down.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the start and stop times for the media item.
+If this method succeeds, it returns
The pguidStartPositionType and pguidStopPositionType parameters receive the units of time that are used. Currently, the only supported value is MFP_POSITIONTYPE_100NS.
Value | Description |
---|---|
MFP_POSITIONTYPE_100NS | 100-nanosecond units. The time parameter (pvStartValue or pvStopValue) uses the following data type:
|
?
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the start and stop time for the media item.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| Invalid start or stop time. Any of the following can cause this error:
|
?
By default, a media item plays from the beginning to the end of the file. This method adjusts the start time and/or the stop time:
The pguidStartPositionType and pguidStopPositionType parameters give the units of time that are used. Currently, the only supported value is MFP_POSITIONTYPE_100NS.
Value | Description |
---|---|
MFP_POSITIONTYPE_100NS | 100-nanosecond units. The time parameter (pvStartValue or pvStopValue) uses the following data type:
To clear a previously set time, use an empty |
?
The adjusted start and stop times are used the next time that
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether the media item contains a video stream.
+If this method succeeds, it returns
To select or deselect streams before playback starts, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether the media item contains an audio stream.
+If this method succeeds, it returns
To select or deselect streams before playback starts, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether the media item contains protected content.
Note: Currently
If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the duration of the media item.
+If this method succeeds, it returns
The method returns the total duration of the content, regardless of any values set through
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the number of streams (audio, video, and other) in the media item.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether a stream is selected to play.
+If this method succeeds, it returns
To select or deselect a stream, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Selects or deselects a stream.
+If this method succeeds, it returns
You can use this method to change which streams are selected. The change goes into effect the next time that
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries the media item for a stream attribute.
+If this method succeeds, it returns
Stream attributes describe an individual stream (audio, video, or other) within the presentation. To get an attribute that applies to the entire presentation, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries the media item for a presentation attribute.
+If this method succeeds, it returns
Presentation attributes describe the presentation as a whole. To get an attribute that applies to an individual stream within the presentation, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets various flags that describe the media item.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets a media sink for the media item. A media sink is an object that consumes the data from one or more streams.
+If this method succeeds, it returns
By default, the MFPlay player object renders audio streams to the Streaming Audio Renderer (SAR) and video streams to the Enhanced Video Renderer (EVR). You can use the SetStreamSink method to provide a different media sink for an audio or video stream; or to support other stream types besides audio and video. You can also use it to configure the SAR or EVR before they are used.
Call this method before calling
To reset the media item to use the default media sink, set pMediaSink to
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets a property store that contains metadata for the source, such as author or title.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets a reference to the MFPlay player object that created the media item.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the object that was used to create the media item.
+The object reference is set if the application uses
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the application-defined value stored in the media item.
+You can assign this value when you first create the media item, by specifying it in the dwUserData parameter of the
This method can be called after the player object is shut down.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether the media item contains protected content.
Note: Currently
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the number of streams (audio, video, and other) in the media item.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets various flags that describe the media item.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets a property store that contains metadata for the source, such as author or title.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Contains methods to play media files.
The MFPlay player object exposes this interface. To get a reference to this interface, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Starts playback.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object's Shutdown method was called. |
?
This method completes asynchronously. When the operation completes, the application's
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Pauses playback. While playback is paused, the most recent video frame is displayed, and audio is silent.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object's Shutdown method was called. |
?
This method completes asynchronously. When the operation completes, the application's
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Stops playback.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object's Shutdown method was called. |
?
This method completes asynchronously. When the operation completes, the application's
The current media item is still valid. After playback stops, the playback position resets to the beginning of the current media item.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Steps forward one video frame.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Cannot frame step. Reasons for this error code include:
|
| The object's Shutdown method was called. |
| The media source does not support frame stepping, or the current playback rate is negative. |
?
This method completes asynchronously. When the operation completes, the application's
The player object does not support frame stepping during reverse playback (that is, while the playback rate is negative).
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the playback position.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The value of pvPositionValue is not valid. |
| No media item has been queued. |
| The object's Shutdown method was called. |
?
If you call this method while playback is stopped, the new position takes effect after playback resumes.
This method completes asynchronously. When the operation completes, the application's
If playback was started before SetPosition is called, playback resumes at the new position. If playback was paused, the video is refreshed to display the current frame at the new position.
If you make two consecutive calls to SetPosition with guidPositionType equal to MFP_POSITIONTYPE_100NS, and the second call is made before the first call has completed, the second call supersedes the first. The status code for the superseded call is set to S_FALSE in the event data for that call. This behavior prevents excessive latency from repeated calls to SetPosition, as each call may force the media source to perform a relatively lengthy seek operation.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current playback position.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| No media item has been queued. |
| The object's Shutdown method was called. |
?
The playback position is calculated relative to the start time of the media item, which can be specified by calling
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the playback duration of the current media item.
+This method can return one of these values.
Return code | Description |
---|---|
| The method succeeded. |
| The media source does not have a duration. This error can occur with a live source, such as a video camera. |
| There is no current media item. |
?
This method calculates the playback duration, taking into account the start and stop times for the media item. To set the start and stop times, call
For example, suppose that you load a 30-second audio file and set the start time equal to 2 seconds and stop time equal to 10 seconds. The
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the playback rate.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The flRate parameter is zero. |
| The object's Shutdown method was called. |
?
This method completes asynchronously. When the operation completes, the application's
The method sets the nearest supported rate, which will depend on the underlying media source. For example, if flRate is 50 and the source's maximum rate is 8× normal rate, the method will set the rate to 8.0. The actual rate is indicated in the event data for the
To find the range of supported rates, call
This method does not support playback rates of zero, although Media Foundation defines a meaning for zero rates in some other contexts.
The new rate applies only to the current media item. Setting a new media item resets the playback rate to 1.0.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current playback rate.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the range of supported playback rates.
+This method can return one of these values.
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not support playback in the requested direction (either forward or reverse). |
?
Playback rates are expressed as a ratio of the current rate to the normal rate. For example, 1.0 indicates normal playback speed, 0.5 indicates half speed, and 2.0 indicates twice normal speed. Positive values indicate forward playback, and negative values indicate reverse playback. +
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current playback state of the MFPlay player object.
+If this method succeeds, it returns
This method can be called after the player object has been shut down.
Many of the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Creates a media item from a URL.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| Invalid request. This error can occur when fSync is |
| The object's Shutdown method was called. |
| Unsupported protocol. |
?
This method does not queue the media item for playback. To queue the item for playback, call
The CreateMediaItemFromURL method can be called either synchronously or asynchronously:
The callback interface is set when you first call
If you make multiple asynchronous calls to CreateMediaItemFromURL, they are not guaranteed to complete in the same order. Use the dwUserData parameter to match created media items with pending requests.
Currently, this method returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Creates a media item from an object.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| Invalid request. This error can occur when fSync is |
| The object's Shutdown method was called. |
?
The pIUnknownObj parameter must specify one of the following:
This method does not queue the media item for playback. To queue the item for playback, call
The CreateMediaItemFromObject method can be called either synchronously or asynchronously:
The callback interface is set when you first call
If you make multiple asynchronous calls to CreateMediaItemFromObject, they are not guaranteed to complete in the same order. Use the dwUserData parameter to match created media items with pending requests.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queues a media item for playback.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The media item contains protected content. MFPlay currently does not support protected content. |
| No audio playback device was found. This error can occur if the media source contains audio, but no audio playback devices are available on the system. |
| The object's Shutdown method was called. |
?
This method completes asynchronously. When the operation completes, the application's
To create a media item, call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Clears the current media item.
Note??This method is currently not implemented.
+If this method succeeds, it returns
This method stops playback and releases the player object's references to the current media item.
This method completes asynchronously. When the operation completes, the application's
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets a reference to the current media item.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There is no current media item. |
| There is no current media item. |
| The object's Shutdown method was called. |
?
The
The previous remark also applies to setting the media item in the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current audio volume.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the audio volume.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The flVolume parameter is invalid. |
?
If you call this method before playback starts, the setting is applied after playback starts.
This method does not change the master volume level for the player's audio session. Instead, it adjusts the per-channel volume levels for audio stream(s) that belong to the current media item. Other streams in the audio session are not affected. For more information, see Managing the Audio Session.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current audio balance.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the audio balance.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The flBalance parameter is invalid. |
?
If you call this method before playback starts, the setting is applied when playback starts.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether the audio is muted.
+If this method succeeds, it returns
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Mutes or unmutes the audio.
+If this method succeeds, it returns
If you call this method before playback starts, the setting is applied after playback starts.
This method does not mute the entire audio session to which the player belongs. It mutes only the streams from the current media item. Other streams in the audio session are not affected. For more information, see Managing the Audio Session. +
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the size and aspect ratio of the video. These values are computed before any scaling is done to fit the video into the destination window.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
At least one parameter must be non-
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the range of video sizes that can be displayed without significantly degrading performance or image quality.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
At least one parameter must be non-
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the video source rectangle.
MFPlay clips the video to this rectangle and stretches the rectangle to fill the video window.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
MFPlay stretches the source rectangle to fill the entire video window. By default, MFPlay maintains the source's correct aspect ratio, letterboxing if needed. The letterbox color is controlled by the
This method fails if no media item is currently set, or if the current media item does not contain video.
To set the video position before playback starts, call this method inside your event handler for the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the video source rectangle.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Specifies whether the aspect ratio of the video is preserved during playback.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
This method fails if no media item is currently set, or if the current media item does not contain video.
To set the aspect-ratio mode before playback starts, call this method inside your event handler for the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current aspect-ratio correction mode. This mode controls whether the aspect ratio of the video is preserved during playback.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the window where the video is displayed.
+If this method succeeds, it returns
The video window is specified when you first call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Updates the video frame.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
Call this method when your application's video playback window receives either a WM_PAINT or WM_SIZE message. This method performs two functions:
Important??Call the GDI BeginPaint function before calling UpdateVideo.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Sets the color for the video border. The border color is used to letterbox the video.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
This method fails if no media item is currently set, or if the current media item does not contain video.
To set the border color before playback starts, call this method inside your event handler for the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current color of the video border. The border color is used to letterbox the video.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The current media item does not contain video. |
| The object's Shutdown method was called. |
?
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Applies an audio or video effect to playback.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This effect was already added. |
?
The object specified in the pEffect parameter can implement either a video effect or an audio effect. The effect is applied to any media items set after the method is called. It is not applied to the current media item.
For each media item, the effect is applied to the first selected stream of the matching type (audio or video). If a media item has two selected streams of the same type, the second stream does not receive the effect. The effect is ignored if the media item does not contain a stream that matches the effect type. For example, if you set a video effect and play a file that contains just audio, the video effect is ignored, although no error is raised.
The effect is applied to all subsequent media items, until the application removes the effect. To remove an effect, call
If you set multiple effects of the same type (audio or video), they are applied in the same order in which you call InsertEffect.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Removes an effect that was added with the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The effect was not found. |
?
The change applies to the next media item that is set on the player. The effect is not removed from the current media item.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Removes all effects that were added with the
If this method succeeds, it returns
The change applies to the next media item that is set on the player. The effects are not removed from the current media item.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Shuts down the MFPlay player object and releases any resources the object is using.
+If this method succeeds, it returns
After this method is called, most
The player object automatically shuts itself down when its reference count reaches zero. You can use the Shutdown method to shut down the player before all of the references have been released.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current playback rate.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current playback state of the MFPlay player object.
+This method can be called after the player object has been shut down.
Many of the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets a reference to the current media item.
+The
The previous remark also applies to setting the media item in the
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current audio volume.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current audio balance.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Queries whether the audio is muted.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the video source rectangle.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current aspect-ratio correction mode. This mode controls whether the aspect ratio of the video is preserved during playback.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the window where the video is displayed.
+The video window is specified when you first call
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Gets the current color of the video border. The border color is used to letterbox the video.
+
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Callback interface for the
To set the callback, pass an
Important??Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Called by the MFPlay player object to notify the application of a playback event.
+ The specific type of playback event is given in the eEventType member of the
It is safe to call
Enables a media source to receive a reference to the
If a media source exposes this interface, the Protected Media Path (PMP) Media Session calls SetPMPHost with a reference to the
Provides a reference to the
If this method succeeds, it returns
The
Provides a reference to the
The
Blocks the protected media path (PMP) process from ending.
+When this method is called, it increments the lock count on the PMP process. For every call to this method, the application should make a corresponding call to
Blocks the protected media path (PMP) process from ending.
+If this method succeeds, it returns
When this method is called, it increments the lock count on the PMP process. For every call to this method, the application should make a corresponding call to
Decrements the lock count on the protected media path (PMP) process. Call this method once for each call to
If this method succeeds, it returns
Creates an object in the protected media path (PMP) process, from a CLSID.
+The CLSID of the object to create.
A reference to the
The interface identifier (IID) of the interface to retrieve.
Receives a reference to the requested interface. The caller must release the interface.
If this method succeeds, it returns
You can use the pStream parameter to initialize the object after it is created.
+Enables two instances of the Media Session to share the same protected media path (PMP) process.
+If your application creates more than one instance of the Media Session, you can use this interface to share the same PMP process among several instances. This can be more efficient than re-creating the PMP process each time.
Use this interface as follows:
Blocks the protected media path (PMP) process from ending.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When this method is called, it increments the lock count on the PMP process. For every call to this method, the application should make a corresponding call to
Decrements the lock count on the protected media path (PMP) process. Call this method once for each call to
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Creates an object in the protected media path (PMP) process.
+CLSID of the object to create.
Interface identifier of the interface to retrieve.
Receives a reference to the requested interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Represents a presentation clock, which is used to schedule when samples are rendered and to synchronize multiple streams.
+To create a new instance of the presentation clock, call the
To get the presentation clock from the Media Session, call
Sets the time source for the presentation clock. The time source is the object that drives the clock by providing the current time.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The time source does not have a frequency of 10 MHz. |
| The time source has not been initialized. |
?
The presentation clock cannot start until it has a time source.
The time source is automatically registered to receive state change notifications from the clock, through the time source's
This time source must have a frequency of 10 MHz. See
Retrieves the clock's presentation time source.
+Receives a reference to the time source's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No time source was set on this clock. |
?
Retrieves the latest clock time.
+Receives the latest clock time, in 100-nanosecond units. The time is relative to when the clock was last started.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The clock does not have a presentation time source. Call |
?
This method does not attempt to smooth out jitter or otherwise account for any inaccuracies in the clock time.
+
Registers an object to be notified whenever the clock starts, stops, or pauses, or changes rate.
+Pointer to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Before releasing the object, call
Unregisters an object that is receiving state-change notifications from the clock.
+Pointer to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Starts the presentation clock.
+Initial starting time, in 100-nanosecond units. At the time the Start method is called, the clock's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No time source was set on this clock. |
?
This method is valid in all states (stopped, paused, or running).
If the clock is paused and restarted from the same position (llClockStartOffset is PRESENTATION_CURRENT_POSITION), the presentation clock sends an
The presentation clock initiates the state change by calling OnClockStart or OnClockRestart on the clock's time source. This call is made synchronously. If it fails, the state change does not occur. If the call succeeds, the state changes, and the clock notifies the other state-change subscribers by calling their OnClockStart or OnClockRestart methods. These calls are made asynchronously.
If the clock is already running, calling Start again has the effect of seeking the clock to the new StartOffset position.
+
Stops the presentation clock. While the clock is stopped, the clock time does not advance, and the clock's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No time source was set on this clock. |
| The clock is already stopped. |
?
This method is valid when the clock is running or paused.
The presentation clock initiates the state change by calling
Pauses the presentation clock. While the clock is paused, the clock time does not advance, and the clock's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No time source was set on this clock. |
| The clock is already paused. |
| The clock is stopped. This request is not valid when the clock is stopped. |
?
This method is valid when the clock is running. It is not valid when the clock is paused or stopped.
The presentation clock initiates the state change by calling
Retrieves the clock's presentation time source.
+Retrieves the latest clock time.
+This method does not attempt to smooth out jitter or otherwise account for any inaccuracies in the clock time.
+Describes the details of a presentation. A presentation is a set of related media streams that share a common presentation time.
+Presentation descriptors are used to configure media sources and some media sinks. To get the presentation descriptor from a media source, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of stream descriptors in the presentation. Each stream descriptor contains information about one stream in the media source. To retrieve a stream descriptor, call the
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a stream descriptor for a stream in the presentation. The stream descriptor contains information about the stream.
+Zero-based index of the stream. To find the number of streams in the presentation, call the
Receives a Boolean value. The value is TRUE if the stream is currently selected, or
Receives a reference to the stream descriptor's
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Selects a stream in the presentation.
+The stream number to select, indexed from zero. To find the number of streams in the presentation, call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| dwDescriptorIndex is out of range. |
?
If a stream is selected, the media source will generate data for that stream. The media source will not generate data for deselected streams. To deselect a stream, call
To query whether a stream is selected, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Deselects a stream in the presentation.
+ The stream number to deselect, indexed from zero. To find the number of streams in the presentation, call the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| dwDescriptorIndex is out of range. |
?
If a stream is deselected, no data is generated for that stream. To select the stream again, call
To query whether a stream is selected, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Creates a copy of this presentation descriptor.
+Receives a reference to the
If this method succeeds, it returns
This method performs a shallow copy of the presentation descriptor. The stream descriptors are not cloned. Therefore, use caution when modifying the presentation descriptor or its stream descriptors.
If the original presentation descriptor is from a media source, do not modify the presentation descriptor unless the source is stopped. If you use the presentation descriptor to configure a media sink, do not modify the presentation descriptor after the sink is configured.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of stream descriptors in the presentation. Each stream descriptor contains information about one stream in the media source. To retrieve a stream descriptor, call the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Provides the clock times for the presentation clock.
+This interface is implemented by presentation time sources. A presentation time source is an object that provides the clock time for the presentation clock. For example, the audio renderer is a presentation time source. The rate at which the audio renderer consumes audio samples determines the clock time. If the audio format is 44100 samples per second, the audio renderer will report that one second has passed for every 44100 audio samples it plays. In this case, the timing is provided by the sound card.
To set the presentation time source on the presentation clock, call
A presentation time source must also implement the
Media Foundation provides a presentation time source that is based on the system clock. To create this object, call the
Retrieves the underlying clock that the presentation time source uses to generate its clock times.
+Receives a reference to the clock's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This time source does not expose an underlying clock. |
?
A presentation time source must support stopping, starting, pausing, and rate changes. However, in many cases the time source derives its clock times from a hardware clock or other device. The underlying clock is always running, and might not support rate changes.
Optionally, a time source can expose the underlying clock by implementing this method. The underlying clock is always running, even when the presentation time source is paused or stopped. (Therefore, the underlying clock returns the
The underlying clock is useful if you want to make decisions based on the clock times while the presentation clock is stopped or paused.
If the time source does not expose an underlying clock, the method returns
Retrieves the underlying clock that the presentation time source uses to generate its clock times.
+A presentation time source must support stopping, starting, pausing, and rate changes. However, in many cases the time source derives its clock times from a hardware clock or other device. The underlying clock is always running, and might not support rate changes.
Optionally, a time source can expose the underlying clock by implementing this method. The underlying clock is always running, even when the presentation time source is paused or stopped. (Therefore, the underlying clock returns the
The underlying clock is useful if you want to make decisions based on the clock times while the presentation clock is stopped or paused.
If the time source does not expose an underlying clock, the method returns
Enables the quality manager to adjust the audio or video quality of a component in the pipeline.
This interface is exposed by pipeline components that can adjust their quality. Typically it is exposed by decoders and stream sinks. For example, the enhanced video renderer (EVR) implements this interface. However, media sources can also implement this interface.
To get a reference to this interface from a media source, call
The quality manager typically obtains this interface when the quality manager's
Sets the drop mode. In drop mode, a component drops samples, more or less aggressively depending on the level of the drop mode.
+Requested drop mode, specified as a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The component does not support the specified mode or any higher modes. |
?
If this method is called on a media source, the media source might switch between thinned and non-thinned output. If that occurs, the affected streams will send an
Sets the quality level. The quality level determines how the component consumes or produces samples.
+Requested quality level, specified as a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The component does not support the specified quality level or any levels below it. |
?
Retrieves the current drop mode.
+Receives the drop mode, specified as a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the current quality level.
+Receives the quality level, specified as a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Drops samples over a specified interval of time.
+Amount of time to drop, in 100-nanosecond units. This value is always absolute. If the method is called multiple times, do not add the times from previous calls.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object does not support this method. |
?
Ideally the quality manager can prevent a renderer from falling behind. But if this does occur, then simply lowering quality does not guarantee the renderer will ever catch up. As a result, audio and video might fall out of sync. To correct this problem, the quality manager can call DropTime to request that the renderer drop samples quickly over a specified time interval. After that period, the renderer stops dropping samples.
This method is primarily intended for the video renderer. Dropped audio samples cause audio glitching, which is not desirable.
If a component does not support this method, it should return
Retrieves the current drop mode.
+
Retrieves the current quality level.
+Enables a pipeline object to adjust its own audio or video quality, in response to quality messages.
+This interface enables a pipeline object to respond to quality messages from the media sink. Currently, it is supported only for video decoders.
If a video decoder exposes
If the decoder exposes
The preceding remarks apply to the default implementation of the quality manager; custom quality managers can implement other behaviors.
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
+Forwards an
If this method succeeds, it returns
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
+Queries an object for the number of quality modes it supports. Quality modes are used to adjust the trade-off between quality and speed when rendering audio or video.
The default presenter for the enhanced video renderer (EVR) implements this interface. The EVR uses the interface to respond to quality messages from the quality manager.
+Gets the maximum drop mode. A higher drop mode means that the object will, if needed, drop samples more aggressively to match the presentation clock.
+Receives the maximum drop mode, specified as a member of the
If this method succeeds, it returns
To get the current drop mode, call the
Gets the minimum quality level that is supported by the component.
+Receives the minimum quality level, specified as a member of the
If this method succeeds, it returns
To get the current quality level, call the
Gets the maximum drop mode. A higher drop mode means that the object will, if needed, drop samples more aggressively to match the presentation clock.
+To get the current drop mode, call the
Gets the minimum quality level that is supported by the component.
+To get the current quality level, call the
Adjusts playback quality. This interface is exposed by the quality manager.
+Media Foundation provides a default quality manager that is tuned for playback. Applications can provide a custom quality manager to the Media Session by setting the
Called when the Media Session is about to start playing a new topology.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
In a typical quality manager this method does the following:
Enumerates the nodes in the topology.
Calls
Queries for the
The quality manager can then use the
Called when the Media Session selects a presentation clock.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Called when the media processor is about to deliver an input sample to a pipeline component.
+Pointer to the
Index of the input stream on the topology node.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is called for every sample passing through every pipeline component. Therefore, the method must return quickly to avoid introducing too much latency into the pipeline.
+
Called after the media processor gets an output sample from a pipeline component.
+Pointer to the
Index of the output stream on the topology node.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is called for every sample passing through every pipeline component. Therefore, the method must return quickly to avoid introducing too much latency into the pipeline.
+
Called when a pipeline component sends an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Called when the Media Session is shutting down.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The quality manager should release all references to the Media Session when this method is called.
+Gets or sets the playback rate.
+Objects can expose this interface as a service. To obtain a reference to the interface, call
For more information, see About Rate Control.
To discover the playback rates that an object supports, use the
Sets the playback rate.
+If TRUE, the media streams are thinned. Otherwise, the stream is not thinned. For media sources and demultiplexers, the object must thin the streams when this parameter is TRUE. For downstream transforms, such as decoders and multiplexers, this parameter is informative; it notifies the object that the input streams are thinned. For information, see About Rate Control.
The requested playback rate. Positive values indicate forward playback, negative values indicate reverse playback, and zero indicates scrubbing (the source delivers a single frame).
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object does not support reverse playback. |
| The object does not support thinning. |
| The object does not support the requested playback rate. |
| The object cannot change to the new rate while in the running state. |
?
The Media Session prevents some transitions between rate boundaries, depending on the current playback state:
Playback State | Forward/Reverse | Forward/Zero | Reverse/Zero |
---|---|---|---|
Running | No | No | No |
Paused | No | Yes | No |
Stopped | Yes | Yes | Yes |
?
If the transition is not supported, the method returns
When a media source completes a call to SetRate, it sends the
If a media source switches between thinned and non-thinned playback, the streams send an
When the Media Session completes a call to SetRate, it sends the
Gets the current playback rate.
+Receives the current playback rate.
Receives the value TRUE if the stream is currently being thinned. If the object does not support thinning, this parameter always receives the value
Queries the range of playback rates that are supported, including reverse playback.
To get a reference to this interface, call
Applications can use this interface to discover the fastest and slowest playback rates that are possible, and to query whether a given playback rate is supported. Applications obtain this interface from the Media Session. Internally, the Media Session queries the objects in the pipeline. For more information, see How to Determine Supported Rates.
To get the current playback rate and to change the playback rate, use the
Playback rates are expressed as a ratio of the normal playback rate. Reverse playback is expressed as a negative rate. Playback is either thinned or non-thinned. In thinned playback, some of the source data is skipped (typically delta frames). In non-thinned playback, all of the source data is rendered.
You might need to implement this interface if you are writing a pipeline object (media source, transform, or media sink). For more information, see Implementing Rate Control.
+
Retrieves the slowest playback rate supported by the object.
+Specifies whether to query for the slowest forward playback rate or reverse playback rate. The value is a member of the
If TRUE, the method retrieves the slowest thinned playback rate. Otherwise, the method retrieves the slowest non-thinned playback rate. For information about thinning, see About Rate Control.
Receives the slowest playback rate that the object supports.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object does not support reverse playback. |
| The object does not support thinning. |
?
The value returned in plfRate represents a lower bound. Playback at this rate is not guaranteed. Call
If eDirection is
Gets the fastest playback rate supported by the object.
+Specifies whether to query for the fastest forward playback rate or reverse playback rate. The value is a member of the
If TRUE, the method retrieves the fastest thinned playback rate. Otherwise, the method retrieves the fastest non-thinned playback rate. For information about thinning, see About Rate Control.
Receives the fastest playback rate that the object supports.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The object does not support reverse playback. |
| The object does not support thinning. |
?
For some formats (such as ASF), thinning means dropping all frames that are not I-frames. If a component produces stream data, such as a media source or a demultiplexer, it should pay attention to the fThin parameter and return
If the component processes or receives a stream (most transforms or media sinks), it may ignore this parameter if it does not care whether the stream is thinned. In the Media Session's implementation of rate support, if the transforms do not explicitly support reverse playback, the Media Session will attempt to playback in reverse with thinning but not without thinning. Therefore, most applications will set fThin to TRUE when using the Media Session for reverse playback.
If eDirection is
Queries whether the object supports a specified playback rate.
+If TRUE, the method queries whether the object supports the playback rate with thinning. Otherwise, the method queries whether the object supports the playback rate without thinning. For information about thinning, see About Rate Control.
The playback rate to query.
If the object does not support the playback rate given in flRate, this parameter receives the closest supported playback rate. If the method returns
The method returns an
Return code | Description |
---|---|
| The object supports the specified rate. |
| The object does not support reverse playback. |
| The object does not support thinning. |
| The object does not support the specified rate. |
?
Creates an instance of either the sink writer or the source reader.
+To get a reference to this interface, call the CoCreateInstance function. The CLSID is CLSID_MFReadWriteClassFactory. Call the
As an alternative to using this interface, you can call any of the following functions:
Internally, these functions use the
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
+Creates an instance of the sink writer or source reader, given a URL.
+The CLSID of the object to create.
Value | Meaning |
---|---|
| Create the sink writer. The ppvObject parameter receives an |
| Create the source reader. The ppvObject parameter receives an |
?
A null-terminated string that contains a URL. If clsid is CLSID_MFSinkWriter, the URL specifies the name of the output file. The sink writer creates a new file with this name. If clsid is CLSID_MFSourceReader, the URL specifies the input file for the source reader.
A reference to the
This parameter can be
The IID of the requested interface.
Receives a reference to the requested interface. The caller must release the interface.
If this method succeeds, it returns
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
+Creates an instance of the sink writer or source reader, given an
The CLSID of the object to create.
Value | Meaning |
---|---|
| Create the sink writer. The ppvObject parameter receives an |
| Create the source reader. The ppvObject parameter receives an |
?
A reference to the
Value | Meaning |
---|---|
Pointer to a byte stream. If clsid is CLSID_MFSinkWriter, the sink writer writes data to this byte stream. If clsid is CLSID_MFSourceReader, this byte stream provides the source data for the source reader. | |
Pointer to a media sink. Applies only when clsid is CLSID_MFSinkWriter. | |
Pointer to a media source. Applies only when clsid is CLSID_MFSourceReader. |
?
A reference to the
This parameter can be
The IID of the requested interface.
Receives a reference to the requested interface. The caller must release the interface.
If this method succeeds, it returns
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
+Notifies a pipeline object to register itself with the Multimedia Class Scheduler Service (MMCSS).
Any pipeline object that creates worker threads should implement this interface.
+Media Foundation provides a mechanism for applications to associate branches in the topology with MMCSS tasks. A topology branch is defined by a source node in the topology and all of the nodes downstream from it. An application registers a topology branch with MMCSS by setting the
When the application registers a topology branch with MMCSS, the Media Session queries every pipeline object in that branch for the
When the application unregisters the topology branch, the Media Session calls UnregisterThreads.
If a pipeline object creates its own worker threads but does not implement this interface, it can cause priority inversions in the Media Foundation pipeline, because high-priority processing threads might be blocked while waiting for the component to process data on a thread with lower priority.
Pipeline objects that do not create worker threads do not need to implement this interface.
In Windows 8, this interface is extended with IMFRealTimeClientEx.
+Notifies the object to register its worker threads with the Multimedia Class Scheduler Service (MMCSS).
+The MMCSS task identifier.
The name of the MMCSS task.
If this method succeeds, it returns
The object's worker threads should register themselves with MMCSS by calling AvSetMmThreadCharacteristics, using the task name and identifier specified in this method.
+Notifies the object to unregister its worker threads from the Multimedia Class Scheduler Service (MMCSS).
+If this method succeeds, it returns
The object's worker threads should unregister themselves from MMCSS by calling AvRevertMmThreadCharacteristics.
+Specifies the work queue for the topology branch that contains this object.
+The identifier of the work queue, or the value
If this method succeeds, it returns
An application can register a branch of the topology to use a private work queue. The Media Session notifies any pipeline object that supports
When the application unregisters the topology branch, the Media Session calls SetWorkQueue again with the value
Specifies the work queue for the topology branch that contains this object.
+ An application can register a branch of the topology to use a private work queue. The Media Session notifies any pipeline object that supports
When the application unregisters the topology branch, the Media Session calls SetWorkQueue again with the value
Used by the Microsoft Media Foundation proxy/stub DLL to marshal certain asynchronous method calls across process boundaries.
Applications do not use or implement this interface.
+
Used by the Microsoft Media Foundation proxy/stub DLL to marshal certain asynchronous method calls across process boundaries.
Applications do not use or implement this interface.
+Modifies a topology for use in a Terminal Services environment.
+To use this interface, do the following:
The application must call UpdateTopology before calling
Modifies a topology for use in a Terminal Services environment.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
If the application is running in a Terminal Services client session, call this method before calling
Retrieves a reference to the remote object for which this object is a proxy.
+
Retrieves a reference to the remote object for which this object is a proxy.
+Interface identifier (IID) of the requested interface.
Receives a reference to the requested interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves a reference to the object that is hosting this proxy.
+Interface identifier (IID) of the requested interface.
Receives a reference to the requested interface. The caller must release the interface.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the current style on the SAMI media source.
+Gets the number of styles defined in the SAMI file.
+Receives the number of SAMI styles in the file.
If this method succeeds, it returns
Gets a list of the style names defined in the SAMI file.
+Pointer to a
If this method succeeds, it returns
Sets the current style on the SAMI media source.
+Pointer to a null-terminated string containing the name of the style. To clear the current style, pass an empty string (""). To get the list of style names, call
If this method succeeds, it returns
Gets the current style from the SAMI media source.
+Receives a reference to a null-terminated string that contains the name of the style. If no style is currently set, the method returns an empty string. The caller must free the memory for the string by calling CoTaskMemFree.
If this method succeeds, it returns
Gets the number of styles defined in the SAMI file.
+Gets a list of the style names defined in the SAMI file.
+Represents a media sample, which is a container object for media data. For video, a sample typically contains one video frame. For audio data, a sample typically contains multiple audio samples, rather than a single sample of audio.
A media sample contains zero or more buffers. Each buffer manages a block of memory, and is represented by the
To create a new media sample, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves flags associated with the sample.
Currently no flags are defined. Instead, metadata for samples is defined using attributes. To get attributes from a sample, use the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets flags associated with the sample.
Currently no flags are defined. Instead, metadata for samples is defined using attributes. To set attributes on a sample, use the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the presentation time of the sample.
+Receives the presentation time, in 100-nanosecond units.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The sample does not have a presentation time. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the duration of the sample.
+Receives the duration, in 100-nanosecond units.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The sample does not have a specified duration. |
?
If the sample contains more than one buffer, the duration includes the data from all of the buffers.
If the retrieved duration is zero, or if the method returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Sets the duration of the sample.
+Duration of the sample, in 100-nanosecond units.
If this method succeeds, it returns
This method succeeds if the duration is negative, although negative durations are probably not valid for most types of data. It is the responsibility of the object that consumes the sample to validate the duration.
The duration can also be zero. This might be valid for some types of data. For example, the sample might contain stream metadata with no buffers.
Until this method is called, the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of buffers in the sample.
+Receives the number of buffers in the sample. A sample might contain zero buffers.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Gets a buffer from the sample, by index.
Note??In most cases, it is safer to use the
A sample might contain more than one buffer. Use the GetBufferByIndex method to enumerate the individual buffers.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Converts a sample with multiple buffers into a sample with a single buffer.
+Receives a reference to the
If the sample contains more than one buffer, this method copies the data from the original buffers into a new buffer, and replaces the original buffer list with the new buffer. The new buffer is returned in the ppBuffer parameter.
If the sample contains a single buffer, this method returns a reference to the original buffer. In typical use, most samples do not contain multiple buffers.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Adds a buffer to the end of the list of buffers in the sample.
+Pointer to the buffer's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| |
?
For uncompressed video data, each buffer should contain a single video frame, and samples should not contain multiple frames. In general, storing multiple buffers in a sample is discouraged.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Removes a buffer at a specified index from the sample.
+Index of the buffer. To find the number of buffers in the sample, call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Removes all of the buffers from the sample.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the total length of the valid data in all of the buffers in the sample. The length is calculated as the sum of the values retrieved by the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Copies the sample data to a buffer. This method concatenates the valid data from all of the buffers of the sample, in order.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| |
| The buffer is not large enough to contain the data. |
?
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves flags associated with the sample.
Currently no flags are defined. Instead, metadata for samples is defined using attributes. To get attributes from a sample, use the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the presentation time of the sample.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the duration of the sample.
+If the sample contains more than one buffer, the duration includes the data from all of the buffers.
If the retrieved duration is zero, or if the method returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the number of buffers in the sample.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves the total length of the valid data in all of the buffers in the sample. The length is calculated as the sum of the values retrieved by the
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Provides encryption for media data inside the protected media path (PMP).
+
Retrieves the version of sample protection that the component implements on input.
+Receives a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the version of sample protection that the component implements on output.
+Receives a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the sample protection certificate.
+Specifies the version number of the sample protection scheme for which to receive a certificate. The version number is specified as a
Receives a reference to a buffer containing the certificate. The caller must free the memory for the buffer by calling CoTaskMemFree.
Receives the size of the ppCert buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. |
?
For certain version numbers of sample protection, the downstream component must provide a certificate. Components that do not support these version numbers can return E_NOTIMPL.
+
Retrieves initialization information for sample protection from the upstream component.
+Specifies the version number of the sample protection scheme. The version number is specified as a
Identifier of the output stream. The identifier corresponds to the output stream identifier returned by the
Pointer to a certificate provided by the downstream component.
Size of the certificate, in bytes.
Receives a reference to a buffer that contains the initialization information for downstream component. The caller must free the memory for the buffer by calling CoTaskMemFree.
Receives the size of the ppbSeed buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. |
?
This method must be implemented by the upstream component. The method fails if the component does not support the requested sample protection version. Downstream components do not implement this method and should return E_NOTIMPL.
+
Initializes sample protection on the downstream component.
+Specifies the version number of the sample protection scheme. The version number is specified as a
Identifier of the input stream. The identifier corresponds to the output stream identifier returned by the
Pointer to a buffer that contains the initialization data provided by the upstream component. To retrieve this buffer, call
Size of the pbSeed buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the version of sample protection that the component implements on input.
+
Retrieves the version of sample protection that the component implements on output.
+Persists media data from a source byte stream to an application-provided byte stream.
The byte stream used for HTTP download implements this interface. To get a reference to this interface, call
Begins saving a Windows Media file to the application's byte stream.
+Pointer to the
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
When the operation completes, the callback object's
Completes the operation started by
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Cancels the operation started by
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the percentage of content saved to the provided byte stream.
+Receives the percentage of completion.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the percentage of content saved to the provided byte stream.
+Creates a media source or a byte stream from a URL.
+Applications do not use this interface. This interface is exposed by scheme handlers, which are used by the source resolver. A scheme handler is designed to parse one type of URL scheme. When the scheme handler is given a URL, it parses the resource that is located at that URL and creates either a media source or a byte stream.
+
Begins an asynchronous request to create an object from a URL.
When the Source Resolver creates a media source from a URL, it passes the request to a scheme handler. The scheme handler might create a media source directly from the URL, or it might return a byte stream. If it returns a byte stream, the source resolver uses a byte-stream handler to create the media source from the byte stream.
+ The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Cannot open the URL with the requested access (read or write). |
| Unsupported byte stream type. |
?
The dwFlags parameter must contain the
If the
The following table summarizes the behavior of these two flags when passed to this method:
Flag | Object created |
---|---|
Media source or byte stream | |
Byte stream |
?
The
When the operation completes, the scheme handler calls the
Completes an asynchronous request to create an object from a URL.
+Pointer to the
Receives a member of the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The operation was canceled. |
?
Call this method from inside the
Cancels the current request to create an object from a URL.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You can use this method to cancel a previous call to BeginCreateObject. Because that method is asynchronous, however, it might be completed before the operation can be canceled. Therefore, your callback might still be invoked after you call this method.
The operation cannot be canceled if BeginCreateObject returns
Retrieves the client's certificate.
+
Retrieves the client's certificate.
+Receives a reference to a buffer allocated by the object. The buffer contains the client's certificate. The caller must release the buffer by calling CoTaskMemFree.
Receives the size of the ppCert buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Passes the encrypted session key to the client.
+Pointer to a buffer that contains the encrypted session key. This parameter can be
Size of the pbEncryptedSessionKey buffer, in bytes.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Maps a presentation descriptor to its associated sequencer element identifier and the topology it represents.
+The topology returned in ppTopology is the original topology that the application specified in AppendTopology. The source nodes in this topology contain references to the native sources. Do not queue this topology on the Media Session. Instead, call
Adds a topology to the end of the queue.
+Pointer to the
A combination of flags from the
Receives the sequencer element identifier for this topology.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The source topology node is missing one of the following attributes: |
?
The sequencer plays topologies in the order they are queued. You can queue as many topologies as you want to preroll.
The application must indicate to the sequencer when it has queued the last topology on the Media Session. To specify the last topology, set the SequencerTopologyFlags_Last flag in the dwFlags parameter when you append the topology. The sequencer uses this information to end playback with the pipeline. Otherwise, the sequencer waits indefinitely for a new topology to be queued.
+
Deletes a topology from the queue.
+The sequencer element identifier of the topology to delete.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Maps a presentation descriptor to its associated sequencer element identifier and the topology it represents.
+Pointer to the
Receives the sequencer element identifier. This value is assigned by the sequencer source when the application calls
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The presentation descriptor is not valid. |
| This segment was canceled. |
?
The topology returned in ppTopology is the original topology that the application specified in AppendTopology. The source nodes in this topology contain references to the native sources. Do not queue this topology on the Media Session. Instead, call
Updates a topology in the queue.
+Sequencer element identifier of the topology to update.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The sequencer source has been shut down. |
?
This method is asynchronous. When the operation is completed, the sequencer source sends an
Updates the flags for a topology in the queue.
+Sequencer element identifier of the topology to update.
Bitwise OR of flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Queries an object for a specified service interface.
+A service is an interface that is exposed by one object but might be implemented by another object. The GetService method is equivalent to QueryInterface, with the following difference: when QueryInterface retrieves a reference to an interface, it is guaranteed that you can query the returned interface and get back the original interface. The GetService method does not make this guarantee, because the retrieved interface might be implemented by a separate object.
The
Retrieves a service interface.
+The service identifier (SID) of the service. For a list of service identifiers, see Service Interfaces.
The interface identifier (IID) of the interface being requested.
Receives the interface reference. The caller must release the interface.
Applies to: desktop apps | Metro style apps
Retrieves a service interface.
+The service identifier (SID) of the service. For a list of service identifiers, see Service Interfaces.
Exposed by some Media Foundation objects that must be explicitly shut down.
+The following types of object expose
Any component that creates one of these objects is responsible for calling Shutdown on the object before releasing the object. Typically, applications do not create any of these objects directly, so it is not usually necessary to use this interface in an application.
To obtain a reference to this interface, call QueryInterface on the object.
If you are implementing a custom object, your object can expose this interface, but only if you can guarantee that your application will call Shutdown.
Media sources, media sinks, and synchronous MFTs should not implement this interface, because the Media Foundation pipeline will not call Shutdown on these objects. Asynchronous MFTs must implement this interface.
This interface is not related to the
Some Media Foundation interfaces define a Shutdown method, which serves the same purpose as
Shuts down a Media Foundation object and releases all resources associated with the object.
+If this method succeeds, it returns
The
Queries the status of an earlier call to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The Shutdown method has not been called on this object. |
?
Until Shutdown is called, the GetShutdownStatus method returns
If an object's Shutdown method is asynchronous, pStatus might receive the value
Queries the status of an earlier call to the
Until Shutdown is called, the GetShutdownStatus method returns
If an object's Shutdown method is asynchronous, pStatus might receive the value
Controls the master volume level of the audio session associated with the streaming audio renderer (SAR) and the audio capture source.
The SAR and the audio capture source expose this interface as a service. To get a reference to the interface, call
To control the volume levels of individual channels, use the
Volume is expressed as an attenuation level, where 0.0 indicates silence and 1.0 indicates full volume (no attenuation). For each channel, the attenuation level is the product of:
The master volume level of the audio session.
The volume level of the channel.
For example, if the master volume is 0.8 and the channel volume is 0.5, the attenuation for that channel is 0.8 × 0.5 = 0.4. Volume levels can exceed 1.0 (positive gain), but the audio engine clips any audio samples that exceed zero decibels. To change the volume level of individual channels, use the
Use the following formula to convert the volume level to the decibel (dB) scale:
Attenuation (dB) = 20 * log10(Level)
For example, a volume level of 0.50 represents 6.02 dB of attenuation.
+
Sets the master volume level.
+Volume level. Volume is expressed as an attenuation level, where 0.0 indicates silence and 1.0 indicates full volume (no attenuation).
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The audio renderer is not initialized. |
| The audio renderer was removed from the pipeline. |
?
Events outside of the application can change the master volume level. For example, the user can change the volume from the system volume-control program (SndVol). If an external event changes the master volume, the audio renderer sends an
Retrieves the master volume level.
+Receives the volume level. Volume is expressed as an attenuation level, where 0.0 indicates silence and 1.0 indicates full volume (no attenuation).
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The audio renderer is not initialized. |
| The audio renderer was removed from the pipeline. |
?
If an external event changes the master volume, the audio renderer sends an
Mutes or unmutes the audio.
+Specify TRUE to mute the audio, or
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The audio renderer is not initialized. |
| The audio renderer was removed from the pipeline. |
?
This method does not change the volume level returned by the
Queries whether the audio is muted.
+Receives a Boolean value. If TRUE, the audio is muted; otherwise, the audio is not muted.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The audio renderer is not initialized. |
| The audio renderer was removed from the pipeline. |
?
Calling
Retrieves the master volume level.
+If an external event changes the master volume, the audio renderer sends an
Queries whether the audio is muted.
+Calling
Implemented by the Microsoft Media Foundation sink writer object.
+To create the sink writer, call one of the following functions:
Alternatively, use the
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
In Windows 8, this interface is extended with IMFSinkWriterEx.
+Adds a stream to the sink writer.
+A reference to the
Receives the zero-based index of the new stream.
If this method succeeds, it returns
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Sets the input format for a stream on the sink writer.
+The zero-based index of the stream. The index is received by the pdwStreamIndex parameter of the
A reference to the
A reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The underlying media sink does not support the format, no conversion is possible, or a dynamic format change is not possible. |
| The dwStreamIndex parameter is invalid. |
| Could not find an encoder for the encoded format. |
?
The input format does not have to match the target format that is written to the media sink. If the formats do not match, the method attempts to load an encoder that can encode from the input format to the target format.
After streaming begins — that is, after the first call to
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Initializes the sink writer for writing.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The request is invalid. |
?
Call this method after you configure the input streams and before you send any data to the sink writer.
You must call BeginWriting before calling any of the following methods:
The underlying media sink must have at least one input stream. Otherwise, BeginWriting returns
If BeginWriting succeeds, any further calls to BeginWriting return
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Delivers a sample to the sink writer.
+The zero-based index of the stream for this sample.
A reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The request is invalid. |
?
You must call
By default, the sink writer limits the rate of incoming data by blocking the calling thread inside the WriteSample method. This prevents the application from delivering samples too quickly. To disable this behavior, set the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Indicates a gap in an input stream.
+The zero-based index of the stream.
The position in the stream where the gap in the data occurs. The value is given in 100-nanosecond units, relative to the start of the stream.
If this method succeeds, it returns
For video, call this method once for each missing frame. For audio, call this method at least once per second during a gap in the audio. Set the
Internally, this method calls
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Places a marker in the specified stream.
+The zero-based index of the stream.
Pointer to an application-defined value. The value of this parameter is returned to the caller in the pvContext parameter of the caller's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The request is invalid. |
?
To use this method, you must provide an asynchronous callback when you create the sink writer. Otherwise, the method returns
Markers provide a way to be notified when the media sink consumes all of the samples in a stream up to a certain point. The media sink does not process the marker until it has processed all of the samples that came before the marker. When the media sink processes the marker, the sink writer calls the application's OnMarker method. When the callback is invoked, you know that the sink has consumed all of the previous samples for that stream.
For example, to change the format midstream, call PlaceMarker at the point where the format changes. When OnMarker is called, it is safe to call
Internally, this method calls
Note: The pvContext parameter of the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Notifies the media sink that a stream has reached the end of a segment.
+The zero-based index of a stream, or
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The request is invalid. |
?
You must call
This method sends an
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Flushes one or more streams.
+The zero-based index of the stream to flush, or
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The request is invalid. |
?
You must call
For each stream that is flushed, the sink writer drops all pending samples, flushes the encoder, and sends an
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Completes all writing operations on the sink writer.
+If this method succeeds, it returns
Call this method after you send all of the input samples to the sink writer. The method performs any operations needed to create the final output from the media sink.
If you provide a callback interface when you create the sink writer, this method completes asynchronously. When the operation completes, the
Internally, this method calls
After this method is called, the following methods will fail:
If you do not call Finalize, the output from the media sink might be incomplete or invalid. For example, required file headers might be missing from the output file.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Queries the underlying media sink or encoder for an interface.
+The zero-based index of a stream to query, or
A service identifier
The interface identifier (IID) of the interface being requested.
Receives a reference to the requested interface. The caller must release the interface.
If this method succeeds, it returns
If the dwStreamIndex parameter equals
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Gets statistics about the performance of the sink writer.
+The zero-based index of a stream to query, or
A reference to an
This method can return one of these values.
Return code | Description |
---|---|
| Success. |
| Invalid stream number. |
?
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Called when the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Called when the
Returns an
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Called when the
Returns an
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+
Called by the network source when the open operation begins or ends.
+The network source calls this method with the following event types.
For more information, see How to Get Events from the Network Source.
+
Called by the network source when the open operation begins or ends.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The network source calls this method with the following event types.
For more information, see How to Get Events from the Network Source.
+Implemented by the Microsoft Media Foundation source reader object.
+To create the source reader, call one of the following functions:
Alternatively, use the
This interface is available on Windows Vista if Platform Update Supplement for Windows Vista is installed.
In Windows 8, this interface is extended with IMFSourceReaderEx.
+Queries whether a stream is selected.
+The stream to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
?
Receives TRUE if the stream is selected and will generate data. Receives
If this method succeeds, it returns
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Selects or deselects one or more streams.
+The stream to set. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| All streams. |
?
Specify TRUE to select streams or
If this method succeeds, it returns
There are two common uses for this method:
For an example of deselecting a stream, see Tutorial: Decoding Audio.
If a stream is deselected, the
Stream selection does not affect how the source reader loads or unloads decoders in memory. In particular, deselecting a stream does not force the source reader to unload the decoder for that stream.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Gets a format that is supported natively by the media source.
+Specifies which stream to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
?
The zero-based index of the media type to retrieve.
Receives a reference to the
This method queries the underlying media source for its native output format. Potentially, each source stream can produce more than one output format. Use the dwMediaTypeIndex parameter to loop through the available formats. Generally, file sources offer just one format per stream, but capture devices might offer several formats.
The method returns a copy of the media type, so it is safe to modify the object received in the ppMediaType parameter.
To set the output type for a stream, call the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Gets the current media type for a stream.
+The stream to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
?
Receives a reference to the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Sets the media type for a stream.
This media type defines the format that the Source Reader produces as output. It can differ from the native format provided by the media source. See Remarks for more information.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| At least one decoder was found for the native stream type, but the type specified by pMediaType was rejected. |
| One or more sample requests are still pending. |
| The dwStreamIndex parameter is invalid. |
| Could not find a decoder for the native stream type. |
?
For each stream, you can set the media type to any of the following:
Audio resampling support was added to the source reader with Windows 8. In versions of Windows prior to Windows 8, the source reader does not support audio resampling. If you need to resample the audio in versions of Windows earlier than Windows 8, you can use the Audio Resampler DSP.
If you set the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Seeks to a new position in the media source.
+A
Value | Meaning |
---|---|
| 100-nanosecond units. |
?
Some media sources might support additional values.
The position from which playback will be started. The units are specified by the guidTimeFormat parameter. If the guidTimeFormat parameter is GUID_NULL, set the variant type to VT_I8.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| One or more sample requests are still pending. |
?
The SetCurrentPosition method does not guarantee exact seeking. The accuracy of the seek depends on the media content. If the media content contains a video stream, the SetCurrentPosition method typically seeks to the nearest key frame before the desired position. The distance between key frames depends on several factors, including the encoder implementation, the video content, and the particular encoding settings used to encode the content. The distance between key frames can vary within a single video file (for example, depending on scene complexity).
After seeking, the application should call
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Reads the next sample from the media source.
+The stream to pull data from. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| Get the next available sample, regardless of which stream. |
?
A bitwise OR of zero or more flags from the
Receives the zero-based index of the stream.
Receives a bitwise OR of zero or more flags from the
Receives the time stamp of the sample, or the time of the stream event indicated in pdwStreamFlags. The time is given in 100-nanosecond units.
Receives a reference to the
If the requested stream is not selected, the return code is
This method can complete synchronously or asynchronously. If you provide a callback reference when you create the source reader, the method is asynchronous. Otherwise, the method is synchronous. For more information about setting the callback reference, see
Flushes one or more streams.
+The stream to flush. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| All streams. |
?
If this method succeeds, it returns
The Flush method discards all queued samples and cancels all pending sample requests.
This method can complete either synchronously or asynchronously. If you provide a callback reference when you create the source reader, the method is asynchronous. Otherwise, the method is synchronous. For more information about setting the callback reference, see
In synchronous mode, the method blocks until the operation is complete.
In asynchronous mode, the application's
Note: In Windows 7, there was a bug in the implementation of this method, which causes OnFlush to be called before the flush operation completes. A hotfix is available that fixes this bug. For more information, see http://support.microsoft.com/kb/979567.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Queries the underlying media source or decoder for an interface.
+The stream or object to query. If the value is
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| The media source. |
?
A service identifier
The interface identifier (IID) of the interface being requested.
Receives a reference to the requested interface. The caller must release the interface.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Gets an attribute from the underlying media source.
+The stream or object to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| The media source. |
?
A
Otherwise, if the dwStreamIndex parameter specifies a stream, guidAttribute specifies a stream descriptor attribute. For a list of values, see Stream Descriptor Attributes.
A reference to a
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Pointer to the
Call CoInitialize(Ex) and
Internally, the source reader calls the
This function is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+A reference to the
Pointer to the
Call CoInitialize(Ex) and
Internally, the source reader calls the
This function is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+A reference to the
Pointer to the
Call CoInitialize(Ex) and
Internally, the source reader calls the
This function is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Pointer to the
Call CoInitialize(Ex) and
By default, when the application releases the source reader, the source reader shuts down the media source by calling
To change this default behavior, set the
When using the Source Reader, do not call any of the following methods on the media source:
This function is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
Windows Phone 8.1: This API is supported.
+A reference to the
Pointer to the
Call CoInitialize(Ex) and
Internally, the source reader calls the
This function is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Gets a format that is supported natively by the media source.
+Specifies which stream to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
?
The zero-based index of the media type to retrieve.
Receives a reference to the
This method queries the underlying media source for its native output format. Potentially, each source stream can produce more than one output format. Use the dwMediaTypeIndex parameter to loop through the available formats. Generally, file sources offer just one format per stream, but capture devices might offer several formats.
The method returns a copy of the media type, so it is safe to modify the object received in the ppMediaType parameter.
To set the output type for a stream, call the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Selects or deselects one or more streams.
+The stream to set. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| All streams. |
?
Specify TRUE to select streams or
If this method succeeds, it returns
There are two common uses for this method:
For an example of deselecting a stream, see Tutorial: Decoding Audio.
If a stream is deselected, the
Stream selection does not affect how the source reader loads or unloads decoders in memory. In particular, deselecting a stream does not force the source reader to unload the decoder for that stream.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Sets the media type for a stream.
This media type defines the format that the Source Reader produces as output. It can differ from the native format provided by the media source. See Remarks for more information.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| At least one decoder was found for the native stream type, but the type specified by pMediaType was rejected. |
| One or more sample requests are still pending. |
| The dwStreamIndex parameter is invalid. |
| Could not find a decoder for the native stream type. |
?
For each stream, you can set the media type to any of the following:
The source reader does not support audio resampling. If you need to resample the audio, you can use the Audio Resampler DSP.
If you set the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Sets the media type for a stream.
This media type defines the format that the Source Reader produces as output. It can differ from the native format provided by the media source. See Remarks for more information.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| At least one decoder was found for the native stream type, but the type specified by pMediaType was rejected. |
| One or more sample requests are still pending. |
| The dwStreamIndex parameter is invalid. |
| Could not find a decoder for the native stream type. |
?
For each stream, you can set the media type to any of the following:
The source reader does not support audio resampling. If you need to resample the audio, you can use the Audio Resampler DSP.
If you set the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Seeks to a new position in the media source.
+The SetCurrentPosition method does not guarantee exact seeking. The accuracy of the seek depends on the media content. If the media content contains a video stream, the SetCurrentPosition method typically seeks to the nearest key frame before the desired position. The distance between key frames depends on several factors, including the encoder implementation, the video content, and the particular encoding settings used to encode the content. The distance between key frames can vary within a single video file (for example, depending on scene complexity).
After seeking, the application should call
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Gets the current media type for a stream.
+The stream to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
?
Receives a reference to the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Reads the next sample from the media source.
+The stream to pull data from. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| Get the next available sample, regardless of which stream. |
?
A bitwise OR of zero or more flags from the
Receives the zero-based index of the stream.
Receives a bitwise OR of zero or more flags from the
Receives the time stamp of the sample, or the time of the stream event indicated in pdwStreamFlags. The time is given in 100-nanosecond units.
Receives a reference to the
If the requested stream is not selected, the return code is MF_E_INVALIDREQUEST. See
This method can complete synchronously or asynchronously. If you provide a callback reference when you create the source reader, the method is asynchronous. Otherwise, the method is synchronous. For more information about setting the callback reference, see
In asynchronous mode:
All of the [out] parameters must be null. Otherwise, the method returns an invalid-argument error.
In synchronous mode:
In synchronous mode, if the dwStreamIndex parameter is
This method can return flags in the pdwStreamFlags parameter without returning a media sample in ppSample. Therefore, the ppSample parameter can receive a
If there is a gap in the stream, pdwStreamFlags receives the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Flushes one or more streams.
+The stream to flush. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| All streams. |
?
If this method succeeds, it returns
The Flush method discards all queued samples and cancels all pending sample requests.
This method can complete either synchronously or asynchronously. If you provide a callback reference when you create the source reader, the method is asynchronous. Otherwise, the method is synchronous. For more information about setting the callback reference, see
In synchronous mode, the method blocks until the operation is complete.
In asynchronous mode, the application's
Note: In Windows 7, there was a bug in the implementation of this method, which causes OnFlush to be called before the flush operation completes. A hotfix is available that fixes this bug. For more information, see http://support.microsoft.com/kb/979567.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Queries the underlying media source or decoder for an interface.
+The stream or object to query. If the value is
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| The media source. |
?
A service identifier
The interface identifier (IID) of the interface being requested.
Receives a reference to the requested interface. The caller must release the interface.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Gets an attribute from the underlying media source.
+The stream or object to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| The media source. |
?
A
Otherwise, if the dwStreamIndex parameter specifies a stream, guidAttribute specifies a stream descriptor attribute. For a list of values, see Stream Descriptor Attributes.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Applies to: desktop apps | Metro style apps
Gets an attribute from the underlying media source.
+The stream or object to query. The value can be any of the following.
Value | Meaning |
---|---|
| The zero-based index of a stream. |
| The first video stream. |
| The first audio stream. |
| The media source. |
?
A
Otherwise, if the dwStreamIndex parameter specifies a stream, guidAttribute specifies a stream descriptor attribute. For a list of values, see Stream Descriptor Attributes.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Callback interface for the Microsoft Media Foundation source reader.
+Use the
The callback methods can be called from any thread, so an object that implements this interface must be thread-safe.
If you do not specify a callback reference, the source reader operates synchronously.
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Called when the
Returns an
The pSample parameter might be
If there is a gap in the stream, dwStreamFlags contains the
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Called when the
Returns an
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Called when the source reader receives certain events from the media source.
+For stream events, the value is the zero-based index of the stream that sent the event. For source events, the value is
A reference to the
Returns an
In the current implementation, the source reader uses this method to forward the following events to the application:
This interface is available on Windows?Vista if Platform Update Supplement for Windows?Vista is installed.
+Creates a media source or a byte stream from a URL. This method is synchronous.
+The dwFlags parameter must contain either the
It is recommended that you do not set
For local files, you can pass the file name in the pwszURL parameter; the file:
scheme is not required.
Note: This method cannot be called remotely.
+Creates a media source or a byte stream from a URL. This method is synchronous.
+Null-terminated string that contains the URL to resolve.
Bitwise OR of one or more flags. See Source Resolver Flags. See remarks below.
Pointer to the
Receives a member of the
Receives a reference to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwFlags parameter contains mutually exclusive flags. |
| The URL scheme is not supported. |
?
The dwFlags parameter must contain either the
It is recommended that you do not set
For local files, you can pass the file name in the pwszURL parameter; the file:
scheme is not required.
Note: This method cannot be called remotely.
+Creates a media source from a byte stream. This method is synchronous.
+ Pointer to the byte stream's
Null-terminated string that contains the URL of the byte stream. The URL is optional and can be
Bitwise OR of flags. See Source Resolver Flags.
Pointer to the
Receives a member of the
Receives a reference to the media source's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwFlags parameter contains mutually exclusive flags. |
| This byte stream is not supported. |
?
The dwFlags parameter must contain the
The source resolver attempts to find one or more byte-stream handlers for the byte stream, based on the file name extension of the URL, or the MIME type of the byte stream (or both). The URL is specified in the optional pwszURL parameter, and the MIME type may be specified in the
Note: This method cannot be called remotely.
+
Begins an asynchronous request to create a media source or a byte stream from a URL.
+Null-terminated string that contains the URL to resolve.
Bitwise OR of flags. See Source Resolver Flags.
Pointer to the
Receives an
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwFlags parameter contains mutually exclusive flags. |
| The URL scheme is not supported. |
?
The dwFlags parameter must contain either the
For local files, you can pass the file name in the pwszURL parameter; the file:
scheme is not required.
When the operation completes, the source resolver calls the
The usage of the pProps parameter depends on the implementation of the media source.
+Completes an asynchronous request to create an object from a URL.
+ Pointer to the
Receives a member of the
Receives a reference to the media source's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The operation was canceled. |
?
Call this method from inside your application's
Begins an asynchronous request to create a media source from a byte stream.
+A reference to the byte stream's
A null-terminated string that contains the original URL of the byte stream. This parameter can be
A bitwise OR of one or more flags. See Source Resolver Flags.
A reference to the
Receives an
A reference to the
A pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwFlags parameter contains mutually exclusive flags. |
| The byte stream is not supported. |
| The byte stream does not support seeking. |
?
The dwFlags parameter must contain the
The source resolver attempts to find one or more byte-stream handlers for the byte stream, based on the file name extension of the URL, or the MIME type of the byte stream (or both). The URL is specified in the optional pwszURL parameter, and the MIME type may be specified in the
When the operation completes, the source resolver calls the
Completes an asynchronous request to create a media source from a byte stream.
+Pointer to the
Receives a member of the
Receives a reference to the media source's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The application canceled the operation. |
?
Call this method from inside your application's
Cancels an asynchronous request to create an object.
+ Pointer to the
If this method succeeds, it returns
You can use this method to cancel a previous call to BeginCreateObjectFromByteStream or BeginCreateObjectFromURL. Because these methods are asynchronous, however, they might be completed before the operation can be canceled. Therefore, your callback might still be invoked after you call this method.
Note: This method cannot be called remotely.
+Applies to: desktop apps | Metro style apps
Creates a media source or a byte stream from a URL. This method is synchronous.
+Null-terminated string that contains the URL to resolve.
Bitwise OR of one or more flags. See Source Resolver Flags.
The dwFlags parameter must contain either the
For local files, you can pass the file name in the pwszURL parameter; the file:
scheme is not required.
Note: This method cannot be called remotely.
+Applies to: desktop apps | Metro style apps
Creates a media source or a byte stream from a URL. This method is synchronous.
+Null-terminated string that contains the URL to resolve.
Bitwise OR of one or more flags. See Source Resolver Flags.
Receives a member of the
The dwFlags parameter must contain either the
For local files, you can pass the file name in the pwszURL parameter; the file:
scheme is not required.
Note: This method cannot be called remotely.
+Applies to: desktop apps | Metro style apps
Creates a media source or a byte stream from a URL. This method is synchronous.
+Null-terminated string that contains the URL to resolve.
Bitwise OR of one or more flags. See Source Resolver Flags.
Pointer to the
Receives a member of the
The dwFlags parameter must contain either the
For local files, you can pass the file name in the pwszURL parameter; the file:
scheme is not required.
Note: This method cannot be called remotely.
+Applies to: desktop apps | Metro style apps
Creates a media source from a byte stream. This method is synchronous.
+ Pointer to the byte stream's
Null-terminated string that contains the URL of the byte stream. The URL is optional and can be
Bitwise OR of flags. See Source Resolver Flags.
The dwFlags parameter must contain the
The source resolver attempts to find one or more byte-stream handlers for the byte stream, based on the file name extension of the URL, or the MIME type of the byte stream (or both). The URL is specified in the optional pwszURL parameter, and the MIME type may be specified in the
Note: This method cannot be called remotely.
+Applies to: desktop apps | Metro style apps
Creates a media source from a byte stream. This method is synchronous.
+ Pointer to the byte stream's
Null-terminated string that contains the URL of the byte stream. The URL is optional and can be
Bitwise OR of flags. See Source Resolver Flags.
Receives a member of the
The dwFlags parameter must contain the
The source resolver attempts to find one or more byte-stream handlers for the byte stream, based on the file name extension of the URL, or the MIME type of the byte stream (or both). The URL is specified in the optional pwszURL parameter, and the MIME type may be specified in the
Note: This method cannot be called remotely.
+Applies to: desktop apps | Metro style apps
Creates a media source from a byte stream. This method is synchronous.
+ Pointer to the byte stream's
Null-terminated string that contains the URL of the byte stream. The URL is optional and can be
Bitwise OR of flags. See Source Resolver Flags.
Pointer to the
Receives a member of the
The dwFlags parameter must contain the
The source resolver attempts to find one or more byte-stream handlers for the byte stream, based on the file name extension of the URL, or the MIME type of the byte stream (or both). The URL is specified in the optional pwszURL parameter, and the MIME type may be specified in the
Note: This method cannot be called remotely.
+Indicates whether the server SSL certificate must be verified by the caller, Media Foundation, or the
Gets the client SSL certificate synchronously.
+Pointer to a string that contains the URL for which a client-side SSL certificate is required. Media Foundation can resolve the scheme and send the request to the server.
Pointer to the buffer that stores the certificate. The caller must free the buffer by calling CoTaskMemFree.
Pointer to a DWORD variable that receives the number of bytes required to hold the certificate data in the buffer pointed by *ppbData.
If this method succeeds, it returns
Starts an asynchronous call to get the client SSL certificate.
+A null-terminated string that contains the URL for which a client-side SSL certificate is required. Media Foundation can resolve the scheme and send the request to the server.
A reference to the
A reference to the
If this method succeeds, it returns
When the operation completes, the callback object's
Completes an asynchronous request to get the client SSL certificate.
+A reference to the
Receives a reference to the buffer that stores the certificate. The caller must free the buffer by calling CoTaskMemFree.
Receives the size of the ppbData buffer, in bytes.
If this method succeeds, it returns
Call this method after the
Indicates whether the server SSL certificate must be verified by the caller, Media Foundation, or the
Pointer to a string that contains the URL that is sent to the server.
Pointer to a
Pointer to a
If this method succeeds, it returns
Called by Media Foundation when the server SSL certificate has been received; indicates whether the server certificate is accepted.
+Pointer to a string that contains the URL used to send the request to the server, and for which a server-side SSL certificate has been received.
Pointer to a buffer that contains the server SSL certificate.
Pointer to a DWORD variable that indicates the size of pbData in bytes.
Pointer to a
If this method succeeds, it returns
Gets information about one stream in a media source.
+A presentation descriptor contains one or more stream descriptors. To get the stream descriptors from a presentation descriptor, call
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an identifier for the stream.
+Receives the stream identifier.
If this method succeeds, it returns
The stream identifier uniquely identifies a stream within a presentation. It does not change throughout the lifetime of the stream. For example, if the presentation changes while the source is running, the index number of the stream may change, but the stream identifier does not.
In general, stream identifiers do not have a specific meaning, other than to identify the stream. Some media sources may assign stream identifiers based on meaningful values, such as packet identifiers, but this depends on the implementation.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a media type handler for the stream. The media type handler can be used to enumerate supported media types for the stream, get the current media type, and set the media type.
+Receives a reference to the
If this method succeeds, it returns
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves an identifier for the stream.
+The stream identifier uniquely identifies a stream within a presentation. It does not change throughout the lifetime of the stream. For example, if the presentation changes while the source is running, the index number of the stream may change, but the stream identifier does not.
In general, stream identifiers do not have a specific meaning, other than to identify the stream. Some media sources may assign stream identifiers based on meaningful values, such as packet identifiers, but this depends on the implementation.
This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Retrieves a media type handler for the stream. The media type handler can be used to enumerate supported media types for the stream, get the current media type, and set the media type.
+This interface is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Called by the streaming media client before the Media Session starts streaming to specify the byte offset or the time offset.
+Called by the streaming media client before the Media Session starts streaming to specify the byte offset or the time offset.
+A Boolean value that specifies whether qwSeekOffset gives a byte offset or a time offset.
Value | Meaning |
---|---|
| The qwSeekOffset parameter specifies a byte offset. |
The qwSeekOffset parameter specifies the time position in 100-nanosecond units. |
?
A byte offset or a time offset, depending on the value passed in fSeekOffsetIsByteOffset. Time offsets are specified in 100-nanosecond units.
If this method succeeds, it returns
Represents a stream on a media sink object.
+
Retrieves the media sink that owns this stream sink.
+Receives a reference to the media sink's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink's Shutdown method has been called. |
| This stream was removed from the media sink and is no longer valid. |
?
Retrieves the stream identifier for this stream sink.
+Receives the stream identifier. If this stream sink was added by calling
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink's Shutdown method has been called. |
| This stream was removed from the media sink and is no longer valid. |
?
Retrieves the media type handler for the stream sink. You can use the media type handler to find which formats the stream supports, and to set the media type on the stream.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink's Shutdown method has been called. |
| This stream was removed from the media sink and is no longer valid. |
?
If the stream sink currently does not support any media types, this method returns a media type handler that fails any calls to
Delivers a sample to the stream. The media sink processes the sample.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink is in the wrong state to receive a sample. For example, preroll is complete but the presentation clock has not started yet. |
| The sample has an invalid time stamp. See Remarks. |
| The media sink is paused or stopped and cannot process the sample. |
| The presentation clock was not set. Call |
| The sample does not have a time stamp. |
| The stream sink has not been initialized. |
| The media sink's Shutdown method has been called. |
| This stream was removed from the media sink and is no longer valid. |
?
Call this method when the stream sink sends an
This method can return
Negative time stamps.
Time stamps that jump backward (within the same stream).
The time stamps for one stream have drifted too far from the time stamps on another stream within the same media sink (for example, an archive sink that multiplexes the streams).
Not every media sink returns an error code in these situations.
+
Places a marker in the stream.
+ Specifies the marker type, as a member of the
Optional reference to a
Optional reference to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media sink's Shutdown method has been called. |
| This stream was removed from the media sink and is no longer valid. |
?
This method causes the stream sink to send an
Causes the stream sink to drop any samples that it has received and has not rendered yet.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The stream sink has not been initialized yet. You might need to set a media type. |
| The media sink's Shutdown method has been called. |
| This stream was removed from the media sink and is no longer valid. |
?
If any samples are still queued from previous calls to the
Any pending marker events from the
This method is synchronous. It does not return until the sink has discarded all pending samples.
+
Retrieves the media sink that owns this stream sink.
+
Retrieves the stream identifier for this stream sink.
+
Retrieves the media type handler for the stream sink. You can use the media type handler to find which formats the stream supports, and to set the media type on the stream.
+If the stream sink currently does not support any media types, this method returns a media type handler that fails any calls to
Converts between Society of Motion Picture and Television Engineers (SMPTE) time codes and 100-nanosecond time units.
+If an object supports this interface, it must expose the interface as a service. To get a reference to the interface, call
The Advanced Streaming Format (ASF) media source exposes this interface.
+Starts an asynchronous call to convert Society of Motion Picture and Television Engineers (SMPTE) time code to 100-nanosecond units.
+Time in SMPTE time code to convert. The vt member of the
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| pPropVarTimecode is not VT_I8. |
| The object's Shutdown method was called. |
| The byte stream is not seekable. The time code cannot be read from the end of the byte stream. |
?
When the asynchronous method completes, the callback object's
The value of pPropVarTimecode is a 64-bit unsigned value typed as a LONGLONG. The upper DWORD contains the range. (A range is a continuous series of time codes.) The lower DWORD contains the time code in the form of a hexadecimal number 0xhhmmssff, where each 2-byte sequence is read as a decimal value.
void CreateTimeCode( DWORD dwFrames, DWORD dwSeconds, DWORD dwMinutes, DWORD dwHours, DWORD dwRange,+*pvar ) + { ULONGLONG ullTimecode = ((ULONGLONG)dwRange) << 32; ullTimecode += dwFrames % 10; ullTimecode += (( (ULONGLONG)dwFrames ) / 10) << 4; ullTimecode += (( (ULONGLONG)dwSeconds ) % 10) << 8; ullTimecode += (( (ULONGLONG)dwSeconds ) / 10) << 12; ullTimecode += (( (ULONGLONG)dwMinutes ) % 10) << 16; ullTimecode += (( (ULONGLONG)dwMinutes ) / 10) << 20; ullTimecode += (( (ULONGLONG)dwHours ) % 10) << 24; ullTimecode += (( (ULONGLONG)dwHours ) / 10) << 28; pvar->vt = VT_I8; pvar->hVal.QuadPart = (LONGLONG)ullTimecode; + } +
Completes an asynchronous request to convert time in Society of Motion Picture and Television Engineers (SMPTE) time code to 100-nanosecond units.
+Pointer to the
Receives the converted time.
If this method succeeds, it returns
Call this method after the
Starts an asynchronous call to convert time in 100-nanosecond units to Society of Motion Picture and Television Engineers (SMPTE) time code.
+The time to convert, in 100-nanosecond units.
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The object's Shutdown method was called. |
| The byte stream is not seekable. The time code cannot be read from the end of the byte stream. |
?
When the asynchronous method completes, the callback object's
Completes an asynchronous request to convert time in 100-nanosecond units to Society of Motion Picture and Television Engineers (SMPTE) time code.
+A reference to the
A reference to a
If this method succeeds, it returns
Call this method after the
The value of pPropVarTimecode is a 64-bit unsigned value typed as a LONGLONG. The upper DWORD contains the range. (A range is a continuous series of time codes.) The lower DWORD contains the time code in the form of a hexadecimal number 0xhhmmssff, where each 2-byte sequence is read as a decimal value.
+ParseTimeCode( const & var, DWORD *pdwRange, DWORD *pdwFrames, DWORD *pdwSeconds, DWORD *pdwMinutes, DWORD *pdwHours ) + { if (var.vt != VT_I8) { return E_INVALIDARG; } ULONGLONG ullTimeCode = (ULONGLONG)var.hVal.QuadPart; DWORD dwTimecode = (DWORD)(ullTimeCode & 0xFFFFFFFF); *pdwRange = (DWORD)(ullTimeCode >> 32); *pdwFrames = dwTimecode & 0x0000000F; *pdwFrames += (( dwTimecode & 0x000000F0) >> 4 ) * 10; *pdwSeconds = ( dwTimecode & 0x00000F00) >> 8; *pdwSeconds += (( dwTimecode & 0x0000F000) >> 12 ) * 10; *pdwMinutes = ( dwTimecode & 0x000F0000) >> 16; *pdwMinutes += (( dwTimecode & 0x00F00000) >> 20 ) * 10; *pdwHours = ( dwTimecode & 0x0F000000) >> 24; *pdwHours += (( dwTimecode & 0xF0000000) >> 28 ) * 10; return ; + } +
Sets a timer that invokes a callback at the specified time.
+If the clock is stopped, the method returns MF_S_CLOCK_STOPPED. The callback will not be invoked until the clock is started.
+
Sets a timer that invokes a callback at the specified time.
+Bitwise OR of zero or more flags from the
The time at which the timer should fire, in units of the clock's frequency. The time is either absolute or relative to the current time, depending on the value of dwFlags.
Pointer to the
Pointer to the
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The clock was shut down. |
| The method succeeded, but the clock is stopped. |
?
If the clock is stopped, the method returns MF_S_CLOCK_STOPPED. The callback will not be invoked until the clock is started.
+
Cancels a timer that was set using the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Because the timer is dispatched asynchronously, the application's timer callback might get invoked even if this method succeeds.
+Converts a partial topology into a full topology. The topology loader exposes this interface.
+To create the topology loader, call the
Creates a fully loaded topology from the input partial topology.
+A reference to the
Receives a reference to the
A reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| One or more output nodes contain |
?
This method creates any intermediate transforms that are needed to complete the topology. It also sets the input and output media types on all of the objects in the topology. If the method succeeds, the full topology is returned in the ppOutputTopo parameter.
You can use the pCurrentTopo parameter to provide a full topology that was previously loaded. If this topology contains objects that are needed in the new topology, the topology loader can re-use them without creating them again. This caching can potentially make the process faster. The objects from pCurrentTopo will not be reconfigured, so you can specify a topology that is actively streaming data. For example, while a topology is still running, you can pre-load the next topology.
Before calling this method, you must ensure that the output nodes in the partial topology have valid
Gets the source nodes in the topology.
+Gets the identifier of the topology.
+Receives the identifier, as a TOPOID value.
If this method succeeds, it returns
Adds a node to the topology.
+Pointer to the node's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| pNode is invalid, possibly because the node already exists in the topology. |
?
Removes a node from the topology.
+Pointer to the node's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The specified node is not a member of this topology. |
?
This method does not destroy the node, so the
The method breaks any connections between the specified node and other nodes.
+Gets the number of nodes in the topology.
+Receives the number of nodes.
If this method succeeds, it returns
Gets a node in the topology, specified by index.
+ The zero-based index of the node. To get the number of nodes in the topology, call
Receives a reference to the node's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The index is less than zero. |
| No node can be found at the index wIndex. |
?
Removes all nodes from the topology.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
You do not need to clear a topology before disposing of it. The Clear method is called automatically when the topology is destroyed.
+Converts this topology into a copy of another topology.
+ A reference to the
If this method succeeds, it returns
This method does the following:
Gets a node in the topology, specified by node identifier.
+ The identifier of the node to retrieve. To get a node's identifier, call
Receives a reference to the node's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The topology does not contain a node with this identifier. |
?
Gets the source nodes in the topology.
+Receives a reference to the
If this method succeeds, it returns
Gets the output nodes in the topology.
+ Receives a reference to the
If this method succeeds, it returns
Gets the identifier of the topology.
+Gets the number of nodes in the topology.
+Gets the source nodes in the topology.
+Gets the output nodes in the topology.
+Represents a node in a topology. The following node types are supported:
To create a new node, call the
Sets the object associated with this node.
+A reference to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
All node types support this method, but the object reference is not used by every node type.
Node type | Object reference |
---|---|
Source node. | Not used. |
Transform node. | |
Output node | |
Tee node. | Not used. |
?
If the object supports
Gets the object associated with this node.
+ Receives a reference to the object's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There is no object associated with this node. |
?
Retrieves the node type.
+Receives the node type, specified as a member of the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the identifier of the node.
+Receives the identifier.
If this method succeeds, it returns
When a node is first created, it is assigned an identifier. Node identifiers are unique within a topology, but can be reused across several topologies. The topology loader uses the identifier to look up nodes in the previous topology, so that it can reuse objects from the previous topology.
To find a node in a topology by its identifier, call
Sets the identifier for the node.
+The identifier for the node.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The TOPOID has already been set for this object. |
?
When a node is first created, it is assigned an identifier. Typically there is no reason for an application to override the identifier. Within a topology, each node identifier should be unique.
+
Retrieves the number of input streams that currently exist on this node.
+Receives the number of input streams.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The input streams may or may not be connected to output streams on other nodes. To get the node that is connected to a specified input stream, call
The
Retrieves the number of output streams that currently exist on this node.
+Receives the number of output streams.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The output streams may or may not be connected to input streams on other nodes. To get the node that is connected to a specific output stream on this node, call
The
Connects an output stream from this node to the input stream of another node.
+Zero-based index of the output stream on this node.
Pointer to the
Zero-based index of the input stream on the other node.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The method failed. |
| Invalid parameter. |
?
Node connections represent data flow from one node to the next. The streams are logical, and are specified by index.
If the node is already connected at the specified output, the method breaks the existing connection. If dwOutputIndex or dwInputIndexOnDownstreamNode specify streams that do not exist yet, the method adds as many streams as needed.
This method checks for certain invalid conditions:
An output node cannot have any output connections. If you call this method on an output node, the method returns E_FAIL.
A node cannot be connected to itself. If pDownstreamNode specifies the same node as the method call, the method returns E_INVALIDARG.
However, if the method succeeds, it does not guarantee that the node connection is valid. It is possible to create a partial topology that the topology loader cannot resolve. If so, the
To break an existing node connection, call
Disconnects an output stream on this node.
+Zero-based index of the output stream to disconnect.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwOutputIndex parameter is out of range. |
| The specified output stream is not connected to another node. |
?
If the specified output stream is connected to another node, this method breaks the connection.
+
Retrieves the node that is connected to a specified input stream on this node.
+Zero-based index of an input stream on this node.
Receives a reference to the
Receives the index of the output stream that is connected to this node's input stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The index is out of range. |
| The specified input stream is not connected to another node. |
?
Retrieves the node that is connected to a specified output stream on this node.
+Zero-based index of an output stream on this node.
Receives a reference to the
Receives the index of the input stream that is connected to this node's output stream.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The index is out of range. |
| The specified input stream is not connected to another node. |
?
Sets the preferred media type for an output stream on this node.
+Zero-based index of the output stream.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This node is an output node. |
?
The preferred type is a hint for the topology loader.
Do not call this method after loading a topology or setting a topology on the Media Session. Changing the preferred type on a running topology can cause connection errors.
If no output stream exists at the specified index, the method creates new streams up to and including the specified index number.
Output nodes cannot have outputs. If this method is called on an output node, it returns E_NOTIMPL.
+
Retrieves the preferred media type for an output stream on this node.
+Zero-based index of the output stream.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This node does not have a preferred output type. |
| Invalid stream index. |
| This node is an output node. |
?
Output nodes cannot have outputs. If this method is called on an output node, it returns E_NOTIMPL.
The preferred output type provides a hint to the topology loader. In a fully resolved topology, there is no guarantee that every topology node will have a preferred output type. To get the actual media type for a node, you must get a reference to the node's underlying object. (For more information, see
Sets the preferred media type for an input stream on this node.
+Zero-based index of the input stream.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This node is a source node. |
?
The preferred type is a hint for the topology loader.
Do not call this method after loading a topology or setting a topology on the Media Session. Changing the preferred type on a running topology can cause connection errors.
If no input stream exists at the specified index, the method creates new streams up to and including the specified index number.
Source nodes cannot have inputs. If this method is called on a source node, it returns E_NOTIMPL.
+
Retrieves the preferred media type for an input stream on this node.
+Zero-based index of the input stream.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| This node does not have a preferred input type. |
| Invalid stream index. |
| This node is a source node. |
?
Source nodes cannot have inputs. If this method is called on a source node, it returns E_NOTIMPL.
The preferred input type provides a hint to the topology loader. In a fully resolved topology, there is no guarantee that every topology node will have a preferred input type. To get the actual media type for a node, you must get a reference to the node's underlying object. (For more information, see
Copies the data from another topology node into this node.
+ A reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The node types do not match. |
?
The two nodes must have the same node type. To get the node type, call
This method copies the object reference, preferred types, and attributes from pNode to this node. It also copies the TOPOID that uniquely identifies each node in a topology. It does not duplicate any of the connections from pNode to other nodes.
The purpose of this method is to copy nodes from one topology to another. Do not use duplicate nodes within the same topology.
+
Sets the object associated with this node.
+All node types support this method, but the object reference is not used by every node type.
Node type | Object reference |
---|---|
Source node. | Not used. |
Transform node. | |
Output node | |
Tee node. | Not used. |
?
If the object supports
Gets the object associated with this node.
+
Retrieves the node type.
+Retrieves or sets the identifier of the node.
+When a node is first created, it is assigned an identifier. Node identifiers are unique within a topology, but can be reused across several topologies. The topology loader uses the identifier to look up nodes in the previous topology, so that it can reuse objects from the previous topology.
To find a node in a topology by its identifier, call
Retrieves the number of input streams that currently exist on this node.
+The input streams may or may not be connected to output streams on other nodes. To get the node that is connected to a specified input stream, call
The
Retrieves the number of output streams that currently exist on this node.
+The output streams may or may not be connected to input streams on other nodes. To get the node that is connected to a specific output stream on this node, call
The
Updates the attributes of one or more nodes in the current topology.
+Currently the only attribute that can be updated is the
Updates the attributes of one or more nodes in the current topology.
+Reserved.
The number of elements in the pUpdates array.
Pointer to an array of
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Currently the only attribute that can be updated is the
Enables a custom video mixer or video presenter to get interface references from the Enhanced Video Renderer (EVR). The mixer can also use this interface to get interface references from the presenter, and the presenter can use it to get interface references from the mixer.
To use this interface, implement the
Retrieves an interface from the enhanced video renderer (EVR), or from the video mixer or video presenter.
+Specifies the scope of the search. Currently this parameter is ignored. Use the value
Reserved, must be zero.
Service
Interface identifier of the requested interface.
Array of interface references. If the method succeeds, each member of the array contains either a valid interface reference or
Pointer to a value that specifies the size of the ppvObjects array. The value must be at least 1. In the current implementation, there is no reason to specify an array size larger than one element. The value is not changed on output.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The requested interface is not available. |
| The method was not called from inside the |
| The object does not support the specified service |
?
This method can be called only from inside the
The presenter can use this method to query the EVR and the mixer. The mixer can use it to query the EVR and the presenter. Which objects are queried depends on the caller and the service
Caller | Service | Objects queried |
---|---|---|
Presenter | MR_VIDEO_RENDER_SERVICE | EVR |
Presenter | MR_VIDEO_MIXER_SERVICE | Mixer |
Mixer | MR_VIDEO_RENDER_SERVICE | Presenter and EVR |
?
The following interfaces are available from the EVR:
IMediaEventSink. This interface is documented in the DirectShow SDK documentation.
The following interfaces are available from the mixer:
Signals the object to release the interface references obtained from the enhanced video renderer (EVR).
+After this method is called, any interface references obtained during the previous call to
Signals the mixer or presenter to query the enhanced video renderer (EVR) for interface references.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The
When the EVR calls
Signals the object to release the interface references obtained from the enhanced video renderer (EVR).
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
After this method is called, any interface references obtained during the previous call to
Tracks the reference counts on a video media sample. Video samples created by the
Use this interface to determine whether it is safe to delete or re-use the buffer contained in a sample. One object assigns itself as the owner of the video sample by calling SetAllocator. When all objects release their reference counts on the sample, the owner's callback method is invoked.
+
Sets the owner for the sample.
+Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The owner was already set. This method cannot be called twice on the sample. |
?
When this method is called, the sample holds an additional reference count on itself. When every other object releases its reference counts on the sample, the sample invokes the pSampleAllocator callback method. To get a reference to the sample, call
After the callback is invoked, the sample clears the callback. To reinstate the callback, you must call SetAllocator again.
It is safe to pass in the sample's
Implemented by the transcode profile object.
The transcode profile stores configuration settings that the topology builder uses to generate the transcode topology for the output file. These configuration settings are specified by the caller and include audio and video stream properties, encoder settings, and container settings.
To create the transcode profile object, call
Sets audio stream configuration settings in the transcode profile.
To get a list of compatible audio media types supported by the Media Foundation transform (MFT) encoder, call
If this method succeeds, it returns
Gets the audio stream settings that are currently set in the transcode profile.
+Receives a reference to the
If this method succeeds, it returns
If there are no audio attributes set in the transcode profile, the call to GetAudioAttributes succeeds and ppAttrs receives
To get a specific attribute value, the caller must call the appropriate
Sets video stream configuration settings in the transcode profile.
For example code, see
If this method succeeds, it returns
Gets the video stream settings that are currently set in the transcode profile.
+Receives a reference to the
If this method succeeds, it returns
If there are no container attributes set in the transcode profile, the GetVideoAttributes method succeeds and ppAttrs receives
To get a specific attribute value, the caller must call the appropriate
Sets container configuration settings in the transcode profile.
For example code, see
If this method succeeds, it returns
Gets the container settings that are currently set in the transcode profile.
+Receives a reference to the
If this method succeeds, it returns
If there are no container attributes set in the transcode profile, the call to GetContainerAttributes succeeds and ppAttrs receives
To get a specific attribute value, the caller must call the appropriate
Gets or sets the audio stream settings that are currently set in the transcode profile.
+If there are no audio attributes set in the transcode profile, the call to GetAudioAttributes succeeds and ppAttrs receives
To get a specific attribute value, the caller must call the appropriate
Gets or sets the video stream settings that are currently set in the transcode profile.
+If there are no container attributes set in the transcode profile, the GetVideoAttributes method succeeds and ppAttrs receives
To get a specific attribute value, the caller must call the appropriate
Gets or sets the container settings that are currently set in the transcode profile.
+If there are no container attributes set in the transcode profile, the call to GetContainerAttributes succeeds and ppAttrs receives
To get a specific attribute value, the caller must call the appropriate
Implemented by the transcode sink activation object.
The transcode sink activation object can be used to create any of the following file sinks:
To use this interface, perform the following steps:
Sets the name of the encoded output file.
+Pointer to a null-terminated string that contains the name of the output file.
If this method succeeds, it returns
The media sink will create a local file with the specified file name.
Alternately, you can call
Sets an output byte stream for the transcode media sink.
+A reference to the
If this method succeeds, it returns
Call this method to provide a writeable byte stream that will receive the transcoded data.
Alternatively, you can provide the name of an output file, by calling
The pByteStreamActivate parameter must specify an activation object that creates a writeable byte stream. Internally, the transcode media sink calls
*pByteStream = null; hr = pByteStreamActivate->ActivateObject(IID_IMFByteStream, (void**)&pByteStream);
Currently, Microsoft Media Foundation does not provide any byte-stream activation objects. To use this method, an application must provide a custom implementation of
Sets the transcoding profile on the transcode sink activation object.
+A reference to the
If this method succeeds, it returns
Before calling this method, initialize the profile object as follows:
Gets the media types for the audio and video streams specified in the transcode profile.
+A reference to an
If the method succeeds, the method assigns
If this method succeeds, it returns
Before calling this method, call
Sets the name of the encoded output file.
+The media sink will create a local file with the specified file name.
Alternately, you can call
Sets an output byte stream for the transcode media sink.
+Call this method to provide a writeable byte stream that will receive the transcoded data.
Alternatively, you can provide the name of an output file, by calling
The pByteStreamActivate parameter must specify an activation object that creates a writeable byte stream. Internally, the transcode media sink calls
*pByteStream = null; hr = pByteStreamActivate->ActivateObject(IID_IMFByteStream, (void**)&pByteStream);
Currently, Microsoft Media Foundation does not provide any byte-stream activation objects. To use this method, an application must provide a custom implementation of
Sets the transcoding profile on the transcode sink activation object.
+Before calling this method, initialize the profile object as follows:
Gets the media types for the audio and video streams specified in the transcode profile.
+Before calling this method, call
Implemented by all Media Foundation Transforms (MFTs).
+Gets the minimum and maximum number of input and output streams for this Media Foundation transform (MFT).
+Receives the minimum number of input streams.
Receives the maximum number of input streams. If there is no maximum, receives the value MFT_STREAMS_UNLIMITED.
Receives the minimum number of output streams.
Receives the maximum number of output streams. If there is no maximum, receives the value MFT_STREAMS_UNLIMITED.
If this method succeeds, it returns
If the MFT has a fixed number of streams, the minimum and maximum values are the same.
It is not recommended to create an MFT that supports zero inputs or zero outputs. An MFT with no inputs or no outputs may not be compatible with the rest of the Media Foundation pipeline. You should create a Media Foundation sink or source for this purpose instead.
When an MFT is first created, it is not guaranteed to have the minimum number of streams. To find the actual number of streams, call
This method should not be called with
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetStreamLimits. See Creating Hybrid DMO/MFT Objects.
+Gets the current number of input and output streams on this Media Foundation transform (MFT).
+Receives the number of input streams.
Receives the number of output streams.
If this method succeeds, it returns
The number of streams includes unselected streams — that is, streams with no media type or a
This method should not be called with
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetStreamCount. See Creating Hybrid DMO/MFT Objects.
+Gets the stream identifiers for the input and output streams on this Media Foundation transform (MFT).
+Number of elements in the pdwInputIDs array.
Pointer to an array allocated by the caller. The method fills the array with the input stream identifiers. The array size must be at least equal to the number of input streams. To get the number of input streams, call
If the caller passes an array that is larger than the number of input streams, the MFT must not write values into the extra array entries.
Number of elements in the pdwOutputIDs array.
Pointer to an array allocated by the caller. The method fills the array with the output stream identifiers. The array size must be at least equal to the number of output streams. To get the number of output streams, call GetStreamCount.
If the caller passes an array that is larger than the number of output streams, the MFT must not write values into the extra array entries.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. See Remarks. |
| One or both of the arrays is too small. |
?
Stream identifiers are necessary because some MFTs can add or remove streams, so the index of a stream may not be unique. Therefore,
This method can return E_NOTIMPL if both of the following conditions are true:
This method must be implemented if any of the following conditions is true:
All input stream identifiers must be unique within an MFT, and all output stream identifiers must be unique. However, an input stream and an output stream can share the same identifier.
If the client adds an input stream, the client assigns the identifier, so the MFT must allow arbitrary identifiers, as long as they are unique. If the MFT creates an output stream, the MFT assigns the identifier.
By convention, if an MFT has exactly one fixed input stream and one fixed output stream, it should assign the identifier 0 to both streams.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetStreamIDs. See Creating Hybrid DMO/MFT Objects.
+Gets the buffer requirements and other information for an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream identifier. |
?
It is valid to call this method before setting the media types.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetInputStreamInfo. See Creating Hybrid DMO/MFT Objects.
+Gets the buffer requirements and other information for an output stream on this Media Foundation transform (MFT).
+ Output stream identifier. To get the list of stream identifiers, call
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream number. |
?
It is valid to call this method before setting the media types.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetOutputStreamInfo. See Creating Hybrid DMO/MFT Objects.
+Gets the global attribute store for this Media Foundation transform (MFT).
+ Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT does not support attributes. |
?
Use the
Implementation of this method is optional unless the MFT needs to support a particular set of attributes. Exception: Hardware-based MFTs must implement this method. See Hardware MFTs.
+Gets the attribute store for an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT does not support input stream attributes. |
| Invalid stream identifier. |
?
Implementation of this method is optional unless the MFT needs to support a particular set of attributes.
To get the attribute store for the entire MFT, call
Gets the attribute store for an output stream on this Media Foundation transform (MFT).
+ Output stream identifier. To get the list of stream identifiers, call
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT does not support output stream attributes. |
| Invalid stream identifier. |
?
Implementation of this method is optional unless the MFT needs to support a particular set of attributes.
To get the attribute store for the entire MFT, call
Removes an input stream from this Media Foundation transform (MFT).
+Identifier of the input stream to remove.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The transform has a fixed number of input streams. |
| The stream is not removable, or the transform currently has the minimum number of input streams it can support. |
| Invalid stream identifier. |
| The transform has unprocessed input buffers for the specified stream. |
?
If the transform has a fixed number of input streams, the method returns E_NOTIMPL.
An MFT might support this method but not allow certain input streams to be removed. If an input stream can be removed, the
If the transform still has unprocessed input for that stream, the method might succeed or it might return
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTDeleteInputStream. See Creating Hybrid DMO/MFT Objects.
+Adds one or more new input streams to this Media Foundation transform (MFT).
+Number of streams to add.
Array of stream identifiers. The new stream identifiers must not match any existing input streams.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| The MFT has a fixed number of input streams. |
?
If the new streams exceed the maximum number of input streams for this transform, the method returns E_INVALIDARG. To find the maximum number of input streams, call
If any of the new stream identifiers conflicts with an existing input stream, the method returns E_INVALIDARG.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTAddInputStreams. See Creating Hybrid DMO/MFT Objects.
+Gets an available media type for an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Index of the media type to retrieve. Media types are indexed from zero and returned in approximate order of preference.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT does not have a list of available input types. |
| Invalid stream identifier. |
| The dwTypeIndex parameter is out of range. |
| You must set the output types before setting the input types. |
?
The MFT defines a list of available media types for each input stream and orders them by preference. This method enumerates the available media types for an input stream. To enumerate the available types, increment dwTypeIndex until the method returns
Setting the media type on one stream might change the available types for another stream, or change the preference order. However, an MFT is not required to update the list of available types dynamically. The only guaranteed way to test whether you can set a particular input type is to call
In some cases, an MFT cannot return a list of input types until one or more output types are set. If so, the method returns
An MFT is not required to implement this method. However, most MFTs should implement this method, unless the supported types are simple and can be discovered through the
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetInputAvailableType. See Creating Hybrid DMO/MFT Objects.
For encoders, after the output type is set, GetInputAvailableType must return a list of input types that are compatible with the current output type. This means that all types returned by GetInputAvailableType after the output type is set must be valid types for SetInputType.
Encoders should reject input types if the attributes of the input media type and output media type do not match, such as resolution setting with
Gets an available media type for an output stream on this Media Foundation transform (MFT).
+ Output stream identifier. To get the list of stream identifiers, call
Index of the media type to retrieve. Media types are indexed from zero and returned in approximate order of preference.
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT does not have a list of available output types. |
| Invalid stream identifier. |
| The dwTypeIndex parameter is out of range. |
| You must set the input types before setting the output types. |
?
The MFT defines a list of available media types for each output stream and orders them by preference. This method enumerates the available media types for an output stream. To enumerate the available types, increment dwTypeIndex until the method returns MF_E_NO_MORE_TYPES.
Setting the media type on one stream can change the available types for another stream (or change the preference order). However, an MFT is not required to update the list of available types dynamically. The only guaranteed way to test whether you can set a particular input type is to call
In some cases, an MFT cannot return a list of output types until one or more input types are set. If so, the method returns
An MFT is not required to implement this method. However, most MFTs should implement this method, unless the supported types are simple and can be discovered through the
This method can return a partial media type. A partial media type contains an incomplete description of a format, and is used to provide a hint to the caller. For example, a partial type might include just the major type and subtype GUIDs. However, after the client sets the input types on the MFT, the MFT should generally return at least one complete output type, which can be used without further modification. For more information, see Complete and Partial Media Types.
Some MFTs cannot provide an accurate list of output types until the MFT receives the first input sample. For example, the MFT might need to read the first packet header to deduce the format. An MFT should handle this situation as follows:
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetOutputAvailableType. See Creating Hybrid DMO/MFT Objects.
+Sets, tests, or clears the media type for an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Pointer to the
Zero or more flags from the _MFT_SET_TYPE_FLAGS enumeration.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The MFT cannot use the proposed media type. |
| Invalid stream identifier. |
| The proposed type is not valid. This error code indicates that the media type itself is not configured correctly; for example, it might contain mutually contradictory attributes. |
| The MFT cannot switch types while processing data. Try draining or flushing the MFT. |
| You must set the output types before setting the input types. |
| The MFT could not find a suitable DirectX Video Acceleration (DXVA) configuration. |
?
This method can be used to set, test without setting, or clear the media type:
Setting the media type on one stream may change the acceptable types on another stream.
An MFT may require the caller to set one or more output types before setting the input type. If so, the method returns
If the MFT supports DirectX Video Acceleration (DXVA) but is unable to find a suitable DXVA configuration (for example, if the graphics driver does not have the right capabilities), the method should return
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTSetInputType. See Creating Hybrid DMO/MFT Objects.
+Sets, tests, or clears the media type for an output stream on this Media Foundation transform (MFT).
+ Output stream identifier. To get the list of stream identifiers, call
Pointer to the
Zero or more flags from the _MFT_SET_TYPE_FLAGS enumeration.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The transform cannot use the proposed media type. |
| Invalid stream identifier. |
| The proposed type is not valid. This error code indicates that the media type itself is not configured correctly; for example, it might contain mutually contradictory flags. |
| The MFT cannot switch types while processing data. Try draining or flushing the MFT. |
| You must set the input types before setting the output types. |
| The MFT could not find a suitable DirectX Video Acceleration (DXVA) configuration. |
?
This method can be used to set, test without setting, or clear the media type:
Setting the media type on one stream may change the acceptable types on another stream.
An MFT may require the caller to set one or more input types before setting the output type. If so, the method returns
If the MFT supports DirectX Video Acceleration (DXVA) but is unable to find a suitable DXVA configuration (for example, if the graphics driver does not have the right capabilities), the method should return
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTSetOutputType. See Creating Hybrid DMO/MFT Objects.
+Gets the current media type for an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream identifier. |
| The input media type has not been set. |
?
If the specified input stream does not yet have a media type, the method returns
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetInputCurrentType. See Creating Hybrid DMO/MFT Objects.
+Gets the current media type for an output stream on this Media Foundation transform (MFT).
+ Output stream identifier. To get the list of stream identifiers, call
Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream identifier. |
| The output media type has not been set. |
?
If the specified output stream does not yet have a media type, the method returns
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetOutputCurrentType. See Creating Hybrid DMO/MFT Objects.
+Queries whether an input stream on this Media Foundation transform (MFT) can accept more data.
+ Input stream identifier. To get the list of stream identifiers, call
Receives a member of the _MFT_INPUT_STATUS_FLAGS enumeration, or zero. If the value is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream identifier. |
| The media type is not set on one or more streams. |
?
If the method returns the
Use this method to test whether the input stream is ready to accept more data, without incurring the overhead of allocating a new sample and calling ProcessInput.
After the client has set valid media types on all of the streams, the MFT should always be in one of two states: Able to accept more input, or able to produce more output (or both).
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetInputStatus. See Creating Hybrid DMO/MFT Objects.
+Queries whether the Media Foundation transform (MFT) is ready to produce output data.
+ Receives a member of the _MFT_OUTPUT_STATUS_FLAGS enumeration, or zero. If the value is
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. |
| The media type is not set on one or more streams. |
?
If the method returns the
MFTs are not required to implement this method. If the method returns E_NOTIMPL, you must call ProcessOutput to determine whether the transform has output data.
If the MFT has more than one output stream, but it does not produce samples at the same time for each stream, it can set the
After the client has set valid media types on all of the streams, the MFT should always be in one of two states: Able to accept more input, or able to produce more output.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetOutputStatus. See Creating Hybrid DMO/MFT Objects.
+Sets the range of time stamps the client needs for output.
+Specifies the earliest time stamp. The Media Foundation transform (MFT) will accept input until it can produce an output sample that begins at this time; or until it can produce a sample that ends at this time or later. If there is no lower bound, use the value MFT_OUTPUT_BOUND_LOWER_UNBOUNDED.
Specifies the latest time stamp. The MFT will not produce an output sample with time stamps later than this time. If there is no upper bound, use the value MFT_OUTPUT_BOUND_UPPER_UNBOUNDED.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. |
| The media type is not set on one or more streams. |
?
This method can be used to optimize preroll, especially in formats that have gaps between time stamps, or formats where the data must start on a sync point, such as MPEG-2. Calling this method is optional, and implementation of this method by an MFT is optional. If the MFT does not implement the method, the return value is E_NOTIMPL.
If an MFT implements this method, it must limit its output data to the range of times specified by hnsLowerBound and hnsUpperBound. The MFT discards any input data that is not needed to produce output within this range. If the sample boundaries do not exactly match the range, the MFT should split the output samples, if possible. Otherwise, the output samples can overlap the range.
For example, suppose the output range is 100 to 150 milliseconds (ms), and the output format is video with each frame lasting 33 ms. A sample with a time stamp of 67 ms overlaps the range (67 + 33 = 100) and is produced as output. A sample with a time stamp of 66 ms is discarded (66 + 33 = 99). Similarly, a sample with a time stamp of 150 ms is produced as output, but a sample with a time stamp of 151 ms is discarded.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTSetOutputBounds. See Creating Hybrid DMO/MFT Objects.
+Sends an event to an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Not implemented. |
| Invalid stream number. |
| The media type is not set on one or more streams. |
| The pipeline should not propagate the event. |
?
An MFT can handle sending the event downstream, or it can let the pipeline do this, as indicated by the return value:
To send the event downstream, the MFT adds the event to the collection object that is provided by the client in the pEvents member of the
Events must be serialized with the samples that come before and after them. Attach the event to the output sample that follows the event. (The pipeline will process the event first, and then the sample.) If an MFT holds back one or more samples between calls to
If an MFT does not hold back samples and does not need to examine any events, it can return E_NOTIMPL.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTProcessEvent. See Creating Hybrid DMO/MFT Objects.
+Sends a message to the Media Foundation transform (MFT).
+ The message to send, specified as a member of the
Message parameter. The meaning of this parameter depends on the message type.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream number. Applies to the |
| The media type is not set on one or more streams. |
?
Before calling this method, set the media types on all input and output streams.
The MFT might ignore certain message types. If so, the method returns
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTProcessMessage. See Creating Hybrid DMO/MFT Objects.
+Delivers data to an input stream on this Media Foundation transform (MFT).
+ Input stream identifier. To get the list of stream identifiers, call
Pointer to the
Reserved. Must be zero.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid argument. |
| Invalid stream identifier. |
| The input sample requires a valid sample duration. To set the duration, call Some MFTs require that input samples have valid durations. Some MFTs do not require sample durations. |
| The input sample requires a time stamp. To set the time stamp, call Some MFTs require that input samples have valid time stamps. Some MFTs do not require time stamps. |
| The transform cannot process more input at this time. |
| The media type is not set on one or more streams. |
| The media type is not supported for DirectX Video Acceleration (DXVA). A DXVA-enabled decoder might return this error code. |
?
Note??If you are converting a DirectX Media Object (DMO) to an MFT, be aware that S_FALSE is not a valid return code for
In most cases, if the method succeeds, the MFT stores the sample and holds a reference count on the
If the MFT already has enough input data to produce an output sample, it does not accept new input data, and ProcessInput returns
An exception to this rule is the
An MFT can process the input data in the ProcessInput method. However, most MFTs wait until the client calls ProcessOutput.
After the client has set valid media types on all of the streams, the MFT should always be in one of two states: Able to accept more input, or able to produce more output. It should never be in both states or neither state. An MFT should only accept as much input as it needs to generate at least one output sample, at which point ProcessInput returns
If an MFT encounters a non-fatal error in the input data, it can simply drop the data and attempt to recover when it gets more input data. To request more input data, the MFT returns
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTProcessInput. See Creating Hybrid DMO/MFT Objects.
+Generates output from the current input data.
+Bitwise OR of zero or more flags from the _MFT_PROCESS_OUTPUT_FLAGS enumeration.
Number of elements in the pOutputSamples array. The value must be at least 1.
Pointer to an array of
Receives a bitwise OR of zero or more flags from the _MFT_PROCESS_OUTPUT_STATUS enumeration.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The ProcessOutput method was called on an asynchronous MFT that was not expecting this method call. |
| Invalid stream identifier in the dwStreamID member of one or more |
| The transform cannot produce output data until it receives more input data. |
| The format has changed on an output stream, or there is a new preferred format, or there is a new output stream. |
| You must set the media type on one or more streams of the MFT. |
?
Note??If you are converting a DirectX Media Object (DMO) to an MFT, be aware that S_FALSE is not a valid return code for
The size of the pOutputSamples array must be equal to or greater than the number of selected output streams. The number of selected output streams equals the total number of output streams minus the number of deselected streams. A stream is deselected if it has the
This method generates output samples and can also generate events. If the method succeeds, at least one of the following conditions is true:
If MFT_UNIQUE_METHOD_NAMES is defined before including Mftransform.h, this method is renamed MFTProcessOutput. See Creating Hybrid DMO/MFT Objects.
+Gets the global attribute store for this Media Foundation transform (MFT).
+ Use the
Implementation of this method is optional unless the MFT needs to support a particular set of attributes. Exception: Hardware-based MFTs must implement this method. See Hardware MFTs.
+Queries whether the Media Foundation transform (MFT) is ready to produce output data.
+ If the method returns the
MFTs are not required to implement this method. If the method returns E_NOTIMPL, you must call ProcessOutput to determine whether the transform has output data.
If the MFT has more than one output stream, but it does not produce samples at the same time for each stream, it can set the
After the client has set valid media types on all of the streams, the MFT should always be in one of two states: Able to accept more input, or able to produce more output.
If MFT_UNIQUE_METHOD_NAMES is defined before including mftransform.h, this method is renamed MFTGetOutputStatus. See Creating Hybrid DMO/MFT Objects.
+Implemented by components that provide input trust authorities (ITAs). This interface is used to get the ITA for each of the component's streams.
+
Retrieves the input trust authority (ITA) for a specified stream.
+The stream identifier for which the ITA is being requested.
The interface identifier (IID) of the interface being requested. Currently the only supported value is IID_IMFInputTrustAuthority.
Receives a reference to the ITA's
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The ITA does not expose the requested interface. |
?
Implemented by components that provide output trust authorities (OTAs). Any Media Foundation transform (MFT) or media sink that is designed to work within the protected media path (PMP) and also sends protected content outside the Media Foundation pipeline must implement this interface.
The policy engine uses this interface to negotiate what type of content protection should be applied to the content. Applications do not use this interface directly.
+If an MFT supports
Gets the number of output trust authorities (OTAs) provided by this trusted output. Each OTA reports a single action.
+Receives the number of OTAs.
If this method succeeds, it returns
Gets an output trust authority (OTA), specified by index.
+ Zero-based index of the OTA to retrieve. To get the number of OTAs provided by this object, call
Receives a reference to the
If this method succeeds, it returns
Queries whether this output is a policy sink, meaning it handles the rights and restrictions required by the input trust authority (ITA).
+Receives a Boolean value. If TRUE, this object is a policy sink. If
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
A trusted output is generally considered to be a policy sink if it does not pass the media content that it receives anywhere else; or, if it does pass the media content elsewhere, either it protects the content using some proprietary method such as encryption, or it sufficiently devalues the content so as not to require protection.
+Gets the number of output trust authorities (OTAs) provided by this trusted output. Each OTA reports a single action.
+
Queries whether this output is a policy sink, meaning it handles the rights and restrictions required by the input trust authority (ITA).
+A trusted output is generally considered to be a policy sink if it does not pass the media content that it receives anywhere else; or, if it does pass the media content elsewhere, either it protects the content using some proprietary method such as encryption, or it sufficiently devalues the content so as not to require protection.
+Enables a plug-in component for the enhanced video renderer (EVR) to work with protected media.
To work in the protected media path (PMP), a custom EVR mixer or presenter must implement this interface. The EVR obtains a reference to this interface by calling QueryInterface on the plug-in component.
This interface is required only if the plug-in is a trusted component, designed to work in the PMP. It is not required for playing clear content in an unprotected process.
+Queries whether the plug-in has any transient vulnerabilities at this time.
+Receives a Boolean value. If TRUE, the plug-in has no transient vulnerabilities at the moment and can receive protected content. If
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method provides a way for the plug-in to report temporary conditions that would cause the input trust authority (ITA) to distrust the plug-in. For example, if an EVR presenter is in windowed mode, it is vulnerable to GDI screen captures.
To disable screen capture in Direct3D, the plug-in must do the following:
Create the Direct3D device in full-screen exclusive mode.
Specify the
In addition, the graphics adapter must support the Windows Vista Display Driver Model (WDDM) and the Direct3D extensions for Windows Vista (sometimes called D3D9Ex or D3D9L).
If these conditions are met, the presenter can return TRUE in the pYes parameter. Otherwise, it should return
The EVR calls this method whenever the device changes. If the plug-in returns
This method should be used only to report transient conditions. A plug-in that is never in a trusted state should not implement the
Queries whether the plug-in can limit the effective video resolution.
+Receives a Boolean value. If TRUE, the plug-in can limit the effective video resolution. Otherwise, the plug-in cannot limit the video resolution. If the method fails, the EVR treats the value as
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Constriction is a protection mechanism that limits the effective resolution of the video frame to a specified maximum number of pixels.
Video constriction can be implemented by either the mixer or the presenter.
If the method returns TRUE, the EVR might call
Limits the effective video resolution.
+Maximum number of source pixels that may appear in the final video image, in thousands of pixels. If the value is zero, the video is disabled. If the value is MAXDWORD (0xFFFFFFFF), video constriction is removed and the video may be rendered at full resolution.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method limits the effective resolution of the video image. The actual resolution on the target device might be higher, due to stretching the image.
The EVR might call this method at any time if the
Enables or disables the ability of the plug-in to export the video image.
+Boolean value. Specify TRUE to disable image exporting, or
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
An EVR plug-in might expose a way for the application to get a copy of the video frames. For example, the standard EVR presenter implements
If the plug-in supports image exporting, this method enables or disables it. Before this method has been called for the first time, the EVR assumes that the mechanism is enabled.
If the plug-in does not support image exporting, this method should return
While image exporting is disabled, any associated export method, such as GetCurrentImage, should return
Limits the effective video resolution.
+This method limits the effective resolution of the video image. The actual resolution on the target device might be higher, due to stretching the image.
The EVR might call this method at any time if the
Returns the device identifier supported by a video renderer component. This interface is implemented by mixers and presenters for the enhanced video renderer (EVR). If you replace either of these components, the mixer and presenter must report the same device identifier.
+
Returns the identifier of the video device supported by an EVR mixer or presenter.
+Receives the device identifier. Generally, the value is IID_IDirect3DDevice9.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
If a mixer or presenter uses Direct3D 9, it must return the value IID_IDirect3DDevice9 in pDeviceID. The EVR's default mixer and presenter both return this value. If you write a custom mixer or presenter, it can return some other value. However, the mixer and presenter must use matching device identifiers.
+
Returns the identifier of the video device supported by an EVR mixer or presenter.
+If a mixer or presenter uses Direct3D 9, it must return the value IID_IDirect3DDevice9 in pDeviceID. The EVR's default mixer and presenter both return this value. If you write a custom mixer or presenter, it can return some other value. However, the mixer and presenter must use matching device identifiers.
+
Sets the source and destination rectangles for the video.
+The source rectangle defines which portion of the video is displayed. It is specified in normalized coordinates. For more information, see
The destination rectangle defines a rectangle within the clipping window where the video appears. It is specified in pixels, relative to the client area of the window. To fill the entire window, set the destination rectangle to {0, 0, width, height}, where width and height are dimensions of the window client area. The default destination rectangle is {0, 0, 0, 0}.
To update just one of these rectangles, set the other parameter to
Before setting the destination rectangle (prcDest), you must set the video window by calling
Gets the size and aspect ratio of the video, prior to any stretching by the video renderer.
+Receives the size of the native video rectangle. This parameter can be
Receives the aspect ratio of the video. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| At least one of the parameters must be non- |
| The video renderer has been shut down. |
?
If no media types have been set on any video streams, the method succeeds but all parameters are set to zero.
You can set pszVideo or pszARVideo to
Gets the range of sizes that the enhanced video renderer (EVR) can display without significantly degrading performance or image quality.
+Receives the minimum ideal size. This parameter can be
Receives the maximum ideal size. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| At least one parameter must be non- |
| The video renderer has been shut down. |
?
You can set pszMin or pszMax to
Sets the source and destination rectangles for the video.
+Pointer to an
Specifies the destination rectangle. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| At least one parameter must be non- |
| The video renderer has been shut down. |
?
The source rectangle defines which portion of the video is displayed. It is specified in normalized coordinates. For more information, see
The destination rectangle defines a rectangle within the clipping window where the video appears. It is specified in pixels, relative to the client area of the window. To fill the entire window, set the destination rectangle to {0, 0, width, height}, where width and height are dimensions of the window client area. The default destination rectangle is {0, 0, 0, 0}.
To update just one of these rectangles, set the other parameter to
Before setting the destination rectangle (prcDest), you must set the video window by calling
Gets the source and destination rectangles for the video.
+Pointer to an
Receives the current destination rectangle.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| One or more required parameters are |
| The video renderer has been shut down. |
?
Specifies how the enhanced video renderer (EVR) handles the aspect ratio of the source video.
+Bitwise OR of one or more flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid flags. |
| The video renderer has been shut down. |
?
Queries how the enhanced video renderer (EVR) handles the aspect ratio of the source video.
+Receives a bitwise OR of one or more flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
Sets the clipping window for the video.
+Handle to the window where the enhanced video renderer (EVR) will draw the video.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| hwndVideo does not specify a valid window. |
| DWM thumbnails were not enabled/disabled. |
?
The EVR will not display any video unless the application calls this method with a valid window handle.
For protected content, this method might disable Desktop Window Manager (DWM) thumbnail previews for the window. If thumbnail previews cannot be disabled, the method returns S_FALSE.
+Gets the clipping window for the video.
+Receives a handle to the window where the enhanced video renderer (EVR) will draw the video.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
There is no default clipping window. The application must set the clipping window.
+
Repaints the current video frame. Call this method whenever the application receives a WM_PAINT message.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The EVR cannot repaint the frame at this time. This error can occur while the EVR is switching between full-screen and windowed mode. The caller can safely ignore this error. |
| The video renderer has been shut down. |
?
Gets a copy of the current image being displayed by the video renderer.
+Pointer to a sizeof(
before calling the method.
Receives a reference to a buffer that contains a packed Windows device-independent bitmap (DIB). The caller must free the memory for the bitmap by calling CoTaskMemFree.
Receives the size of the buffer returned in pDib, in bytes.
Receives the time stamp of the captured image.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The content is protected and the license does not permit capturing the image. |
| The video renderer has been shut down. |
?
This method can be called at any time. However, calling the method too frequently degrades the video playback performance.
This method retrieves a copy of the final composited image, which includes any substreams, alpha-blended bitmap, aspect ratio correction, background color, and so forth.
In windowed mode, the bitmap is the size of the destination rectangle specified in
Sets the border color for the video.
+Specifies the border color as a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
By default, if the video window straddles two monitors, the enhanced video renderer (EVR) clips the video to one monitor and draws the border color on the remaining portion of the window. (To change the clipping behavior, call
The border color is not used for letterboxing. To change the letterbox color, call
Gets the border color for the video.
+Receives the border color, as a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
The border color is used for areas where the enhanced video renderer (EVR) does not draw any video.
The border color is not used for letterboxing. To get the letterbox color, call
Sets various preferences related to video rendering.
+Bitwise OR of zero or more flags from the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid flags. |
| The video renderer has been shut down. |
?
Controls how the Enhanced Video Renderer (EVR) displays video.
The EVR presenter implements this interface. To get a reference to the interface, call
If you implement a custom presenter for the EVR, the presenter can optionally expose this interface as a service.
+[This API is not supported and may be altered or unavailable in the future. ]
Sets or unsets full-screen rendering mode.
To implement full-screen playback, an application should simply resize the video window to cover the entire area of the monitor. Also set the window to be a topmost window, so that the application receives all mouse-click messages. For more information about topmost windows, see the documentation for the SetWindowPos function.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
The default EVR presenter implements full-screen mode using Direct3D exclusive mode.
If you use this method to switch to full-screen mode, set the application window to be a topmost window and resize the window to cover the entire monitor. This ensures that the application window receives all mouse-click messages. Also set the keyboard focus to the application window. When you switch out of full-screen mode, restore the window's original size and position.
By default, the cursor is still visible in full-screen mode. To hide the cursor, call ShowCursor.
The transition to and from full-screen mode occurs asynchronously. To get the current mode, call
Queries whether the enhanced video renderer (EVR) is currently in full-screen mode.
+Receives a Boolean value. If TRUE, the EVR is in full-screen mode. If
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The EVR is currently switching between full-screen and windowed mode. |
?
Queries how the enhanced video renderer (EVR) handles the aspect ratio of the source video.
+Gets or sets the clipping window for the video.
+There is no default clipping window. The application must set the clipping window.
+Gets or sets the border color for the video.
+The border color is used for areas where the enhanced video renderer (EVR) does not draw any video.
The border color is not used for letterboxing. To get the letterbox color, call
Controls how the Enhanced Video Renderer (EVR) displays video.
The EVR presenter implements this interface. To get a reference to the interface, call
If you implement a custom presenter for the EVR, the presenter can optionally expose this interface as a service.
+Queries whether the enhanced video renderer (EVR) is currently in full-screen mode.
+Represents a description of a video format.
+If the major type of a media type is
Applications should avoid using this interface except when a method or function requires an
Represents a description of a video format.
+If the major type of a media type is
Applications should avoid using this interface except when a method or function requires an
[This API is not supported and may be altered or unavailable in the future. Instead, applications should set the
Retrieves an alternative representation of the media type.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is equivalent to
Instead of calling this method, applications should set the
Represents a description of a video format.
+If the major type of a media type is
Applications should avoid using this interface except when a method or function requires an
Alpha-blends a static bitmap image with the video displayed by the Enhanced Video Renderer (EVR).
The EVR mixer implements this interface. To get a reference to the interface, call
If you implement a custom mixer for the EVR, the mixer can optionally expose this interface as a service.
+
Sets a bitmap image for the enhanced video renderer (EVR) to alpha-blend with the video.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The blending parameters defined in the pBmpParms structure are not valid. |
?
The application can provide the image either as a GDI bitmap or as a Direct3D surface. The EVR mixer blends the image with the next video frame and all subsequent frames, until the image is changed or removed. The image can contain embedded per-pixel alpha information so that transparent regions can be defined. Transparent areas can also be identified using a color key value.
If you use a Direct3D surface, the surface format must be 32-bit RGB, either
There is no defined limit to how frequently you can pass images to the video renderer. However, changing the image several times per second can impact the performance and smoothness of the video.
+
Removes the current bitmap and releases any resources associated with it.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No bitmap is currently set. |
?
Updates the current alpha-blending settings, including the source and destination rectangles, the color key, and other information. You can update some or all of the blending parameters.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The blending parameters defined in the pBmpParms structure are not valid. |
| No bitmap is currently set. You must call |
?
The video must be playing for the changes to take effect.
+
Retrieves the current settings that the enhanced video renderer (EVR) uses to alpha-blend the bitmap with the video.
+Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| No bitmap is currently set. You must call |
?
This method returns the current values of all the blending parameters, not just those that the application specified. Ignore the dwFlags member of the structure.
+
Sets a bitmap image for the enhanced video renderer (EVR) to alpha-blend with the video.
+The application can provide the image either as a GDI bitmap or as a Direct3D surface. The EVR mixer blends the image with the next video frame and all subsequent frames, until the image is changed or removed. The image can contain embedded per-pixel alpha information so that transparent regions can be defined. Transparent areas can also be identified using a color key value.
If you use a Direct3D surface, the surface format must be 32-bit RGB, either
There is no defined limit to how frequently you can pass images to the video renderer. However, changing the image several times per second can impact the performance and smoothness of the video.
+
Retrieves the current settings that the enhanced video renderer (EVR) uses to alpha-blend the bitmap with the video.
+This method returns the current values of all the blending parameters, not just those that the application specified. Ignore the dwFlags member of the structure.
+Controls how the Enhanced Video Renderer (EVR) mixes video substreams. Applications can use this interface to control video mixing during playback.
The EVR mixer implements this interface. To get a reference to the interface, call
If you implement a custom mixer for the EVR, the mixer can optionally expose this interface as a service.
+
Sets the z-order of a video stream.
+Identifier of the stream. For the EVR media sink, the stream identifier is defined when the
Z-order value. The z-order of the reference stream must be zero. The maximum z-order value is the number of streams minus one.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The value of dwZ is larger than the maximum z-order value. |
| Invalid z-order for this stream. For the reference stream, dwZ must be zero. For all other streams, dwZ must be greater than zero. |
| Invalid stream identifier. |
?
The EVR draws the video streams in the order of their z-order values, starting with zero. The reference stream must be first in the z-order, and the remaining streams can be in any order.
+
Retrieves the z-order of a video stream.
+Identifier of the stream. For the EVR media sink, the stream identifier is defined when the
Receives the z-order value.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream identifier. |
?
Sets the position of a video stream within the composition rectangle.
+Identifier of the stream. For the EVR media sink, the stream identifier is defined when the
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The coordinates of the bounding rectangle given in pnrcOutput are not valid. |
| Invalid stream identifier. |
?
The mixer draws each video stream inside a bounding rectangle that is specified relative to the final video image. This bounding rectangle is given in normalized coordinates. For more information, see
The coordinates of the bounding rectangle must fall within the range [0.0, 1.0]. Also, the X and Y coordinates of the upper-left corner cannot exceed the X and Y coordinates of the lower-right corner. In other words, the bounding rectangle must fit entirely within the composition rectangle and cannot be flipped vertically or horizontally.
The following diagram shows how the EVR mixes substreams.
The output rectangle for the stream is specified by calling SetStreamOutputRect. The source rectangle is specified by calling
Retrieves the position of a video stream within the composition rectangle.
+The identifier of the stream. For the EVR media sink, the stream identifier is defined when the
Pointer to an
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid stream identifier. |
?
Controls preferences for video deinterlacing.
The default video mixer for the Enhanced Video Renderer (EVR) implements this interface.
To get a reference to the interface, call
Sets the preferences for video deinterlacing.
+Bitwise OR of zero or more flags from the
If this method succeeds, it returns
Gets the current preferences for video deinterlacing.
+Receives a bitwise OR of zero or more flags from the
If this method succeeds, it returns
Gets or sets the current preferences for video deinterlacing.
+Maps a position on an input video stream to the corresponding position on an output video stream.
To obtain a reference to this interface, call
Maps output image coordinates to input image coordinates. This method provides the reverse transformation for components that map coordinates on the input image to different coordinates on the output image.
+X-coordinate of the output image, normalized to the range [0...1].
Y-coordinate of the output image, normalized to the range [0...1].
Output stream index for the coordinate mapping.
Input stream index for the coordinate mapping.
Receives the mapped x-coordinate of the input image, normalized to the range [0...1].
Receives the mapped y-coordinate of the input image, normalized to the range [0...1].
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The video renderer has been shut down. |
?
In the following diagram, R(dest) is the destination rectangle for the video. You can obtain this rectangle by calling
The position of P relative to R(dest) in normalized coordinates is calculated as follows:
float xn = float(x + 0.5) / widthDest;
+ float yn = float(y + 0.5) / heightDest;
+
where widthDest and heightDest are the width and height of R(dest) in pixels.
To calculate the position of P relative to R1, call MapOutputCoordinateToInputStream as follows:
float x1 = 0, y1 = 0;
+ hr = pMap->MapOutputCoordinateToInputStream(xn, yn, 0, dwInputStreamIndex, &x1, &y1);
The values returned in x1 and y1 are normalized to the range [0...1]. To convert back to pixel coordinates, scale these values by the size of R1:
int scaledx = int(floor(x1 * widthR1));
+ int scaledy = int(floor(y1 * heightR1));
Note that x1 and y1 might fall outside the range [0...1] if P lies outside of R1.
+Represents a video presenter. A video presenter is an object that receives video frames, typically from a video mixer, and presents them in some way, typically by rendering them to the display. The enhanced video renderer (EVR) provides a default video presenter, and applications can implement custom presenters.
The video presenter receives video frames as soon as they are available from upstream. The video presenter is responsible for presenting frames at the correct time and for synchronizing with the presentation clock.
+
Sets one or more color adjustment (ProcAmp) settings.
+Before calling this method, set the video processor mode. To select a video processor mode, call
To find out which ProcAmp settings the driver supports, call
Retrieves the video processor modes that the video driver supports.
+Receives the number of video processor modes.
Receives a reference to an array of GUIDs. The number of elements in the array is returned in the lpdwNumProcessingModes parameter. The caller must release the memory for the array by calling CoTaskMemFree. This parameter can be
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media type for the reference stream is not set. |
?
Video processor modes are identified by
Before calling this method, you must set the media type for the reference stream. Which modes are available might depend on the media type of the reference stream.
+
Retrieves the capabilities of a video processor mode.
+Pointer to a
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media type for the reference stream is not set. |
?
Before calling this method, you must set the media type for the reference stream.
+
Retrieves the application's preferred video processor mode. To set the preferred mode, call
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media type for the reference stream is not set. |
| The application has not specified a preferred video processor mode. |
?
Sets the preferred video processor mode. The EVR will attempt to use this mode when playback starts.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The requested mode is not valid. |
| The mixer has already allocated Direct3D resources and cannot change modes. |
| The media type for the reference stream is not set. |
?
Before calling this method, set the media type for the reference stream as follows:
DirectShow EVR filter: Connect pin 0.
EVR media sink: Set the media type for stream 0.
Mixer (standalone): Set the media type for input stream 0 and set the media type for the output stream.
Which modes are available might depend on the reference stream's media type.
Call this method before video playback begins.
+
Retrieves the range of values for a color adjustment (ProcAmp) setting.
+The ProcAmp setting to query. For a list of possible values, see ProcAmp Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid value for dwProperty. |
| No video processor mode has been set. |
| The media type for the reference stream is not set. |
?
This method returns the range of values that the current video processor mode supports for the specified ProcAmp setting.
This method fails if the video processor mode has not been set on the mixer. To select a video processor mode, call
To find out which ProcAmp settings the driver supports, call
Retrieves the current settings for one or more color adjustment (ProcAmp) settings.
+Bitwise OR of one or more flags, specifying which operations to query. For a list of flags, see ProcAmp Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The media type for the reference stream is not set. |
?
Before calling this method, you must set the media type for the reference stream.
Until the mixer's video processor mode is set, the returned values are all zero. After the processor mode is set, the returned values reflect the current mode. To select a video processor mode, call
To find out which ProcAmp settings the driver supports, call
Sets one or more color adjustment (ProcAmp) settings.
+Bitwise OR of one or more flags, specifying which ProcAmp values to set. For a list of flags, see ProcAmp Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The dwFlags parameter is invalid, or one or more values in pValues is not within the correct range. |
| The media type for the reference stream is not set. |
?
Before calling this method, set the video processor mode. To select a video processor mode, call
To find out which ProcAmp settings the driver supports, call
Retrieves the range of values for a specified image filter setting.
+The image filtering parameter to query. For a list of possible values, see DXVA Image Filter Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The driver does not support this filter setting. |
| Invalid value for dwProperty. |
| No video processor mode has been set. |
| The specified operation is not available. |
| The media type for the reference stream is not set. |
?
This method returns the range of values that the current video processor mode supports for the specified image filter setting.
This method fails if the video processor mode has not been set on the mixer. To select a video processor mode, call
To find out which image filters the driver supports, call
Retrieves the current setting for an image filter.
+The filter setting to query. For a list of possible values, see DXVA Image Filter Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The value of dwProperty is invalid. |
| The media type for the reference stream is not set. |
?
Before calling this method, you must set the media type for the reference stream.
Until the mixer's video processor mode is set, the returned values are all zero. After the processor mode is set, the returned values reflect the current mode. To select a video processor mode, call
To find out which image filters the driver supports, call
Sets a parameter for an image filter.
+The image filtering parameter to set. For a list of possible values, see DXVA Image Filter Settings.
Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The value of dwProperty is invalid. |
| The media type for the reference stream is not set. |
?
Before calling this method, set the video processor mode. To select a video processor mode, call
To find out which image filters the driver supports, call
Retrieves the background color for the composition rectangle. The background color is used for letterboxing the video image.
+Pointer to a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Sets the background color for the composition rectangle. The background color is used for letterboxing the video image.
+Background color, specified as a
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Retrieves the application's preferred video processor mode. To set the preferred mode, call
Retrieves the background color for the composition rectangle. The background color is used for letterboxing the video image.
+Sets a new mixer or presenter for the Enhanced Video Renderer (EVR).
Both the EVR media sink and the DirectShow EVR filter implement this interface. To get a reference to the interface, call QueryInterface on the media sink or the filter. Do not use
The EVR activation object returned by the
Sets a new mixer or presenter for the enhanced video renderer (EVR).
+Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Either the mixer or the presenter is invalid. |
| The mixer and presenter cannot be replaced in the current state. (EVR media sink.) |
| The video renderer has been shut down. |
| One or more input pins are connected. (DirectShow EVR filter.) |
?
Call this method directly after creating the EVR, before you do any of the following:
Call
Call
Connect any pins on the EVR filter, or set any media types on EVR media sink.
The EVR filter returns VFW_E_WRONG_STATE if any of the filter's pins are connected. The EVR media sink returns
The device identifiers for the mixer and the presenter must match. The
If the video renderer is in the protected media path (PMP), the mixer and presenter objects must be certified safe components and pass any trust authority verification that is being enforced. Otherwise, this method will fail.
+Allocates video samples for a video media sink.
The stream sinks on the enhanced video renderer (EVR) expose this interface as a service. To obtain a reference to the interface, call
Specifies the Direct3D device manager for the video media sink to use.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
The media sink uses the Direct3D device manager to obtain a reference to the Direct3D device, which it uses to allocate Direct3D surfaces. The device manager enables multiple objects in the pipeline (such as a video renderer and a video decoder) to share the same Direct3D device.
+
Releases all of the video samples that have been allocated.
+The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Specifies the number of samples to allocate and the media type for the samples.
+Number of samples to allocate.
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| Invalid media type. |
?
Gets a video sample from the allocator.
+Receives a reference to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The allocator was not initialized. Call |
| No samples are available. |
?
Specifies the Direct3D device manager for the video media sink to use.
+The media sink uses the Direct3D device manager to obtain a reference to the Direct3D device, which it uses to allocate Direct3D surfaces. The device manager enables multiple objects in the pipeline (such as a video renderer and a video decoder) to share the same Direct3D device.
+Enables an application to track video samples allocated by the enhanced video renderer (EVR).
The stream sinks on the EVR expose this interface as a service. To get a reference to the interface, call the
Sets the callback object that receives notification whenever a video sample is returned to the allocator.
+A reference to the
If this method succeeds, it returns
To get a video sample from the allocator, call the
The allocator holds at most one callback reference. Calling this method again replaces the previous callback reference.
+Gets the number of video samples that are currently available for use.
+Receives the number of available samples.
If this method succeeds, it returns
To get a video sample from the allocator, call the
Sets the callback object that receives notification whenever a video sample is returned to the allocator.
+To get a video sample from the allocator, call the
The allocator holds at most one callback reference. Calling this method again replaces the previous callback reference.
+The callback for the
Called when a video sample is returned to the allocator.
+If this method succeeds, it returns
To get a video sample from the allocator, call the
This interface is not supported.
+Note: This method is not supported.
+If this method succeeds, it returns
Note: This method is not supported.
+If this method succeeds, it returns
Note: This method is not supported.
+
Registers the topology work queues with the Multimedia Class Scheduler Service (MMCSS).
+Each source node in the topology defines one branch of the topology. The branch includes every topology node that receives data from that node. An application can assign each branch of a topology its own work queue and then associate those work queues with MMCSS tasks.
To use this method, perform the following steps.
The BeginRegisterTopologyWorkQueuesWithMMCSS method is asynchronous. When the operation completes, the callback object's
To unregister the topology work queues from MMCSS, call
Registers the topology work queues with the Multimedia Class Scheduler Service (MMCSS).
+A reference to the
A reference to the
If this method succeeds, it returns
Each source node in the topology defines one branch of the topology. The branch includes every topology node that receives data from that node. An application can assign each branch of a topology its own work queue and then associate those work queues with MMCSS tasks.
To use this method, perform the following steps.
The BeginRegisterTopologyWorkQueuesWithMMCSS method is asynchronous. When the operation completes, the callback object's
To unregister the topology work queues from MMCSS, call
Completes an asynchronous request to register the topology work queues with the Multimedia Class Scheduler Service (MMCSS).
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this method when the
Unregisters the topology work queues from the Multimedia Class Scheduler Service (MMCSS).
+Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is asynchronous. When the operation completes, the callback object's
Completes an asynchronous request to unregister the topology work queues from the Multimedia Class Scheduler Service (MMCSS).
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this method when the
Retrieves the Multimedia Class Scheduler Service (MMCSS) class for a specified branch of the current topology.
+Identifies the work queue assigned to this topology branch. The application defines this value by setting the
Pointer to a buffer that receives the name of the MMCSS class. This parameter can be
On input, specifies the size of the pwszClass buffer, in characters. On output, receives the required size of the buffer, in characters. The size includes the terminating null character.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| There is no work queue with the specified identifier. |
| The pwszClass buffer is too small to receive the class name. |
?
Retrieves the Multimedia Class Scheduler Service (MMCSS) task identifier for a specified branch of the current topology.
+Identifies the work queue assigned to this topology branch. The application defines this value by setting the
Receives the task identifier.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Associates a platform work queue with a Multimedia Class Scheduler Service (MMCSS) task.
+ The platform work queue to register with MMCSS. See Work Queue Identifiers. To register all of the standard work queues to the same MMCSS task, set this parameter to
The name of the MMCSS task to be performed.
The unique task identifier. To obtain a new task identifier, set this value to zero.
A reference to the
A reference to the
If this method succeeds, it returns
This method is asynchronous. When the operation completes, the callback object's
To unregister the work queue from the MMCSS class, call
Completes an asynchronous request to associate a platform work queue with a Multimedia Class Scheduler Service (MMCSS) task.
+Pointer to the
The unique task identifier.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this function when the
To unregister the work queue from the MMCSS class, call
Unregisters a platform work queue from a Multimedia Class Scheduler Service (MMCSS) task.
+Platform work queue to register with MMCSS. See
Pointer to the
Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
This method is asynchronous. When the operation completes, the callback object's
Completes an asynchronous request to unregister a platform work queue from a Multimedia Class Scheduler Service (MMCSS) task.
+Pointer to the
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Call this method when the
Retrieves the Multimedia Class Scheduler Service (MMCSS) class for a specified platform work queue.
+Platform work queue to query. See
Pointer to a buffer that receives the name of the MMCSS class. This parameter can be
On input, specifies the size of the pwszClass buffer, in characters. On output, receives the required size of the buffer, in characters. The size includes the terminating null character.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
| The pwszClass buffer is too small to receive the class name. |
?
Retrieves the Multimedia Class Scheduler Service (MMCSS) task identifier for a specified platform work queue.
+Platform work queue to query. See
Receives the task identifier.
The method returns an
Return code | Description |
---|---|
| The method succeeded. |
?
Specifies the protection level for Analog Copy Protection (ACP).
+This enumeration is numerically equivalent to the COPP_ACP_Protection_Level enumeration used in Certified Output Protection Protocol. The
ACP is disabled.
ACP protection level 1.
ACP protection level 2.
ACP protection level 3.
The DXGKMDT_OPM_DPCP_PROTECTION_LEVEL enumeration indicates the protection levels for a protected output that supports DisplayPort Content Protection (DPCP).
+DPCP protects digital video signals from DisplayPort output connectors. For more information about DisplayPort, see the DisplayPort article.
+Indicates that DPCP does not protect the output's signal.
Indicates that DPCP protects the output's signal.
The DXGKMDT_OPM_HDCP_PROTECTION_LEVEL enumeration indicates the protection levels for a protected output that supports High-bandwidth Digital Content Protection (HDCP).
+HDCP protects digital video signals from digital video output connectors. Currently, OPM can use HDCP to protect data from Digital Video Interface (DVI) and High-Definition Multimedia Interface (HDMI) connector outputs. For more information about the HDCP system, see the HDCP Specification Revision 1.1.
+Indicates that HDCP does not protect the output's signal.
Indicates that HDCP protects the output's signal.
Specifies the aspect ratio for ETSI EN 300 294.
+This enumeration is numerically equivalent to the COPP_ImageAspectRatio_EN300294 enumeration used in Certified Output Protection Protocol (COPP).
+Full format 4:3.
Box 14:9 center.
Box 14:9 top.
Box 16:9 center.
Box 16:9 top.
Box > 16:9 center.
Full format 4:3 (shoot and protect 14:9 center).
Full format 16:9 (anamorphic).
Specifies whether the
Creates an Output Protection Manager (OPM) object for each physical monitor that is associated with a particular
The monitor handle for which to create OPM objects. There are several functions that return
A member of the
Value | Meaning |
---|---|
The returned | |
The returned |
?
Receives the number of
Receives a reference to an array of
If this function succeeds, it returns
A single
The
Creates an Output Protection Manager (OPM) object for each physical monitor that is associated with a particular Direct3D device.
+ Pointer to the
A member of the
Value | Meaning |
---|---|
The returned | |
The returned |
?
Receives the number of
Receives a reference to an array of
If this function succeeds, it returns
A single Direct3D device can be associated with several physical monitors. Each physical monitor has its own connector. The application must set the protection mechanism individually for each physical monitor, by using the
The
Represents a video output for an Output Protection Manager (OPM) session.
To get a reference to this interface, call one of the following functions:
Begins the initialization sequence for an Output Protection Manager (OPM) session.
+Pointer to an
Receives a reference to a buffer containing the display driver's certificate. The method allocates the memory for the buffer. The caller must release the memory by calling CoTaskMemFree.
Receives the length of the buffer pointed to by ppbCertificate.
If this method succeeds, it returns
This method is equivalent to the IAMCertifiedOutputProtection::KeyExchange method in Certified Output Protection Protocol (COPP).
The method returns a certificate chain that contains the driver's 2048-bit RSA public key. The caller must validate the certificate chain and then call
This method supports both OPM semantics and COPP semantics. COPP semantics are supported for backward compatibility; new applications should use OPM semantics.
+Completes the initialization sequence for an Output Protection Manager (OPM) session.
+Pointer to an
Returns an
Return code | Description |
---|---|
| The method succeeded. |
| An unexpected error occurred in the display driver. |
| The encrypted parameters in pParameters are incorrect. |
?
This method is equivalent to the IAMCertifiedOutputProtection::SessionSequenceStart method in Certified Output Protection Protocol (COPP).
The pParameters parameter points to an
Encrypt this number with RSAES-OAEP encryption, using the display driver's public encryption key. The public encryption key is contained in the certificate returned in the ppbCertificate parameter of the StartInitialization method.
The application must use cryptographically secure random numbers. The CryptGenRandom function is recommended, although not required.
+Sends an Output Protection Manager (OPM) status request to the display driver.
+Pointer to an
Pointer to an
Returns an
Return code | Description |
---|---|
| The method succeeded. |
| The OPM object was created with Certified Output Protection Protocol (COPP) semantics. |
?
This method is equivalent to the IAMCertifiedOutputProtection::ProtectionStatus method in COPP.
The
Sends an Output Protection Manager (OPM) status request to the display driver. Use this method when OPM is emulating Certified Output Protection Protocol (COPP).
+Pointer to an
Pointer to an
Returns an
Return code | Description |
---|---|
| The method succeeded. |
| The OPM object was created with OPM semantics, not COPP semantics. |
?
This method is equivalent to the IAMCertifiedOutputProtection::ProtectionStatus method in COPP.
The
Configures a video output. This method sends an Output Protection Manager (OPM) or Certified Output Protection Protocol (COPP) command to the driver.
+Pointer to an
The size of the pbAdditionalParameters buffer, in bytes.
Pointer to a buffer that contains additional information for the command.
If this method succeeds, it returns
This method is equivalent to the IAMCertifiedOutputProtection::ProtectionCommand method in COPP.
This method supports both OPM semantics and COPP semantics. COPP semantics are supported for backward compatibility; new applications should use OPM semantics.
+Contains the result from an OPM_GET_ACP_AND_CGMSA_SIGNALING query.
+The layout of this structure is identical to the DXVA_COPPStatusSignalingCmdData structure used in Certified Output Protection Protocol (COPP).
+Contains the result of an OPM_GET_ACTUAL_OUTPUT_FORMAT query in Output Protection Manager (OPM).
+The refresh rate is expressed as a fraction. For example, if the refresh rate is 72 Hz, FreqNumerator = 72 and FreqDenominator = 1. For NTSC television, the values are FreqNumerator = 60000 and FreqDenominator = 1001 (59.94 fields per second).
The layout of this structure is identical to the DXVA_COPPStatusDisplayData structure used in Certified Output Protection Protocol (COPP).
+Contains an Output Protection Manager (OPM) or Certified Output Protection Protocol (COPP) command.
+The layout of this structure is identical to the AMCOPPCommand structure used in Certified Output Protection Protocol (COPP).
Initialize this structure as follows.
The DXGKMDT_OPM_CONNECTED_HDCP_DEVICE_INFORMATION structure contains High-bandwidth Digital Content Protection (HDCP) information that is retrieved in a call to the DxgkDdiOPMGetInformation function.
+The DXGKMDT_OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS structure contains parameters that are used to retrieve information from a protected output object in a call to the DxgkDdiOPMGetCOPPCompatibleInformation function.
+Contains initialization parameters for an Output Protection Manager (OPM) session.
+The layout of this structure is identical to the AMCOPPSignature structure used in Certified Output Protection Protocol (COPP).
+Contains the result from an OPM_GET_CODEC_INFO query.
+Contains information for the OPM_GET_CODEC_INFO command.
+Contains parameters for the
Initialize this structure as follows:
Contains the key selection vector (KSV) for a High-Bandwidth Digital Content Protection (HDCP) receiver.
+A buffer that contains the device's KSV. (This is the value named Bksv in the HDCP specification.)
The DXGKMDT_OPM_OMAC structure contains a One-key Cipher Block Chaining (CBC)-mode message authentication code (OMAC) for message authenticity.
+For more information about OMAC, see the OMAC-1 algorithm.
The OMAC-1 parameters that OPM and COPP use are:
E = AES (Advanced Encryption Standard)
t = 128 bits
K = The 128-bit key that the display miniport driver receives when DxgkDdiOPMSetSigningKeyAndSequenceNumbers is called.
n = 128 bits
For information about AES, see the RSA Laboratories website.
+A 16-byte array that comprises the OMAC.
Contains the result from an OPM_GET_OUTPUT_ID status request.
+Contains a 128-bit random number for use with Output Protection Manager (OPM).
+Always use a cryptographically secure random-number generator to fill in this structure. The CryptGenRandom function is recommended, although not required.
+Contains the result of an Output Protection Manager (OPM) status request.
+The layout of this structure is identical to the AMCOPPStatusOutput structure used in Certified Output Protection Protocol (COPP).
+Contains information for the OPM_SET_ACP_AND_CGMSA_SIGNALING command in Output Protection Manager (OPM).
This command causes the driver to insert Wide Screen Signaling (WSS) codes or other data packets in the television signal, as required by some Analog Copy Protection (ACP) and Copy Generation Management System — Analog (CGMS-A) specifications. For example:
The layout of this structure is identical to the DXVA_COPPSetSignalingCmdData structure used in Certified Output Protection Protocol (COPP).
+Contains parameters for the OPM_SET_HDCP_SRM command. This command updates the system renewability message (SRM) for High-Bandwidth Digital Content Protection (HDCP).
+The DXGKMDT_OPM_SET_PROTECTION_LEVEL_PARAMETERS structure contains parameters to set the protection level of a protected output in a call to the DxgkDdiOPMConfigureProtectedOutput function.
+Contains the result from an Output Protection Manager (OPM) status request.
+The layout of this structure is identical to the DXVA_COPPStatusData structure used in Certified Output Protection Protocol (COPP).
+
Contains an image that is stored as metadata for a media source. This structure is used as the data item for the WM/Picture metadata attribute.
+The WM/Picture attribute is defined in the Windows Media Format SDK. The attribute contains a picture related to the content, such as album art.
To get this attribute from a media source, call
Image data.
This format differs from the WM_PICTURE structure used in the Windows Media Format SDK. The WM_PICTURE structure contains internal references to two strings and the image data. If the structure is copied, these references become invalid. The
Contains synchronized lyrics stored as metadata for a media source. This structure is used as the data item for the WM/Lyrics_Synchronised metadata attribute.
+The WM/Lyrics_Synchronised attribute is defined in the Windows Media Format SDK. The attribute contains lyrics synchronized to times in the source file.
To get this attribute from a media source, call
Null-terminated wide-character string that contains a description.
Lyric data. The format of the lyric data is described in the Windows Media Format SDK documentation.
This format differs from the WM_SYNCHRONISED_LYRICS structure used in the Windows Media Format SDK. The WM_SYNCHRONISED_LYRICS structure contains internal references to two strings and the lyric data. If the structure is copied, these references become invalid. The
Specifies the format of time stamps in the lyrics. This member is equivalent to the bTimeStampFormat member in the WM_SYNCHRONISED_LYRICS structure. The WM_SYNCHRONISED_LYRICS structure is documented in the Windows Media Format SDK.
Specifies the type of synchronized strings that are in the lyric data. This member is equivalent to the bContentType member in the WM_SYNCHRONISED_LYRICS structure.
Size, in bytes, of the lyric data.
Describes the indexing configuration for a stream and type of index.
+
Number of bytes used for each index entry. If the value is MFASFINDEXER_PER_ENTRY_BYTES_DYNAMIC, the index entries have variable size.
Optional text description of the index.
Indexing interval. The units of this value depend on the index type. A value of MFASFINDEXER_NO_FIXED_INTERVAL indicates that there is no fixed indexing interval.
Specifies an index for the ASF indexer object.
+The index object of an ASF file can contain a number of distinct indexes. Each index is identified by the type of index and the stream number. No ASF index object can contain more than one index for a particular combination of stream number and index type.
+The type of index. Currently this value must be GUID_NULL, which specifies time-based indexing.
The stream number to which this structure applies.
Contains statistics about the progress of the ASF multiplexer.
+Use
Number of frames written by the ASF multiplexer.
Number of frames dropped by the ASF multiplexer.
Describes a 4:4:4:4 Y'Cb'Cr' sample.
+Cr (chroma difference) value.
Cb (chroma difference) value.
Y (luma) value.
Alpha value.
Specifies the buffering parameters for a network byte stream.
+Size of the file, in bytes. If the total size is unknown, set this member to -1.
Size of the playable media data in the file, excluding any trailing data that is not useful for playback. If this value is unknown, set this member to -1.
Pointer to an array of
The number of elements in the prgBuckets array.
Amount of data to buffer from the network, in 100-nanosecond units. This value is in addition to the buffer windows defined in the prgBuckets member.
Amount of additional data to buffer when seeking, in 100-nanosecond units. This value reflects the fact that downloading must start from the previous key frame before the seek point. If the value is unknown, set this member to zero.
The playback duration of the file, in 100-nanosecond units. If the duration is unknown, set this member to zero.
Playback rate.
Defines the properties of a clock.
+ The interval at which the clock correlates its clock time with the system time, in 100-nanosecond units. If the value is zero, the correlation is made whenever the
The unique identifier of the underlying device that provides the time. If two clocks have the same unique identifier, they are based on the same device. If the underlying device is not shared between two clocks, the value can be GUID_NULL.
A bitwise OR of flags from the
The clock frequency in Hz. A value of MFCLOCK_FREQUENCY_HNS means that the clock has a frequency of 10 MHz (100-nanosecond ticks), which is the standard MFTIME time unit in Media Foundation. If the
The amount of inaccuracy that may be present on the clock, in parts per billion (ppb). For example, an inaccuracy of 50 ppb means the clock might drift up to 50 seconds per billion seconds of real time. If the tolerance is not known, the value is MFCLOCK_TOLERANCE_UNKNOWN. This constant is equal to 50 parts per million (ppm).
The amount of jitter that may be present, in 100-nanosecond units. Jitter is the variation in the frequency due to sampling the underlying clock. Jitter does not include inaccuracies caused by drift, which is reflected in the value of dwClockTolerance.
For clocks based on a single device, the minimum jitter is the length of the tick period (the inverse of the frequency). For example, if the frequency is 10 Hz, the jitter is 0.1 second, which is 1,000,000 in MFTIME units. This value reflects the fact that the clock might be sampled just before the next tick, resulting in a clock time that is one period less than the actual time. If the frequency is greater than 10 MHz, the jitter should be set to 1 (the minimum value).
If a clock's underlying hardware device does not directly time stamp the incoming data, the jitter also includes the time required to dispatch the driver's interrupt service routine (ISR). In that case, the expected jitter should include the following values:
Value | Meaning |
---|---|
| Jitter due to time stamping during the device driver's ISR. |
| Jitter due to time stamping during the deferred procedure call (DPC) processing. |
| Jitter due to dropping to normal thread execution before time stamping. |
?
The
This structure is identical to the DirectShow
Major type
Subtype
If TRUE, samples are of a fixed size. This field is informational only. For audio, it is generally set to TRUE. For video, it is usually TRUE for uncompressed video and
If TRUE, samples are compressed using temporal (interframe) compression. (A value of TRUE indicates that not all frames are key frames.) This field is informational only.
Size of the sample in bytes. For compressed data, the value can be zero.
Format type | Format structure |
---|---|
| DVINFO |
| |
| |
| None. |
| |
| |
| |
?
Not used. Set to
Size of the format block of the media type.
Pointer to the format structure. The structure type is specified by the formattype member. The format structure must be present, unless formattype is GUID_NULL or FORMAT_None.
Contains coefficients used to transform multichannel audio into a smaller number of audio channels. This process is called fold-down.
+To specify this information in the media type, set the
The ASF media source supports fold-down from six channels (5.1 audio) to two channels (stereo). It gets the information from the g_wszFold6To2Channels3 attribute in the ASF header. This attribute is documented in the Windows Media Format SDK documentation.
+Size of the structure, in bytes.
Number of source channels.
Number of destination channels.
Specifies the assignment of audio channels to speaker positions in the transformed audio. This member is a bitwise OR of flags that define the speaker positions. For a list of valid flags, see
Array that contains the fold-down coefficients. The number of coefficients is cSrcChannels × cDstChannels. If the number of coefficients is less than the size of the array, the remaining elements in the array are ignored. For more information about how the coefficients are applied, see Windows Media Audio Professional Codec Features.
Describes an action requested by an output trust authority (OTA). The request is sent to an input trust authority (ITA).
+Specifies the action as a member of the
Pointer to a buffer that contains a ticket object, provided by the OTA.
Size of the ticket object, in bytes.
Contains parameters for the
Specifies the buffering requirements of a file.
+This structure describes the buffering requirements for content encoded at the bit rate specified in the dwBitrate member. The msBufferWindow member indicates how much data should be buffered before starting playback. The size of the buffer in bytes is msBufferWindow × dwBitrate / 8000.
+Bit rate, in bits per second.
Size of the buffer window, in milliseconds.
Contains encoding statistics from the Digital Living Network Alliance (DLNA) media sink.
This structure is used with the
Contains format data for a binary stream in an Advanced Streaming Format (ASF) file.
+This structure is used with the
This structure corresponds to the first 60 bytes of the Type-Specific Data field of the Stream Properties Object, in files where the stream type is ASF_Binary_Media. For more information, see the ASF specification.
The Format Data field of the Type-Specific Data field is contained in the
Major media type. This value is the
Media subtype.
If TRUE, samples have a fixed size in bytes. Otherwise, samples have variable size.
If TRUE, the data in this stream uses temporal compression. Otherwise, samples are independent of each other.
If bFixedSizeSamples is TRUE, this member specifies the sample size in bytes. Otherwise, the value is ignored and should be 0.
Format type
Defines custom color primaries for a video source. The color primaries define how to convert colors from RGB color space to CIE XYZ color space.
+This structure is used with the
Red x-coordinate.
Red y-coordinate.
Green x-coordinate.
Green y-coordinate.
Blue x-coordinate.
Blue y-coordinate.
White point x-coordinate.
White point y-coordinate.
Contains the authentication information for the credential manager.
+The response code of the authentication challenge. For example, NS_E_PROXY_ACCESSDENIED.
Set this flag to TRUE if the currently logged on user's credentials should be used as the default credentials.
If TRUE, the authentication package will send unencrypted credentials over the network. Otherwise, the authentication package encrypts the credentials.
The original URL that requires authentication.
The name of the site or proxy that requires authentication.
The name of the realm for this authentication.
The name of the authentication package. For example, "Digest" or "MBS_BASIC".
The number of times that the credential manager should retry after authentication fails.
Specifies a rectangular area within a video frame.
+ An
An
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
If the flags member contains the
To cancel authentication, set fProceedWithAuthentication equal to
By default, MFPlay uses the network source's implementation of
Contains one palette entry in a color table.
+This union can be used to represent both RGB palettes and Y'Cb'Cr' palettes. The video format that defines the palette determines which union member should be used.
+
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
This event is not used to signal the failure of an asynchronous
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Contains information that is common to every type of MFPlay event.
+
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Media items are created asynchronously. If multiple items are created, the operations can complete in any order, not necessarily in the same order as the method calls. You can use the dwUserData member to identify the items, if you have simultaneous requests pending.
+
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
If one or more streams could not be connected to a media sink, the event property store contains the MFP_PKEY_StreamRenderingResults property. The value of the property is an array of
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
If MFEventType is
Property | Description |
---|---|
MFP_PKEY_StreamIndex | The index of the stream whose format changed. |
?
+
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Important: Deprecated. This API may be removed from future releases of Windows. Applications should use the Media Session for playback.
Event structure for the
To get a reference to this structure, cast the pEventHeader parameter of the
Represents a ratio.
+Numerator of the ratio.
Denominator of the ratio.
Contains information about a revoked component.
+Specifies the reason for the revocation. The following values are defined.
Value | Meaning |
---|---|
| A boot driver could not be verified. |
| A certificate in a trusted component's certificate chain was revoked. |
| The high-security certificate for authenticating the protected environment (PE) was revoked. The high-security certificate is typically used by ITAs that handle high-definition content and next-generation formats such as HD-DVD. |
| A certificate's extended key usage (EKU) object is invalid. |
| The root certificate is not valid. |
| The low-security certificate for authenticating the PE was revoked. The low-security certificate is typically used by ITAs that handle standard-definition content and current-generation formats. |
| A trusted component was revoked. |
| The GRL was not found. |
| Could not load the global revocation list (GRL). |
| The GRL signature is invalid. |
| A certificate chain was not well-formed, or a boot driver is unsigned or is signed with an untrusted certificate. |
| A component was signed by a test certificate. |
?
In addition, one of the following flags might be present, indicating the type of component that failed to load.
Value | Meaning |
---|---|
| User-mode component. |
| Kernel-mode component. |
?
Contains a hash of the file header.
Contains a hash of the public key in the component's certificate.
File name of the revoked component.
Contains information about one or more revoked components.
+Revocation information version.
Number of elements in the pRRComponents array.
Array of
Contains statistics about the performance of the sink writer.
+The size of the structure, in bytes.
The time stamp of the most recent sample given to the sink writer. The sink writer updates this value each time the application calls
The time stamp of the most recent sample to be encoded. The sink writer updates this value whenever it calls
The time stamp of the most recent sample given to the media sink. The sink writer updates this value whenever it calls
The time stamp of the most recent stream tick. The sink writer updates this value whenever the application calls
The system time of the most recent sample request from the media sink. The sink writer updates this value whenever it receives an
The number of samples received.
The number of samples encoded.
The number of samples given to the media sink.
The number of stream ticks received.
The amount of data, in bytes, currently waiting to be processed.
The total amount of data, in bytes, that has been sent to the media sink.
The number of pending sample requests.
The average rate, in media samples per 100-nanoseconds, at which the application sent samples to the sink writer.
The average rate, in media samples per 100-nanoseconds, at which the sink writer sent samples to the encoder.
The average rate, in media samples per 100-nanoseconds, at which the sink writer sent samples to the media sink.
Not for application use.
+This structure is used internally by the Microsoft Media Foundation AVStream proxy.
+Reserved.
Reserved.
Contains information about an input stream on a Media Foundation transform (MFT). To get these values, call
Before the media types are set, the only values that should be considered valid are the
The
The
After you set a media type on all of the input and output streams (not including optional streams), all of the values returned by the GetInputStreamInfo method are valid. They might change if you set different media types.
+Specifies a new attribute value for a topology node.
+ Due to an error in the structure declaration, the u64 member is declared as a 32-bit integer, not a 64-bit integer. Therefore, any 64-bit value passed to the
The identifier of the topology node to update. To get the identifier of a topology node, call
Attribute type, specified as a member of the
Attribute value (unsigned 32-bit integer). This member is used when attrType equals
Attribute value (unsigned 32-bit integer). This member is used when attrType equals
Attribute value (floating point). This member is used when attrType equals
Contains information about an output buffer for a Media Foundation transform. This structure is used in the
You must provide an
MFTs can support two different allocation models for output samples:
To find which model the MFT supports for a given output stream, call
Flag | Allocation Model |
---|---|
The MFT allocates the output samples for the stream. Set pSample to | |
The MFT supports both allocation models. | |
Neither (default) | The client must allocate the output samples for the stream. |
?
The behavior of ProcessOutput depends on the initial value of pSample and the value of the dwFlags parameter in the ProcessOutput method.
If pSample is
Restriction: This output stream must have the
If pSample is
Restriction: This output stream must have the
If pSample is non-
Restriction: This output stream must not have the
Any other combinations are invalid and cause ProcessOutput to return E_INVALIDARG.
Each call to ProcessOutput can produce zero or more events and up to one sample per output stream.
+
Contains information about an output stream on a Media Foundation transform (MFT). To get these values, call
Before the media types are set, the only values that should be considered valid are the
After you set a media type on all of the input and output streams (not including optional streams), all of the values returned by the GetOutputStreamInfo method are valid. They might change if you set different media types.
+Contains information about the audio and video streams for the transcode sink activation object.
To get the information stored in this structure, call
The
Contains media type information for registering a Media Foundation transform (MFT).
+The major media type. For a list of possible values, see Major Media Types.
The media subtype. For a list of possible values, see the following topics:
Contains parameters for the
Specifies a bitmap for the enhanced video renderer (EVR) to alpha-blend with the video.
+To specify a GDI bitmap, create a device context and call SelectObject to select the bitmap into the DC. Then set the hdc member of the structure equal to the handle to the DC and set the GetBitmapFromDC member to TRUE.
+If TRUE, the hdc member is used. Otherwise, the pDDs member is used.
A union that contains the following members.
Handle to the device context (DC) of a GDI bitmap. If GetBitmapFromDC is
Pointer to the
Specifies a bitmap for the enhanced video renderer (EVR) to alpha-blend with the video.
+To specify a GDI bitmap, create a device context and call SelectObject to select the bitmap into the DC. Then set the hdc member of the structure equal to the handle to the DC and set the GetBitmapFromDC member to TRUE.
+If TRUE, the hdc member is used. Otherwise, the pDDs member is used.
A union that contains the following members.
Handle to the device context (DC) of a GDI bitmap. If GetBitmapFromDC is
Pointer to the
Specifies how the enhanced video renderer (EVR) alpha-blends a bitmap with the video.
+Bitwise OR of one or more flags from the
Source color key. This member is used if the dwFlags member contains the
You cannot specify a color key if you are alpha-blending a Direct3D surface with per-pixel alpha (
Source rectangle. The source rectangle defines the region of the bitmap that is alpha-blended with the video. The source rectangle is given in pixels and is relative to the original bitmap.
If you are alpha-blending a GDI bitmap, you must fill in this structure when you call
If you are alpha-blending a Direct3D surface and the dwFlags member contains the
After setting the initial bitmap, you can update the source rectangle by calling
The source rectangle cannot be an empty rectangle, and cannot exceed the bounds of the bitmap.
Destination rectangle. The destination rectangle defines the region of the composited video image that receives the alpha-blended bitmap. The destination rectangle is specified as a normalized rectangle using the
This member is used if the dwFlags member contains the
Alpha blending value. This member is used if the dwFlags member contains the
This value is applied to the entire bitmap image. To create transparent regions, use the clrSrcKey member or use a DirectDraw surface with per-pixel alpha.
Direct3D filtering mode to use when performing the alpha-blend operation. Specify the filter mode as a
This member is used if the dwFlags member contains the
Point filtering is particularly useful for images that contain text and will not be stretched.
Specifies a rectangular area within a video frame.
+ An
An
A
Contains information about a video compression format. This structure is used in the
For uncompressed video formats, set the structure members to zero.
+
Describes a video format.
+Applications should avoid using this structure. Instead, it is recommended that applications use attributes to describe the video format. For a list of media type attributes, see Media Type Attributes. With attributes, you can set just the format information that you know, which is easier (and more likely to be accurate) than trying to fill in complete format information for the
To initialize a media type object from an
You can use the
Size of the structure, in bytes. This value includes the size of the palette entries that may appear after the surfaceInfo member.
Video subtype. See Video Subtype GUIDs.
Contains video format information that applies to both compressed and uncompressed formats.
This structure is used in the
Developers are encouraged to use media type attributes instead of using the
Structure Member | Media Type Attribute |
---|---|
dwWidth, dwHeight | |
PixelAspectRatio | |
SourceChromaSubsampling | |
InterlaceMode | |
TransferFunction | |
ColorPrimaries | |
TransferMatrix | |
SourceLighting | |
FramesPerSecond | |
NominalRange | |
GeometricAperture | |
MinimumDisplayAperture | |
PanScanAperture | |
VideoFlags | See |
?
+
Defines a normalized rectangle, which is used to specify sub-rectangles in a video rectangle. When a rectangle N is normalized relative to some other rectangle R, it means the following:
The coordinate (0.0, 0.0) on N is mapped to the upper-left corner of R.
The coordinate (1.0, 1.0) on N is mapped to the lower-right corner of R.
Any coordinates of N that fall outside the range [0...1] are mapped to positions outside the rectangle R. A normalized rectangle can be used to specify a region within a video rectangle without knowing the resolution or even the aspect ratio of the video. For example, the upper-left quadrant is defined as {0.0, 0.0, 0.5, 0.5}.
+X-coordinate of the upper-left corner of the rectangle.
Y-coordinate of the upper-left corner of the rectangle.
X-coordinate of the lower-right corner of the rectangle.
Y-coordinate of the lower-right corner of the rectangle.
Contains information about an uncompressed video format. This structure is used in the
Applies to: desktop apps | Metro style apps
Initializes Microsoft Media Foundation.
+ An application must call this function before using Media Foundation. Before your application quits, call
Do not call
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
Applies to: desktop apps | Metro style apps
Shuts down the Microsoft Media Foundation platform. Call this function once for every call to
If this function succeeds, it returns
This function is available on the following platforms if the Windows Media Format 11 SDK redistributable components are installed:
There are many user-input devices beside the traditional keyboard and mouse. For example, user input can come from a joystick, a touch screen, a microphone, or other devices that allow great flexibility in user input. These devices are collectively known as Human Interface Devices (HIDs). The raw input API provides a stable and robust way for applications to accept raw input from any HID, including the keyboard and mouse.
This section covers the following topics:
There are many user-input devices beside the traditional keyboard and mouse. For example, user input can come from a joystick, a touch screen, a microphone, or other devices that allow great flexibility in user input. These devices are collectively known as Human Interface Devices (HIDs). The raw input API provides a stable and robust way for applications to accept raw input from any HID, including the keyboard and mouse.
This section covers the following topics:
There are many user-input devices beside the traditional keyboard and mouse. For example, user input can come from a joystick, a touch screen, a microphone, or other devices that allow great flexibility in user input. These devices are collectively known as Human Interface Devices (HIDs). The raw input API provides a stable and robust way for applications to accept raw input from any HID, including the keyboard and mouse.
This section covers the following topics:
There are many user-input devices beside the traditional keyboard and mouse. For example, user input can come from a joystick, a touch screen, a microphone, or other devices that allow great flexibility in user input. These devices are collectively known as Human Interface Devices (HIDs). The raw input API provides a stable and robust way for applications to accept raw input from any HID, including the keyboard and mouse.
This section covers the following topics:
There are many user-input devices beside the traditional keyboard and mouse. For example, user input can come from a joystick, a touch screen, a microphone, or other devices that allow great flexibility in user input. These devices are collectively known as Human Interface Devices (HIDs). The raw input API provides a stable and robust way for applications to accept raw input from any HID, including the keyboard and mouse.
This section covers the following topics:
Enumerates the raw input devices attached to the system.
+An array of
If pRawInputDeviceList is
The size of a
If the function is successful, the return value is the number of devices stored in the buffer pointed to by pRawInputDeviceList.
On any other error, the function returns (UINT) -1 and GetLastError returns the error indication.
The devices returned from this function are the mouse, the keyboard, and other Human Interface Device (HID) devices.
To get more detailed information about the attached devices, call GetRawInputDeviceInfo using the hDevice from
Retrieves the information about the raw input devices for the current application.
+An array of
The number of
The size, in bytes, of a
If successful, the function returns a non-negative number that is the number of
If the pRawInputDevices buffer is too small or
To receive raw input from a device, an application must register it by using RegisterRawInputDevices.
+Retrieves information about the raw input device.
+A handle to the raw input device. This comes from the lParam of the WM_INPUT message, from the hDevice member of
Specifies what data will be returned in pData. This parameter can be one of the following values.
Value | Meaning |
---|---|
| pData points to a string that contains the device name. For this uiCommand only, the value in pcbSize is the character count (not the byte count). |
| pData points to an |
| pData points to the previously parsed data. |
?
A reference to a buffer that contains the information specified by uiCommand. If uiCommand is sizeof(
before calling GetRawInputDeviceInfo.
The size, in bytes, of the data in pData.
If successful, this function returns a non-negative number indicating the number of bytes copied to pData.
If pData is not large enough for the data, the function returns -1. If pData is
Call GetLastError to identify any other errors.
Registers the devices that supply the raw input data.
+An array of
The number of
The size, in bytes, of a
TRUE if the function succeeds; otherwise,
To receive WM_INPUT messages, an application must first register the raw input devices using RegisterRawInputDevices. By default, an application does not receive raw input.
To receive WM_INPUT_DEVICE_CHANGE messages, an application must specify the
If a
Performs a buffered read of the raw input data.
+A reference to a buffer of
The size, in bytes, of a
The size, in bytes, of the
If pData is
If an error occurs, the return value is (UINT)-1. Call GetLastError for the error code.
Using GetRawInputBuffer, the raw input data is buffered in the array of
The NEXTRAWINPUTBLOCK macro allows an application to traverse an array of
Note: To get the correct size of the raw input buffer, do not use *pcbSize, use *pcbSize * 8 instead. To ensure GetRawInputBuffer behaves properly on WOW64, you must align the
[StructLayout(LayoutKind.Explicit)]
internal struct RawInput
{
    // Fixed header describing the input type and size.
    [FieldOffset(0)] public RawInputHeader header;
    // The three members below overlap: they form a union selected by the
    // header's type field. The 16+8 offset accounts for the 64-bit
    // (WOW64-aligned) header size. (Type names reconstructed — the original
    // cross-references were stripped from this documentation.)
    [FieldOffset(16 + 8)] public RawMouse mouse;
    [FieldOffset(16 + 8)] public RawKeyboard keyboard;
    [FieldOffset(16 + 8)] public RawHid hid;
}
Retrieves the raw input from the specified device.
+A handle to the
The command flag. This parameter can be one of the following values.
Value | Meaning |
---|---|
| Get the header information from the |
| Get the raw data from the |
?
A reference to the data that comes from the
The size, in bytes, of the data in pData.
The size, in bytes, of the
If pData is
If there is an error, the return value is (UINT)-1.
GetRawInputData gets the raw input one
Defines the raw input data coming from any device.
+The size, in bytes, of the
The type of raw input data. This member can be one of the following values.
Value | Meaning |
---|---|
| Data comes from an HID that is not a keyboard or a mouse. |
| Data comes from a keyboard. |
| Data comes from a mouse. |
?
If dwType is
If dwType is
If dwType is
Defines the raw input data coming from the specified Human Interface Device (HID).
+The vendor identifier for the HID.
The product identifier for the HID.
The version number for the HID.
The top-level collection Usage Page for the device.
The top-level collection Usage for the device.
Defines the raw input data coming from the specified keyboard.
+For the keyboard, the Usage Page is 1 and the Usage is 6.
+The type of the keyboard.
The subtype of the keyboard.
The scan code mode.
The number of function keys on the keyboard.
The number of LED indicators on the keyboard.
The total number of keys on the keyboard.
Defines the raw input data coming from the specified mouse.
+For the mouse, the Usage Page is 1 and the Usage is 2.
+The identifier of the mouse device.
The number of buttons for the mouse.
The number of data points per second. This information may not be applicable for every mouse device.
TRUE if the mouse has a wheel for horizontal scrolling; otherwise,
Windows XP: This member is only supported starting with Windows Vista.
Describes the format of the raw input from a Human Interface Device (HID).
+Each WM_INPUT can indicate several inputs, but all of the inputs come from the same HID. The size of the bRawData array is dwSizeHid * dwCount.
+The size, in bytes, of each HID input in bRawData.
The number of HID inputs in bRawData.
The raw input data, as an array of bytes.
This section describes how the system provides raw input to your application and how an application receives and processes that input. Raw input is sometimes referred to as generic input.
+Defines information for the raw input devices.
+If
If
Top level collection Usage page for the raw input device.
Top level collection Usage for the raw input device.
Mode flag that specifies how to interpret the information provided by usUsagePage and usUsage. It can be zero (the default) or one of the following values. By default, the operating system sends raw input from devices with the specified top level collection (TLC) to the registered application as long as it has the window focus.
Value | Meaning |
---|---|
| If set, the application command keys are handled. |
| If set, the mouse button click does not activate the other window. |
| If set, this enables the caller to receive WM_INPUT_DEVICE_CHANGE notifications for device arrival and device removal. Windows XP: This flag is not supported until Windows Vista |
| If set, this specifies the top level collections to exclude when reading a complete usage page. This flag only affects a TLC whose usage page is already specified with |
| If set, this enables the caller to receive input in the background only if the foreground application does not process it. In other words, if the foreground application is not registered for raw input, then the background application that is registered will receive the input. Windows XP: This flag is not supported until Windows Vista |
| If set, this enables the caller to receive the input even when the caller is not in the foreground. Note that hwndTarget must be specified. |
| If set, the application-defined keyboard device hotkeys are not handled. However, the system hotkeys; for example, ALT+TAB and CTRL+ALT+DEL, are still handled. By default, all keyboard hotkeys are handled. |
| If set, this prevents any devices specified by usUsagePage or usUsage from generating legacy messages. This is only for the mouse and keyboard. See Remarks. |
| If set, this specifies all devices whose top level collection is from the specified usUsagePage. Note that usUsage must be zero. To exclude a particular top level collection, use |
| If set, this removes the top level collection from the inclusion list. This tells the operating system to stop reading from a device which matches the top level collection. |
?
A handle to the target window. If
Contains information about a raw input device.
+A handle to the raw input device.
The type of device. This can be one of the following values.
Value | Meaning |
---|---|
| The device is an HID that is not a keyboard and not a mouse. |
| The device is a keyboard. |
| The device is a mouse. |
?
Contains the header information that is part of the raw input data.
+To get more information on the device, use hDevice in a call to GetRawInputDeviceInfo.
+The type of raw input. It can be one of the following values.
Value | Meaning |
---|---|
| Raw input comes from some device that is not a keyboard or a mouse. |
| Raw input comes from the keyboard. |
| Raw input comes from the mouse. |
?
The size, in bytes, of the entire input packet of data. This includes
A handle to the device generating the raw input data.
The value passed in the wParam parameter of the WM_INPUT message.
Contains information about the state of the keyboard.
+The scan code from the key depression. The scan code for keyboard overrun is KEYBOARD_OVERRUN_MAKE_CODE.
Flags for scan code information. It can be one or more of the following.
Value | Meaning |
---|---|
| The key is up. |
| This is the left version of the key. |
| This is the right version of the key. |
| The key is down. |
?
Reserved; must be zero.
Windows message compatible virtual-key code. For more information, see Virtual Key Codes.
The corresponding window message, for example
The device-specific additional information for the event.
Contains information about an XAPO for use in an effect chain.
+XAPO instances are passed to XAudio2 as
For additional information on using XAPOs with XAudio2 see How to: Create an Effect Chain and How to: Use an XAPO in XAudio2.
+Represents an audio data buffer, used with
XAudio2 audio data is interleaved, data from each channel is adjacent for a particular sample number. For example if there was a 4 channel wave playing into an XAudio2 source voice, the audio data would be a sample of channel 0, a sample of channel 1, a sample of channel 2, a sample of channel 3, and then the next sample of channels 0, 1, 2, 3, etc.
The AudioBytes and pAudioData members of
Memory allocated to hold a
The
This interface should be implemented by the XAudio2 client. XAudio2 calls these methods via an interface reference provided by the client, using the XAudio2Create method. Methods in this interface return void, rather than an
See XAudio2 Callbacks for restrictions on callback implementation.
+Describes I3DL2 (Interactive 3D Audio Rendering Guidelines Level 2.0) parameters for use in the ReverbConvertI3DL2ToNative function.
+There are many preset values defined for the
Describes parameters for use in the reverb APO.
+All parameters related to sampling rate or time are relative to a 48kHz voice and must be scaled for use with other sampling rates. For example, setting ReflectionsDelay to 300ms gives a true 300ms delay when the reverb is hosted in a 48kHz voice, but becomes a 150ms delay when hosted in a 24kHz voice.
+Percentage of the output that will be reverb. Allowable values are from 0 to 100.
The delay time of the first reflection relative to the direct path. Permitted range is from 0 to 300 milliseconds.
Note: All parameters related to sampling rate or time are relative to a 48kHz sampling rate and must be scaled for use with other sampling rates. See remarks section below for additional information.
Delay of reverb relative to the first reflection. Permitted range is from 0 to 85 milliseconds.
Note: All parameters related to sampling rate or time are relative to a 48kHz sampling rate and must be scaled for use with other sampling rates. See remarks section below for additional information.
Delay for the left rear output and right rear output. Permitted range is from 0 to 5 milliseconds.
Note: All parameters related to sampling rate or time are relative to a 48kHz sampling rate and must be scaled for use with other sampling rates. See remarks section below for additional information.
Position of the left input within the simulated space relative to the listener. With PositionLeft set to the minimum value, the left input is placed close to the listener. In this position, early reflections are dominant, and the reverb decay is set back in the sound field and reduced in amplitude. With PositionLeft set to the maximum value, the left input is placed at a maximum distance from the listener within the simulated room. PositionLeft does not affect the reverb decay time (liveness of the room), only the apparent position of the source relative to the listener. Permitted range is from 0 to 30 (no units).
Same as PositionLeft, but affecting only the right input. Permitted range is from 0 to 30 (no units).
Note: PositionRight is ignored in mono-in/mono-out mode.
Gives a greater or lesser impression of distance from the source to the listener. Permitted range is from 0 to 30 (no units).
Gives a greater or lesser impression of distance from the source to the listener. Permitted range is from 0 to 30 (no units).
Note: PositionMatrixRight is ignored in mono-in/mono-out mode.
Controls the character of the individual wall reflections. Set to minimum value to simulate a hard flat surface and to maximum value to simulate a diffuse surface. Permitted range is from 0 to 15 (no units).
Controls the character of the individual wall reverberations. Set to minimum value to simulate a hard flat surface and to maximum value to simulate a diffuse surface. Permitted range is from 0 to 15 (no units). +
Adjusts the decay time of low frequencies relative to the decay time at 1 kHz. The values correspond to dB of gain as follows:
Value | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Gain (dB) | -8 | -7 | -6 | -5 | -4 | -3 | -2 | -1 | 0 | +1 | +2 | +3 | +4 |
?
Note: A LowEQGain value of 8 results in the decay time of low frequencies being equal to the decay time at 1 kHz.
Permitted range is from 0 to 12 (no units).
Sets the corner frequency of the low pass filter that is controlled by the LowEQGain parameter. The values correspond to frequency in Hz as follows:
Value | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
---|---|---|---|---|---|---|---|---|---|---|
Frequency (Hz) | 50 | 100 | 150 | 200 | 250 | 300 | 350 | 400 | 450 | 500 |
?
Permitted range is from 0 to 9 (no units).
Adjusts the decay time of high frequencies relative to the decay time at 1 kHz. When set to zero, high frequencies decay at the same rate as 1 kHz. When set to maximum value, high frequencies decay at a much faster rate than 1 kHz.
Value | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
---|---|---|---|---|---|---|---|---|---|
Gain (dB) | -8 | -7 | -6 | -5 | -4 | -3 | -2 | -1 | 0 |
?
Permitted range is from 0 to 8 (no units).
Sets the corner frequency of the high pass filter that is controlled by the HighEQGain parameter. The values correspond to frequency in kHz as follows:
Value | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Frequency (kHz) | 1 | 1.5 | 2 | 2.5 | 3 | 3.5 | 4 | 4.5 | 5 | 5.5 | 6 | 6.5 | 7 | 7.5 | 8 |
?
Permitted range is from 0 to 14 (no units).
Sets the corner frequency of the low pass filter for the room effect. Permitted range is from 20 to 20,000 Hz.
Note: All parameters related to sampling rate or time are relative to a 48kHz sampling rate and must be scaled for use with other sampling rates. See remarks section below for additional information.
Sets the pass band intensity level of the low-pass filter for both the early reflections and the late field reverberation. Permitted range is from -100 to 0 dB.
Sets the intensity of the low-pass filter for both the early reflections and the late field reverberation at the corner frequency (RoomFilterFreq). Permitted range is from -100 to 0 dB.
Adjusts the intensity of the early reflections. Permitted range is from -100 to 20 dB.
Adjusts the intensity of the reverberations. Permitted range is from -100 to 20 dB.
Reverberation decay time at 1 kHz. This is the time that a full scale input signal decays by 60 dB. Permitted range is from 0.1 to infinity seconds.
Controls the modal density in the late field reverberation. For colorless spaces, Density should be set to the maximum value (100). As Density is decreased, the sound becomes hollow (comb filtered). This is an effect that can be useful if you are trying to model a silo. Permitted range as a percentage is from 0 to 100.
The apparent size of the acoustic space. Permitted range is from 1 to 100 feet.
XAudio2 constants that specify default parameters, maximum values, and flags.
XAudio2 boundary values
+Indicates the filter type.
+Note: The DirectX SDK versions of XAUDIO2 do not support the LowPassOnePoleFilter or the HighPassOnePoleFilter.
+Attenuates (reduces) frequencies above the cutoff frequency.
Attenuates frequencies outside a given range.
Attenuates frequencies below the cutoff frequency.
Attenuates frequencies inside a given range.
Stops consumption of audio by the current voice.
+All source buffers that are queued on the voice and the current cursor position are preserved. This allows the voice to continue from where it left off, when it is restarted. The
By default, any pending output from voice effects — for example, reverb tails — is not played. Instead, the voice is immediately rendered silent. The
A voice stopped with the
Stop is always asynchronous, even if called within a callback.
Note??XAudio2 never calls any voice callbacks for a voice if the voice is stopped (even if it was stopped with
Flags that control how the voice is stopped. Can be 0 or the following: +
Value | Description |
---|---|
Continue emitting effect output after the voice is stopped.? |
?
Creates and configures a source voice.
+Source voices read audio data from the client. They process the data and send it to the XAudio2 processing graph.
A source voice includes a variable-rate sample rate conversion, to convert data from the source format sample rate to the output rate required for the voice send list. If you use a
You cannot create any source or submix voices until a mastering voice exists, and you cannot destroy a mastering voice if any source or submix voices still exist.
Source voices are always processed before any submix or mastering voices. This means that you do not need a ProcessingStage parameter to control the processing order.
When first created, source voices are in the stopped state.
XAudio2 uses an internal memory pooler for voices with the same format. This means memory allocation for voices will occur less frequently as more voices are created and then destroyed. To minimize just-in-time allocations, a title can create the anticipated maximum number of voices needed up front, and then delete them as necessary. Voices will then be reused from the XAudio2 pool. The memory pool is tied to an XAudio2 engine instance. You can reclaim all the memory used by an instance of the XAudio2 engine by destroying the XAudio2 object and recreating it as necessary (forcing the memory pool to grow via preallocation would have to be reapplied as needed).
It is invalid to call CreateSourceVoice from within a callback (that is,
The
If successful, returns a reference to the new
Pointer to a one of the structures in the table below. This structure contains the expected format for all audio buffers submitted to the source voice. + XAudio2 supports PCM and ADPCM voice types.
Format tag | Wave format structure | Size (in bytes) |
---|---|---|
PCMWAVEFORMAT | 16 | |
-or- | | 18 |
PCMWAVEFORMAT | 18 | |
ADPCMWAVEFORMAT | 50 | |
| 40 |
?
XAudio2 supports the following PCM formats. +
The number of channels in a source voice must be less than or equal to
Note??PCM data formats such as PCMWAVEFORMAT and ADPCMWAVEFORMAT that require more information than provided by
Flags that specify the behavior of the source voice. A flag can be 0 or a combination of one or more of the following:
Value | Description |
---|---|
No pitch control is available on the voice.? | |
No sample rate conversion is available on the voice. The voice's outputs must have the same sample rate. Note??The | |
The filter effect should be available on this voice.? |
?
Note??The
Highest allowable frequency ratio that can be set on this voice. The value for this argument must be between
If MaxFrequencyRatio is less than 1.0, the voice will use that ratio immediately after being created (rather than the default of 1.0).
Xbox 360 |
---|
For XMA voices, there is one more restriction on the MaxFrequencyRatio argument and the voice's sample rate. The product of these two numbers cannot exceed XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MONO for one-channel voices or XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MULTICHANNEL for voices with any other number of channels. If the value specified for MaxFrequencyRatio is too high for the specified format, the call to CreateSourceVoice fails and produces a debug message. + |
?
Note??You can use the lowest possible MaxFrequencyRatio value to reduce XAudio2's memory usage.
Defines a destination voice that is the target of a send from another voice and specifies whether a filter should be used.
+Indicates whether a filter should be used on data sent to the voice pointed to by pOutputVoice. Flags can be 0 or
XAudio2 constants that specify default parameters, maximum values, and flags.
XAudio2 boundary values
+Interface that defines an XAPO, allowing it to be used in an XAudio2 effect chain.
+Interface that defines an XAPO, allowing it to be used in an XAudio2 effect chain.
+Returns the registration properties of an XAPO.
+ Receives a reference to a
Returns
Queries if a specific input format is supported for a given output format.
+Output format.
Input format to check for being supported.
If not
Returns
The
Queries if a specific output format is supported for a given input format.
+Input format.
Output format to check for being supported.
If not
Returns
The
Performs any effect-specific initialization.
+ Effect-specific initialization parameters, may be
Size of pData in bytes, may be 0 if pData is
Returns
The contents of pData are defined by a given XAPO. Immutable parameters (constant for the lifetime of the XAPO) should be set in this method. Once initialized, an XAPO cannot be initialized again. An XAPO should be initialized before passing it to XAudio2 as part of an effect chain.
Note??XAudio2 does not call this method, it should be called by the client before passing the XAPO to XAudio2.
+Resets variables dependent on frame history.
+Constant and locked parameters such as the input and output formats remain unchanged. Variables set by
For example, an effect with delay should zero out its delay line during this method, but should not reallocate anything as the XAPO remains locked with a constant input and output configuration.
XAudio2 only calls this method if the XAPO is locked.
This method is called from the realtime thread and should not block. +
+Called by XAudio2 to lock the input and output configurations of an XAPO allowing it to do any final initialization before Process is called on the realtime thread.
+Returns
Once locked, the input and output configuration and any other locked parameters remain constant until UnLockForProcess is called. After an XAPO is locked, further calls to LockForProcess have no effect until the UnLockForProcess function is called.
An XAPO indicates what specific formats it supports through its implementation of the IsInputFormatSupported and IsOutputFormatSupported methods. An XAPO should assert the input and output configurations are supported and that any required effect-specific initialization is complete. The IsInputFormatSupported, IsOutputFormatSupported, and Initialize methods should be used as necessary before calling this method.
Because Process is a nonblocking method, all internal memory buffers required for Process should be allocated in LockForProcess.
Process is never called before LockForProcess returns successfully.
LockForProcess is called directly by XAudio2 and should not be called by the client code.
+Deallocates variables that were allocated with the LockForProcess method.
+Unlocking an XAPO instance allows it to be reused with different input and output formats.
+Runs the XAPO's digital signal processing (DSP) code on the given input and output buffers.
+Number of elements in pInputProcessParameters.
Note??XAudio2 currently supports only one input stream and one output stream.
Input array of
Number of elements in pOutputProcessParameters.
Note??XAudio2 currently supports only one input stream and one output stream.
Output array of
TRUE to process normally;
Implementations of this function should not block, as the function is called from the realtime audio processing thread.
All code that could cause a delay, such as format validation and memory allocation, should be put in the
For in-place processing, the pInputProcessParameters parameter will not necessarily be the same as pOutputProcessParameters. Rather, their pBuffer members will point to the same memory.
Multiple input and output buffers may be used with in-place XAPOs, though the input buffer count must equal the output buffer count. For in-place processing when multiple input and output buffers are used, the XAPO may assume the number of input buffers equals the number of output buffers.
In addition to writing to the output buffer, as appropriate, an XAPO is responsible for setting the output stream's buffer flags and valid frame count.
When IsEnabled is
When writing a Process method, it is important to note XAudio2 audio data is interleaved, which means data from each channel is adjacent for a particular sample number. For example, if there was a 4-channel wave playing into an XAudio2 source voice, the audio data would be a sample of channel 0, a sample of channel 1, a sample of channel 2, a sample of channel 3, and then the next sample of channels 0, 1, 2, 3, and so on. +
+Returns the number of input frames required to generate the given number of output frames.
+The number of output frames desired.
Returns the number of input frames required.
XAudio2 calls this method to determine what size input buffer an XAPO requires to generate the given number of output frames. This method only needs to be called once while an XAPO is locked. CalcInputFrames is only called by XAudio2 if the XAPO is locked.
This function should not block, because it may be called from the realtime audio processing thread.
+Returns the number of output frames that will be generated from a given number of input frames.
+The number of input frames.
Returns the number of output frames that will be produced.
XAudio2 calls this method to determine how large of an output buffer an XAPO will require for a certain number of input frames. CalcOutputFrames is only called by XAudio2 if the XAPO is locked.
This function should not block, because it may be called from the realtime audio processing thread.
+Describes parameters for use with the volume meter APO.
+This structure is used with the XAudio2
pPeakLevels and pRMSLevels are not returned by
ChannelCount must be set by the application to match the number of channels in the voice the effect is applied to.
+Array that will be filled with the maximum absolute level for each channel during a processing pass. The array must be at least ChannelCount × sizeof(float) bytes. pPeakLevels may be
Array that will be filled with root mean square level for each channel during a processing pass. The array must be at least ChannelCount × sizeof(float) bytes. pRMSLevels may be
Number of channels being processed.
A mastering voice is used to represent the audio output device.
Data buffers cannot be submitted directly to mastering voices, but data submitted to other types of voices must be directed to a mastering voice to be heard. +
Sets the filter parameters on one of this voice's sends.
+SetOutputFilterParameters will fail if the send was not created with the
Note??
Returns information about the creation flags, input channels, and sample rate of a voice.
+
Designates a new set of submix or mastering voices to receive the output of the voice.
+Array of
Returns
This method is only valid for source and submix voices. Mastering voices can not send audio to another voice.
After calling SetOutputVoices a voice's current send levels will be replaced by a default send matrix. The
It is invalid to call SetOutputVoices from within a callback (that is,
Note??Calling SetOutputVoices invalidates any send matrices previously set with
Replaces the effect chain of the voice.
+Pointer to an
Note??If pEffectChain is non-
Returns
See XAudio2 Error Codes for descriptions of XAudio2 specific error codes.
The number of output channels allowed for a voice's effect chain is locked at creation of the voice. If you create the voice with an effect chain, any new effect chain passed to SetEffectChain must have the same number of input and output channels as the original effect chain. If you create the voice without an effect chain, the number of output channels allowed for the effect chain will default to the number of input channels for the voice. If any part of effect chain creation fails, none of it is applied.
After you attach an effect to an XAudio2 voice, XAudio2 takes control of the effect, and the client should not make any further calls to it. The simplest way to ensure this is to release all references to the effect.
It is invalid to call SetEffectChain from within a callback (that is,
The
Enables the effect at a given position in the effect chain of the voice.
+Zero-based index of an effect in the effect chain of the voice.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
Be careful when you enable an effect while the voice that hosts it is running. Such an action can result in a problem if the effect significantly changes the audio's pitch or volume.
The effects in a given XAudio2 voice's effect chain must consume and produce audio at that voice's processing sample rate. The only aspect of the audio format they can change is the channel count. For example a reverb effect can convert mono data to 5.1. The client can use the
EnableEffect takes effect immediately when you call it from an XAudio2 callback with an OperationSet of
Disables the effect at a given position in the effect chain of the voice.
+Zero-based index of an effect in the effect chain of the voice.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
The effects in a given XAudio2 voice's effect chain must consume and produce audio at that voice's processing sample rate. The only aspect of the audio format they can change is the channel count. For example a reverb effect can convert mono data to 5.1. The client can use the
Disabling an effect immediately removes it from the processing graph. Any pending audio in the effect — such as a reverb tail — is not played. Be careful disabling an effect while the voice that hosts it is running. This can result in an audible artifact if the effect significantly changes the audio's pitch or volume.
DisableEffect takes effect immediately when called from an XAudio2 callback with an OperationSet of
Returns the running state of the effect at a specified position in the effect chain of the voice.
+Zero-based index of an effect in the effect chain of the voice.
GetEffectState always returns the effect's actual current state. However, this may not be the state set by the most recent
Sets parameters for a given effect in the voice's effect chain.
+Zero-based index of an effect within the voice's effect chain.
Returns the current values of the effect-specific parameters.
Size of the pParameters array in bytes.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
Fails with E_NOTIMPL if the effect does not support a generic parameter control interface.
The specific effect being used determines the valid size and format of the pParameters buffer. The call will fail if pParameters is invalid or if ParametersByteSize is not exactly the size that the effect expects. The client must take care to direct the SetEffectParameters call to the right effect. If this call is directed to a different effect that happens to accept the same parameter block size, the parameters will be interpreted differently. This may lead to unexpected results.
The memory pointed to by pParameters must not be freed immediately, because XAudio2 will need to refer to it later when the parameters actually are applied to the effect. This happens during the next audio processing pass if the OperationSet argument is
SetEffectParameters takes effect immediately when called from an XAudio2 callback with an OperationSet of
Note??
Returns the current effect-specific parameters of a given effect in the voice's effect chain.
+Zero-based index of an effect within the voice's effect chain.
Returns the current values of the effect-specific parameters.
Size, in bytes, of the pParameters array.
Returns
Fails with E_NOTIMPL if the effect does not support a generic parameter control interface.
GetEffectParameters always returns the effect's actual current parameters. However, these may not match the parameters set by the most recent call to
Sets the voice's filter parameters.
+Pointer to an
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
SetFilterParameters will fail if the voice was not created with the
This method is usable only on source and submix voices and has no effect on mastering voices.
Note??
Gets the voice's filter parameters.
+Pointer to an
GetFilterParameters will fail if the voice was not created with the
GetFilterParameters always returns this voice's actual current filter parameters. However, these may not match the parameters set by the most recent
Note??GetFilterParameters is usable only on source and submix voices and has no effect on mastering voices.
+Sets the filter parameters on one of this voice's sends.
+
Pointer to an
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
SetOutputFilterParameters will fail if the send was not created with the
Note??
Returns the filter parameters from one of this voice's sends.
+
Pointer to an
GetOutputFilterParameters will fail if the send was not created with the
Note??
Sets the overall volume level for the voice.
+Overall volume level to use. See Remarks for more information on volume levels.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
SetVolume controls a voice's master input volume level. The master volume level is applied at different times depending on the type of voice. For submix and mastering voices the volume level is applied just before the voice's built in filter and effect chain is applied. For source voices the master volume level is applied after the voice's filter and effect chain is applied.
Volume levels are expressed as floating-point amplitude multipliers between -
Note??
Gets the current overall volume level of the voice.
+Returns the current overall volume level of the voice. See Remarks for more information on volume levels.
Volume levels are expressed as floating-point amplitude multipliers between -2^24 and 2^24, with a maximum gain of 144.5 dB. A volume level of 1 means there is no attenuation or gain and 0 means silence. Negative levels can be used to invert the audio's phase. See XAudio2 Volume and Pitch Control for additional information on volume control.
Note??GetVolume always returns the volume most recently set by
Sets the volume levels for the voice, per channel.
+Number of channels in the voice.
Array containing the new volumes of each channel in the voice. The array must have Channels elements. See Remarks for more information on volume levels.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
SetChannelVolumes controls a voice's per-channel output levels and is applied just after the voice's final SRC and before its sends.
This method is valid only for source and submix voices, because mastering voices do not specify volume per channel.
Volume levels are expressed as floating-point amplitude multipliers between -
Note??
Returns the volume levels for the voice, per channel.
+Confirms the channel count of the voice.
Returns the current volume level of each channel in the voice. The array must have at least Channels elements. See Remarks for more information on volume levels.
These settings are applied after the effect chain is applied. This method is valid only for source and submix voices, because mastering voices do not specify volume per channel.
Volume levels are expressed as floating-point amplitude multipliers between -2^24 and 2^24, with a maximum gain of 144.5 dB. A volume of 1 means there is no attenuation or gain, 0 means silence, and negative levels can be used to invert the audio's phase. See XAudio2 Volume and Pitch Control for additional information on volume control.
Note??GetChannelVolumes always returns the volume levels most recently set by
Sets the volume level of each channel of the final output for the voice. These channels are mapped to the input channels of a specified destination voice.
+Pointer to a destination
Note??If the voice sends to a single target voice then specifying
Confirms the output channel count of the voice. This is the number of channels that are produced by the last effect in the chain.
Confirms the input channel count of the destination voice.
Array of [SourceChannels × DestinationChannels] volume levels sent to the destination voice. The level sent from source channel S to destination channel D is specified in the form pLevelMatrix[SourceChannels × D + S].
For example, when rendering two-channel stereo input into 5.1 output that is weighted toward the front channels — but is absent from the center and low-frequency channels — the matrix might have the values shown in the following table.
Output | Left Input [Array Index] | Right Input [Array Index] |
---|---|---|
Left | 1.0 [0] | 0.0 [1] |
Right | 0.0 [2] | 1.0 [3] |
Front Center | 0.0 [4] | 0.0 [5] |
LFE | 0.0 [6] | 0.0 [7] |
Rear Left | 0.8 [8] | 0.0 [9] |
Rear Right | 0.0 [10] | 0.8 [11] |
?
Note??The left and right input are fully mapped to the output left and right channels; 80 percent of the left and right input is mapped to the rear left and right channels.
See Remarks for more information on volume levels.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
This method is valid only for source and submix voices, because mastering voices write directly to the device with no matrix mixing.
Volume levels are expressed as floating-point amplitude multipliers between -
The X3DAudio function
Note??
Gets the volume level of each channel of the final output for the voice. These channels are mapped to the input channels of a specified destination voice.
+Pointer specifying the destination
Note??If the voice sends to a single target voice then specifying
Confirms the output channel count of the voice. This is the number of channels that are produced by the last effect in the chain.
Confirms the input channel count of the destination voice.
Array of [SourceChannels × DestinationChannels] volume levels sent to the destination voice. The level sent from source channel S to destination channel D is returned in the form pLevelMatrix[DestinationChannels × S + D]. See Remarks for more information on volume levels.
This method applies only to source and submix voices, because mastering voices write directly to the device with no matrix mixing. Volume levels are expressed as floating-point amplitude multipliers between -224 to 224, with a maximum gain of 144.5 dB. A volume level of 1 means there is no attenuation or gain and 0 means silence. Negative levels can be used to invert the audio's phase. See XAudio2 Volume and Pitch Control for additional information on volume control.
See
Note: GetOutputMatrix always returns the levels most recently set by
Destroys the voice. If necessary, stops the voice and removes it from the XAudio2 graph.
+If any other voice is currently sending audio to this voice, the method fails.
DestroyVoice waits for the audio processing thread to be idle, so it can take a little while (typically no more than a couple of milliseconds). This is necessary to guarantee that the voice will no longer make any callbacks or read any audio data, so the application can safely free up these resources as soon as the call returns.
To avoid title thread interruptions from a blocking DestroyVoice call, the application can destroy voices on a separate non-critical thread, or the application can use voice pooling strategies to reuse voices rather than destroying them. Note that voices can only be reused with audio that has the same data format and the same number of channels the voice was created with. A voice can play audio data with different sample rates than that of the voice by calling
It is invalid to call DestroyVoice from within a callback (that is,
Returns information about the creation flags, input channels, and sample rate of a voice.
+Designates a new set of submix or mastering voices to receive the output of the voice.
+This method is only valid for source and submix voices. Mastering voices can not send audio to another voice.
After calling SetOutputVoices a voice's current send levels will be replaced by a default send matrix. The
It is invalid to call SetOutputVoices from within a callback (that is,
Note: Calling SetOutputVoices invalidates any send matrices previously set with
Gets the voice's filter parameters.
+GetFilterParameters will fail if the voice was not created with the
GetFilterParameters always returns this voice's actual current filter parameters. However, these may not match the parameters set by the most recent
Note: GetFilterParameters is usable only on source and submix voices and has no effect on mastering voices.
+Gets the current overall volume level of the voice.
+Volume levels are expressed as floating-point amplitude multipliers between -2²⁴ and 2²⁴, with a maximum gain of 144.5 dB. A volume level of 1 means there is no attenuation or gain and 0 means silence. Negative levels can be used to invert the audio's phase. See XAudio2 Volume and Pitch Control for additional information on volume control.
Note: GetVolume always returns the volume most recently set by
Use a source voice to submit audio data to the XAudio2 processing pipeline.You must send voice data to a mastering voice to be heard, either directly or through intermediate submix voices. +
+Starts consumption and processing of audio by the voice. Delivers the result to any connected submix or mastering voices, or to the output device.
+Flags that control how the voice is started. Must be 0.
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
If the XAudio2 engine is stopped, the voice stops running. However, it remains in the started state, so that it starts running again as soon as the engine starts.
When first created, source voices are in the stopped state. Submix and mastering voices are in the started state.
After Start is called it has no further effect if called again before
Stops consumption of audio by the current voice.
+Flags that control how the voice is stopped. Can be 0 or the following:
Value | Description |
---|---|
Continue emitting effect output after the voice is stopped.? |
?
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
All source buffers that are queued on the voice and the current cursor position are preserved. This allows the voice to continue from where it left off, when it is restarted. The
By default, any pending output from voice effects—for example, reverb tails—is not played. Instead, the voice is immediately rendered silent. The
A voice stopped with the
Stop is always asynchronous, even if called within a callback.
Note: XAudio2 never calls any voice callbacks for a voice if the voice is stopped (even if it was stopped with
Adds a new audio buffer to the voice queue.
+ Pointer to an
Pointer to an additional
Returns
The voice processes and plays back the buffers in its queue in the order that they were submitted.
The
If the voice is started and has no buffers queued, the new buffer will start playing immediately. If the voice is stopped, the buffer is added to the voice's queue and will be played when the voice starts.
If only part of the given buffer should be played, the PlayBegin and PlayLength fields in the
If all or part of the buffer should be played in a continuous loop, the LoopBegin, LoopLength and LoopCount fields in
If an explicit play region is specified, it must begin and end within the given audio buffer (or, in the compressed case, within the set of samples that the buffer will decode to). In addition, the loop region cannot end past the end of the play region.
Xbox 360 |
---|
For certain audio formats, there may be additional restrictions on the valid endpoints of any play or loop regions; e.g. for XMA buffers, the regions can only begin or end at 128-sample boundaries in the decoded audio. + |
?
The pBuffer reference can be reused or freed immediately after calling this method, but the actual audio data referenced by pBuffer must remain valid until the buffer has been fully consumed by XAudio2 (which is indicated by the
Up to
SubmitSourceBuffer takes effect immediately when called from an XAudio2 callback with an OperationSet of
Xbox 360 |
---|
This method can be called from an Xbox system thread (most other XAudio2 methods cannot). However, a maximum of two source buffers can be submitted from a system thread at a time. |
?
+Removes all pending audio buffers from the voice queue.
+Returns
If the voice is started, the buffer that is currently playing is not removed from the queue.
FlushSourceBuffers can be called regardless of whether the voice is currently started or stopped.
For every buffer removed, an OnBufferEnd callback will be made, but none of the other per-buffer callbacks (OnBufferStart, OnStreamEnd or OnLoopEnd) will be made.
FlushSourceBuffers does not change the voice's running state, so if the voice was playing a buffer prior to the call, it will continue to do so, and will deliver all the callbacks for the buffer normally. This means that the OnBufferEnd callback for this buffer will take place after the OnBufferEnd callbacks for the buffers that were removed. Thus, an XAudio2 client that calls FlushSourceBuffers cannot expect to receive OnBufferEnd callbacks in the order in which the buffers were submitted.
No warnings for starvation of the buffer queue will be emitted when the currently playing buffer completes; it is assumed that the client has intentionally removed the buffers that followed it. However, there may be an audio pop if this buffer does not end at a zero crossing. If the application must ensure that the flush operation takes place while a specific buffer is playing—perhaps because the buffer ends with a zero crossing—it must call FlushSourceBuffers from a callback, so that it executes synchronously.
Calling FlushSourceBuffers after a voice is stopped and then submitting new data to the voice resets all of the voice's internal counters.
A voice's state is not considered reset after calling FlushSourceBuffers until the OnBufferEnd callback occurs (if a buffer was previously submitted) or
Notifies an XAudio2 voice that no more buffers are coming after the last one that is currently in its queue.
+Returns
Discontinuity suppresses the warnings that normally occur in the debug build of XAudio2 when a voice runs out of audio buffers to play. It is preferable to mark the final buffer of a stream by tagging it with the
Because calling Discontinuity is equivalent to applying the
Note: XAudio2 may consume its entire buffer queue and emit a warning before the Discontinuity call takes effect, so Discontinuity is not guaranteed to suppress the warnings.
+Stops looping the voice when it reaches the end of the current loop region.
+Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
If the cursor for the voice is not in a loop region, ExitLoop does nothing.
+Returns the voice's current cursor position data.
+ Pointer to an
If a client needs to get the correlated positions of several voices (for example, to know exactly which sample of a given voice is playing when a given sample of another voice is playing), it must make GetState calls in an XAudio2 engine callback. This ensures that none of the voices advance while the calls are being made. See the XAudio2 Callbacks overview for information about using XAudio2 callbacks.
Note that the DirectX SDK versions of XAUDIO2 do not take the Flags parameter for GetState.
+Sets the frequency adjustment ratio of the voice.
+Frequency adjustment ratio. This value must be between
Identifies this call as part of a deferred batch. See the XAudio2 Operation Sets overview for more information.
Returns
Frequency adjustment is expressed as source frequency / target frequency. Changing the frequency ratio changes the rate audio is played on the voice. A ratio greater than 1.0 will cause the audio to play faster and a ratio less than 1.0 will cause the audio to play slower. Additionally, the frequency ratio affects the pitch of audio on the voice. As an example, a value of 1.0 has no effect on the audio, whereas a value of 2.0 raises pitch by one octave and 0.5 lowers it by one octave.
If SetFrequencyRatio is called specifying a Ratio value outside the valid range, the method will set the frequency ratio to the nearest valid value. A warning also will be generated for debug builds.
Note??
Returns the frequency adjustment ratio of the voice.
+Returns the current frequency adjustment ratio if successful.
GetFrequencyRatio always returns the voice's actual current frequency ratio. However, this may not match the ratio set by the most recent
For information on frequency ratios, see
Reconfigures the voice to consume source data at a different sample rate than the rate specified when the voice was created.
+The new sample rate the voice should process submitted data at. Valid sample rates are 1kHz to 200kHz.
Returns
The SetSourceSampleRate method supports reuse of XAudio2 voices by allowing a voice to play sounds with a variety of sample rates. To use SetSourceSampleRate the voice must have been created without the
The typical use of SetSourceSampleRate is to support voice pooling. For example, to support voice pooling an application would precreate all the voices it expects to use. Whenever a new sound will be played the application chooses an inactive voice or, if all voices are busy, picks the least important voice and calls SetSourceSampleRate on the voice with the new sound's sample rate. After SetSourceSampleRate has been called on the voice, the application can immediately start submitting and playing buffers with the new sample rate. This allows the application to avoid the overhead of creating and destroying voices frequently during gameplay. +
+Returns the voice's current cursor position data.
+If a client needs to get the correlated positions of several voices (for example, to know exactly which sample of a given voice is playing when a given sample of another voice is playing), it must make GetState calls in an XAudio2 engine callback. This ensures that none of the voices advance while the calls are being made. See the XAudio2 Callbacks overview for information about using XAudio2 callbacks.
Note that the DirectX SDK versions of XAUDIO2 do not take the Flags parameter for GetState.
+Returns the frequency adjustment ratio of the voice.
+GetFrequencyRatio always returns the voice's actual current frequency ratio. However, this may not match the ratio set by the most recent
For information on frequency ratios, see
Reconfigures the voice to consume source data at a different sample rate than the rate specified when the voice was created.
+The SetSourceSampleRate method supports reuse of XAudio2 voices by allowing a voice to play sounds with a variety of sample rates. To use SetSourceSampleRate the voice must have been created without the
The typical use of SetSourceSampleRate is to support voice pooling. For example, to support voice pooling an application would precreate all the voices it expects to use. Whenever a new sound will be played the application chooses an inactive voice or, if all voices are busy, picks the least important voice and calls SetSourceSampleRate on the voice with the new sound's sample rate. After SetSourceSampleRate has been called on the voice, the application can immediately start submitting and playing buffers with the new sample rate. This allows the application to avoid the overhead of creating and destroying voices frequently during gameplay. +
+The
This interface should be implemented by the XAudio2 client. XAudio2 calls these methods through an interface reference provided by the client in the
See the XAudio2 Callbacks topic for restrictions on callback implementation.
+A submix voice is used primarily for performance improvements and effects processing.
+Data buffers cannot be submitted directly to submix voices and will not be audible unless submitted to a mastering voice. A submix voice can be used to ensure that a particular set of voice data is converted to the same format and/or to have a particular effect chain processed on the collective result. +
This is the only XAudio2 interface that is derived from the COM
The DirectX SDK versions of XAUDIO2 included three member functions that are not present in the Windows 8 version: GetDeviceCount, GetDeviceDetails, and Initialize. These enumeration methods are no longer provided and standard Windows Audio APIs should be used for device enumeration instead.
+This topic shows how to integrate X3DAudio with XAudio2. You can use X3DAudio to provide the volume and pitch values for XAudio2 voices and the parameters for the XAudio2 built in reverb effect. This topic assumes that you have created an audio graph as described in How to: Build a Basic Audio Processing Graph.
To initialize X3DAudio
Initialize X3DAudio by calling
The
Note: For the DirectX SDK versions of XAUDIO2, you will need to call
DWORD dwChannelMask; + pMasteringVoice->GetChannelMask( &dwChannelMask ); X3DAUDIO_HANDLE X3DInstance; +( dwChannelMask, X3DAUDIO_SPEED_OF_SOUND, X3DInstance ); +
Create instances of the
The
Members of the structures that will not be updated in a game loop should be initialized here. Most members of the structures can simply be initialized to zero. However, some members of
Listener = {0}; + Emitter = {0}; + Emitter.ChannelCount = 1; + Emitter.CurveDistanceScaler = FLT_MIN; +
Create an instance of the
The
DSPSettings = {0}; + FLOAT32 * matrix = new FLOAT32[deviceDetails.OutputFormat.Format.nChannels]; + DSPSettings.SrcChannelCount = 1; + DSPSettings.DstChannelCount = deviceDetails.OutputFormat.Format.nChannels; + DSPSettings.pMatrixCoefficients = matrix;
Note: For XAUDIO 2.8 in Windows 8 use
Perform these steps once every two to three frames to calculate new settings and apply them. In this example, a source voice is sending directly to the mastering voice and to a submix voice with a reverb effect applied to it.
To use X3DAudio to calculate and apply new 3D audio settings
Update the
Emitter.OrientFront = EmitterOrientFront; + Emitter.OrientTop = EmitterOrientTop; + Emitter.Position = EmitterPosition; + Emitter.Velocity = EmitterVelocity; + Listener.OrientFront = ListenerOrientFront; + Listener.OrientTop = ListenerOrientTop; + Listener.Position = ListenerPosition; + Listener.Velocity = ListenerVelocity; +
Call
The parameters for
(X3DInstance, &Listener, &Emitter, | | | , &DSPSettings );
Use
pSFXSourceVoice->SetOutputMatrix( pMasterVoice, 1, deviceDetails.OutputFormat.Format.nChannels, DSPSettings.pMatrixCoefficients ) ; + pSFXSourceVoice->SetFrequencyRatio(DSPSettings.DopplerFactor); +
Use
pSFXSourceVoice->SetOutputMatrix(pSubmixVoice, 1, 1, &DSPSettings.ReverbLevel); +
Use
FilterParameters = { LowPassFilter, 2.0f * sinf(X3DAUDIO_PI/6.0f * DSPSettings.LPFDirectCoefficient), 1.0f }; + pSFXSourceVoice->SetFilterParameters(&FilterParameters); +
Adds an
Returns
This method can be called multiple times, allowing different components or layers of the same application to manage their own engine callback implementations separately.
It is invalid to call RegisterForCallbacks from within a callback (that is,
Removes an
It is invalid to call UnregisterForCallbacks from within a callback (that is,
Creates and configures a source voice.
+If successful, returns a reference to the new
Pointer to a one of the structures in the table below. This structure contains the expected format for all audio buffers submitted to the source voice. XAudio2 supports PCM and ADPCM voice types.
Format tag | Wave format structure | Size (in bytes) |
---|---|---|
PCMWAVEFORMAT | 16 | |
-or- | | 18 |
PCMWAVEFORMAT | 18 | |
ADPCMWAVEFORMAT | 50 | |
| 40 |
?
XAudio2 supports the following PCM formats.
The number of channels in a source voice must be less than or equal to
Note: PCM data formats such as PCMWAVEFORMAT and ADPCMWAVEFORMAT that require more information than provided by
Flags that specify the behavior of the source voice. A flag can be 0 or a combination of one or more of the following:
Value | Description |
---|---|
No pitch control is available on the voice.? | |
No sample rate conversion is available on the voice. The voice's outputs must have the same sample rate. Note??The | |
The filter effect should be available on this voice.? |
?
Note: The
Highest allowable frequency ratio that can be set on this voice. The value for this argument must be between
If MaxFrequencyRatio is less than 1.0, the voice will use that ratio immediately after being created (rather than the default of 1.0).
Xbox 360 |
---|
For XMA voices, there is one more restriction on the MaxFrequencyRatio argument and the voice's sample rate. The product of these two numbers cannot exceed XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MONO for one-channel voices or XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MULTICHANNEL for voices with any other number of channels. If the value specified for MaxFrequencyRatio is too high for the specified format, the call to CreateSourceVoice fails and produces a debug message. |
?
Note: You can use the lowest possible MaxFrequencyRatio value to reduce XAudio2's memory usage.
Pointer to a client-provided callback interface,
Pointer to a list of
Pointer to a list of
Returns
See XAudio2 Error Codes for descriptions of XAudio2-specific error codes.
Source voices read audio data from the client. They process the data and send it to the XAudio2 processing graph.
A source voice includes a variable-rate sample rate conversion, to convert data from the source format sample rate to the output rate required for the voice send list. If you use a
You cannot create any source or submix voices until a mastering voice exists, and you cannot destroy a mastering voice if any source or submix voices still exist.
Source voices are always processed before any submix or mastering voices. This means that you do not need a ProcessingStage parameter to control the processing order.
When first created, source voices are in the stopped state.
XAudio2 uses an internal memory pooler for voices with the same format. This means memory allocation for voices will occur less frequently as more voices are created and then destroyed. To minimize just-in-time allocations, a title can create the anticipated maximum number of voices needed up front, and then delete them as necessary. Voices will then be reused from the XAudio2 pool. The memory pool is tied to an XAudio2 engine instance. You can reclaim all the memory used by an instance of the XAudio2 engine by destroying the XAudio2 object and recreating it as necessary (forcing the memory pool to grow via preallocation would have to be reapplied as needed).
It is invalid to call CreateSourceVoice from within a callback (that is,
The
Creates and configures a submix voice.
+On success, returns a reference to the new
Number of channels in the input audio data of the submix voice. InputChannels must be less than or equal to
Sample rate of the input audio data of submix voice. This rate must be a multiple of XAUDIO2_QUANTUM_DENOMINATOR. InputSampleRate must be between
Flags that specify the behavior of the submix voice. It can be 0 or the following:
Value | Description |
---|---|
The filter effect should be available on this voice. |
?
An arbitrary number that specifies when this voice is processed with respect to other submix voices, if the XAudio2 engine is running other submix voices. The voice is processed after all other voices that include a smaller ProcessingStage value and before all other voices that include a larger ProcessingStage value. Voices that include the same ProcessingStage value are processed in any order. A submix voice cannot send to another submix voice with a lower or equal ProcessingStage value. This prevents audio being lost due to a submix cycle.
Pointer to a list of
Pointer to a list of
Returns
See XAudio2 Error Codes for descriptions of XAudio2 specific error codes.
Submix voices receive the output of one or more source or submix voices. They process the output, and then send it to another submix voice or to a mastering voice.
A submix voice performs a sample rate conversion from the input sample rate to the input rate of its output voices in pSendList. If you specify multiple voice sends, they must all have the same input sample rate.
You cannot create any source or submix voices until a mastering voice exists, and you cannot destroy a mastering voice if any source or submix voices still exist.
When first created, submix voices are in the started state.
XAudio2 uses an internal memory pooler for voices with the same format. This means that memory allocation for voices will occur less frequently as more voices are created and then destroyed. To minimize just-in-time allocations, a title can create the anticipated maximum number of voices needed up front, and then delete them as necessary. Voices will then be reused from the XAudio2 pool. The memory pool is tied to an XAudio2 engine instance. You can reclaim all the memory used by an instance of the XAudio2 engine by destroying the XAudio2 object and recreating it as necessary (forcing the memory pool to grow via preallocation would have to be reapplied as needed).
It is invalid to call CreateSubmixVoice from within a callback (that is,
The
Creates and configures a mastering voice.
+ If successful, returns a reference to the new
Number of channels the mastering voice expects in its input audio. InputChannels must be less than or equal to
You can set InputChannels to
Sample rate of the input audio data of the mastering voice. This rate must be a multiple of XAUDIO2_QUANTUM_DENOMINATOR. InputSampleRate must be between
You can set InputSampleRate to
Windows XP defaults to 44100.
Windows Vista and Windows 7 default to the setting specified in the Sound Control Panel. The default for this setting is 44100 (or 48000 if required by the driver). Flags
Flags that specify the behavior of the mastering voice. Must be 0.
Identifier of the device to receive the output audio. Specifying the default value of
Pointer to an
Returns
See XAudio2 Error Codes for descriptions of XAudio2 specific error codes.
Mastering voices receive the output of one or more source or submix voices. They process the data, and send it to the audio output device.
Typically, you should create a mastering voice with an input sample rate that will be used by the majority of the title's audio content. The mastering voice performs a sample rate conversion from this input sample rate to the actual device output rate.
You cannot create any source or submix voices until a mastering voice exists. You cannot destroy a mastering voice if any source or submix voices still exist.
Mastering voices are always processed after all source and submix voices. This means that you need not specify a ProcessingStage parameter to control the processing order.
XAudio2 only allows one mastering voice to exist at once. If you attempt to create more than one voice,
When first created, mastering voices are in the started state.
It is invalid to call CreateMasteringVoice from within a callback (that is,
The
Note that the DirectX SDK XAUDIO2 version of CreateMasteringVoice took a DeviceIndex argument instead of a szDeviceId and a StreamCategory argument. This reflects the changes needed for the standard Windows device enumeration model.
+Starts the audio processing thread.
+Returns
After StartEngine is called, all started voices begin to consume audio. All enabled effects start running, and the resulting audio is sent to any connected output devices. When XAudio2 is first initialized, the engine is already in the started state.
It is invalid to call StartEngine from within a callback (that is,
Stops the audio processing thread.
+When StopEngine is called, all output is stopped immediately. However, the audio graph is left untouched, preserving effect parameters, effect histories (for example, the data stored by a reverb effect in order to emit echoes of a previous sound), voice states, pending source buffers, cursor positions, and so forth. When the engine is restarted, the resulting audio output will be identical—apart from a period of silence—to the output that would have been produced if the engine had never been stopped.
It is invalid to call StopEngine from within a callback (that is,
Atomically applies a set of operations that are tagged with a given identifier.
+Identifier of the set of operations to be applied. To commit all pending operations, pass
Returns
CommitChanges does nothing if no operations are tagged with the given identifier.
See the XAudio2 Operation Sets overview about working with CommitChanges and XAudio2 interface methods that may be deferred. +
+Returns current resource usage details, such as available memory or CPU usage.
+On success, reference to an
For specific information on the statistics returned by GetPerformanceData, see the
Changes global debug logging options for XAudio2.
+Pointer to a
SetDebugConfiguration sets the debug configuration for the given instance of XAudio2 engine. See
Returns current resource usage details, such as available memory or CPU usage.
+For specific information on the statistics returned by GetPerformanceData, see the
Used with
When streaming an xWMA file a few packets at a time,
In addition, when streaming an xWMA file a few packets at a time, the application should subtract pDecodedPacketCumulativeBytes[PacketCount-1] of the previous packet from all the entries of the currently submitted packet.
The members of
Memory allocated to hold a
XAUDIO 2.8 in Windows 8 does not support xWMA decoding. Use Windows Media Foundation APIs to perform the decoding from WMA to PCM instead. This functionality is available in the DirectX SDK versions of XAUDIO.
+Contains the new global debug configuration for XAudio2. Used with the SetDebugConfiguration function.
+Debugging messages can be completely turned off by initializing
Note??For this version of XAudio2, only the
Defines an effect chain.
+Number of effects in the effect chain for the voice.
Array of
Defines filter parameters for a source voice.
+Setting
FilterParams; + FilterParams.Frequency = 1.0f; + FilterParams.OneOverQ = 1.0f; + FilterParams.Type = LowPassFilter; +
The following formulas show the relationship between the members of
Yl( n ) = F1 yb( n ) + yl( n - 1 ) + Yb( n ) = F1 yh( n ) + yb( n - 1 ) + Yh( n ) = x( n ) - yl( n ) - OneOverQ(yb( n - 1 ) + Yn( n ) = Yl(n) + Yh(n)
Where:
Yl = lowpass output + Yb = bandpass output + Yh = highpass output + Yn = notch output + F1 =+.Frequency + OneOverQ = .OneOverQ
The
Filter radian frequency calculated as (2 * sin(pi * (desired filter cutoff frequency) / sampleRate)). The frequency must be greater than or equal to 0 and less than or equal to
Reciprocal of Q factor. Controls how quickly frequencies beyond Frequency are dampened. Larger values result in quicker dampening while smaller values cause dampening to occur more gradually. Must be greater than 0 and less than or equal to
Contains performance information.
+CPU cycles are recorded using . Use to convert these values.
+CPU cycles spent on audio processing since the last call to the
Total CPU cycles elapsed since the last call.
Note: This only counts cycles on the CPU on which XAudio2 is running.
Fewest CPU cycles spent on processing any single audio quantum since the last call.
Most CPU cycles spent on processing any single audio quantum since the last call.
Total memory currently in use.
Minimum delay that occurs between the time a sample is read from a source buffer and the time it reaches the speakers.
Windows |
---|
The delay reported is a variable value equal to the rough distance between the last sample submitted to the driver by XAudio2 and the sample currently playing. The following factors can affect the delay: playing multichannel audio on a hardware-accelerated device; the type of audio device (WavePci, WaveCyclic, or WaveRT); and, to a lesser extent, audio hardware implementation. + |
?
Xbox 360 |
---|
The delay reported is a fixed value, which is normally 1,024 samples (21.333 ms at 48 kHz). If XOverrideSpeakerConfig has been called using the XAUDIOSPEAKERCONFIG_LOW_LATENCY flag, the delay reported is 512 samples (10.667 ms at 48 kHz). + |
?
Total audio dropouts since the engine started.
Number of source voices currently playing.
Total number of source voices currently in existence.
Number of submix voices currently playing.
Number of resampler xAPOs currently active.
Number of matrix mix xAPOs currently active.
Windows |
---|
Unsupported. |
?
Xbox 360 |
---|
Number of source voices decoding XMA data. |
?
Windows |
---|
Unsupported. |
?
Xbox 360 |
---|
A voice can use more than one XMA stream. |
?
Contains information about the creation flags, input channels, and sample rate of a voice.
+Note the DirectX SDK versions of XAUDIO2 do not support the ActiveFlags member.
+Flags used to create the voice; see the individual voice interfaces for more information.
The number of input channels the voice expects.
The input sample rate the voice expects.
Defines a destination voice that is the target of a send from another voice and specifies whether a filter should be used.
+Indicates whether a filter should be used on data sent to the voice pointed to by pOutputVoice. Flags can be 0 or
A reference to an
Defines a set of voices to receive data from a single output voice.
+If pSends is not
Setting SendCount to 0 is useful for certain effects such as volume meters or file writers that don't generate any audio output to pass on to another voice.
If needed, a voice will perform a single sample rate conversion, from the voice's input sample rate to the input sample rate of the voice's output voices. Because only one sample rate conversion will be performed, all the voice's output voices must have the same input sample rate. If the input sample rates of the voice and its output voices are the same, no sample rate conversion is performed. +
+Number of voices to receive the output of the voice. An OutputCount value of 0 indicates the voice should not send output to any voices.
Array of
Returns the voice's current state and cursor position data.
+For all encoded formats, including constant bit rate (CBR) formats such as adaptive differential pulse code modulation (ADPCM), SamplesPlayed is expressed in terms of decoded samples. For pulse code modulation (PCM) formats, SamplesPlayed is expressed in terms of either input or output samples. There is a one-to-one mapping from input to output for PCM formats.
If a client needs to get the correlated positions of several voices—that is, to know exactly which sample of a particular voice is playing when a specified sample of another voice is playing—it must make the
Pointer to a buffer context provided in the
Number of audio buffers currently queued on the voice, including the one that is processed currently.
Total number of samples processed by this voice since it last started, or since the last audio stream ended (as marked with the
Calculates DSP settings with respect to 3D parameters.
+3D audio instance handle. Call
Pointer to an
Pointer to an
Value | Description |
---|---|
Enables matrix coefficient table calculation.? | |
Enables delay time array calculation (stereo only).? | |
Enables low pass filter (LPF) direct-path coefficient calculation.? | |
Enables LPF reverb-path coefficient calculation.? | |
Enables reverb send level calculation.? | |
Enables Doppler shift factor calculation.? | |
Enables emitter-to-listener interior angle calculation.? | |
Fills the center channel with silence. This flag allows you to keep a 6-channel matrix so you do not have to remap the channels, but the center channel will be silent. This flag is only valid if you also set | |
Applies an equal mix of all source channels to a low frequency effect (LFE) destination channel. It only applies to matrix calculations with a source that does not have an LFE channel and a destination that does have an LFE channel. This flag is only valid if you also set |
?
Pointer to an
You typically call
Important: The listener and emitter values must be valid. Floating-point specials (NaN, QNaN, +INF, -INF) can cause the entire audio output to go silent if introduced into a running audio graph.
+Sets all global 3D audio constants.
+Assignment of channels to speaker positions. This value must not be zero. The only permissible value on Xbox 360 is SPEAKER_XBOX.
Speed of sound, in user-defined world units per second. Use this value only for Doppler calculations. It must be greater than or equal to FLT_MIN.
3D audio instance handle. Use this handle when you call
X3DAUDIO_HANDLE is an opaque data structure. Because the operating system doesn't allocate any additional storage for the 3D audio instance handle, you don't need to free or close it.
+Specifies directionality for a single-channel non-LFE emitter by scaling DSP behavior with respect to the emitter's orientation.
+For a detailed explanation of sound cones see Sound Cones.
+Inner cone angle in radians. This value must be within 0.0f to X3DAUDIO_2PI.
Outer cone angle in radians. This value must be within InnerAngle to X3DAUDIO_2PI.
Volume scaler on/within inner cone. This value must be within 0.0f to 2.0f.
Volume scaler on/beyond outer cone. This value must be within 0.0f to 2.0f.
LPF direct-path or reverb-path coefficient scaler on/within inner cone. This value is only used for LPF calculations and must be within 0.0f to 1.0f.
LPF direct-path or reverb-path coefficient scaler on or beyond outer cone. This value is only used for LPF calculations and must be within 0.0f to 1.0f.
Reverb send level scaler on or within inner cone. This must be within 0.0f to 2.0f.
Reverb send level scaler on/beyond outer cone. This must be within 0.0f to 2.0f. +
Defines a DSP setting at a given normalized distance.
+Normalized distance. This must be within 0.0f to 1.0f.
DSP control setting.
Defines an explicit piecewise curve made up of linear segments, directly defining DSP behavior with respect to normalized distance.
+
Number of distance curve points. There must be two or more points since all curves must have at least two endpoints defining values at 0.0f and 1.0f normalized distance, respectively.
Receives the results from a call to
The following members must be initialized before passing this structure to the
The following members are returned by passing this structure to the
Note??For pMatrixCoefficients and pDelayTimes,
Defines a single-point or multiple-point 3D audio source that is used with an arbitrary number of sound channels.
+The parameter type
X3DAudio uses a left-handed Cartesian coordinate system, with values on the x-axis increasing from left to right, on the y-axis from bottom to top, and on the z-axis from near to far. Azimuths are measured clockwise from a given reference direction.
For user-defined distance curves, the distance field of the first point must be 0.0f and the distance field of the last point must be 1.0f.
If an emitter moves beyond a distance of (CurveDistanceScaler × 1.0f), the last point on the curve is used to compute the volume output level. The last point is determined by the following: +
+.pPoints[PointCount-1].DSPSetting)
Pointer to a sound cone. Used only with single-channel emitters for matrix, LPF (both direct and reverb paths), and reverb calculations.
Orientation of the front direction. This value must be orthonormal with OrientTop. OrientFront must be normalized when used. For single-channel emitters without cones OrientFront is only used for emitter angle calculations. For multi-channel emitters or single-channel with cones OrientFront is used for matrix, LPF (both direct and reverb paths), and reverb calculations.
Orientation of the top direction. This value must be orthonormal with OrientFront. OrientTop is only used with multi-channel emitters for matrix calculations.
Position in user-defined world units. This value does not affect Velocity.
Velocity vector in user-defined world units/second. This value is used only for Doppler calculations. It does not affect Position. +
Value to be used for the inner radius calculations. If InnerRadius is 0, then no inner radius is used, but InnerRadiusAngle may still be used. This value must be between 0.0f and MAX_FLT. +
Value to be used for the inner radius angle calculations. This value must be between 0.0f and X3DAUDIO_PI/4.0.
Number of emitters defined by the
Distance from Position that channels will be placed if ChannelCount is greater than 1. ChannelRadius is only used with multi-channel emitters for matrix calculations. Must be greater than or equal to 0.0f.
Table of channel positions, expressed as an azimuth in radians along the channel radius with respect to the front orientation vector in the plane orthogonal to the top orientation vector. An azimuth of X3DAUDIO_2PI specifies a channel is a low-frequency effects (LFE) channel. LFE channels are positioned at the emitter base and are calculated with respect to pLFECurve only, never pVolumeCurve. pChannelAzimuths must have at least ChannelCount elements, but can be
Volume-level distance curve, which is used only for matrix calculations.
LFE roll-off distance curve, or
Low-pass filter (LPF) direct-path coefficient distance curve, or
LPF reverb-path coefficient distance curve, or
Reverb send level distance curve, or
Curve distance scaler that is used to scale normalized distance curves to user-defined world units, and/or to exaggerate their effect. This does not affect any other calculations. The value must be within the range FLT_MIN to FLT_MAX. CurveDistanceScaler is only used for matrix, LPF (both direct and reverb paths), and reverb calculations.
Doppler shift scaler that is used to exaggerate Doppler shift effect. DopplerScaler is only used for Doppler calculations and does not affect any other calculations. The value must be within the range 0.0f to FLT_MAX.
Defines a point of 3D audio reception.
+The parameter type
A listener's front and top vectors must be orthonormal. To be considered orthonormal, a pair of vectors must have a magnitude of 1 ± 1×10⁻⁵ and a dot product of 0 ± 1×10⁻⁵.
+Orientation of front direction. When pCone is
Orientation of top direction, used only for matrix and delay calculations. This value must be orthonormal with OrientFront when used.
Position in user-defined world units. This value does not affect Velocity.
Velocity vector in user-defined world units per second, used only for Doppler calculations. This value does not affect Position.
Pointer to an
Describes the contents of a stream buffer.
+This metadata can be used to implement optimizations that require knowledge of a stream buffer's contents. For example, XAPOs that always produce silent output from silent input can check the flag on the input stream buffer to determine if any signal processing is necessary. If silent, the XAPO can simply set the flag on the output stream buffer to silent and return, thus averting the work of processing silent data.
Likewise, XAPOs that receive valid input data, but generate silence (for any reason), may set the output stream buffer's flag accordingly, rather than writing silent samples to the buffer.
These flags represent what should be assumed is in the respective buffer. The flags may not reflect what is actually stored in memory. For example, the
Stream buffer contains only silent samples.
Stream buffer contains audio data to be processed.
Creates an instance of the requested XAPOFX effect.
+If this function succeeds, it returns
The created XAPO will have a reference count of 1. Client code must call IUnknown::Release after passing the XAPO to XAudio2 to allow XAudio2 to dispose of the XAPO when it is no longer needed. Use
Note: The DirectX SDK version of this function doesn't have the pInitData or InitDataByteSize parameters as it only takes the first 2 parameters. To set initial parameters for the XAPOFX effect that is created with the DirectX SDK version of this function, you must bind that effect to a voice and use
Parameters for use with the FXECHO XAPOFX.
+Echo only supports FLOAT32 audio formats.
+Parameters for use with the FXEQ XAPO.
+Each band ranges from FrequencyCenterN - (BandwidthN / 2) to FrequencyCenterN + (BandwidthN / 2).
+Center frequency in Hz for band 0. Must be between
The boost or decrease to frequencies in band 0. Must be between
Width of band 0. Must be between
Center frequency in Hz for band 1. Must be between
The boost or decrease to frequencies in band 1. Must be between
Width of band 1. Must be between
Center frequency in Hz for band 2. Must be between
The boost or decrease to frequencies in band 2. Must be between
Width of band 2. Must be between
Center frequency in Hz for band 3. Must be between
The boost or decrease to frequencies in band 3. Must be between
Width of band 3. Must be between
Parameters for use with the FXMasteringLimiter XAPO.
+Parameters for use with the FXReverb XAPO.
+Controls the character of the individual wall reflections. Set to minimum value to simulate a hard flat surface and to maximum value to simulate a diffuse surface.Value must be between
Size of the room. Value must be between
An optional interface that allows an XAPO to use effect-specific parameters.
+An optional interface that allows an XAPO to use effect-specific parameters.
+Sets effect-specific parameters.
+Effect-specific parameter block.
Size of pParameters, in bytes.
The data in pParameters is completely effect-specific and determined by the implementation of the
SetParameters can only be called on the real-time audio processing thread; no synchronization between SetParameters and the
Gets the current values for any effect-specific parameters.
+Receives an effect-specific parameter block.
Size of pParameters, in bytes.
The data in pParameters is completely effect-specific and determined by the implementation of the
Unlike SetParameters, XAudio2 does not call this method on the realtime audio processing thread. Thus, the XAPO must protect variables shared with
XAudio2 calls this method from the
This method may block and should never be called from the realtime audio processing thread; instead, get the current parameters from CXAPOParametersBase::BeginProcess.
+Defines stream buffer parameters that may change from one call to the next. Used with the Process method.
+Although the format and maximum size values of a particular stream buffer are constant, as defined by the
Note??Only constant-bit-rate XAPOs are currently supported.
+Defines stream buffer parameters that remain constant while an XAPO is locked. Used with the
The byte size of the respective stream buffer must be at least MaxFrameCount × (pFormat->nBlockAlign) bytes.
+Describes general characteristics of an XAPO. Used with
Describes the current state of the Xbox 360 Controller.
+This structure is used by the
The specific mapping of button to game function varies depending on the game type.
The constant XINPUT_GAMEPAD_TRIGGER_THRESHOLD may be used as the value which bLeftTrigger and bRightTrigger must be greater than to register as pressed. This is optional, but often desirable. Xbox 360 Controller buttons do not manifest crosstalk. +
+Bitmask of the device digital buttons, as follows. A set bit indicates that the corresponding button is pressed.
Device button | Bitmask |
---|---|
0x0001 | |
0x0002 | |
0x0004 | |
0x0008 | |
0x0010 | |
0x0020 | |
0x0040 | |
0x0080 | |
0x0100 | |
0x0200 | |
0x1000 | |
0x2000 | |
0x4000 | |
0x8000 |
?
Bits that are set but not defined above are reserved, and their state is undefined.
The current value of the left trigger analog control. The value is between 0 and 255.
The current value of the right trigger analog control. The value is between 0 and 255.
Left thumbstick x-axis value. Each of the thumbstick axis members is a signed value between -32768 and 32767 describing the position of the thumbstick. A value of 0 is centered. Negative values signify down or to the left. Positive values signify up or to the right. The constants
Left thumbstick y-axis value. The value is between -32768 and 32767.
Right thumbstick x-axis value. The value is between -32768 and 32767.
Right thumbstick y-axis value. The value is between -32768 and 32767.
Retrieves the battery type and charge status of a wireless controller.
+Index of the signed-in gamer associated with the device. Can be a value in the range 0 to XUSER_MAX_COUNT − 1.
Specifies which device associated with this user index should be queried. Must be
Contains information on battery type and charge state.
+The type of battery. BatteryType will be one of the following values.
Value | Description |
---|---|
The device is not connected.? | |
The device is a wired device and does not have a battery.? | |
The device has an alkaline battery.? | |
The device has a nickel metal hydride battery.? | |
The device has an unknown battery type.? |
?
The charge state of the battery. This value is only valid for wireless devices with a known battery type. BatteryLevel will be one of the following values.
Value |
---|
?
Describes the capabilities of a connected controller. The
The SubType member indicates the specific subtype of controller present. Games may detect the controller subtype and tune their handling of controller input or output based on subtypes that are well suited to their game genre. For example, a car racing game might check for the presence of a wheel controller to provide finer control of the car being driven. However, titles must not disable or ignore a device based on its subtype. Subtypes not recognized by the game or for which the game is not specifically tuned should be treated as a standard Xbox 360 Controller (
Older XUSB Windows drivers report incomplete capabilities information, particularly for wireless devices. The latest XUSB Windows driver provides full support for wired and wireless devices, and more complete and accurate capabilities flags.
+A table of controller subtypes available in XInput.
+Describes the capabilities of a connected controller. The
The SubType member indicates the specific subtype of controller present. Games may detect the controller subtype and tune their handling of controller input or output based on subtypes that are well suited to their game genre. For example, a car racing game might check for the presence of a wheel controller to provide finer control of the car being driven. However, titles must not disable or ignore a device based on its subtype. Subtypes not recognized by the game or for which the game is not specifically tuned should be treated as a standard Xbox 360 Controller (
Older XUSB Windows drivers report incomplete capabilities information, particularly for wireless devices. The latest XUSB Windows driver provides full support for wired and wireless devices, and more complete and accurate capabilities flags.
+Describes the current state of the Xbox 360 Controller.
+This structure is used by the
The specific mapping of button to game function varies depending on the game type.
The constant XINPUT_GAMEPAD_TRIGGER_THRESHOLD may be used as the value which bLeftTrigger and bRightTrigger must be greater than to register as pressed. This is optional, but often desirable. Xbox 360 Controller buttons do not manifest crosstalk. +
+Bitmask of the device digital buttons, as follows. A set bit indicates that the corresponding button is pressed.
Device button | Bitmask |
---|---|
0x0001 | |
0x0002 | |
0x0004 | |
0x0008 | |
0x0010 | |
0x0020 | |
0x0040 | |
0x0080 | |
0x0100 | |
0x0200 | |
0x1000 | |
0x2000 | |
0x4000 | |
0x8000 |
?
Bits that are set but not defined above are reserved, and their state is undefined.
The current value of the left trigger analog control. The value is between 0 and 255.
The current value of the right trigger analog control. The value is between 0 and 255.
Left thumbstick x-axis value. Each of the thumbstick axis members is a signed value between -32768 and 32767 describing the position of the thumbstick. A value of 0 is centered. Negative values signify down or to the left. Positive values signify up or to the right. The constants
Left thumbstick y-axis value. The value is between -32768 and 32767.
Right thumbstick x-axis value. The value is between -32768 and 32767.
Right thumbstick y-axis value. The value is between -32768 and 32767.
Specifies keystroke data returned by
Future devices may return HID codes and virtual key values that are not supported on current devices, and are currently undefined. Applications should ignore these unexpected values.
A virtual-key code is a byte value that represents a particular physical key on the keyboard, not the character or characters (possibly none) that the key can be mapped to based on keyboard state. The keyboard state at the time a virtual key is pressed modifies the character reported. For example, VK_4 might represent a "4" or a "$", depending on the state of the SHIFT key.
A reported keyboard event includes the virtual key that caused the event, whether the key was pressed or released (or is repeating), and the state of the keyboard at the time of the event. The keyboard state includes information about whether any CTRL, ALT, or SHIFT keys are down.
If the keyboard event represents a Unicode character (for example, pressing the "A" key), the Unicode member will contain that character. Otherwise, Unicode will contain the value zero.
The valid virtual-key (VK_xxx) codes are defined in XInput.h. In addition to codes that indicate key presses, the following codes indicate controller input.
Value | Description |
---|---|
A button? | |
B button? | |
X button? | |
Y button? | |
Right shoulder button? | |
Left shoulder button? | |
Left trigger? | |
Right trigger? | |
Directional pad up? | |
Directional pad down? | |
Directional pad left? | |
Directional pad right? | |
START button? | |
BACK button? | |
Left thumbstick click? | |
Right thumbstick click? | |
Left thumbstick up? | |
Left thumbstick down? | |
Left thumbstick right? | |
Left thumbstick left? | |
Left thumbstick up and left? | |
Left thumbstick up and right? | |
Left thumbstick down and right? | |
Left thumbstick down and left? | |
Right thumbstick up? | |
Right thumbstick down? | |
Right thumbstick right? | |
Right thumbstick left? | |
Right thumbstick up and left? | |
Right thumbstick up and right? | |
Right thumbstick down and right? | |
Right thumbstick down and left? |
?
+Specifies keystroke data returned by
Future devices may return HID codes and virtual key values that are not supported on current devices, and are currently undefined. Applications should ignore these unexpected values.
A virtual-key code is a byte value that represents a particular physical key on the keyboard, not the character or characters (possibly none) that the key can be mapped to based on keyboard state. The keyboard state at the time a virtual key is pressed modifies the character reported. For example, VK_4 might represent a "4" or a "$", depending on the state of the SHIFT key.
A reported keyboard event includes the virtual key that caused the event, whether the key was pressed or released (or is repeating), and the state of the keyboard at the time of the event. The keyboard state includes information about whether any CTRL, ALT, or SHIFT keys are down.
If the keyboard event represents a Unicode character (for example, pressing the "A" key), the Unicode member will contain that character. Otherwise, Unicode will contain the value zero.
The valid virtual-key (VK_xxx) codes are defined in XInput.h. In addition to codes that indicate key presses, the following codes indicate controller input.
Value | Description |
---|---|
A button? | |
B button? | |
X button? | |
Y button? | |
Right shoulder button? | |
Left shoulder button? | |
Left trigger? | |
Right trigger? | |
Directional pad up? | |
Directional pad down? | |
Directional pad left? | |
Directional pad right? | |
START button? | |
BACK button? | |
Left thumbstick click? | |
Right thumbstick click? | |
Left thumbstick up? | |
Left thumbstick down? | |
Left thumbstick right? | |
Left thumbstick left? | |
Left thumbstick up and left? | |
Left thumbstick up and right? | |
Left thumbstick down and right? | |
Left thumbstick down and left? | |
Right thumbstick up? | |
Right thumbstick down? | |
Right thumbstick right? | |
Right thumbstick left? | |
Right thumbstick up and left? | |
Right thumbstick up and right? | |
Right thumbstick down and right? | |
Right thumbstick down and left? |
?
+Retrieves a gamepad input event.
+Wireless controllers are not considered active upon system startup, and calls to any of the XInput functions before a wireless controller is made active return
[in] Index of the signed-in gamer associated with the device. Can be a value in the range 0 to XUSER_MAX_COUNT − 1, or
[in] Reserved
[out] Pointer to an
Retrieves a gamepad input event.
+[in] Index of the signed-in gamer associated with the device. Can be a value in the range 0 to XUSER_MAX_COUNT − 1, or
[in] Reserved
[out] Pointer to an
If the function succeeds, the return value is
If no new keys have been pressed, the return value is
If the controller is not connected or the user has not activated it, the return value is
If the function fails, the return value is an error code defined in Winerror.h. The function does not use SetLastError to set the calling thread's last-error code.
Wireless controllers are not considered active upon system startup, and calls to any of the XInput functions before a wireless controller is made active return
Retrieves the battery type and charge status of a wireless controller.
+Index of the signed-in gamer associated with the device. Can be a value in the range 0 to XUSER_MAX_COUNT − 1.
Specifies which device associated with this user index should be queried. Must be
Pointer to an
If the function succeeds, the return value is
Sends data to a connected controller. This function is used to activate the vibration function of a controller.
+Index of the user's controller. Can be a value from 0 to 3. For information about how this value is determined and how the value maps to indicators on the controller, see Multiple Controllers.
Pointer to an
If the function succeeds, the return value is
If the controller is not connected, the return value is
If the function fails, the return value is an error code defined in WinError.h. The function does not use SetLastError to set the calling thread's last-error code.
Gets the sound rendering and sound capture device GUIDs that are associated with the headset connected to the specified controller.
+[in] Index of the user's controller. It can be a value in the range 0 to 3. For information about how this value is determined and how the value maps to indicators on the controller, see Multiple Controllers.
[out] Pointer that receives the
[out] Pointer that receives the
If the function successfully retrieves the device IDs for render and capture, the return code is
If there is no headset connected to the controller, the function also retrieves
If the controller port device is not physically connected, the function returns
If the function fails, it returns a valid Win32 error code.
Use of legacy DirectSound is not recommended, and DirectSound is not available for Windows Store apps.
Note??
Retrieves the current state of the specified controller.
+Index of the user's controller. Can be a value from 0 to 3. For information about how this value is determined and how the value maps to indicators on the controller, see Multiple Controllers.
Pointer to an
If the function succeeds, the return value is
If the controller is not connected, the return value is
If the function fails, the return value is an error code defined in Winerror.h. The function does not use SetLastError to set the calling thread's last-error code.
When
Sets the reporting state of XInput.
+If enable is
This function is meant to be called when an application gains or loses focus (such as via WM_ACTIVATEAPP). Using this function, you will not have to change the XInput query loop in your application as neutral data will always be reported if XInput is disabled.
In a controller that supports vibration effects:
Retrieves the capabilities and features of a connected controller.
+Index of the user's controller. Can be a value in the range 0 to 3. For information about how this value is determined and how the value maps to indicators on the controller, see Multiple Controllers.
Input flags that identify the controller type. If this value is 0, then the capabilities of all controllers connected to the system are returned. Currently, only one value is supported:
Value | Description |
---|---|
Limit query to devices of Xbox 360 Controller type. |
?
Any value of dwFlags other than the above or 0 is illegal and will result in an error break when debugging.
Pointer to an
If the function succeeds, the return value is
If the controller is not connected, the return value is
If the function fails, the return value is an error code defined in WinError.h. The function does not use SetLastError to set the calling thread's last-error code.
Note: The legacy XINPUT 9.1.0 version (included in Windows Vista and later) always returned a fixed set of capabilities regardless of attached device.
+Contains information on battery type and charge state.
+The type of battery. BatteryType will be one of the following values.
Value | Description |
---|---|
The device is not connected.? | |
The device is a wired device and does not have a battery.? | |
The device has an alkaline battery.? | |
The device has a nickel metal hydride battery.? | |
The device has an unknown battery type.? |
?
The charge state of the battery. This value is only valid for wireless devices with a known battery type. BatteryLevel will be one of the following values.
Value |
---|
?
Describes the capabilities of a connected controller. The
The SubType member indicates the specific subtype of controller present. Games may detect the controller subtype and tune their handling of controller input or output based on subtypes that are well suited to their game genre. For example, a car racing game might check for the presence of a wheel controller to provide finer control of the car being driven. However, titles must not disable or ignore a device based on its subtype. Subtypes not recognized by the game or for which the game is not specifically tuned should be treated as a standard Xbox 360 Controller (
Older XUSB Windows drivers report incomplete capabilities information, particularly for wireless devices. The latest XUSB Windows driver provides full support for wired and wireless devices, and more complete and accurate capabilities flags.
+Specifies keystroke data returned by
Future devices may return HID codes and virtual key values that are not supported on current devices, and are currently undefined. Applications should ignore these unexpected values.
A virtual-key code is a byte value that represents a particular physical key on the keyboard, not the character or characters (possibly none) that the key can be mapped to based on keyboard state. The keyboard state at the time a virtual key is pressed modifies the character reported. For example, VK_4 might represent a "4" or a "$", depending on the state of the SHIFT key.
A reported keyboard event includes the virtual key that caused the event, whether the key was pressed or released (or is repeating), and the state of the keyboard at the time of the event. The keyboard state includes information about whether any CTRL, ALT, or SHIFT keys are down.
If the keyboard event represents an Unicode character (for example, pressing the "A" key), the Unicode member will contain that character. Otherwise, Unicode will contain the value zero.
The valid virtual-key (VK_xxx) codes are defined in XInput.h. In addition to codes that indicate key presses, the following codes indicate controller input.
Value | Description |
---|---|
A button? | |
B button? | |
X button? | |
Y button? | |
Right shoulder button? | |
Left shoulder button? | |
Left trigger? | |
Right trigger? | |
Directional pad up? | |
Directional pad down? | |
Directional pad left? | |
Directional pad right? | |
START button? | |
BACK button? | |
Left thumbstick click? | |
Right thumbstick click? | |
Left thumbstick up? | |
Left thumbstick down? | |
Left thumbstick right? | |
Left thumbstick left? | |
Left thumbstick up and left? | |
Left thumbstick up and right? | |
Left thumbstick down and right? | |
Left thumbstick down and left? | |
Right thumbstick up? | |
Right thumbstick down? | |
Right thumbstick right? | |
Right thumbstick left? | |
Right thumbstick up and left? | |
Right thumbstick up and right? | |
Right thumbstick down and right? | |
Right thumbstick down and left? |
?
+Represents the state of a controller.
+The dwPacketNumber member is incremented only if the status of the controller has changed since the controller was last polled.
+State packet number. The packet number indicates whether there have been any changes in the state of the controller. If the dwPacketNumber member is the same in sequentially returned
Specifies motor speed levels for the vibration function of a controller.
+The left motor is the low-frequency rumble motor. The right motor is the high-frequency rumble motor. The two motors are not the same, and they create different vibration effects.
+Speed of the left motor. Valid values are in the range 0 to 65,535. Zero signifies no motor use; 65,535 signifies 100 percent motor use.
Speed of the right motor. Valid values are in the range 0 to 65,535. Zero signifies no motor use; 65,535 signifies 100 percent motor use.
s = det([o_2 - o_1, d_2, d_1 x d_2]) / ||d_1 x d_2||^2
+ t = det([o_2 - o_1, d_1, d_1 x d_2]) / ||d_1 x d_2||^2
+ Where o_1 is the position of the first ray, o_2 is the position of the second ray,
+ d_1 is the normalized direction of the first ray, d_2 is the normalized direction
+ of the second ray, det denotes the determinant of a matrix, x denotes the cross
+ product, [ ] denotes a matrix, and || || denotes the length or magnitude of a vector.
+ Driver type options.
+The driver type is required when calling
The driver type is unknown.
A hardware driver, which implements Direct3D features in hardware. This is the primary driver that you should use in your Direct3D applications because it provides the best performance. A hardware driver uses hardware acceleration (on supported hardware) but can also use software for parts of the pipeline that are not supported in hardware. This driver type is often referred to as a hardware abstraction layer or HAL.
A reference driver, which is a software implementation that supports every Direct3D feature. A reference driver is designed for accuracy rather than speed and as a result is slow but accurate. The rasterizer portion of the driver does make use of special CPU instructions whenever it can, but it is not intended for retail applications; use it only for feature testing, demonstration of functionality, debugging, or verifying bugs in other drivers. The reference device for this driver is installed by the Windows SDK 8.0 or later and is intended only as a debug aid for development purposes. This driver may be referred to as a REF driver, a reference driver, or a reference rasterizer.
Note: When you use the REF driver in Windows Store apps, the REF driver renders correctly but doesn't display any output on the screen. To verify bugs in hardware drivers for Windows Store apps, use
A
A software driver, which is a driver implemented completely in software. The software implementation is not intended for a high-performance application due to its very slow performance.
A WARP driver, which is a high-performance software rasterizer. The rasterizer supports feature levels 9_1 through level 10_1 with a high performance software implementation. For information about limitations creating a WARP device on certain feature levels, see Limitations Creating WARP and Reference Devices. For more information about using a WARP driver, see Windows Advanced Rasterization Platform (WARP) In-Depth Guide.
Note: The WARP driver that Windows 8 includes supports feature levels 9_1 through level 11_1.
Note: The WARP driver that Windows 8.1 includes fully supports feature level 11_1, including tiled resources, IDXGIDevice3::Trim, shared BCn surfaces, minblend, and map default.
Describes the set of features targeted by a Direct3D device.
+For an overview of the capabilities of each feature level, see Overview For Each Feature Level.
For information about limitations creating nonhardware-type devices on certain feature levels, see Limitations Creating WARP and Reference Devices.
+Targets features supported by feature level 9.1 including shader model 2.
Targets features supported by feature level 9.2 including shader model 2.
Targets features supported by feature level 9.3 including shader model 2.0b.
Targets features supported by Direct3D 10.0 including shader model 4.
Targets features supported by Direct3D 10.1 including shader model 4.
Targets features supported by Direct3D 11.0 including shader model 5.
Values that indicate how the pipeline interprets vertex data that is bound to the input-assembler stage. These primitive topology values determine how the vertex data is rendered on screen.
+Use the
The following diagram shows the various primitive types for a geometry shader object.
+Values that identify the type of resource to be viewed as a shader resource.
+A
The type is unknown.
The resource is a buffer.
The resource is a 1D texture.
The resource is an array of 1D textures.
The resource is a 2D texture.
The resource is an array of 2D textures.
The resource is a multisampling 2D texture.
The resource is an array of multisampling 2D textures.
The resource is a 3D texture.
The resource is a cube texture.
The resource is an array of cube textures.
The resource is a raw buffer. For more info about raw viewing of buffers, see Raw Views of Buffers.
Creates a buffer.
+Number of bytes in the blob.
The address of a reference to the ID3DBlob interface that is used to retrieve the buffer.
Returns one of the Direct3D 11 return codes.
The latest D3dcompiler_nn.dll contains the
Windows Phone 8.1: This API is supported.
+This interface is used to return arbitrary length data.
+An
The ID3DBlob interface is type defined in the D3DCommon.h header file as a
Blobs can be used as a data buffer, storing vertex, adjacency, and material information during mesh optimization and loading operations. Also, these objects are used to return object code and error messages in APIs that compile vertex, geometry and pixel shaders.
+Get a reference to the data.
+Returns a reference.
Get the size.
+The size of the data, in bytes.
Get a reference to the data.
+Get the size.
+A multithread interface accesses multithread settings and can only be used if the thread-safe layer is turned on.
+This interface is obtained by querying it from the
Enter a device's critical section.
+Entering a device's critical section prevents other threads from simultaneously calling that device's methods (if multithread protection is set to true), calling DXGI methods, and calling the methods of all resource, view, shader, state, and asynchronous interfaces.
This function should be used in multithreaded applications when there is a series of graphics commands that must happen in order. This function is typically called at the beginning of the series of graphics commands, and
Leave a device's critical section.
+This function is typically used in multithreaded applications when there is a series of graphics commands that must happen in order.
Turn multithreading on or off.
+True to turn multithreading on, false to turn it off.
True if multithreading was turned on prior to calling this method, false otherwise.
Find out if multithreading is turned on or not.
+Whether or not multithreading is turned on. True means on, false means off.
Defines a shader macro.
+You can use shader macros in your shaders. The
Shader_Macros[1] = { "zero", "0" };
The following shader or effect creation functions take an array of shader macros as an input parameter:
The macro name.
The macro definition.
Aliased
Aliased
Indicates the type of locks placed on records during editing.
+ The
+
The
+
The
+
The methods in this interface present your object's data as a contiguous sequence of bytes that you can read or write. There are also methods for committing and reverting changes on streams that are open in transacted mode and methods for restricting access to a range of bytes in the stream.
Streams can remain open for long periods of time without consuming file-system resources. The IUnknown::Release method is similar to a close function on a file. Once released, the stream object is no longer valid and cannot be used.
Clients of asynchronous monikers can choose between a data-pull or data-push model for driving an asynchronous
+ IMoniker::BindToStorage operation and for receiving asynchronous notifications. See
+ URL Monikers for more information. The following table compares the behavior of asynchronous
+
The
+
The
+
The Read method reads a specified number of bytes from the stream object into memory, starting at the current seek reference.
+A reference to the buffer which the stream data is read into.
The number of bytes of data to read from the stream object.
A reference to a ULONG variable that receives the actual number of bytes read from the stream object.
Note: The number of bytes read may be zero.
This method reads bytes from this stream object into memory. The stream object must be opened in STGM_READ mode. This method adjusts the seek reference by the actual number of bytes read.
The number of bytes actually read is also returned in the pcbRead parameter.
+The Write method writes a specified number of bytes into the stream object starting at the current seek reference.
+A reference to the buffer that contains the data that is to be written to the stream. A valid reference must be provided for this parameter even when cb is zero.
The number of bytes of data to attempt to write into the stream. This value can be zero.
A reference to a ULONG variable where this method writes the actual number of bytes written to the stream object. The caller can set this reference to
If the seek reference is currently past the end of the stream and the byte count is nonzero, this method increases the size of the stream to the seek reference and writes the specified bytes starting at the seek reference. The fill bytes written to the stream are not initialized to any particular value. This is the same as the end-of-file behavior in the MS-DOS FAT file system.
With a zero byte count and a seek reference past the end of the stream, this method does not create the fill bytes to increase the stream to the seek reference. In this case, you must call the
+
The pcbWritten parameter can have a value even if an error occurs.
In the COM-provided implementation, stream objects are not sparse. Any fill bytes are eventually allocated on the disk and assigned to the stream.
+ The
+
The
+
The methods in this interface present your object's data as a contiguous sequence of bytes that you can read or write. There are also methods for committing and reverting changes on streams that are open in transacted mode and methods for restricting access to a range of bytes in the stream.
Streams can remain open for long periods of time without consuming file-system resources. The IUnknown::Release method is similar to a close function on a file. Once released, the stream object is no longer valid and cannot be used.
Clients of asynchronous monikers can choose between a data-pull or data-push model for driving an asynchronous
+ IMoniker::BindToStorage operation and for receiving asynchronous notifications. See
+ URL Monikers for more information. The following table compares the behavior of asynchronous
+
The Seek method changes the seek reference to a new location. The new location is relative to either the beginning of the stream, the end of the stream, or the current seek reference.
+The displacement to be added to the location indicated by the dwOrigin parameter. If dwOrigin is STREAM_SEEK_SET, this is interpreted as an unsigned value rather than a signed value.
The origin for the displacement specified in dlibMove. The origin can be the beginning of the file (STREAM_SEEK_SET), the current seek reference (STREAM_SEEK_CUR), or the end of the file (STREAM_SEEK_END). For more information about values, see the STREAM_SEEK enumeration.
A reference to the location where this method writes the value of the new seek reference from the beginning of the stream.
You can set this reference to
You can also use this method to obtain the current value of the seek reference by calling this method with the dwOrigin parameter set to STREAM_SEEK_CUR and the dlibMove parameter set to 0 so that the seek reference is not changed. The current seek reference is returned in the plibNewPosition parameter.
+The SetSize method changes the size of the stream object.
+Specifies the new size, in bytes, of the stream.
This method can return one of these values.
The size of the stream object was successfully changed.
Asynchronous Storage only: Part or all of the stream's data is currently unavailable. For more information, see IFillLockBytes and Asynchronous Storage.
The stream size is not changed because there is no space left on the storage device.
The value of the libNewSize parameter is not supported by the implementation. Not all streams support greater than 2^32 bytes. If a stream does not support more than 2^32 bytes, the high DWORD data type of libNewSize must be zero. If it is nonzero, the implementation may return STG_E_INVALIDFUNCTION. In general, COM-based implementations of the
The object has been invalidated by a revert operation above it in the transaction tree.
If the libNewSize parameter is smaller than the current stream, the stream is truncated to the indicated size.
The seek reference is not affected by the change in stream size.
Calling
The CopyTo method copies a specified number of bytes from the current seek reference in the stream to the current seek reference in another stream.
+A reference to the destination stream. The stream pointed to by pstm can be a new stream or a clone of the source stream.
The number of bytes to copy from the source stream.
A reference to the location where this method writes the actual number of bytes written to the destination. You can set this reference to
A reference to the location where this method writes the actual number of bytes read from the source. You can set this reference to
The CopyTo method copies the specified bytes from one stream to another. It can also be used to copy a stream to itself. The seek reference in each stream instance is adjusted for the number of bytes read or written. This method is equivalent to reading cb bytes into memory using
+
The destination stream can be a clone of the source stream created by calling the
+
If
If
To copy the remainder of the source from the current seek reference, specify the maximum large integer value for the cb parameter. If the seek reference is the beginning of the stream, this operation copies the entire stream.
+The Commit method ensures that any changes made to a stream object open in transacted mode are reflected in the parent storage. If the stream object is open in direct mode,
Controls how the changes for the stream object are committed. See the
This method can return one of these values.
Changes to the stream object were successfully committed to the parent level.
Asynchronous Storage only: Part or all of the stream's data is currently unavailable. For more information see IFillLockBytes and Asynchronous Storage.
The commit operation failed due to lack of space on the storage device.
The object has been invalidated by a revert operation above it in the transaction tree.
The Commit method ensures that changes to a stream object opened in transacted mode are reflected in the parent storage. Changes that have been made to the stream since it was opened or last committed are reflected to the parent storage object. If the parent is opened in transacted mode, the parent may revert at a later time, rolling back the changes to this stream object. The compound file implementation does not support the opening of streams in transacted mode, so this method has very little effect other than to flush memory buffers. For more information, see
+
If the stream is open in direct mode, this method ensures that any memory buffers have been flushed out to the underlying storage object. This is much like a flush in traditional file systems.
The
The Revert method discards all changes that have been made to a transacted stream since the last
+
This method can return one of these values.
The stream was successfully reverted to its previous version.
Asynchronous Storage only: Part or all of the stream's data is currently unavailable. For more information see IFillLockBytes and Asynchronous Storage.
The Revert method discards changes made to a transacted stream since the last commit operation.
+The LockRegion method restricts access to a specified range of bytes in the stream. Supporting this functionality is optional since some file systems do not provide it.
+Integer that specifies the byte offset for the beginning of the range.
Integer that specifies the length of the range, in bytes, to be restricted.
Specifies the restrictions being requested on accessing the range.
This method can return one of these values.
The specified range of bytes was locked.
Asynchronous Storage only: Part or all of the stream's data is currently unavailable. For more information, see IFillLockBytes and Asynchronous Storage.
Locking is not supported at all or the specific type of lock requested is not supported.
Requested lock is supported, but cannot be granted because of an existing lock.
The object has been invalidated by a revert operation above it in the transaction tree.
The byte range of the stream can be extended. Locking an extended range for the stream is useful as a method of communication between different instances of the stream without changing data that is actually part of the stream.
Three types of locking can be supported: locking to exclude other writers, locking to exclude other readers or writers, and locking that allows only one requester to obtain a lock on the given range, which is usually an alias for one of the other two lock types. A given stream instance might support either of the first two types, or both. The lock type is specified by dwLockType, using a value from the
+
Any region locked with
The UnlockRegion method removes the access restriction on a range of bytes previously restricted with
+
This method can return one of these values.
The byte range was unlocked.
Asynchronous Storage only: Part or all of the stream's data is currently unavailable. For more information see IFillLockBytes and Asynchronous Storage.
Locking is not supported at all or the specific type of lock requested is not supported.
The requested unlock operation cannot be granted.
The object has been invalidated by a revert operation above it in the transaction tree.
The Stat method retrieves the
+
The Clone method creates a new stream object with its own seek reference that references the same bytes as the original stream.
+When successful, reference to the location of an
The Clone method creates a new stream object for accessing the same bytes but using a separate seek reference. The new stream object sees the same data as the source-stream object. Changes written to one object are immediately visible in the other. Range locking is shared between the stream objects.
The initial setting of the seek reference in the cloned stream instance is the same as the current setting of the seek reference in the original stream at the time of the clone operation.
+ The
+
start + (end - start) * amount
+ Passing start + (end - start) * amount
+ Passing
+ control.Show();
+ using (var loop = new RenderLoop(control))
+ {
+ while (loop.NextFrame())
+ {
+ // Perform draw operations here.
+ }
+ }
+
+ Note that the main control can be changed at anytime inside the loop.
+