Vertex state correctness (gpuweb#4109)
Refactor these tests so they don't use storage buffers.

The tests were using one uniform or storage buffer per
vertex attribute. Switched to using just a single uniform
buffer.

Note: There was a comment about increasing the vertex count
by +1 in vertex_buffer_used_multiple_times_interleaved.
I don't know whether that comment is outdated, because removing
the +1 there and a few lines below neither breaks the test nor
generates any validation errors.

The reason I needed to remove it is that the code that packs
the data into a single buffer needs the size of each per-attribute
buffer to match kVertexCount, but that particular test was passing
kVertexCount + 1 in some places and not in others.
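
As a rough illustration of that packing step (a minimal sketch, not the
test's actual helper; the function name concatExpectedData and the
kVertexCount value are assumptions for illustration only), the
per-attribute expected-data arrays can be concatenated into one
Uint8Array that backs a single uniform buffer:

// Minimal sketch: concatenate several per-attribute ArrayBuffers into one
// Uint8Array so a single uniform buffer (one bind group entry) can hold them all.
function concatExpectedData(expectedData: ArrayBuffer[]): Uint8Array {
  // Total size of all per-attribute chunks.
  const numBytes = expectedData.reduce((sum, b) => sum + b.byteLength, 0);
  const packed = new Uint8Array(numBytes);

  // Copy each chunk at the running offset, in attribute order, matching the
  // field order of the ProvidedData struct declared in the vertex shader.
  let offset = 0;
  for (const buffer of expectedData) {
    packed.set(new Uint8Array(buffer), offset);
    offset += buffer.byteLength;
  }
  return packed;
}

// Hypothetical usage: every chunk must cover exactly kVertexCount vec4s,
// which is why the kVertexCount + 1 special case had to go.
const kVertexCount = 4; // assumed value for illustration
const a = new Float32Array(kVertexCount * 4).buffer;
const b = new Float32Array(kVertexCount * 4).buffer;
const packed = concatExpectedData([a, b]);
console.log(packed.byteLength); // 2 * kVertexCount * 4 floats * 4 bytes = 128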

* Use max limits

This test might break on devices with higher limits,
but in that case the test itself should be fixed.
greggman authored Dec 20, 2024
1 parent b710041 commit 6fd9914
Showing 1 changed file with 28 additions and 31 deletions.
59 changes: 28 additions & 31 deletions src/webgpu/api/operation/vertex_state/correctness.spec.ts
@@ -16,7 +16,7 @@ import {
kVertexFormatInfo,
kVertexFormats,
} from '../../../capability_info.js';
-import { GPUTest } from '../../../gpu_test.js';
+import { GPUTest, MaxLimitsTestMixin } from '../../../gpu_test.js';
import { float32ToFloat16Bits, normalizedIntegerAsFloat } from '../../../util/conversion.js';
import { align, clamp } from '../../../util/math.js';

@@ -119,7 +119,7 @@ class VertexStateTest extends GPUTest {

let vsInputs = '';
let vsChecks = '';
-let vsBindings = '';
+let providedDataDefs = '';

for (const b of buffers) {
for (const a of b.attributes) {
@@ -140,15 +140,8 @@
indexBuiltin = `input.instanceIndex`;
}

-// Start using storage buffers when we run out of uniform buffers.
-let storageType = 'uniform';
-if (i >= maxUniformBuffers) {
-  storageType = 'storage, read';
-}
-
vsInputs += ` @location(${i}) attrib${i} : ${shaderType},\n`;
-vsBindings += `struct S${i} { data : array<vec4<${a.shaderBaseType}>, ${maxCount}> };\n`;
-vsBindings += `@group(0) @binding(${i}) var<${storageType}> providedData${i} : S${i};\n`;
+providedDataDefs += ` data${i}: array<vec4<${a.shaderBaseType}>, ${maxCount}>,\n`;

// Generate the all the checks for the attributes.
for (let component = 0; component < shaderComponentCount; component++) {
@@ -162,7 +155,7 @@
// Check each component individually, with special handling of tolerance for floats.
const attribComponent =
shaderComponentCount === 1 ? `input.attrib${i}` : `input.attrib${i}[${component}]`;
-const providedData = `providedData${i}.data[${indexBuiltin}][${component}]`;
+const providedData = `providedData.data${i}[${indexBuiltin}][${component}]`;
if (format.type === 'uint' || format.type === 'sint') {
vsChecks += ` check(${attribComponent} == ${providedData});\n`;
} else {
@@ -181,7 +174,11 @@ ${vsInputs}
@builtin(instance_index) instanceIndex: u32,
};
-${vsBindings}
+struct ProvidedData {
+${providedDataDefs}
+};
+@group(0) @binding(0) var<uniform> providedData: ProvidedData;
var<private> vsResult : i32 = 1;
var<private> checkIndex : i32 = 0;
@@ -536,7 +533,7 @@ struct VSOutputs {
shaderBaseType: data.shaderBaseType,
testComponentCount: maxCount * componentCount,
floatTolerance: data.floatTolerance,
-expectedData: expandedExpectedData.buffer,
+expectedData: expandedExpectedData.slice(0, maxCount * 4 * 4),
vertexData: expandedVertexData.buffer,
};
}
@@ -579,25 +576,29 @@ struct VSOutputs {
}

createExpectedBG(state: VertexState<{}, TestData>, pipeline: GPURenderPipeline): GPUBindGroup {
-// Create the bindgroups from that test data
-const bgEntries: GPUBindGroupEntry[] = [];
+// Create the bindgroup for the expected test data
+
+// Concat expectedData into one buffer
+let numBytes = 0;
+const arrayBuffers = [];
for (const buffer of state) {
for (const attrib of buffer.attributes) {
-const expectedDataBuffer = this.makeBufferWithContents(
-  new Uint8Array(attrib.expectedData),
-  GPUBufferUsage.UNIFORM | GPUBufferUsage.STORAGE
-);
-bgEntries.push({
-  binding: attrib.shaderLocation,
-  resource: { buffer: expectedDataBuffer },
-});
+numBytes += attrib.expectedData.byteLength;
+arrayBuffers.push(attrib.expectedData);
}
}

+const allExpectedData = new Uint8Array(numBytes);
+let offset = 0;
+for (const arrayBuffer of arrayBuffers) {
+  allExpectedData.set(new Uint8Array(arrayBuffer), offset);
+  offset += arrayBuffer.byteLength;
+}
+const expectedDataBuffer = this.makeBufferWithContents(allExpectedData, GPUBufferUsage.UNIFORM);

return this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
-entries: bgEntries,
+entries: [{ binding: 0, resource: { buffer: expectedDataBuffer } }],
});
}

@@ -654,7 +655,7 @@
}
}

-export const g = makeTestGroup(VertexStateTest);
+export const g = makeTestGroup(MaxLimitsTestMixin(VertexStateTest));

g.test('vertex_format_to_shader_format_conversion')
.desc(
@@ -994,14 +995,10 @@ g.test('vertex_buffer_used_multiple_times_interleaved')
attributes: attribs,
},
],
-// Request one vertex more than what we need so we have an extra full stride. Otherwise WebGPU
-// validation of vertex being in bounds will fail for all vertex buffers at an offset that's
-// not 0 (since their last stride will go beyond the data for vertex kVertexCount -1).
-kVertexCount + 1,
+kVertexCount,
kInstanceCount
);
-const vertexBuffer = t.createVertexBuffers(baseData, kVertexCount + 1, kInstanceCount)[0]
-  .buffer;
+const vertexBuffer = t.createVertexBuffers(baseData, kVertexCount, kInstanceCount)[0].buffer;

// Then we recreate test data by:
// 1) creating multiple "vertex buffers" that all point at the GPUBuffer above but at
