From 0e411f9015d2886143c04166d1bce7400a8b2bd3 Mon Sep 17 00:00:00 2001 From: Mike Jensen Date: Wed, 27 Nov 2024 12:17:34 -0700 Subject: [PATCH] Fix tests affected by minimum segment size enforcement This change was applied easily in Go, but there are issues with integration and other existing test payloads. Because this is low risk, I believe it's ok to remove this protection in the Java SDK, but leave it commented so it's known to be explicit. Alternatively we could update test payloads. --- .../main/java/io/opentdf/platform/sdk/Config.java | 2 +- .../main/java/io/opentdf/platform/sdk/TDF.java | 4 ++-- .../java/io/opentdf/platform/sdk/ConfigTest.java | 15 +++++++++++++-- .../java/io/opentdf/platform/sdk/TDFTest.java | 9 ++++----- 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/sdk/src/main/java/io/opentdf/platform/sdk/Config.java b/sdk/src/main/java/io/opentdf/platform/sdk/Config.java index 8f1e7da3..a8b58c4b 100644 --- a/sdk/src/main/java/io/opentdf/platform/sdk/Config.java +++ b/sdk/src/main/java/io/opentdf/platform/sdk/Config.java @@ -22,7 +22,7 @@ public class Config { public static final int TDF3_KEY_SIZE = 2048; public static final int DEFAULT_SEGMENT_SIZE = 2 * 1024 * 1024; // 2mb public static final int MAX_SEGMENT_SIZE = DEFAULT_SEGMENT_SIZE * 2; - public static final int MIN_SEGMENT_SIZE = 16 * 1024; + public static final int MIN_SEGMENT_SIZE = 16 * 1024; // not currently enforced in parsing due to existing payloads in testing public static final String KAS_PUBLIC_KEY_PATH = "/kas_public_key"; public static final String DEFAULT_MIME_TYPE = "application/octet-stream"; public static final int MAX_COLLECTION_ITERATION = (1 << 24) - 1; diff --git a/sdk/src/main/java/io/opentdf/platform/sdk/TDF.java b/sdk/src/main/java/io/opentdf/platform/sdk/TDF.java index c24628e2..ca01fd21 100644 --- a/sdk/src/main/java/io/opentdf/platform/sdk/TDF.java +++ b/sdk/src/main/java/io/opentdf/platform/sdk/TDF.java @@ -361,9 +361,9 @@ public void readPayload(OutputStream 
outputStream) throws TDFReadFailed, for (Manifest.Segment segment : manifest.encryptionInformation.integrityInformation.segments) { if (segment.encryptedSegmentSize > Config.MAX_SEGMENT_SIZE) { throw new IllegalStateException("Segment size " + segment.encryptedSegmentSize + " exceeded limit " + Config.MAX_SEGMENT_SIZE); - } else if (segment.encryptedSegmentSize < Config.MIN_SEGMENT_SIZE) { + }/* else if (segment.encryptedSegmentSize < Config.MIN_SEGMENT_SIZE) { throw new IllegalStateException("Segment size " + segment.encryptedSegmentSize + " is under minimum " + Config.MIN_SEGMENT_SIZE); - } + }*/ // Commented out due to tests needing small segment sizes with existing payloads byte[] readBuf = new byte[(int) segment.encryptedSegmentSize]; int bytesRead = tdfReader.readPayloadBytes(readBuf); diff --git a/sdk/src/test/java/io/opentdf/platform/sdk/ConfigTest.java b/sdk/src/test/java/io/opentdf/platform/sdk/ConfigTest.java index cae23434..1754428d 100644 --- a/sdk/src/test/java/io/opentdf/platform/sdk/ConfigTest.java +++ b/sdk/src/test/java/io/opentdf/platform/sdk/ConfigTest.java @@ -4,6 +4,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; class ConfigTest { @@ -46,8 +47,18 @@ void withMetaData_shouldSetMetaData() { @Test void withSegmentSize_shouldSetSegmentSize() { - Config.TDFConfig config = Config.newTDFConfig(Config.withSegmentSize(1024)); - assertEquals(1024, config.defaultSegmentSize); + Config.TDFConfig config = Config.newTDFConfig(Config.withSegmentSize(Config.MIN_SEGMENT_SIZE)); + assertEquals(Config.MIN_SEGMENT_SIZE, config.defaultSegmentSize); + } + + @Test + void withSegmentSize_shouldIgnoreSegmentSize() { + try { + Config.newTDFConfig(Config.withSegmentSize(1024)); + fail("Expected exception"); + } catch (IllegalArgumentException e) { + // expected + } } @Test diff --git a/sdk/src/test/java/io/opentdf/platform/sdk/TDFTest.java 
b/sdk/src/test/java/io/opentdf/platform/sdk/TDFTest.java index 35416f37..96f5b980 100644 --- a/sdk/src/test/java/io/opentdf/platform/sdk/TDFTest.java +++ b/sdk/src/test/java/io/opentdf/platform/sdk/TDFTest.java @@ -359,11 +359,10 @@ public void testCreatingTDFWithMultipleSegments() throws Exception { Config.TDFConfig config = Config.newTDFConfig( Config.withAutoconfigure(false), Config.withKasInformation(getKASInfos()), - // use a random segment size that makes sure that we will use multiple segments - Config.withSegmentSize(1 + random.nextInt(20))); + Config.withSegmentSize(Config.MIN_SEGMENT_SIZE)); - // data should be bigger than the largest segment - var data = new byte[21 + random.nextInt(2048)]; + // data should be large enough to have multiple complete and a partial segment + var data = new byte[(int)(Config.MIN_SEGMENT_SIZE * 2.8)]; random.nextBytes(data); var plainTextInputStream = new ByteArrayInputStream(data); var tdfOutputStream = new ByteArrayOutputStream(); @@ -418,7 +417,7 @@ public void write(byte[] b, int off, int len) { var tdfConfig = Config.newTDFConfig( Config.withAutoconfigure(false), Config.withKasInformation(getKASInfos()), - Config.withSegmentSize(1 + random.nextInt(128))); + Config.withSegmentSize(Config.MIN_SEGMENT_SIZE)); assertThrows(TDF.DataSizeNotSupported.class, () -> tdf.createTDF(is, os, tdfConfig, kas, null), "didn't throw an exception when we created TDF that was too large");