 package com.scalar.db.storage.jdbc;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 import com.scalar.db.api.DistributedStorageColumnValueIntegrationTestBase;
+import com.scalar.db.api.Get;
+import com.scalar.db.api.Put;
+import com.scalar.db.api.PutBuilder;
+import com.scalar.db.api.Result;
+import com.scalar.db.api.TableMetadata;
 import com.scalar.db.config.DatabaseConfig;
+import com.scalar.db.exception.storage.ExecutionException;
+import com.scalar.db.io.BigIntColumn;
+import com.scalar.db.io.BlobColumn;
+import com.scalar.db.io.BooleanColumn;
 import com.scalar.db.io.Column;
 import com.scalar.db.io.DataType;
+import com.scalar.db.io.DateColumn;
+import com.scalar.db.io.DoubleColumn;
+import com.scalar.db.io.FloatColumn;
+import com.scalar.db.io.IntColumn;
+import com.scalar.db.io.Key;
+import com.scalar.db.io.TextColumn;
+import com.scalar.db.io.TimeColumn;
+import com.scalar.db.io.TimestampColumn;
+import com.scalar.db.io.TimestampTZColumn;
 import com.scalar.db.util.TestUtils;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
 import java.util.Properties;
 import java.util.Random;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.condition.EnabledIf;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class JdbcDatabaseColumnValueIntegrationTest
     extends DistributedStorageColumnValueIntegrationTestBase {
@@ -68,4 +97,186 @@ protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) |
     }
     return super.getColumnWithMaxValue(columnName, dataType);
   }
+  // TODO: Expand this test to cover all supported storages, not just Oracle/DB2.
+  // This test verifies that large BLOB data can be inserted and retrieved correctly.
+  // Currently, it is limited to Oracle and DB2 due to known differences in BLOB handling,
+  // potential resource constraints, or lack of support for large BLOBs in other engines.
+  // Before enabling it for other storages, investigate their BLOB size limits and behavior,
+  // and ensure the test does not cause failures or excessive resource usage.
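+  // Note: humanReadableBlobSize is not used in the test body; presumably it exists only to make
+  // the blob size legible in the parameterized test's display name.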
+  @EnabledIf("isDb2OrOracle")
+  @ParameterizedTest
+  @MethodSource("provideBlobSizes")
+  public void put_largeBlobData_ShouldWorkCorrectly(int blobSize, String humanReadableBlobSize)
+      throws ExecutionException {
+    String tableName = TABLE + "_large_single_blob";
+    try {
+      // Arrange
+      TableMetadata.Builder metadata =
+          TableMetadata.newBuilder()
+              .addColumn(COL_NAME1, DataType.INT)
+              .addColumn(COL_NAME2, DataType.BLOB)
+              .addPartitionKey(COL_NAME1);
+
+      admin.createTable(namespace, tableName, metadata.build(), true, getCreationOptions());
+      byte[] blobData = createLargeBlob(blobSize);
+      Put put =
+          Put.newBuilder()
+              .namespace(namespace)
+              .table(tableName)
+              .partitionKey(Key.ofInt(COL_NAME1, 1))
+              .blobValue(COL_NAME2, blobData)
+              .build();
+
+      // Act
+      storage.put(put);
+
+      // Assert
+      Optional<Result> optionalResult =
+          storage.get(
+              Get.newBuilder()
+                  .namespace(namespace)
+                  .table(tableName)
+                  .partitionKey(Key.ofInt(COL_NAME1, 1))
+                  .build());
+      assertThat(optionalResult).isPresent();
+      Result result = optionalResult.get();
+      assertThat(result.getColumns().get(COL_NAME2).getBlobValueAsBytes()).isEqualTo(blobData);
+    } finally {
+      admin.dropTable(namespace, tableName, true);
+    }
+  }
+
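+  // Supplies (blobSize, humanReadableBlobSize) pairs for the parameterized test above. Since this
+  // @MethodSource provider is non-static, JUnit requires the test class to run with
+  // @TestInstance(Lifecycle.PER_CLASS), which is assumed to be configured in the base class.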
+  Stream<Arguments> provideBlobSizes() {
+    List<Arguments> args = new ArrayList<>();
+    if (isOracle()) {
+      // As explained in
+      // `com.scalar.db.storage.jdbc.RdbEngineOracle.bindBlobColumnToPreparedStatement()`,
+      // handling a BLOB larger than 32,766 bytes requires a workaround, so we specifically test
+      // values on both sides of that threshold.
+      args.add(Arguments.of(32_766, "32.766 KB"));
+      args.add(Arguments.of(32_767, "32.767 KB"));
+    }
+    args.add(Arguments.of(100_000_000, "100 MB"));
+    return args.stream();
+  }
+
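+  // Exercises the Oracle workaround with two BLOB columns in the same row: one right at the
+  // 32,766-byte threshold and one well below it.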
+  @EnabledIf("isOracle")
+  @Test
+  public void put_largeBlobData_WithMultipleBlobColumnsShouldWorkCorrectly()
+      throws ExecutionException {
+    String tableName = TABLE + "_large_multiple_blobs";
+    try {
+      // Arrange
+      TableMetadata.Builder metadata =
+          TableMetadata.newBuilder()
+              .addColumn(COL_NAME1, DataType.INT)
+              .addColumn(COL_NAME2, DataType.BLOB)
+              .addColumn(COL_NAME3, DataType.BLOB)
+              .addPartitionKey(COL_NAME1);
+
+      admin.createTable(namespace, tableName, metadata.build(), true, getCreationOptions());
+      byte[] blobDataCol2 = createLargeBlob(32_766);
+      byte[] blobDataCol3 = createLargeBlob(5000);
+      Put put =
+          Put.newBuilder()
+              .namespace(namespace)
+              .table(tableName)
+              .partitionKey(Key.ofInt(COL_NAME1, 1))
+              .blobValue(COL_NAME2, blobDataCol2)
+              .blobValue(COL_NAME3, blobDataCol3)
+              .build();
+
+      // Act
+      storage.put(put);
+
+      // Assert
+      Optional<Result> optionalResult =
+          storage.get(
+              Get.newBuilder()
+                  .namespace(namespace)
+                  .table(tableName)
+                  .partitionKey(Key.ofInt(COL_NAME1, 1))
+                  .build());
+      assertThat(optionalResult).isPresent();
+      Result result = optionalResult.get();
+      assertThat(result.getColumns().get(COL_NAME2).getBlobValueAsBytes()).isEqualTo(blobDataCol2);
+      assertThat(result.getColumns().get(COL_NAME3).getBlobValueAsBytes()).isEqualTo(blobDataCol3);
+    } finally {
+      admin.dropTable(namespace, tableName, true);
+    }
+  }
+
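+  // Verifies that a near-threshold BLOB can be written and read back alongside every other
+  // supported column type in a single row.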
+  @EnabledIf("isOracle")
+  @Test
+  public void put_largeBlobData_WithAllColumnTypesShouldWorkCorrectly() throws ExecutionException {
+    // Arrange
+    IntColumn partitionKeyValue = (IntColumn) getColumnWithMaxValue(PARTITION_KEY, DataType.INT);
+    BooleanColumn col1Value = (BooleanColumn) getColumnWithMaxValue(COL_NAME1, DataType.BOOLEAN);
+    IntColumn col2Value = (IntColumn) getColumnWithMaxValue(COL_NAME2, DataType.INT);
+    BigIntColumn col3Value = (BigIntColumn) getColumnWithMaxValue(COL_NAME3, DataType.BIGINT);
+    FloatColumn col4Value = (FloatColumn) getColumnWithMaxValue(COL_NAME4, DataType.FLOAT);
+    DoubleColumn col5Value = (DoubleColumn) getColumnWithMaxValue(COL_NAME5, DataType.DOUBLE);
+    TextColumn col6Value = (TextColumn) getColumnWithMaxValue(COL_NAME6, DataType.TEXT);
+    BlobColumn col7Value = BlobColumn.of(COL_NAME7, createLargeBlob(32_766));
+    DateColumn col8Value = (DateColumn) getColumnWithMaxValue(COL_NAME8, DataType.DATE);
+    TimeColumn col9Value = (TimeColumn) getColumnWithMaxValue(COL_NAME9, DataType.TIME);
+    TimestampTZColumn col10Value =
+        (TimestampTZColumn) getColumnWithMaxValue(COL_NAME10, DataType.TIMESTAMPTZ);
+    TimestampColumn col11Value = null;
+    if (isTimestampTypeSupported()) {
+      col11Value = (TimestampColumn) getColumnWithMaxValue(COL_NAME11, DataType.TIMESTAMP);
+    }
+
+    PutBuilder.Buildable put =
+        Put.newBuilder()
+            .namespace(namespace)
+            .table(TABLE)
+            .partitionKey(Key.newBuilder().add(partitionKeyValue).build())
+            .value(col1Value)
+            .value(col2Value)
+            .value(col3Value)
+            .value(col4Value)
+            .value(col5Value)
+            .value(col6Value)
+            .value(col7Value)
+            .value(col8Value)
+            .value(col9Value)
+            .value(col10Value);
+    if (isTimestampTypeSupported()) {
+      put.value(col11Value);
+    }
+    // Act
+    storage.put(put.build());
+
+    // Assert
+    assertResult(
+        partitionKeyValue,
+        col1Value,
+        col2Value,
+        col3Value,
+        col4Value,
+        col5Value,
+        col6Value,
+        col7Value,
+        col8Value,
+        col9Value,
+        col10Value,
+        col11Value);
+  }
+
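+  // Returns `size` bytes of pseudo-random data. The `random` field is assumed to be the shared
+  // Random instance provided by the test base class.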
+  private byte[] createLargeBlob(int size) {
+    byte[] blob = new byte[size];
+    random.nextBytes(blob);
+    return blob;
+  }
+
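+  // The predicates below are invoked reflectively by the @EnabledIf annotations above, which is
+  // why the compiler sees them as unused.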
+  @SuppressWarnings("unused")
+  private boolean isDb2OrOracle() {
+    return JdbcEnv.isOracle() || JdbcEnv.isDb2();
+  }
+
+  @SuppressWarnings("unused")
+  private boolean isOracle() {
+    return JdbcEnv.isOracle();
+  }
 }