 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.jupiter.api.Assumptions.assumeFalse;
 import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.BufferedInputStream;
@@ -849,39 +848,22 @@ public void testUseCompress() throws Exception {
         this.rs = this.stmt.executeQuery("SHOW VARIABLES LIKE 'max_allowed_packet'");
         this.rs.next();
         long defaultMaxAllowedPacket = this.rs.getInt(2);
-        boolean changeMaxAllowedPacket = defaultMaxAllowedPacket < 4 + 1024 * 1024 * 32 - 1;
-
-        if (versionMeetsMinimum(5, 6, 20) && !versionMeetsMinimum(5, 7)) {
-            /*
-             * The 5.6.20 patch for Bug #16963396, Bug #19030353, Bug #69477 limits the size of redo log BLOB writes
-             * to 10% of the redo log file size. The 5.7.5 patch addresses the bug without imposing a limitation.
-             * As a result of the redo log BLOB write limit introduced for MySQL 5.6, innodb_log_file_size should be set to a value
-             * greater than 10 times the largest BLOB data size found in the rows of your tables plus the length of other variable length
-             * fields (VARCHAR, VARBINARY, and TEXT type fields).
-             */
-            this.rs = this.stmt.executeQuery("SHOW VARIABLES LIKE 'innodb_log_file_size'");
-            this.rs.next();
-            assumeFalse(this.rs.getInt(2) < 1024 * 1024 * 32 * 10,
-                    "You need to increase innodb_log_file_size to at least " + 1024 * 1024 * 32 * 10 + " before running this test!");
-        }
+
+        createTable("testUseCompress", "(pos INT PRIMARY KEY AUTO_INCREMENT, blobdata LONGBLOB)");
 
         try {
-            if (changeMaxAllowedPacket) {
-                this.stmt.executeUpdate("SET GLOBAL max_allowed_packet=" + 1024 * 1024 * 33);
-            }
+            this.stmt.executeUpdate("SET GLOBAL max_allowed_packet=" + 1024 * 1024 * 33);
 
-            testCompressionWith("false", 1024 * 1024 * 16 - 2); // no split
-            testCompressionWith("false", 1024 * 1024 * 16 - 1); // split with additional empty packet
-            testCompressionWith("false", 1024 * 1024 * 32); // big payload
+            testUseCompress("false", 1024 * 1024 * 16 - 2); // No split.
+            testUseCompress("false", 1024 * 1024 * 16 - 1); // Split with additional empty packet.
+            testUseCompress("false", 1024 * 1024 * 32); // Big payload.
 
-            testCompressionWith("true", 1024 * 1024 * 16 - 2 - 3); // no split, one compressed packet
-            testCompressionWith("true", 1024 * 1024 * 16 - 2 - 2); // no split, two compressed packets
-            testCompressionWith("true", 1024 * 1024 * 16 - 1); // split with additional empty packet, two compressed packets
-            testCompressionWith("true", 1024 * 1024 * 32); // big payload
+            testUseCompress("true", 1024 * 1024 * 16 - 2 - 3); // No split, one compressed packet.
+            testUseCompress("true", 1024 * 1024 * 16 - 2 - 2); // No split, two compressed packets.
+            testUseCompress("true", 1024 * 1024 * 16 - 1); // Split with additional empty packet, two compressed packets.
+            testUseCompress("true", 1024 * 1024 * 32); // Big payload.
         } finally {
-            if (changeMaxAllowedPacket) {
-                this.stmt.executeUpdate("SET GLOBAL max_allowed_packet=" + defaultMaxAllowedPacket);
-            }
+            this.stmt.executeUpdate("SET GLOBAL max_allowed_packet=" + defaultMaxAllowedPacket);
         }
     }
 
@@ -891,62 +873,59 @@ public void testUseCompress() throws Exception {
      *
      * @throws Exception
      */
-    private void testCompressionWith(String useCompression, int maxPayloadSize) throws Exception {
-        String sqlToSend = "INSERT INTO BLOBTEST(blobdata) VALUES (?)";
-        int requiredSize = maxPayloadSize - sqlToSend.length() - "_binary''".length();
+    private void testUseCompress(String useCompression, int maxPayloadSize) throws Exception {
+        this.stmt.executeUpdate("TRUNCATE TABLE testUseCompress");
 
-        File testBlobFile = File.createTempFile("cmj-testblob", ".dat");
-        testBlobFile.deleteOnExit();
+        String sqlToSend = "INSERT INTO testUseCompress (blobdata) VALUES (?)";
+        int remainingSize = maxPayloadSize - sqlToSend.length() - "X''".length() - 2 /* parameter_count & parameter_set_count */;
+        if (remainingSize % 2 != 0) {
+            sqlToSend += " ";
+            remainingSize--;
+        }
+        int requiredSize = remainingSize / 2; // HEX requires twice the size of the data.
 
-        // TODO: following cleanup doesn't work correctly during concurrent execution of testsuite
-        // cleanupTempFiles(testBlobFile, "cmj-testblob");
+        File testBlobFile = File.createTempFile("testUseCompress", ".dat");
+        testBlobFile.deleteOnExit();
 
         BufferedOutputStream bOut = new BufferedOutputStream(new FileOutputStream(testBlobFile));
-
-        // generate a random sequence of letters. this ensures that no escaped characters cause packet sizes that interfere with bounds tests
+        // Generate a random sequence of letters.
         Random random = new Random();
         for (int i = 0; i < requiredSize; i++) {
             bOut.write((byte) (65 + random.nextInt(26)));
         }
-
         bOut.flush();
         bOut.close();
 
         Properties props = new Properties();
         props.setProperty(PropertyKey.sslMode.getKeyName(), SslMode.DISABLED.name());
         props.setProperty(PropertyKey.allowPublicKeyRetrieval.getKeyName(), "true");
         props.setProperty(PropertyKey.useCompression.getKeyName(), useCompression);
-        Connection conn1 = getConnectionWithProps(props);
-        Statement stmt1 = conn1.createStatement();
+        Connection testConn = getConnectionWithProps(props);
 
-        createTable("BLOBTEST", "(pos int PRIMARY KEY auto_increment, blobdata LONGBLOB)");
+        PreparedStatement testPstmt = testConn.prepareStatement(sqlToSend);
         BufferedInputStream bIn = new BufferedInputStream(new FileInputStream(testBlobFile));
+        testPstmt.setBinaryStream(1, bIn, (int) testBlobFile.length());
+        testPstmt.execute();
+        testPstmt.clearParameters();
+        bIn.close();
 
-        this.pstmt = conn1.prepareStatement(sqlToSend);
-
-        this.pstmt.setBinaryStream(1, bIn, (int) testBlobFile.length());
-        this.pstmt.execute();
-        this.pstmt.clearParameters();
-
-        this.rs = stmt1.executeQuery("SELECT blobdata from BLOBTEST LIMIT 1");
+        Statement testStmt = testConn.createStatement();
+        this.rs = testStmt.executeQuery("SELECT blobdata FROM testUseCompress LIMIT 1");
         this.rs.next();
         InputStream is = this.rs.getBinaryStream(1);
 
-        bIn.close();
         bIn = new BufferedInputStream(new FileInputStream(testBlobFile));
-        int blobbyte = 0;
-        int count = 0;
-        while ((blobbyte = is.read()) > -1) {
-            int filebyte = bIn.read();
-            assertFalse(filebyte < 0 || filebyte != blobbyte, "Blob is not identical to initial data.");
-            count++;
-        }
-        assertEquals(requiredSize, count);
+        int blobByte = 0;
+        int blobSize = 0;
+        while ((blobByte = is.read()) > -1) {
+            int fileByte = bIn.read();
+            assertFalse(fileByte < 0 || fileByte != blobByte, "Blob is not identical to initial data.");
+            blobSize++;
+        }
+        assertEquals(requiredSize, blobSize);
+        bIn.close();
 
         is.close();
-        if (bIn != null) {
-            bIn.close();
-        }
     }
 
     /**
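
A note on the new size arithmetic in this diff: the reworked helper builds a statement whose total payload is exactly maxPayloadSize bytes. Per the patch's own comments, the '?' placeholder is expanded client-side into a hexadecimal literal (X'<hex>'), which costs two characters per data byte, and two further bytes are reserved for the parameter_count and parameter_set_count fields. The sketch below restates that calculation as a standalone program; the class and method names are illustrative, not part of the patch or the driver's API.

    // Standalone sketch (hypothetical names): mirrors the payload-size
    // arithmetic of the new testUseCompress(String, int) helper above.
    public class PayloadSizeSketch {

        // How many raw blob bytes fit when the expanded statement must come
        // to exactly maxPayloadSize bytes: subtract the SQL text, the X''
        // wrapper, and the 2 bytes the patch comment attributes to the
        // parameter_count & parameter_set_count fields, then halve because
        // hex encoding uses two characters per data byte.
        static int blobBytesForPayload(String sql, int maxPayloadSize) {
            int remaining = maxPayloadSize - sql.length() - "X''".length() - 2;
            if (remaining % 2 != 0) {
                remaining--; // the test instead pads the SQL with one space
            }
            return remaining / 2;
        }

        public static void main(String[] args) {
            String sql = "INSERT INTO testUseCompress (blobdata) VALUES (?)";
            System.out.println(blobBytesForPayload(sql, 1024 * 1024 * 16 - 2));
        }
    }

Hitting an exact payload size is what makes the boundary cases meaningful: the 1024 * 1024 * 16 - 2 and 1024 * 1024 * 16 - 1 invocations straddle the 16 MB protocol packet limit, which is the distinction the "No split." and "Split with additional empty packet." comments in the first hunk refer to.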