Skip to content

Commit

Permalink
IMPALA-11756: Add error messages and debug logs in CatalogdMetaProvid…
Browse files Browse the repository at this point in the history
…erTest and PartialCatalogInfoWriteIdTest

We saw several test failures on collecting file metadata after minor
compaction in Hive. However, I can't reproduce the failure even after running
the tests around 100 times.

To ease our debugging, this patch adds error messages to print the actual
file paths. Also adds more logs to locate the time range when the test
runs.

Tests:
 - Ran CatalogdMetaProviderTest and PartialCatalogInfoWriteIdTest
   locally. Verified the new logs exist.
 - Modified the expected value to fail the tests. Verified the error
   messages work as expected.

Change-Id: I806cc35da5a758215b8dc6cc2df86a7a968301da
Reviewed-on: http://gerrit.cloudera.org:8080/19319
Reviewed-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
Tested-by: Impala Public Jenkins <impala-public-jenkins@cloudera.com>
  • Loading branch information
stiga-huang authored and Impala Public Jenkins committed Dec 7, 2022
1 parent 4c772f9 commit 8dbc8b7
Show file tree
Hide file tree
Showing 3 changed files with 41 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@
import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
import org.apache.impala.common.InternalException;
Expand Down Expand Up @@ -50,7 +52,9 @@
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand All @@ -76,6 +80,9 @@ public class PartialCatalogInfoWriteIdTest {
private static final String testPartitionedTbl = "insert_only_partitioned";
private static final String testAcidTblName = "test_full_acid";

@Rule
public TestName name = new TestName();

@BeforeClass
public static void setupTestEnv() throws SQLException, ClassNotFoundException {
catalog_ = CatalogServiceTestCatalog.create();
Expand All @@ -94,6 +101,7 @@ public static void shutdown() {

@Before
public void createTestTbls() throws Exception {
LOG.info("Creating test tables for {}", name.getMethodName());
Stopwatch st = Stopwatch.createStarted();
ImpalaJdbcClient client = ImpalaJdbcClient
.createClientUsingHiveJdbcDriver();
Expand Down Expand Up @@ -602,7 +610,13 @@ private void testFileMetadataAfterCompaction(String tableName, String partition,
int afterFileCount = afterPartitionInfo.getFile_descriptorsSize()
+ afterPartitionInfo.getInsert_file_descriptorsSize()
+ afterPartitionInfo.getDelete_file_descriptorsSize();
Assert.assertEquals(expectedFileCount, afterFileCount);
String message = "Actual file_descriptors:\n" +
getPathsFromFileDescriptors(afterPartitionInfo.getFile_descriptors()) +
"\nActual insert_file_descriptors:\n" +
getPathsFromFileDescriptors(afterPartitionInfo.getInsert_file_descriptors()) +
"\nActual delete_file_descriptors:\n" +
getPathsFromFileDescriptors(afterPartitionInfo.getDelete_file_descriptors());
Assert.assertEquals(message, expectedFileCount, afterFileCount);
long numMissesAfterMinor =
getMetricCount(testDbName, tableName, HdfsTable.FILEMETADATA_CACHE_MISS_METRIC);
long numHitsAfterMinor =
Expand All @@ -611,6 +625,13 @@ private void testFileMetadataAfterCompaction(String tableName, String partition,
Assert.assertEquals(numMisses, numMissesAfterMinor);
}

/**
 * Converts the given thrift file descriptors into their file paths (in the same
 * order), for use in assertion failure messages.
 */
private List<String> getPathsFromFileDescriptors(List<THdfsFileDesc> fileDescriptors) {
  return fileDescriptors.stream()
      .map(thriftFd -> HdfsPartition.FileDescriptor.fromThrift(thriftFd).getPath())
      .collect(Collectors.toList());
}

/**
* This test makes sure that the table returned is consistent with the given writeId list
* even if the table was dropped and recreated from outside.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,20 @@
import static org.junit.Assert.*;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.impala.catalog.HdfsPartition;
import org.apache.impala.catalog.local.CatalogdMetaProvider.SizeOfWeigher;
import org.apache.impala.catalog.local.MetaProvider.PartitionMetadata;
import org.apache.impala.catalog.local.MetaProvider.PartitionRef;
Expand All @@ -55,7 +58,9 @@
import org.apache.impala.thrift.TTable;
import org.apache.impala.util.ListMap;
import org.junit.Assume;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand All @@ -75,6 +80,8 @@ public class CatalogdMetaProviderTest {
private final TableMetaRef tableRef_;

private CacheStats prevStats_;
@Rule
public TestName name = new TestName();

private static HiveJdbcClientPool hiveJdbcClientPool_;
private static final String testDbName_ = "catalogd_meta_provider_test";
Expand Down Expand Up @@ -107,6 +114,8 @@ private CacheStats diffStats() {
}

private void createTestTbls() throws Exception {
LOG.info("Creating test tables for {}", name.getMethodName());
Stopwatch st = Stopwatch.createStarted();
ImpalaJdbcClient client = ImpalaJdbcClient.createClientUsingHiveJdbcDriver();
client.connect();
try {
Expand All @@ -122,6 +131,8 @@ private void createTestTbls() throws Exception {
+ " (c1 int) partitioned by (part int) stored as orc "
+ "tblproperties ('transactional'='true')");
} finally {
LOG.info("Time taken for createTestTbls {} msec",
st.stop().elapsed(TimeUnit.MILLISECONDS));
client.close();
}
}
Expand Down Expand Up @@ -622,9 +633,12 @@ private void testFileMetadataAfterCompaction(String dbName, String tableName,
Map<String, TCounter> counters = Maps.uniqueIndex(prof.counters, TCounter::getName);
assertEquals(1, counters.get("CatalogFetch.Partitions.Requests").getValue());
assertEquals(1, counters.get("CatalogFetch.Partitions.Misses").getValue());
int afterFileCount = partMap.values().stream()
.map(PartitionMetadata::getFileDescriptors).mapToInt(List::size).sum();
assertEquals(1, afterFileCount);
List<String> paths = partMap.values().stream()
.map(PartitionMetadata::getFileDescriptors)
.flatMap(Collection::stream)
.map(HdfsPartition.FileDescriptor::getPath)
.collect(Collectors.toList());
assertEquals("Actual paths: " + paths, 1, paths.size());
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ private void validateConnection() throws SQLException {
*/
public ResultSet execQuery(String query) throws SQLException {
  validateConnection();
  // Use parameterized logging so the message string is only built if the
  // level is enabled (SLF4J best practice over string concatenation).
  LOG.info("Executing: {}", query);
  return stmt_.executeQuery(query);
}

Expand All @@ -99,7 +99,7 @@ public ResultSet execQuery(String query) throws SQLException {
*/
public boolean executeSql(String sql) throws SQLException {
  validateConnection();
  // Use parameterized logging so the message string is only built if the
  // level is enabled (SLF4J best practice over string concatenation).
  LOG.info("Executing sql : {}", sql);
  return stmt_.execute(sql);
}
}
Expand Down

0 comments on commit 8dbc8b7

Please sign in to comment.