minor, Math operands should be cast before assignment
etherge authored and nichunen committed Feb 20, 2020
1 parent 28b83a3 commit 8c78e7c
Showing 18 changed files with 119 additions and 84 deletions.
@@ -101,7 +101,7 @@ private long getSleepTimeMs() {
if (retryCount == 0)
firstSleepTime = System.currentTimeMillis();

- long ms = baseSleepTimeMs * (1 << retryCount);
+ long ms = baseSleepTimeMs * (1L << retryCount);

if (ms > maxSleepTimeMs)
ms = maxSleepTimeMs;
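
Note on this change: in Java, 1 << retryCount is evaluated in 32-bit int arithmetic (the shift distance is even taken modulo 32), so the intermediate value can wrap before it is widened to long; writing 1L forces a 64-bit shift. A minimal sketch of the difference, with illustrative values not taken from the patch:

long baseSleepTimeMs = 100;
int retryCount = 31;
// int shift: 1 << 31 wraps to Integer.MIN_VALUE, and only that
// already-negative result is widened to long.
long bad = baseSleepTimeMs * (1 << retryCount); // -214748364800
// long shift: the whole expression is evaluated in 64-bit arithmetic.
long good = baseSleepTimeMs * (1L << retryCount); // 214748364800
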
@@ -120,7 +120,7 @@ public CuboidBenefitModel.BenefitModel calculateBenefitTotal(Set<Long> cuboidsTo
protected double getCostSaving(long descendant, long cuboid) {
long cuboidCost = getCuboidCost(cuboid);
long descendantAggCost = getCuboidAggregationCost(descendant);
- return descendantAggCost - cuboidCost;
+ return (double) descendantAggCost - cuboidCost;
}

protected Long getCuboidCost(long cuboid) {
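
The cast in getCostSaving changes the type the subtraction runs in: without it, descendantAggCost - cuboidCost is computed as a long (and can wrap for extreme operands) before the result is converted to double; with (double) on the first operand, the second operand is promoted as well and the subtraction itself happens in floating point. The same reasoning applies to the (double) cast added to calculateRollupRatio below. A rough sketch with deliberately extreme, made-up values:

long descendantAggCost = Long.MAX_VALUE;
long cuboidCost = -1L;
// long subtraction wraps first; the wrong value is then widened:
double wrapped = descendantAggCost - cuboidCost; // large negative
// double subtraction cannot wrap:
double correct = (double) descendantAggCost - cuboidCost; // large positive
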
@@ -125,7 +125,7 @@ public static Map<Long, Long> generateSourceCuboidStats(Map<Long, Long> statisti
nEffective++;
}
}

if (nEffective != 0)
srcCuboidsStats.put(cuboid, totalEstRowCount / nEffective);
else
@@ -349,7 +349,7 @@ public static boolean isDescendant(long cuboidToCheck, long parentCuboid) {
}

private static double calculateRollupRatio(Pair<Long, Long> rollupStats) {
- double rollupInputCount = rollupStats.getFirst() + rollupStats.getSecond();
+ double rollupInputCount = (double) rollupStats.getFirst() + rollupStats.getSecond();
return rollupInputCount == 0 ? 0 : 1.0 * rollupStats.getFirst() / rollupInputCount;
}
}
@@ -200,13 +200,14 @@ private void initSnapshotState(String tableName, File snapshotCacheFolder) {
private void initExecutors() {
this.cacheBuildExecutor = new ThreadPoolExecutor(0, 50, 60L, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("lookup-cache-build-thread"));
- this.cacheStateCheckExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(
- "lookup-cache-state-checker"));
- cacheStateCheckExecutor.scheduleAtFixedRate(cacheStateChecker, 10, 10 * 60, TimeUnit.SECONDS); // check every 10 minutes
+ this.cacheStateCheckExecutor = Executors
+ .newSingleThreadScheduledExecutor(new NamedThreadFactory("lookup-cache-state-checker"));
+ cacheStateCheckExecutor.scheduleAtFixedRate(cacheStateChecker, 10, 10 * 60L, TimeUnit.SECONDS); // check every 10 minutes
}

@Override
- public ILookupTable getCachedLookupTable(TableDesc tableDesc, ExtTableSnapshotInfo extTableSnapshotInfo, boolean buildIfNotExist) {
+ public ILookupTable getCachedLookupTable(TableDesc tableDesc, ExtTableSnapshotInfo extTableSnapshotInfo,
+ boolean buildIfNotExist) {
String resourcePath = extTableSnapshotInfo.getResourcePath();
if (inBuildingTables.containsKey(resourcePath)) {
logger.info("cache is in building for snapshot:" + resourcePath);
@@ -215,7 +216,8 @@ public ILookupTable getCachedLookupTable(TableDesc tableDesc, ExtTableSnapshotIn
CachedTableInfo cachedTableInfo = tablesCache.getIfPresent(resourcePath);
if (cachedTableInfo == null) {
if (buildIfNotExist) {
- buildSnapshotCache(tableDesc, extTableSnapshotInfo, getSourceLookupTable(tableDesc, extTableSnapshotInfo));
+ buildSnapshotCache(tableDesc, extTableSnapshotInfo,
+ getSourceLookupTable(tableDesc, extTableSnapshotInfo));
}
logger.info("no available cache ready for the table snapshot:" + extTableSnapshotInfo.getResourcePath());
return null;
@@ -231,14 +233,16 @@ private ILookupTable getSourceLookupTable(TableDesc tableDesc, ExtTableSnapshotI
}

@Override
- public void buildSnapshotCache(final TableDesc tableDesc, final ExtTableSnapshotInfo extTableSnapshotInfo, final ILookupTable sourceTable) {
+ public void buildSnapshotCache(final TableDesc tableDesc, final ExtTableSnapshotInfo extTableSnapshotInfo,
+ final ILookupTable sourceTable) {
if (extTableSnapshotInfo.getSignature().getSize() / 1024 > maxCacheSizeInKB * 2 / 3) {
logger.warn("the size is to large to build to cache for snapshot:{}, size:{}, skip cache building",
extTableSnapshotInfo.getResourcePath(), extTableSnapshotInfo.getSignature().getSize());
return;
}
final String[] keyColumns = extTableSnapshotInfo.getKeyColumns();
- final String cachePath = getSnapshotCachePath(extTableSnapshotInfo.getTableName(), extTableSnapshotInfo.getId());
+ final String cachePath = getSnapshotCachePath(extTableSnapshotInfo.getTableName(),
+ extTableSnapshotInfo.getId());
final String dbPath = getSnapshotStorePath(extTableSnapshotInfo.getTableName(), extTableSnapshotInfo.getId());
final String snapshotResPath = extTableSnapshotInfo.getResourcePath();

@@ -278,8 +282,8 @@ public CacheState getCacheState(ExtTableSnapshotInfo extTableSnapshotInfo) {
if (inBuildingTables.containsKey(resourcePath)) {
return CacheState.IN_BUILDING;
}
- File stateFile = getCacheStateFile(getSnapshotCachePath(extTableSnapshotInfo.getTableName(),
- extTableSnapshotInfo.getId()));
+ File stateFile = getCacheStateFile(
+ getSnapshotCachePath(extTableSnapshotInfo.getTableName(), extTableSnapshotInfo.getId()));
if (!stateFile.exists()) {
return CacheState.NONE;
}
@@ -301,14 +305,14 @@ public void checkCacheState() {
}

private void saveSnapshotCacheState(ExtTableSnapshotInfo extTableSnapshotInfo, String cachePath) {
- File stateFile = getCacheStateFile(getSnapshotCachePath(extTableSnapshotInfo.getTableName(),
- extTableSnapshotInfo.getId()));
+ File stateFile = getCacheStateFile(
+ getSnapshotCachePath(extTableSnapshotInfo.getTableName(), extTableSnapshotInfo.getId()));
try {
Files.write(CacheState.AVAILABLE.name(), stateFile, Charsets.UTF_8);
tablesCache.put(extTableSnapshotInfo.getResourcePath(), new CachedTableInfo(cachePath));
} catch (IOException e) {
throw new RuntimeException("error when write cache state for snapshot:"
+ extTableSnapshotInfo.getResourcePath());
throw new RuntimeException(
"error when write cache state for snapshot:" + extTableSnapshotInfo.getResourcePath());
}
}

@@ -347,17 +351,19 @@ public void run() {
}
}

- final Set<String> activeSnapshotSet = ExtTableSnapshotInfoManager.getInstance(config).getAllExtSnapshotResPaths();
+ final Set<String> activeSnapshotSet = ExtTableSnapshotInfoManager.getInstance(config)
+ .getAllExtSnapshotResPaths();

- List<Pair<String, File>> toRemovedCachedSnapshots = Lists.newArrayList(FluentIterable.from(
- allCachedSnapshots).filter(new Predicate<Pair<String, File>>() {
- @Override
+ List<Pair<String, File>> toRemovedCachedSnapshots = Lists.newArrayList(
+ FluentIterable.from(allCachedSnapshots).filter(new Predicate<Pair<String, File>>() {
+ @Override
public boolean apply(@Nullable Pair<String, File> input) {
long lastModified = input.getSecond().lastModified();
return !activeSnapshotSet.contains(input.getFirst()) && lastModified > 0
- && lastModified < (System.currentTimeMillis() - config.getExtTableSnapshotLocalCacheCheckVolatileRange());
- }
- }));
+ && lastModified < (System.currentTimeMillis()
+ - config.getExtTableSnapshotLocalCacheCheckVolatileRange());
+ }
+ }));
for (Pair<String, File> toRemovedCachedSnapshot : toRemovedCachedSnapshots) {
File snapshotCacheFolder = toRemovedCachedSnapshot.getSecond();
logger.info("removed cache file:{}, it is not referred by any cube",
@@ -161,9 +161,10 @@ public void run() {
startTime = System.currentTimeMillis();
continue;
} else if (size() < minReportSize && (System.currentTimeMillis() - startTime < maxReportTime)) {
logger.info("The number of records in the blocking queue is less than {} and " +
"the duration from last reporting is less than {} ms. " +
"Will delay to report!", minReportSize, maxReportTime);
logger.info(
"The number of records in the blocking queue is less than {} and "
+ "the duration from last reporting is less than {} ms. " + "Will delay to report!",
minReportSize, maxReportTime);
sleep();
continue;
}
@@ -177,7 +178,7 @@ public void run() {

private void sleep() {
try {
- Thread.sleep(60 * 1000);
+ Thread.sleep(60 * 1000L);
} catch (InterruptedException e) {
logger.warn("Interrupted during running");
}
@@ -155,7 +155,7 @@ public void applyLimitPushDown(IRealization realization, StorageLimitLevel stora
return;
}

- long temp = this.getOffset() + this.getLimit();
+ long temp = this.getOffset() + (long) this.getLimit();

if (!isValidPushDownLimit(temp)) {
logger.warn("Not enabling limit push down because current limit is invalid: " + this.getLimit());
@@ -43,7 +43,7 @@

public class ColumnToRowJob extends AbstractHadoopJob {
private static final Logger logger = LoggerFactory.getLogger(ColumnToRowJob.class);
- private static final long DEFAULT_SIZE_PER_REDUCER = 16 * 1024 * 1024;
+ private static final long DEFAULT_SIZE_PER_REDUCER = 16 * 1024 * 1024L;
private static final int MAX_REDUCERS = 1000;

@Override
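
Worth noting: 16 * 1024 * 1024 is 16,777,216, which fits comfortably in an int, so the old constant was computed correctly and merely widened on assignment; the L suffix here (like 10 * 60L and 60 * 1000L above) does not fix a live bug but makes the 64-bit intent explicit and satisfies the static-analysis rule named in the commit message.
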
@@ -136,7 +136,7 @@ public List<RexNode> getProjects() {
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
boolean hasRexOver = RexOver.containsOver(getProjects(), null);
RelOptCost relOptCost = super.computeSelfCost(planner, mq).multiplyBy(.05)
- .multiplyBy(getProjects().size() * (hasRexOver ? 50 : 1))
+ .multiplyBy(getProjects().size() * (double) (hasRexOver ? 50 : 1))
.plus(planner.getCostFactory().makeCost(0.1 * caseCount, 0, 0));
return planner.getCostFactory().makeCost(relOptCost.getRows(), 0, 0);
}
@@ -208,7 +208,7 @@ public TreeSet<QueryContext> getRunningQueries(
if (runTimeMoreThan == -1) {
return QueryContextFacade.getAllRunningQueries();
} else {
- return QueryContextFacade.getLongRunningQueries(runTimeMoreThan * 1000);
+ return QueryContextFacade.getLongRunningQueries(runTimeMoreThan * 1000L);
}
}
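
The multiplication here converts seconds to milliseconds; assuming runTimeMoreThan is an int, a large enough value would wrap in 32-bit arithmetic before reaching the callee. With the L suffix the multiply is done in 64 bits. For example, with a hypothetical value:

int runTimeMoreThan = 3_000_000; // roughly 35 days, in seconds
long wrong = runTimeMoreThan * 1000; // wraps to -1294967296
long right = runTimeMoreThan * 1000L; // 3000000000
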

@@ -18,7 +18,12 @@

package org.apache.kylin.rest.job;

- import com.google.common.collect.Lists;
+ import java.io.IOException;
+ import java.text.SimpleDateFormat;
+ import java.util.Date;
+ import java.util.List;
+ import java.util.Locale;
+
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
@@ -47,11 +52,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

- import java.io.IOException;
- import java.text.SimpleDateFormat;
- import java.util.Date;
- import java.util.List;
- import java.util.Locale;
+ import com.google.common.collect.Lists;

public class KylinHealthCheckJob extends AbstractApplication {
private static final Logger logger = LoggerFactory.getLogger(KylinHealthCheckJob.class);
@@ -288,7 +289,7 @@ private void checkDataExpansionRate(List<CubeInstance> cubes) {
long sizeRecordSize = cube.getInputRecordSizeBytes();
if (sizeRecordSize > 0) {
long cubeDataSize = cube.getSizeKB() * 1024;
- double expansionRate = cubeDataSize / sizeRecordSize;
+ double expansionRate = (double) cubeDataSize / sizeRecordSize;
if (sizeRecordSize > 1L * expansionCheckMinCubeSizeInGb * 1024 * 1024 * 1024) {
if (expansionRate > warningExpansionRate) {
logger.info("Cube: {} in project: {} with too large expansion rate: {}, cube data size: {}G",
@@ -23,6 +23,7 @@
import java.sql.Statement;
import java.sql.Types;
import java.util.Random;
+
import org.apache.kylin.metadata.datatype.DataType;
import org.apache.kylin.source.hive.DBConnConf;
import org.slf4j.Logger;
@@ -84,7 +85,7 @@ public static Connection getConnection(DBConnConf dbconf) {
logger.warn("while use:" + dbconf, e);
try {
int rt = r.nextInt(10);
- Thread.sleep(rt * 1000);
+ Thread.sleep(rt * 1000L);
} catch (InterruptedException e1) {
Thread.interrupted();
}