HADOOP-9241. DU refresh interval is not configurable. (harsh)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1502954 13f79535-47bb-0310-9956-ffa450edef68
QwertyManiac committed Jul 14, 2013
1 parent b980f2a commit adfd2fa
Showing 5 changed files with 21 additions and 3 deletions.
2 changes: 2 additions & 0 deletions hadoop-common-project/hadoop-common/CHANGES.txt
@@ -287,6 +287,8 @@ Release 2.3.0 - UNRELEASED

IMPROVEMENTS

+HADOOP-9241. DU refresh interval is not configurable (harsh)
+
OPTIMIZATIONS

BUG FIXES

4 changes: 4 additions & 0 deletions CommonConfigurationKeysPublic.java
@@ -54,6 +54,10 @@ public class CommonConfigurationKeysPublic {
public static final String FS_DF_INTERVAL_KEY = "fs.df.interval";
/** Default value for FS_DF_INTERVAL_KEY */
public static final long FS_DF_INTERVAL_DEFAULT = 60000;
+/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+public static final String FS_DU_INTERVAL_KEY = "fs.du.interval";
+/** Default value for FS_DU_INTERVAL_KEY */
+public static final long FS_DU_INTERVAL_DEFAULT = 600000;


//Defaults are not specified for following keys

5 changes: 3 additions & 2 deletions DU.java
@@ -20,6 +20,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Shell;

import java.io.BufferedReader;
@@ -64,8 +65,8 @@ public DU(File path, long interval) throws IOException {
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, Configuration conf) throws IOException {
-this(path, 600000L);
-//10 minutes default refresh interval
+this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
+CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT));
}

/**
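
A minimal usage sketch of the new behavior, for context: the FS_DU_INTERVAL_KEY constant and the DU(File, Configuration) constructor are the ones added by this patch, while the example class name, the /tmp path, and the 30000 ms value are purely illustrative.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.DU;

public class DuIntervalExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative override: refresh every 30 seconds instead of the
    // 600000 ms (10 minute) default shipped in core-default.xml.
    conf.setLong(CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY, 30000L);

    // The new constructor reads fs.du.interval from the configuration
    // instead of using the previously hard-coded 600000L.
    DU du = new DU(new File("/tmp"), conf);
    System.out.println("Bytes used under /tmp: " + du.getUsed());
  }
}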

6 changes: 6 additions & 0 deletions core-default.xml
@@ -464,6 +464,12 @@
<description>Disk usage statistics refresh interval in msec.</description>
</property>

+<property>
+<name>fs.du.interval</name>
+<value>600000</value>
+<description>File space usage statistics refresh interval in msec.</description>
+</property>
+
<property>
<name>fs.s3.block.size</name>
<value>67108864</value>
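
Because the default above ships in core-default.xml, a deployment can override it in core-site.xml in the usual way; the 300000 msec (five minute) value in this sketch is illustrative only.

<property>
  <name>fs.du.interval</name>
  <value>300000</value>
  <description>Site-specific du refresh interval; overrides the 600000 msec default.</description>
</property>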

7 changes: 6 additions & 1 deletion TestDU.java
@@ -24,6 +24,9 @@
import java.io.RandomAccessFile;
import java.util.Random;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
/** This test makes sure that "DU" does not get to run on each call to getUsed */
public class TestDU extends TestCase {
final static private File DU_DIR = new File(
@@ -106,7 +109,9 @@ public void testDU() throws IOException, InterruptedException {
public void testDUGetUsedWillNotReturnNegative() throws IOException {
File file = new File(DU_DIR, "data");
assertTrue(file.createNewFile());
-DU du = new DU(file, 10000);
+Configuration conf = new Configuration();
+conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 10000L);
+DU du = new DU(file, conf);
du.decDfsUsed(Long.MAX_VALUE);
long duSize = du.getUsed();
assertTrue(String.valueOf(duSize), duSize >= 0L);
