Skip to content

HDFS-8631. WebHDFS : Support setQuota #1253

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 7 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1788,6 +1788,32 @@ public QuotaUsage getQuotaUsage(Path f) throws IOException {
return getContentSummary(f);
}

/**
 * Set quota for the given {@link Path}.
 *
 * <p>This base-class implementation does not support quotas: it always
 * throws {@link UnsupportedOperationException} (via {@code
 * methodNotSupported()}). File systems that support quotas override it.
 *
 * @param src the target path to set quota for
 * @param namespaceQuota the namespace quota (i.e., # of files/directories) to set
 * @param storagespaceQuota the storage space quota to set
 * @throws IOException IO failure
 * @throws UnsupportedOperationException if this file system does not
 *         support quotas
 */
public void setQuota(Path src, final long namespaceQuota,
final long storagespaceQuota) throws IOException {
methodNotSupported();
}

/**
 * Set per storage type quota for the given {@link Path}.
 *
 * <p>This base-class implementation does not support storage type quotas:
 * it always throws {@link UnsupportedOperationException} (via {@code
 * methodNotSupported()}). File systems that support them override it.
 *
 * @param src the target path to set storage type quota for
 * @param type the storage type to set
 * @param quota the quota to set for the given storage type
 * @throws IOException IO failure
 * @throws UnsupportedOperationException if this file system does not
 *         support storage type quotas
 */
public void setQuotaByStorageType(Path src, final StorageType type,
final long quota) throws IOException {
methodNotSupported();
}

/**
* The default filter accepts all paths.
*/
Expand Down Expand Up @@ -4455,6 +4481,22 @@ protected CompletableFuture<FSDataInputStream> openFileWithOptions(
return result;
}

/**
 * Helper method that throws an {@link UnsupportedOperationException} naming
 * the {@link FileSystem} method from which it was invoked.
 *
 * @throws UnsupportedOperationException always, identifying the calling
 *         method when the stack trace makes it available
 */
protected void methodNotSupported() {
  // The order of the stacktrace elements looks like this (from top to bottom):
  // - java.lang.Thread.getStackTrace
  // - org.apache.hadoop.fs.FileSystem.methodNotSupported
  // - <the FileSystem method>
  // therefore, to find out the current method name, we use the element at
  // index 2. Thread#getStackTrace is permitted to return a shorter (even
  // zero-length) array, so guard the index to avoid masking the intended
  // exception with an ArrayIndexOutOfBoundsException.
  final StackTraceElement[] stack = Thread.currentThread().getStackTrace();
  final String name = stack.length > 2
      ? stack[2].getMethodName() : "(unknown method)";
  throw new UnsupportedOperationException(getClass().getCanonicalName() +
      " does not support method " + name);
}

/**
* Create instance of the standard {@link FSDataInputStreamBuilder} for the
* given filesystem and path.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,8 @@ public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
public Path fixRelativePart(Path p);
public ContentSummary getContentSummary(Path f);
public QuotaUsage getQuotaUsage(Path f);
public void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
public void setQuotaByStorageType(Path f, StorageType type, long quota);
StorageStatistics getStorageStatistics();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,8 @@ public FSDataOutputStream create(Path f, FsPermission permission,
public void processDeleteOnExit();
public ContentSummary getContentSummary(Path f);
public QuotaUsage getQuotaUsage(Path f);
public void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
public void setQuotaByStorageType(Path f, StorageType type, long quota);
public FsStatus getStatus();
public FileStatus[] listStatus(Path f, PathFilter filter);
public FileStatus[] listStatusBatch(Path f, byte[] token);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1000,6 +1000,7 @@ public QuotaUsage next(final FileSystem fs, final Path p)
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
* long, long, StorageType)
*/
@Override
public void setQuota(Path src, final long namespaceQuota,
final long storagespaceQuota) throws IOException {
statistics.incrementWriteOps(1);
Expand Down Expand Up @@ -1029,6 +1030,7 @@ public Void next(final FileSystem fs, final Path p)
* @param quota value of the specific storage type quota to be modified.
* Maybe {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
*/
@Override
public void setQuotaByStorageType(Path src, final StorageType type,
final long quota)
throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@
import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsCreateModes;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
Expand Down Expand Up @@ -1913,6 +1914,48 @@ QuotaUsage decodeResponse(Map<?, ?> json) {
}.run();
}

/**
 * Set quota for the given path via the WebHDFS SETQUOTA operation.
 *
 * @param p the target path to set quota for
 * @param namespaceQuota the namespace quota; must be positive or
 *        {@code HdfsConstants.QUOTA_RESET}
 * @param storagespaceQuota the storage space quota; must be non-negative or
 *        {@code HdfsConstants.QUOTA_RESET}
 * @throws IOException IO failure
 * @throws IllegalArgumentException if either quota value is invalid
 */
@Override
public void setQuota(Path p, final long namespaceQuota,
    final long storagespaceQuota) throws IOException {
  // sanity check: namespace quota must be strictly positive, storage space
  // quota non-negative; the QUOTA_RESET sentinel is allowed for both.
  final boolean nsValid = namespaceQuota > 0
      || namespaceQuota == HdfsConstants.QUOTA_RESET;
  final boolean ssValid = storagespaceQuota >= 0
      || storagespaceQuota == HdfsConstants.QUOTA_RESET;
  if (!nsValid || !ssValid) {
    throw new IllegalArgumentException("Invalid values for quota : " +
        namespaceQuota + " and " + storagespaceQuota);
  }

  statistics.incrementWriteOps(1);
  storageStatistics.incrementOpCounter(OpType.SET_QUOTA_USAGE);

  final HttpOpParam.Op op = PutOpParam.Op.SETQUOTA;
  new FsPathRunner(op, p, new NameSpaceQuotaParam(namespaceQuota),
      new StorageSpaceQuotaParam(storagespaceQuota)).run();
}

/**
 * Set per storage type quota for the given path via the WebHDFS
 * SETQUOTABYSTORAGETYPE operation.
 *
 * @param path the target path to set storage type quota for
 * @param type the storage type to set; must be non-null and support
 *        per-type quotas
 * @param quota the quota value; must be positive or
 *        {@code HdfsConstants.QUOTA_RESET}
 * @throws IOException IO failure
 * @throws IllegalArgumentException if the quota value or storage type is
 *         invalid
 */
@Override
public void setQuotaByStorageType(Path path, StorageType type, long quota)
    throws IOException {
  // sanity checks
  if (quota <= 0 && quota != HdfsConstants.QUOTA_RESET) {
    // message format matches setQuota() above for consistency
    throw new IllegalArgumentException("Invalid values for quota : " + quota);
  }
  if (type == null) {
    throw new IllegalArgumentException("Invalid storage type (null)");
  }
  if (!type.supportTypeQuota()) {
    throw new IllegalArgumentException(
        "Quota for storage type '" + type.toString() + "' is not supported");
  }

  statistics.incrementWriteOps(1);
  // NOTE(review): the constant name below carries a pre-existing typo
  // ("BYTSTORAGEYPE"); it is presumably declared that way in
  // DFSOpsCountStatistics.OpType, so it is referenced verbatim — confirm.
  storageStatistics.incrementOpCounter(OpType.SET_QUOTA_BYTSTORAGEYPE);

  final HttpOpParam.Op op = PutOpParam.Op.SETQUOTABYSTORAGETYPE;
  new FsPathRunner(op, path, new StorageTypeParam(type.name()),
      new StorageSpaceQuotaParam(quota)).run();
}

@Override
public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

/** The name space quota parameter for directory. */
public class NameSpaceQuotaParam extends LongParam {
/** Parameter name. */
public static final String NAME = "namespacequota";
/** Default parameter value ({@link Long#MAX_VALUE}).
 * NOTE(review): this literal is the string form of Long.MAX_VALUE,
 * presumably matching HdfsConstants.QUOTA_DONT_SET so that an absent
 * request parameter leaves the quota untouched — confirm against
 * HdfsConstants. */
public static final String DEFAULT = "9223372036854775807";

private static final Domain DOMAIN = new Domain(NAME);

/**
 * Constructs from a quota value; QUOTA_RESET and QUOTA_DONT_SET are
 * passed as the special sentinel values accepted by the domain.
 *
 * @param value the namespace quota value
 */
public NameSpaceQuotaParam(final Long value) {
super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
HdfsConstants.QUOTA_DONT_SET);
}

/**
 * Constructs by parsing the string form of the parameter.
 *
 * @param str the string representation of the quota value
 */
public NameSpaceQuotaParam(final String str) {
this(DOMAIN.parse(str));
}

@Override
public String getName() {
return NAME;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,9 @@ public enum Op implements HttpOpParam.Op {
RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
SETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),

SETQUOTA(false, HttpURLConnection.HTTP_OK),
SETQUOTABYSTORAGETYPE(false, HttpURLConnection.HTTP_OK),

NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);

final boolean doOutputAndRedirect;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

/** The storage space quota parameter for directory. */
public class StorageSpaceQuotaParam extends LongParam {
/** Parameter name. */
public static final String NAME = "storagespacequota";
/** Default parameter value ({@link Long#MAX_VALUE}).
 * NOTE(review): this literal is the string form of Long.MAX_VALUE,
 * presumably matching HdfsConstants.QUOTA_DONT_SET so that an absent
 * request parameter leaves the quota untouched — confirm against
 * HdfsConstants. */
public static final String DEFAULT = "9223372036854775807";

private static final Domain DOMAIN = new Domain(NAME);

/**
 * Constructs from a quota value; QUOTA_RESET and QUOTA_DONT_SET are
 * passed as the special sentinel values accepted by the domain.
 *
 * @param value the storage space quota value
 */
public StorageSpaceQuotaParam(final Long value) {
super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
HdfsConstants.QUOTA_DONT_SET);
}

/**
 * Constructs by parsing the string form of the parameter.
 *
 * @param str the string representation of the quota value
 */
public StorageSpaceQuotaParam(final String str) {
this(DOMAIN.parse(str));
}

@Override
public String getName() {
return NAME;
}
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;

/** Storage type parameter. */
public class StorageTypeParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "storagetype";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructs from the string form of the parameter; a null or
   * empty (default) value is normalized to null.
   *
   * @param str the string representation of the storage type
   */
  public StorageTypeParam(final String str) {
    super(DOMAIN, normalize(str));
  }

  /** Maps null or the empty default value to null; returns others as-is. */
  private static String normalize(final String str) {
    if (str == null || str.equals(DEFAULT)) {
      return null;
    }
    return str;
  }

  @Override
  public String getName() {
    return NAME;
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -547,6 +547,47 @@ public Map execute(FileSystem fs) throws IOException {
}
}

/**
 * Executor that performs a setQuota FileSystemAccess files system operation.
 */
@InterfaceAudience.Private
public static class FSSetQuota
    implements FileSystemAccess.FileSystemExecutor<Void> {
  private final Path target;
  private final long nsQuota;
  private final long ssQuota;

  /**
   * Creates a setQuota executor.
   *
   * @param path path of the directory whose quota is to be set
   * @param namespaceQuota namespace quota to apply
   * @param storagespaceQuota storage space quota to apply
   */
  public FSSetQuota(String path, long namespaceQuota,
      long storagespaceQuota) {
    this.target = new Path(path);
    this.nsQuota = namespaceQuota;
    this.ssQuota = storagespaceQuota;
  }

  /**
   * Executes the filesystem operation.
   *
   * @param fs filesystem instance implementing the operation
   * @return void
   * @throws IOException thrown if an IO error occurred
   */
  @Override
  public Void execute(FileSystem fs) throws IOException {
    fs.setQuota(target, nsQuota, ssQuota);
    return null;
  }
}

/**
 * Executor that performs a setQuotaByStorageType FileSystemAccess files
 * system operation.
 */
@InterfaceAudience.Private
public static class FSSetQuotaByStorageType
    implements FileSystemAccess.FileSystemExecutor<Void> {
  private final Path target;
  private final StorageType storageType;
  private final long typeQuota;

  /**
   * Creates a setQuotaByStorageType executor.
   *
   * @param path path of the directory whose storage type quota is to be set
   * @param type storage type the quota applies to
   * @param quota quota value to apply for the storage type
   */
  public FSSetQuotaByStorageType(String path, StorageType type, long quota) {
    this.target = new Path(path);
    this.storageType = type;
    this.typeQuota = quota;
  }

  /**
   * Executes the filesystem operation.
   *
   * @param fs filesystem instance implementing the operation
   * @return void
   * @throws IOException thrown if an IO error occurred
   */
  @Override
  public Void execute(FileSystem fs) throws IOException {
    fs.setQuotaByStorageType(target, storageType, typeQuota);
    return null;
  }
}

/**
* Executor that performs a create FileSystemAccess files system operation.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.NameSpaceQuotaParam;
import org.apache.hadoop.hdfs.web.resources.NewLengthParam;
import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
Expand All @@ -73,6 +74,8 @@
import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
import org.apache.hadoop.hdfs.web.resources.StartAfterParam;
import org.apache.hadoop.hdfs.web.resources.StoragePolicyParam;
import org.apache.hadoop.hdfs.web.resources.StorageSpaceQuotaParam;
import org.apache.hadoop.hdfs.web.resources.StorageTypeParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.TokenKindParam;
import org.apache.hadoop.hdfs.web.resources.TokenServiceParam;
Expand Down Expand Up @@ -209,7 +212,10 @@ protected Response put(
final CreateFlagParam createFlagParam,
final NoRedirectParam noredirectParam,
final StoragePolicyParam policyName,
final ECPolicyParam ecpolicy
final ECPolicyParam ecpolicy,
final NameSpaceQuotaParam namespaceQuota,
final StorageSpaceQuotaParam storagespaceQuota,
final StorageTypeParam storageType
) throws IOException, URISyntaxException {

switch(op.getValue()) {
Expand Down Expand Up @@ -261,7 +267,7 @@ protected Response put(
accessTime, renameOptions, createParent, delegationTokenArgument,
aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
oldSnapshotName, exclDatanodes, createFlagParam, noredirectParam,
policyName, ecpolicy);
policyName, ecpolicy, namespaceQuota, storagespaceQuota, storageType);
}
default:
throw new UnsupportedOperationException(op + " is not supported");
Expand Down
Loading