Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;

/**
* This class provides an interface for implementors of a Hadoop file system
* (analogous to the VFS of Unix). Applications do not access this class;
Expand All @@ -72,7 +74,7 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class AbstractFileSystem {
public abstract class AbstractFileSystem implements PathCapabilities {
static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);

/** Recording statistics per a file system class. */
Expand Down Expand Up @@ -1371,4 +1373,16 @@ public CompletableFuture<FSDataInputStream> openFileWithOptions(Path path,
new CompletableFuture<>(), () -> open(path, bufferSize));
}

public boolean hasPathCapability(final Path path,
final String capability)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Been talking about something like this for years, thanks for working on this. Taking a Path instead of a scheme seems right: it is flexible (general design, specific implementation applies here). Was curious about examples where different paths in the same FS would have different capabilities. I suppose S3A could have different buckets in different regions or with different configurations (e.g. permissions, or S3Guard enabled/disabled, etc.) Just noticed you mentioned ViewFS as well. Another good case for per-path capabilities.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Was curious about examples where different paths in the same FS would have different capabilities.

Files in HDFS encryption zones behave differently; ViewFS relays things, and any DFS whose mount points may have different semantics can do it. Also, WASB has an option for special paths where leases need to be acquired before renames — HBase needs that.

throws IOException {
switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
case CommonPathCapabilities.FS_SYMLINKS:
// delegate to the existing supportsSymlinks() call.
return supportsSymlinks();
default:
// the feature is not implemented.
return false;
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

Expand All @@ -42,6 +43,8 @@
import org.apache.hadoop.util.LambdaUtils;
import org.apache.hadoop.util.Progressable;

import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;

/****************************************************************
* Abstract Checksumed FileSystem.
* It provide a basic implementation of a Checksumed FileSystem,
Expand Down Expand Up @@ -872,4 +875,23 @@ public FSDataOutputStreamBuilder createFile(Path path) {
public FSDataOutputStreamBuilder appendFile(Path path) {
return createDataOutputStreamBuilder(this, path).append();
}

/**
 * Disable those operations which the checksummed FS blocks:
 * append and concat are rejected outright; every other capability
 * probe is forwarded to the superclass.
 * {@inheritDoc}
 */
@Override
public boolean hasPathCapability(final Path path, final String capability)
    throws IOException {
  // qualify once; superclass validation also checks the arguments.
  final Path qualified = makeQualified(path);
  final String validated = validatePathCapabilityArgs(qualified, capability);
  if (CommonPathCapabilities.FS_APPEND.equals(validated)
      || CommonPathCapabilities.FS_CONCAT.equals(validated)) {
    // operations blocked by the checksum layer.
    return false;
  }
  return super.hasPathCapability(qualified, capability);
}

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.fs;

/**
 * Common path capabilities.
 * These are string constants which can be probed for through
 * {@code PathCapabilities.hasPathCapability(Path, String)}.
 */
public final class CommonPathCapabilities {

  /** Utility class: not to be instantiated. */
  private CommonPathCapabilities() {
  }

  /**
   * Does the store support
   * {@code FileSystem.setAcl(Path, List)},
   * {@code FileSystem.getAclStatus(Path)}
   * and related methods?
   * Value: {@value}.
   */
  public static final String FS_ACLS = "fs.capability.paths.acls";

  /**
   * Does the store support {@code FileSystem.append(Path)}?
   * Value: {@value}.
   */
  public static final String FS_APPEND = "fs.capability.paths.append";

  /**
   * Does the store support {@code FileSystem.getFileChecksum(Path)}?
   * Value: {@value}.
   */
  public static final String FS_CHECKSUMS = "fs.capability.paths.checksums";

  /**
   * Does the store support {@code FileSystem.concat(Path, Path[])}?
   * Value: {@value}.
   */
  public static final String FS_CONCAT = "fs.capability.paths.concat";

  /**
   * Does the store support {@code FileSystem.listCorruptFileBlocks(Path)}?
   * Value: {@value}.
   */
  public static final String FS_LIST_CORRUPT_FILE_BLOCKS =
      "fs.capability.paths.list-corrupt-file-blocks";

  /**
   * Does the store support
   * {@code FileSystem.createPathHandle(FileStatus, Options.HandleOpt...)}
   * and related methods?
   * Value: {@value}.
   */
  public static final String FS_PATHHANDLES = "fs.capability.paths.pathhandles";

  /**
   * Does the store support {@code FileSystem.setPermission(Path, FsPermission)}
   * and related methods?
   * Value: {@value}.
   */
  public static final String FS_PERMISSIONS = "fs.capability.paths.permissions";

  /**
   * Does this filesystem connector only support filesystem read operations?
   * For example, the {@code HttpFileSystem} is always read-only.
   * This is different from "is the specific instance and path read only?",
   * which must be determined by checking permissions (where supported), or
   * attempting write operations under a path.
   * Value: {@value}.
   */
  public static final String FS_READ_ONLY_CONNECTOR =
      "fs.capability.paths.read-only-connector";

  /**
   * Does the store support snapshots through
   * {@code FileSystem.createSnapshot(Path)} and related methods?
   * Value: {@value}.
   */
  public static final String FS_SNAPSHOTS = "fs.capability.paths.snapshots";

  /**
   * Does the store support {@code FileSystem.setStoragePolicy(Path, String)}
   * and related methods?
   * Value: {@value}.
   */
  public static final String FS_STORAGEPOLICY =
      "fs.capability.paths.storagepolicy";

  /**
   * Does the store support symlinks through
   * {@code FileSystem.createSymlink(Path, Path, boolean)} and related methods?
   * Value: {@value}.
   */
  public static final String FS_SYMLINKS =
      "fs.capability.paths.symlinks";

  /**
   * Does the store support {@code FileSystem.truncate(Path, long)}?
   * Value: {@value}.
   */
  public static final String FS_TRUNCATE =
      "fs.capability.paths.truncate";

  /**
   * Does the store support XAttributes through
   * {@code FileSystem.setXAttr(Path, String, byte[])} and related methods?
   * Value: {@value}.
   */
  public static final String FS_XATTRS = "fs.capability.paths.xattrs";

}
Original file line number Diff line number Diff line change
Expand Up @@ -281,4 +281,11 @@ public CompletableFuture<FSDataInputStream> openFileWithOptions(Path path,
int bufferSize) throws IOException {
return fsImpl.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
}

/**
 * Relay the probe to the inner {@code fsImpl} instance, which
 * performs its own argument validation and capability lookup.
 * @param path path to query the capability of.
 * @param capability string to query the path for support.
 * @return whatever the wrapped filesystem reports.
 * @throws IOException raised by the wrapped filesystem.
 */
@Override
public boolean hasPathCapability(final Path path,
    final String capability)
    throws IOException {
return fsImpl.hasPathCapability(path, capability);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl;
import org.apache.hadoop.fs.impl.FsLinkResolution;
import org.apache.hadoop.fs.impl.PathCapabilitiesSupport;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
Expand All @@ -68,6 +70,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;

/**
* The FileContext class provides an interface for users of the Hadoop
* file system. It exposes a number of file system operations, e.g. create,
Expand Down Expand Up @@ -171,7 +175,7 @@

@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileContext {
public class FileContext implements PathCapabilities {

public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
/**
Expand Down Expand Up @@ -2934,4 +2938,21 @@ public CompletableFuture<FSDataInputStream> next(
}.resolve(FileContext.this, absF);
}
}

/**
 * Return the path capabilities of the bonded {@code AbstractFileSystem}.
 * Arguments are validated eagerly, before any link resolution work
 * is started; resolution then relays the probe to whichever
 * filesystem the (possibly symlinked) path lands on.
 * @param path path to query the capability of.
 * @param capability string to query the stream support for.
 * @return true iff the capability is supported under that FS.
 * @throws IOException path resolution or other IO failure
 * @throws IllegalArgumentException invalid arguments
 */
@Override
public boolean hasPathCapability(Path path, String capability)
    throws IOException {
  // fail fast on bad arguments before resolving links.
  validatePathCapabilityArgs(path, capability);
  return FsLinkResolution.resolve(this,
      fixRelativePart(path),
      (fs, p) -> fs.hasPathCapability(p, capability));
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@

import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;

/****************************************************************
* An abstract base class for a fairly generic filesystem. It
Expand Down Expand Up @@ -134,7 +135,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileSystem extends Configured
implements Closeable, DelegationTokenIssuer {
implements Closeable, DelegationTokenIssuer, PathCapabilities {
public static final String FS_DEFAULT_NAME_KEY =
CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
public static final String DEFAULT_FS =
Expand Down Expand Up @@ -720,6 +721,7 @@ protected FileSystem() {
*
*/
protected void checkPath(Path path) {
Preconditions.checkArgument(path != null, "null path");
URI uri = path.toUri();
String thatScheme = uri.getScheme();
if (thatScheme == null) // fs is relative
Expand Down Expand Up @@ -3259,6 +3261,25 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
return ret;
}

/**
 * The base FileSystem implementation generally has no knowledge
 * of the capabilities of actual implementations.
 * Unless it has a way to explicitly determine the capabilities,
 * this method returns false.
 * Symlink support is the one probe handled here, by combining
 * {@link #supportsSymlinks()} with the process-wide enablement flag.
 * @param path path to query the capability of.
 * @param capability non-null, non-empty string to query the path for support.
 * @return true if the capability is supported under that part of the FS.
 * @throws IOException raised on path resolution failures.
 * @throws IllegalArgumentException invalid arguments
 */
@Override
public boolean hasPathCapability(final Path path, final String capability)
    throws IOException {
  switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
  case CommonPathCapabilities.FS_SYMLINKS:
    // delegate to the existing supportsSymlinks() call.
    return supportsSymlinks() && areSymlinksEnabled();
  default:
    // the feature is not implemented.
    return false;
  }
}

// making it volatile to be able to do a double checked locking
private volatile static boolean FILE_SYSTEMS_LOADED = false;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -729,4 +729,11 @@ protected CompletableFuture<FSDataInputStream> openFileWithOptions(
return fs.openFileWithOptions(pathHandle, mandatoryKeys, options,
bufferSize);
}

/**
 * Relay the capability probe to the filtered filesystem {@code fs},
 * which performs its own argument validation.
 * @param path path to query the capability of.
 * @param capability string to query the path for support.
 * @return whatever the filtered filesystem reports.
 * @throws IOException raised by the filtered filesystem.
 */
@Override
public boolean hasPathCapability(final Path path, final String capability)
    throws IOException {
  return fs.hasPathCapability(path, capability);
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -446,4 +446,9 @@ public CompletableFuture<FSDataInputStream> openFileWithOptions(
return myFs.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
}

/**
 * Relay the capability probe to the inner filesystem {@code myFs},
 * which performs its own argument validation.
 * @param path path to query the capability of.
 * @param capability string to query the path for support.
 * @return whatever the inner filesystem reports.
 * @throws IOException raised by the inner filesystem.
 */
@Override
public boolean hasPathCapability(final Path path,
    final String capability)
    throws IOException {
  return myFs.hasPathCapability(path, capability);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@
import java.net.URLDecoder;
import java.util.*;

import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;

/**
* This is an implementation of the Hadoop Archive
* Filesystem. This archive Filesystem has index files
Expand Down Expand Up @@ -899,7 +901,22 @@ public void setPermission(Path p, FsPermission permission)
throws IOException {
throw new IOException("Har: setPermission not allowed");
}


/**
 * Declare that this filesystem connector is always read only.
 * The path is qualified before validation, for consistency with
 * the other {@code hasPathCapability} implementations.
 * {@inheritDoc}
 */
@Override
public boolean hasPathCapability(final Path path, final String capability)
    throws IOException {
  switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
  case CommonPathCapabilities.FS_READ_ONLY_CONNECTOR:
    return true;
  default:
    // everything else, including all write operations: unsupported.
    return false;
  }
}

/**
* Hadoop archives input stream. This input stream fakes EOF
* since archive files are part of bigger part files.
Expand Down
Loading