Skip to content

Checkpathfix #55

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,15 @@ public GlusterFileSystem(){
log.info("GIT INFO="+v);
log.info("GIT_TAG="+v.getTag());
}

public GlusterFileSystem(FileSystem rawLocalFileSystem){

/**
 * An internal constructor for creating a GlusterFileSystem from a raw
 * local filesystem (e.g. a GlusterVolume).
 *
 * Protected: intended for subclasses and package-local callers, not for
 * general clients. Example usage (from within the package):
 * <code>
 * FileSystem fs = new GlusterFileSystem(new GlusterVolume());
 * </code>
 *
 * @param rawLocalFileSystem the raw local filesystem to wrap; also kept
 *                           in the {@code rfs} field for direct access
 */
protected GlusterFileSystem(FileSystem rawLocalFileSystem){
    super(rawLocalFileSystem);
    rfs=rawLocalFileSystem;
}
Expand Down Expand Up @@ -113,6 +120,11 @@ public void copyToLocalFile(boolean delSrc,Path src,Path dst) throws IOException
FileUtil.copy(srcFs, src, dstFs, dst, delSrc, getConf());
}

@Override
protected void checkPath(Path path){
    // Delegate to the shared lenient check (scheme-only, authority ignored)
    // instead of FileSystem's stricter default; see Util.checkPath for the
    // rationale (better scheme/authority diagnostics).
    Util.checkPath(this, path);
}

/** Human-readable description of this filesystem (CRC checking disabled). */
@Override
public String toString(){
    final String description = "Gluster File System, no CRC.";
    return description;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,12 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOExcepti
FileUtil.copy(srcFs, src, dstFs, dst, delSrc, getConf());
}


@Override
protected void checkPath(Path path){
    // Delegate to the shared lenient check (scheme-only, authority ignored)
    // instead of FileSystem's stricter default; see Util.checkPath for the
    // rationale (better scheme/authority diagnostics).
    Util.checkPath(this, path);
}

/** Human-readable description of this filesystem (CRC checking enabled). */
@Override
public String toString(){
    final String description = "Gluster File System - CRC Enabled";
    return description;
}
Expand Down
19 changes: 17 additions & 2 deletions src/main/java/org/apache/hadoop/fs/glusterfs/GlusterVolume.java
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A Hadoop FileSystem over a locally mounted Gluster volume, implemented
 * as an extension of RawLocalFileSystem.
 */
public class GlusterVolume extends RawLocalFileSystem{

static final Logger log = LoggerFactory.getLogger(GlusterFileSystemCRC.class);
Expand All @@ -53,7 +56,7 @@ public GlusterVolume(Configuration conf){
public URI getUri() { return NAME; }

public void setConf(Configuration conf){
log.info("Initializing gluster volume..");
log.info("Initializing gluster volume: " + conf.toString());
super.setConf(conf);
String getfattrcmd = null;
if(conf!=null){
Expand Down Expand Up @@ -154,13 +157,25 @@ public BlockLocation[] getFileBlockLocations(FileStatus file,long start,long len

result=attr.getPathInfo(f.getPath(), start, len);
if(result==null){
log.info("Problem getting destination host for file "+f.getPath());
log.info("Problem getting destination host for file "+f.getPath() + " start=" + start + " len=" + len) ;
return null;
}

return result;
}

@Override
protected void checkPath(Path path){
    // Delegate to the shared lenient check (scheme-only, authority ignored)
    // rather than RawLocalFileSystem's inherited default; see Util.checkPath
    // for the rationale (better scheme/authority diagnostics).
    Util.checkPath(this, path);
}

/*
 * NOTE(review): dead alternative kept for reference — the stricter
 * inherited behavior may be restored at some point by delegating instead:
 *     protected void checkPath(Path arg0){
 *         super.checkPath(arg0);
 *     }
 */

/** Human-readable description including the local mount point. */
@Override
public String toString(){
    return String.format("Gluster Volume mounted at: %s", root);
}
Expand Down
32 changes: 32 additions & 0 deletions src/main/java/org/apache/hadoop/fs/glusterfs/Util.java
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,9 @@
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;

public class Util{
Expand All @@ -50,4 +52,34 @@ public static String[] getGET_PERMISSION_COMMAND(){
public static final boolean WINDOWS /* borrowed from Path.WINDOWS */
=System.getProperty("os.name").startsWith("Windows");
// / loads permissions, owner, and group from `ls -ld`

/**
 * Check that a Path belongs to this FileSystem.
 *
 * Lenient: the authority component is deliberately NOT compared, only the
 * scheme. This might be temporary ~ there could be a better long term
 * implementation. Having a custom implementation here is critical for
 * debugging, because the existing checkPath in Hadoop doesn't print all
 * the scheme/auth values.
 *
 * @param fs   the FileSystem the path is expected to belong to
 * @param path the path to validate
 * @throws IllegalArgumentException if the path carries a scheme or
 *         authority that does not match this filesystem's scheme
 */
public static void checkPath(FileSystem fs, Path path) {
    String thisScheme = fs.getUri().getScheme();
    String thisAuthority = fs.getUri().getAuthority();

    String thatScheme = path.toUri().getScheme();
    String thatAuthority = path.toUri().getAuthority();

    // A path with neither scheme nor authority is relative to this
    // filesystem and is always accepted.
    if (thatScheme == null && thatAuthority == null) {
        return;
    }
    // URI schemes are case-insensitive (RFC 3986, section 3.1).
    // Null-safe: the original code could NPE here when thisScheme was
    // null, then masked the NPE by catching Throwable without a cause.
    if (thisScheme != null && thisScheme.equalsIgnoreCase(thatScheme)) {
        return;
    }
    // IllegalArgumentException matches Hadoop's FileSystem#checkPath
    // contract (and is still a RuntimeException for existing callers).
    // Include every scheme/authority value so the mismatch is traceable
    // in the logs above.
    throw new IllegalArgumentException(
            "Schemes don't match: expecting: " + thisScheme
            + " (authority: " + thisAuthority + ") but input path is: "
            + thatScheme + " (authority: " + thatAuthority + ")");
}
}