Skip to content

HDFS-14718. HttpFS: Sort response by key names as WebHDFS does #1270

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: trunk
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@
import java.io.OutputStream;
import java.util.Collection;
import java.util.EnumSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
Expand All @@ -74,7 +73,7 @@ public class FSOperations {
* @return JSON map suitable for wire transport
*/
private static Map<String, Object> toJson(FileStatus fileStatus) {
Map<String, Object> json = new LinkedHashMap<>();
Map<String, Object> json = new TreeMap<>();
json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(fileStatus, true));
return json;
}
Expand All @@ -87,8 +86,8 @@ private static Map<String, Object> toJson(FileStatus fileStatus) {
@SuppressWarnings({"unchecked"})
private static Map<String, Object> toJson(FileStatus[] fileStatuses,
boolean isFile) {
Map<String, Object> json = new LinkedHashMap<>();
Map<String, Object> inner = new LinkedHashMap<>();
Map<String, Object> json = new TreeMap<>();
Map<String, Object> inner = new TreeMap<>();
JSONArray statuses = new JSONArray();
for (FileStatus f : fileStatuses) {
statuses.add(toJsonInner(f, isFile));
Expand All @@ -103,7 +102,7 @@ private static Map<String, Object> toJson(FileStatus[] fileStatuses,
*/
private static Map<String, Object> toJsonInner(FileStatus fileStatus,
boolean emptyPathSuffix) {
Map<String, Object> json = new LinkedHashMap<String, Object>();
Map<String, Object> json = new TreeMap<String, Object>();
json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
(emptyPathSuffix) ? "" : fileStatus.getPath().getName());
json.put(HttpFSFileSystem.TYPE_JSON,
Expand Down Expand Up @@ -145,8 +144,8 @@ private static Map<String, Object> toJsonInner(FileStatus fileStatus,
*/
private static Map<String, Object> toJson(FileSystem.DirectoryEntries
entries, boolean isFile) {
Map<String, Object> json = new LinkedHashMap<>();
Map<String, Object> inner = new LinkedHashMap<>();
Map<String, Object> json = new TreeMap<>();
Map<String, Object> inner = new TreeMap<>();
Map<String, Object> fileStatuses = toJson(entries.getEntries(), isFile);
inner.put(HttpFSFileSystem.PARTIAL_LISTING_JSON, fileStatuses);
inner.put(HttpFSFileSystem.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
Expand All @@ -163,8 +162,8 @@ private static Map<String, Object> toJson(FileSystem.DirectoryEntries
*/
@SuppressWarnings({"unchecked"})
private static Map<String,Object> aclStatusToJSON(AclStatus aclStatus) {
Map<String,Object> json = new LinkedHashMap<String,Object>();
Map<String,Object> inner = new LinkedHashMap<String,Object>();
Map<String,Object> json = new TreeMap<String,Object>();
Map<String,Object> inner = new TreeMap<String,Object>();
JSONArray entriesArray = new JSONArray();
inner.put(HttpFSFileSystem.OWNER_JSON, aclStatus.getOwner());
inner.put(HttpFSFileSystem.GROUP_JSON, aclStatus.getGroup());
Expand All @@ -187,12 +186,12 @@ private static Map<String,Object> aclStatusToJSON(AclStatus aclStatus) {
*/
@SuppressWarnings({"unchecked"})
private static Map fileChecksumToJSON(FileChecksum checksum) {
Map json = new LinkedHashMap();
Map json = new TreeMap();
json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON, checksum.getAlgorithmName());
json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes()));
json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
Map response = new LinkedHashMap();
Map response = new TreeMap();
response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
return response;
}
Expand All @@ -209,11 +208,11 @@ private static Map fileChecksumToJSON(FileChecksum checksum) {
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrsToJSON(Map<String, byte[]> xAttrs,
XAttrCodec encoding) throws IOException {
Map jsonMap = new LinkedHashMap();
Map jsonMap = new TreeMap();
JSONArray jsonArray = new JSONArray();
if (xAttrs != null) {
for (Entry<String, byte[]> e : xAttrs.entrySet()) {
Map json = new LinkedHashMap();
Map json = new TreeMap();
json.put(HttpFSFileSystem.XATTR_NAME_JSON, e.getKey());
if (e.getValue() != null) {
json.put(HttpFSFileSystem.XATTR_VALUE_JSON,
Expand All @@ -236,7 +235,7 @@ private static Map xAttrsToJSON(Map<String, byte[]> xAttrs,
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrNamesToJSON(List<String> names) throws IOException {
Map jsonMap = new LinkedHashMap();
Map jsonMap = new TreeMap();
jsonMap.put(HttpFSFileSystem.XATTRNAMES_JSON, JSONArray.toJSONString(names));
return jsonMap;
}
Expand All @@ -251,7 +250,7 @@ private static Map xAttrNamesToJSON(List<String> names) throws IOException {
*/
@SuppressWarnings({"unchecked"})
private static Map contentSummaryToJSON(ContentSummary contentSummary) {
Map json = new LinkedHashMap();
Map json = new TreeMap();
json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
contentSummary.getDirectoryCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_ECPOLICY_JSON,
Expand All @@ -269,7 +268,7 @@ private static Map contentSummaryToJSON(ContentSummary contentSummary) {
json.put(e.getKey(), e.getValue());
}
}
Map response = new LinkedHashMap();
Map response = new TreeMap();
response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
return response;
}
Expand All @@ -280,14 +279,14 @@ private static Map contentSummaryToJSON(ContentSummary contentSummary) {
*/
@SuppressWarnings({"unchecked"})
private static Map quotaUsageToJSON(QuotaUsage quotaUsage) {
Map response = new LinkedHashMap();
Map response = new TreeMap();
Map quotaUsageMap = quotaUsageToMap(quotaUsage);
response.put(HttpFSFileSystem.QUOTA_USAGE_JSON, quotaUsageMap);
return response;
}

private static Map<String, Object> quotaUsageToMap(QuotaUsage quotaUsage) {
Map<String, Object> result = new LinkedHashMap<>();
Map<String, Object> result = new TreeMap<>();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would be nice to add a unit test to make sure that the order is preserved.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I haven't found any existing unit tests that compare the order of the response JSON of WebHDFS and HttpFS. e.g. BaseTestHttpFSWith#testQuotaUsage checks the correctness of output from DFS/HttpFS but doesn't care about the order.
That might be something we want to do with a new unit test class. Or is there one test class that does this already?

Initially I was just looking at output for LISTSTATUS. I discovered that WebHDFS responds with sorted keys, but HttpFS doesn't. Then I found that other responses from HttpFS aren't sorted either, while the corresponding WebHDFS responses are. So I extended the scope of this jira.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I honestly don't think people care about the order.

Copy link
Contributor Author

@smengcl smengcl Aug 14, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You are right. People won't care in production.
I filed this jira just because I was comparing the RAW JSON, and they weren't in the same order.

result.put(HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON,
quotaUsage.getFileAndDirectoryCount());
result.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON, quotaUsage.getQuota());
Expand Down