diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 5435da8419f5e..9567bcd63bc2e 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -8,8 +8,15 @@ on: pull_request_target: types: [opened, synchronize, reopened] +permissions: + contents: read # to fetch code (actions/checkout) + jobs: gradle-check: + permissions: + contents: read # to fetch code (actions/checkout) + pull-requests: write # to create or update comment (peter-evans/create-or-update-comment) + runs-on: ubuntu-latest timeout-minutes: 130 steps: diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index ca05aee8be378..ac94f5ef5ec5e 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -2,6 +2,8 @@ name: Link Checker on: schedule: - cron: '0 0 * * *' +permissions: + contents: read # to fetch code (actions/checkout) jobs: linkchecker: if: github.repository == 'opensearch-project/OpenSearch' diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 42c2d21d106ce..d1b5e90484ec4 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -5,6 +5,7 @@ on: tags: - '*.*.*' +permissions: {} jobs: build: runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 08206e62b4aba..2ac7b584cb13c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.0] ### Added +- Hardened token permissions in GitHub workflows ([#4587](https://github.com/opensearch-project/OpenSearch/pull/4587)) - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) @@ -56,6 +57,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Support remote translog transfer for request level durability([#4480](https://github.com/opensearch-project/OpenSearch/pull/4480)) - Changed http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) +- Change http code for DecommissioningFailedException from 500 to 400 ([#5283](https://github.com/opensearch-project/OpenSearch/pull/5283)) ### Deprecated @@ -84,6 +86,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Prevent deletion of snapshots that are backing searchable snapshot indexes ([#5069](https://github.com/opensearch-project/OpenSearch/pull/5069)) +- Add max_shard_size parameter for shrink API ([#5229](https://github.com/opensearch-project/OpenSearch/pull/5229)) ### Dependencies - Bumps `bcpg-fips` from 1.0.5.1 to 1.0.7.1 @@ -96,6 +99,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed ### Fixed +- Fix 1.x compatibility bug with stored Tasks ([#5412](https://github.com/opensearch-project/OpenSearch/pull/5412)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.4...HEAD diff --git 
a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java index 2a22c8d7d19e9..ebbd813c9fe15 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Collections; @@ -58,6 +59,7 @@ public class ResizeRequest extends TimedRequest implements Validatable, ToXConte private final String targetIndex; private Settings settings = Settings.EMPTY; private Set aliases = new HashSet<>(); + private ByteSizeValue maxShardSize; /** * Creates a new resize request @@ -155,6 +157,24 @@ public ActiveShardCount getWaitForActiveShards() { return waitForActiveShards; } + /** + * Sets the maximum size of a primary shard in the new shrunken index. + * This parameter can be used to calculate the lowest factor of the source index's shards number + * which satisfies the maximum shard size requirement. + * + * @param maxShardSize the maximum size of a primary shard in the new shrunken index + */ + public void setMaxShardSize(ByteSizeValue maxShardSize) { + this.maxShardSize = maxShardSize; + } + + /** + * Returns the maximum size of a primary shard in the new shrunken index. + */ + public ByteSizeValue getMaxShardSize() { + return maxShardSize; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index 512cc058a64a7..7ed06129dc893 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -79,6 +79,7 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import org.junit.Assert; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Arrays; @@ -701,6 +702,8 @@ private void resizeTest(ResizeType resizeType, CheckedFunction \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` +APP_BASE_NAME=${0##*/} # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. 
-MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar @@ -87,9 +121,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -98,7 +132,7 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" + JAVACMD=java which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the @@ -106,80 +140,95 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
# For Cygwin or MSYS, switch paths to Windows format before running java -if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=`expr $i + 1` + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - 0) set -- ;; - 1) set -- "$args0" ;; - 2) set -- "$args0" "$args1" ;; - 3) set -- "$args0" "$args1" "$args2" ;; - 4) set -- "$args0" "$args1" "$args2" "$args3" ;; - 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=`save "$@"` +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
+# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' exec "$JAVACMD" "$@" diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index f42b44b56ccb8..0380b5f229838 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,10 +38,10 @@ opensearchplugin { } versions << [ - 'tika' : '2.5.0', - 'pdfbox': '2.0.25', - 'poi' : '5.2.2', - 'mime4j': '0.8.3' + 'tika' : '2.6.0', + 'pdfbox': '2.0.27', + 'poi' : '5.2.3', + 'mime4j': '0.8.8' ] dependencies { diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 deleted file mode 100644 index 464a34dd97643..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1179b56c9919c1a8e20d3a528ee4c6cee19bcbe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 new file mode 100644 index 0000000000000..77c36691d36b5 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 @@ -0,0 +1 @@ +7330de23c52f71617cbec7f1d2760dae32e687cd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 deleted file mode 100644 index 4f98753aa0af4..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e80733714eb6a70895bfc74a9528c658504c2c83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 new file mode 100644 index 0000000000000..fb9c5fed27162 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 @@ -0,0 +1 @@ +e76715563a6bd150f84ccb0adb920aec8faf4779 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 deleted file mode 100644 index 3191976e949f8..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6644a1eb1d165eded719a88bf7bdcff91740b98 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 new file mode 100644 index 
0000000000000..d578dffbfa3f6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 @@ -0,0 +1 @@ +d08c064d18b2b149da937d15c0d1708cba03f29d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 deleted file mode 100644 index 165b3649e80bf..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c18cd03ff3a2dfc3c4a30d3a35173bd2690bcb92 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 new file mode 100644 index 0000000000000..4f670b7f95e8c --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 @@ -0,0 +1 @@ +416a9dfce3714116bfdf793b15368df04266845f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 deleted file mode 100644 index d9f58e72c9200..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5513d31545085c33809c4b6553c2009fd19a6016 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..3d8b3daf606ad --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 @@ -0,0 +1 @@ +2fb22ae74ad5aea6af1a9c64b9542f2ccf348604 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 deleted file mode 100644 index 7b3abffc1abd5..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a201b5bdc92c0fae4bed4b8e5546388c4c2f9eb0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..8371593cf0841 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 @@ -0,0 +1 @@ +02efd11c940adb18c03eb9ce7ad88fc40ee6a196 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 deleted file mode 100644 index f5137b1e5223e..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5df31b69375131fc2163a5557093cb112be90ce1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..5c6365876b7be --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 @@ -0,0 +1 @@ +db113c8e9051b0ff967f4911fa20336c8325a7c5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 deleted file mode 100644 index 568dde5125c3f..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c5cd5f1b3e7b3656ab983b73bbbf8bf5f14f793 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 new 
file mode 100644 index 0000000000000..3c8f92498f1a4 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 @@ -0,0 +1 @@ +2a7fce47e22b7fedb1b277347ff4fe36d6eda50d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 deleted file mode 100644 index 419f01c631375..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f9f35e4827726b062ac2b0ad0fd361837a50ac9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..c66c2f3f39401 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 @@ -0,0 +1 @@ +f6ed6356dd4a9bd269d873f65494376685e6192e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 deleted file mode 100644 index a9e47ff8a8a86..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -649574dca8f19d991ac25894c40284446dc5cf50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..e7bc59bb5ae49 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 @@ -0,0 +1 @@ +72b784a7bdab0ffde005fa64d15e3f077331d6fc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 deleted file mode 100644 index d648183868034..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9268511c34d8a1098f0565438cb8077fcf845d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..83c0777fcbe8a --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 @@ -0,0 +1 @@ +00980e70b1df13c1236b750f0ca1462edd5d7417 \ No newline at end of file diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index 8bebb3881e3fd..2d26238763a09 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -32,14 +32,16 @@ package org.opensearch.upgrades; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; +import org.hamcrest.MatcherAssert; import org.opensearch.client.Request; +import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.test.XContentTestUtils.JsonMapView; +import java.io.IOException; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static 
org.hamcrest.Matchers.notNullValue; @@ -62,25 +64,7 @@ public void testSystemIndicesUpgrades() throws Exception { "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); - // start a async reindex job - Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( - "{\n" + - " \"source\":{\n" + - " \"index\":\"test_index_old\"\n" + - " },\n" + - " \"dest\":{\n" + - " \"index\":\"test_index_reindex\"\n" + - " }\n" + - "}"); - reindex.addParameter("wait_for_completion", "false"); - Map response = entityAsMap(client().performRequest(reindex)); - String taskId = (String) response.get("task"); - - // wait for task - Request getTask = new Request("GET", "/_tasks/" + taskId); - getTask.addParameter("wait_for_completion", "true"); - client().performRequest(getTask); + createAndVerifyStoredTask(); // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); @@ -97,6 +81,8 @@ public void testSystemIndicesUpgrades() throws Exception { } }); } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + createAndVerifyStoredTask(); + assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); Map indices = new JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))) @@ -115,4 +101,29 @@ public void testSystemIndicesUpgrades() throws Exception { }); } } + + /** + * Completed tasks get persisted into the .tasks index, so this method waits + * until the task is completed in order to verify that it has been successfully + * written to the index and can be retrieved. + */ + private static void createAndVerifyStoredTask() throws Exception { + // Use update by query to create an async task + final Request updateByQueryRequest = new Request("POST", "/test_index_old/_update_by_query"); + updateByQueryRequest.addParameter("wait_for_completion", "false"); + final Response updateByQueryResponse = client().performRequest(updateByQueryRequest); + MatcherAssert.assertThat(updateByQueryResponse.getStatusLine().getStatusCode(), equalTo(200)); + final String taskId = (String) entityAsMap(updateByQueryResponse).get("task"); + + // wait for task to complete + waitUntil(() -> { + try { + final Response getTaskResponse = client().performRequest(new Request("GET", "/_tasks/" + taskId)); + MatcherAssert.assertThat(getTaskResponse.getStatusLine().getStatusCode(), equalTo(200)); + return (Boolean) entityAsMap(getTaskResponse).get("completed"); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java new file mode 100644 index 0000000000000..4d9115b8962ea --- /dev/null +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http; + +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.opensearch.test.NodeRoles.onlyRole; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AwarenessAttributeDecommissionRestIT extends HttpSmokeTestCase { + + public void testRestStatusForDecommissioningFailedException() { + internalCluster().startNodes(3); + Request request = new Request("PUT", "/_cluster/decommission/awareness/zone/zone-1"); + ResponseException exception = expectThrows( + ResponseException.class, + () -> getRestClient().performRequest(request) + ); + assertEquals(exception.getResponse().getStatusLine().getStatusCode(), RestStatus.BAD_REQUEST.getStatus()); + assertTrue(exception.getMessage().contains("invalid awareness attribute requested for decommissioning")); + } + + public void testRestStatusForAcknowledgedDecommission() throws IOException { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List<String> clusterManagerNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build() + ); + + logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); + List<String> dataNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build() + ); + + ensureStableCluster(6); + logger.info("--> setting shard routing weights for weighted round robin"); + Map<String, Double> weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + + Request request = new Request("PUT", "/_cluster/decommission/awareness/zone/c"); + Response response = getRestClient().performRequest(request); + assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); + } +} diff 
--git a/release-notes/opensearch.release-notes-1.3.7.md b/release-notes/opensearch.release-notes-1.3.7.md new file mode 100644 index 0000000000000..b8330b5bfcd7d --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.7.md @@ -0,0 +1,10 @@ +## 2022-11-30 Version 1.3.7 Release Notes + +### Upgrades +* Upgrade netty to 4.1.84.Final ([#4919](https://github.com/opensearch-project/OpenSearch/pull/4919)) +* OpenJDK Update (October 2022 Patch releases) ([#5016](https://github.com/opensearch-project/OpenSearch/pull/5016)) +* Upgrade com.netflix.nebula:nebula-publishing-plugin to 4.6.0 and gradle-docker-compose-plugin to 0.14.12 ([#5316](https://github.com/opensearch-project/OpenSearch/pull/5136)) +* Updated Jackson to 2.14.1 ([#5356](https://github.com/opensearch-project/OpenSearch/pull/5356)) + +### Bug Fixes +* Fixed error handling while reading analyzer mapping rules ([#5149](https://github.com/opensearch-project/OpenSearch/pull/5149)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml new file mode 100644 index 0000000000000..b5fe43edcb003 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml @@ -0,0 +1,82 @@ +--- +"Shrink index with max_shard_size": + # shrink index with max_shard_size parameter, which is used to generate an optimum + # number_of_shards for the target index. + + - skip: + version: " - 2.9.99" + reason: "only available in 3.0+" + features: allowed_warnings + + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + # ensure everything is allocated on the same data node + index.routing.allocation.include._id: $node_id + index.number_of_shards: 3 + index.number_of_replicas: 0 + - do: + index: + index: source + id: "1" + body: { "foo": "hello world" } + + - do: + get: + index: source + id: "1" + + - match: { _index: source } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # shrink with max_shard_size + - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." 
+ indices.shrink: + index: "source" + target: "new_shrunken_index" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + max_shard_size: "10gb" + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: "new_shrunken_index" + id: "1" + + - do: + indices.get_settings: + index: "new_shrunken_index" + + - match: { new_shrunken_index.settings.index.number_of_shards: "1" } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java index daa124fab2220..3420074a0f60b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java @@ -66,6 +66,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.Index; @@ -75,8 +76,8 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.Arrays; @@ -760,4 +761,72 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { ); ensureGreen("splitagain"); } + + public void testCreateShrinkIndexWithMaxShardSize() { + internalCluster().ensureAtLeastNumDataNodes(2); + final String shrinkNode = internalCluster().startDataOnlyNode(); + + final int shardCount = between(2, 5); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + client().admin().indices().prepareFlush("source").get(); + ensureGreen(); + + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings( + Settings.builder() + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + ) + .get(); + ensureGreen(); + + // Cannot set max_shard_size and index.number_of_shards at the same time + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMaxShardSize(new ByteSizeValue(1)) + .setResizeType(ResizeType.SHRINK) + .get() + ); + assertEquals(exc.getMessage(), "Cannot set max_shard_size and index.number_of_shards at the same time!"); + + // use max_shard_size to calculate the target index's shards number + // set max_shard_size to 1 then the target index's shards number will be same with the source index's + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + 
.setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()) + .build() + ) + .setMaxShardSize(new ByteSizeValue(1)) + .setResizeType(ResizeType.SHRINK) + .get() + ); + ensureGreen(); + + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(String.valueOf(shardCount), target.getIndexToSettings().get("target").get("index.number_of_shards")); + } } diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index d4d32672f719b..cef8ab1320342 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -85,7 +85,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); // UNRELEASED - public static final Version V_2_4_1 = new Version(2040199, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_2_4_1 = new Version( + 2040199, + org.apache.lucene.util.Version.fromBits(9, 4, 2) /** needs updated 9.5.0 snapshots */ + ); public static final Version V_2_5_0 = new Version( 2050099, org.apache.lucene.util.Version.fromBits(9, 4, 2) /** needs updated 9.5.0 snapshots */ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index f5d9528422b58..f83431994a649 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,6 +31,7 @@ package org.opensearch.action.admin.indices.shrink; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -46,6 +47,7 @@ import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -60,6 +62,8 @@ public class ResizeRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { public static final ObjectParser PARSER = new ObjectParser<>("resize_request"); + private static final ParseField MAX_SHARD_SIZE = new ParseField("max_shard_size"); + static { PARSER.declareField( (parser, request, context) -> request.getTargetIndexRequest().settings(parser.map()), @@ -71,12 +75,19 @@ public class ResizeRequest extends AcknowledgedRequest implements new ParseField("aliases"), ObjectParser.ValueType.OBJECT ); + PARSER.declareField( + ResizeRequest::setMaxShardSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SHARD_SIZE.getPreferredName()), + MAX_SHARD_SIZE, + ObjectParser.ValueType.STRING + ); } private CreateIndexRequest targetIndexRequest; private String sourceIndex; private ResizeType type = ResizeType.SHRINK; private Boolean copySettings = true; + private ByteSizeValue maxShardSize; public ResizeRequest(StreamInput in) throws IOException { super(in); @@ -84,6 +95,9 @@ public ResizeRequest(StreamInput in) throws IOException { sourceIndex = in.readString(); type = in.readEnum(ResizeType.class); copySettings = 
in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + maxShardSize = in.readOptionalWriteable(ByteSizeValue::new); + } } ResizeRequest() {} @@ -108,6 +122,9 @@ public ActionRequestValidationException validate() { if (type == ResizeType.SPLIT && IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) { validationException = addValidationError("index.number_of_shards is required for split operations", validationException); } + if (maxShardSize != null && maxShardSize.getBytes() <= 0) { + validationException = addValidationError("max_shard_size must be greater than 0", validationException); + } assert copySettings == null || copySettings; return validationException; } @@ -123,6 +140,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeEnum(type); out.writeOptionalBoolean(copySettings); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(maxShardSize); + } } @Override @@ -205,6 +225,24 @@ public Boolean getCopySettings() { return copySettings; } + /** + * Sets the maximum size of a primary shard in the new shrunken index. + * This parameter can be used to calculate the lowest factor of the source index's shards number + * which satisfies the maximum shard size requirement. + * + * @param maxShardSize the maximum size of a primary shard in the new shrunken index + */ + public void setMaxShardSize(ByteSizeValue maxShardSize) { + this.maxShardSize = maxShardSize; + } + + /** + * Returns the maximum size of a primary shard in the new shrunken index. + */ + public ByteSizeValue getMaxShardSize() { + return maxShardSize; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -221,6 +259,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } builder.endObject(); + if (maxShardSize != null) { + builder.field(MAX_SHARD_SIZE.getPreferredName(), maxShardSize); + } } builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 418e83a5431ec..eb05c0a69b78b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; /** * Transport request builder for resizing an index @@ -95,4 +96,12 @@ public ResizeRequestBuilder setResizeType(ResizeType type) { this.request.setResizeType(type); return this; } + + /** + * Sets the maximum size of a primary shard in the new shrunken index. 
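+ * Only honored for shrink: TransportResizeAction rejects max_shard_size for split and clone, and it cannot
+ * be combined with an explicit index.number_of_shards on the target index.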
+ */ + public ResizeRequestBuilder setMaxShardSize(ByteSizeValue maxShardSize) { + this.request.setMaxShardSize(maxShardSize); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index ba079aeb03921..7f55e5efe801b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -57,6 +57,8 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.index.store.StoreStats; import java.io.IOException; import java.util.Locale; @@ -141,11 +143,12 @@ protected void clusterManagerOperation( .prepareStats(sourceIndex) .clear() .setDocs(true) + .setStore(true) .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> { CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); return shard == null ? null : shard.getPrimary().getDocs(); - }, sourceIndex, targetIndex); + }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); createIndexService.createIndex( updateRequest, ActionListener.map( @@ -162,6 +165,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( final ResizeRequest resizeRequest, final ClusterState state, final IntFunction<DocsStats> perShardDocStats, + final StoreStats primaryShardsStoreStats, String sourceIndexName, String targetIndexName ) { @@ -176,12 +180,22 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( targetIndexSettingsBuilder.remove(IndexMetadata.SETTING_HISTORY_UUID); final Settings targetIndexSettings = targetIndexSettingsBuilder.build(); final int numShards; + + // max_shard_size is only supported for shrink + ByteSizeValue maxShardSize = resizeRequest.getMaxShardSize(); + if (resizeRequest.getResizeType() != ResizeType.SHRINK && maxShardSize != null) { + throw new IllegalArgumentException("Unsupported parameter [max_shard_size]"); + } + if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings); + if (resizeRequest.getResizeType() == ResizeType.SHRINK && maxShardSize != null) { + throw new IllegalArgumentException("Cannot set max_shard_size and index.number_of_shards at the same time!"); + } } else { assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly"; if (resizeRequest.getResizeType() == ResizeType.SHRINK) { - numShards = 1; + numShards = calculateTargetIndexShardsNum(maxShardSize, primaryShardsStoreStats, metadata); } else { assert resizeRequest.getResizeType() == ResizeType.CLONE; numShards = metadata.getNumberOfShards(); @@ -250,6 +264,46 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( .copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings()); } + /** + * Calculate target index's shards count according to max_shard_size and the source index's storage (only primary shards included) + * for shrink.
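+ * As a worked example (illustrative numbers, not taken from this patch): a source index with 8 primary shards
+ * whose primaries store 40gb in total, shrunk with max_shard_size=10gb, needs at least ceil(40gb / 10gb) = 4
+ * shards; since 4 divides 8 evenly, the target index gets 4 shards. With 7 primary shards and the same sizes,
+ * no factor of 7 lies in [4, 6], so the target keeps all 7 shards.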
Target index's shards count is the lowest factor of the source index's primary shards count which satisfies the + * maximum shard size requirement. If max_shard_size is less than the source index's single shard size, then target index's shards count + * will be equal to the source index's shards count. + * @param maxShardSize the maximum size of a primary shard in the target index + * @param sourceIndexShardStoreStats primary shards' store stats of the source index + * @param sourceIndexMetaData source index's metadata + * @return target index's shards number + */ + protected static int calculateTargetIndexShardsNum( + ByteSizeValue maxShardSize, + StoreStats sourceIndexShardStoreStats, + IndexMetadata sourceIndexMetaData + ) { + if (maxShardSize == null + || sourceIndexShardStoreStats == null + || maxShardSize.getBytes() == 0 + || sourceIndexShardStoreStats.getSizeInBytes() == 0) { + return 1; + } + + int sourceIndexShardsNum = sourceIndexMetaData.getNumberOfShards(); + // calculate the minimum shards count according to source index's storage, ceiling ensures that the minimum shards count is never + // less than 1 + int minValue = (int) Math.ceil((double) sourceIndexShardStoreStats.getSizeInBytes() / maxShardSize.getBytes()); + // if minimum shards count is greater than the source index's shards count, then the source index's shards count will be returned + if (minValue >= sourceIndexShardsNum) { + return sourceIndexShardsNum; + } + + // find the lowest factor of the source index's shards count here, because minimum shards count may not be a factor + for (int i = minValue; i < sourceIndexShardsNum; i++) { + if (sourceIndexShardsNum % i == 0) { + return i; + } + } + return sourceIndexShardsNum; + } + @Override protected String getClusterManagerActionName(DiscoveryNode node) { return super.getClusterManagerActionName(node); diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java index fe1b9368ac712..9d1325ccf4912 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java @@ -11,6 +11,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.rest.RestStatus; import java.io.IOException; @@ -52,4 +53,9 @@ public void writeTo(StreamOutput out) throws IOException { public DecommissionAttribute decommissionAttribute() { return decommissionAttribute; } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 0503db713258d..249b4ff5316d9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -108,10 +108,12 @@ public boolean isThrottlingEnabled() { } void validateSetting(final Settings settings) { - if (minNodeVersionSupplier.get().compareTo(Version.V_2_4_0) < 0) { - throw new IllegalArgumentException("All the nodes in cluster should be on version later than or equal to 2.4.0"); - } Map groups = settings.getAsGroups(); + if (groups.size() > 0) { + 
if (minNodeVersionSupplier.get().compareTo(Version.V_2_4_0) < 0) { + throw new IllegalArgumentException("All the nodes in cluster should be on version later than or equal to 2.4.0"); + } + } for (String key : groups.keySet()) { if (!THROTTLING_TASK_KEYS.containsKey(key)) { throw new IllegalArgumentException("Cluster manager task throttling is not configured for given task type: " + key); diff --git a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java index 66d3aeb748cf7..accc02624f71c 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java @@ -86,7 +86,7 @@ public class TaskResultsService { public static final String TASK_RESULT_MAPPING_VERSION_META_FIELD = "version"; - public static final int TASK_RESULT_MAPPING_VERSION = 3; // must match version in task-index-mapping.json + public static final int TASK_RESULT_MAPPING_VERSION = 4; // must match version in task-index-mapping.json /** * The backoff policy to use when saving a task result fails. The total wait diff --git a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json index 54e9d39902f03..58b6b2d3bc873 100644 --- a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json +++ b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json @@ -1,7 +1,7 @@ { "_doc" : { "_meta": { - "version": 3 + "version": 4 }, "dynamic" : "strict", "properties" : { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java index e4b79ac54f8fd..5705362cc73f4 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -38,8 +38,8 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -52,7 +52,9 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.store.StoreStats; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -107,6 +109,7 @@ public void testErrorCondition() { new ResizeRequest("target", "source"), state, (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ) @@ -121,6 +124,7 @@ public void testErrorCondition() { req, clusterState, (i) -> i == 2 || i == 3 ? 
new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null, + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -139,6 +143,7 @@ public void testErrorCondition() { req, clusterState, (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -167,6 +172,7 @@ public void testErrorCondition() { new ResizeRequest("target", "source"), clusterState, (i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -193,13 +199,27 @@ public void testPassNumRoutingShards() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 2).build()); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); resizeRequest.getTargetIndexRequest() .settings( Settings.builder().put("index.number_of_routing_shards", randomIntBetween(2, 10)).put("index.number_of_shards", 2).build() ); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); } public void testPassNumRoutingShardsAndFail() { @@ -224,7 +244,14 @@ public void testPassNumRoutingShardsAndFail() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", numShards * 2).build()); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); resizeRequest.getTargetIndexRequest() .settings( @@ -233,7 +260,14 @@ public void testPassNumRoutingShardsAndFail() { ClusterState finalState = clusterState; IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> TransportResizeAction.prepareCreateIndexRequest(resizeRequest, finalState, null, "source", "target") + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ) ); assertEquals("cannot provide index.number_of_routing_shards on resize", iae.getMessage()); } @@ -266,6 +300,7 @@ public void testShrinkIndexSettings() { target, clusterState, (i) -> stats, + new StoreStats(between(1, 10000), between(1, 10000)), indexName, "target" ); @@ -276,6 +311,206 @@ public void testShrinkIndexSettings() { assertEquals(request.waitForActiveShards(), activeShardCount); } + public void testShrinkWithMaxShardSize() { + String indexName = randomAlphaOfLength(10); + // create one that won't fail + ClusterState clusterState = ClusterState.builder( + createClusterState(indexName, 10, 0, Settings.builder().put("index.blocks.write", true).build()) + 
).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + + // max_shard_size cannot be set when the resize type is split or clone + ResizeRequest resizeRequestForFailure = new ResizeRequest("target", indexName); + ResizeType resizeType = ResizeType.SPLIT; + if (randomBoolean()) { + resizeType = ResizeType.CLONE; + } + resizeRequestForFailure.setResizeType(resizeType); + resizeRequestForFailure.setMaxShardSize(new ByteSizeValue(100)); + resizeRequestForFailure.getTargetIndexRequest() + .settings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 100)).build()); + ClusterState finalState = clusterState; + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequestForFailure, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + indexName, + "target" + ) + ); + assertEquals("Unsupported parameter [max_shard_size]", iae.getMessage()); + + // max_shard_size and index.number_of_shards cannot be set at the same time + ResizeRequest resizeRequest = new ResizeRequest("target", indexName); + resizeRequest.setResizeType(ResizeType.SHRINK); + resizeRequest.setMaxShardSize(new ByteSizeValue(100)); + resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 100)).build()); + iae = expectThrows( + IllegalArgumentException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + indexName, + "target" + ) + ); + assertEquals("Cannot set max_shard_size and index.number_of_shards at the same time!", iae.getMessage()); + + AllocationService service = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + // now we start the shard + routingTable = OpenSearchAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, indexName).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + int numSourceShards = clusterState.metadata().index(indexName).getNumberOfShards(); + DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000)); + + // the target index's shard count must be the lowest factor of the source index's shard count that satisfies max_shard_size + int expectedShardsNum = 5; + resizeRequest.setMaxShardSize(new ByteSizeValue(25)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); +
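// (with 100 bytes of store across 10 source shards, the 25-byte cap is first met at factor 5: 20 bytes per shard; factors 1 and 2 would give 100 and 50) +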
// if max_shard_size is greater than the total storage of all the source index's primary shards, + // then the target index will have only one primary shard. + expectedShardsNum = 1; + resizeRequest.setMaxShardSize(new ByteSizeValue(1000)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + + // if max_shard_size is smaller than the storage of a single source primary shard, + // then the target index's shard count will equal the source index's. + expectedShardsNum = numSourceShards; + resizeRequest.setMaxShardSize(new ByteSizeValue(1)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + } + + public void testCalculateTargetIndexShardsNum() { + String indexName = randomAlphaOfLength(10); + ClusterState clusterState = ClusterState.builder( + createClusterState(indexName, randomIntBetween(2, 10), 0, Settings.builder().put("index.blocks.write", true).build()) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + IndexMetadata indexMetadata = clusterState.metadata().index(indexName); + + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(null, new StoreStats(100, between(1, 10000)), indexMetadata), 1); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(0), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 1 + ); + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(new ByteSizeValue(1), null, indexMetadata), 1); + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(new ByteSizeValue(1), new StoreStats(0, 0), indexMetadata), 1); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(1000), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 1 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(1), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + indexMetadata.getNumberOfShards() + ); + + clusterState = ClusterState.builder( + createClusterState(indexName, 10, 0, Settings.builder().put("index.blocks.write", true).build()) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + indexMetadata = clusterState.metadata().index(indexName);
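+ // expected results for a 10-shard source with 100 bytes of store: cap 10 -> 10 shards, cap 12 -> 10, cap 20 -> 5, cap 50 -> 2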
+ assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(10), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 10 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(12), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + indexMetadata.getNumberOfShards() + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(20), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 5 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(50), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 2 + ); + } + private DiscoveryNode newNode(String nodeId) { final Set roles = Collections.unmodifiableSet( new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)) diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index d20fed5c37361..c5e706e50c298 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -114,6 +114,15 @@ public void testValidateSettingsForDifferentVersion() { Settings newSettings = Settings.builder().put("cluster_manager.throttling.thresholds.put-mapping.value", newLimit).build(); assertThrows(IllegalArgumentException.class, () -> throttler.validateSetting(newSettings)); + + // validating empty settings should not throw an exception + Settings emptySettings = Settings.builder().build(); + try { + throttler.validateSetting(emptySettings); + } catch (Exception e) { + // fail the test if validation threw + throw new AssertionError(e); + } } public void testValidateSettingsForTaskWihtoutRetryOnDataNode() { diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index faab2f405010a..8757458e3317e 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -269,7 +269,13 @@ public void testCoordinatingPrimaryThreadedUpdateToShardLimitsAndRejections() th nodeStats = shardIndexingPressure.stats(); IndexingPressurePerShardStats shardStoreStats = shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1); - assertNull(shardStoreStats); + // If the rejection count equals NUM_THREADS, every request including the last one was rejected, so we still see the + // shardStoreStats that the last request updated. Otherwise the shardStoreStats simply moves to the cold store and null is returned.
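+ // (NUM_THREADS is the number of concurrent requests this test fires at shardId1)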
+ if (rejectionCount.get() == NUM_THREADS) { + assertEquals(10, shardStoreStats.getCurrentPrimaryAndCoordinatingLimits()); + } else { + assertNull(shardStoreStats); + } shardStats = shardIndexingPressure.coldStats(); if (randomBoolean) { assertEquals(rejectionCount.get(), nodeStats.getCoordinatingRejections()); @@ -331,7 +337,13 @@ public void testReplicaThreadedUpdateToShardLimitsAndRejections() throws Excepti assertEquals(0, nodeStats.getCurrentReplicaBytes()); IndexingPressurePerShardStats shardStoreStats = shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1); - assertNull(shardStoreStats); + // If the rejection count equals NUM_THREADS, every request including the last one was rejected, so we still see the + // shardStoreStats that the last request updated. Otherwise the shardStoreStats simply moves to the cold store and null is returned. + if (rejectionCount.get() == NUM_THREADS) { + assertEquals(15, shardStoreStats.getCurrentReplicaLimits()); + } else { + assertNull(shardStoreStats); + } shardStats = shardIndexingPressure.coldStats(); assertEquals(rejectionCount.get(), shardStats.getIndexingPressureShardStats(shardId1).getReplicaNodeLimitsBreachedRejections()); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 72b77bb706065..892ed5a23fb9d 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2764,8 +2764,21 @@ public void testRestoreShardFromRemoteStore() throws IOException { // Delete files in store directory to restore from remote directory Directory storeDirectory = target.store().directory(); + for (String file : storeDirectory.listAll()) { storeDirectory.deleteFile(file); + // Windows has buggy file-delete logic where AccessDeniedExceptions + // are thrown when there is an open file handle on a particular file. FSDirectory attempts to resolve this with hacks by + // swallowing the exceptions and moving the file to a pending-delete state + // to retry in the future while being filtered from listAll invocations. + // However, this logic is also buggy: after the first delete attempt we are left in a state where the file is still on disk + // and not pending delete. + // A second attempt to delete the file will properly move it to pending deletion and be filtered from listAll.
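+ // The retry below checks for exactly that broken state: the file still appears in listAll() but is not pending delete.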
+ if (Arrays.asList(storeDirectory.listAll()).contains(file) && storeDirectory.getPendingDeletions().contains(file) == false) { + logger.info("File {} was not deleted and is not pending delete, attempting delete again...", file); + storeDirectory.deleteFile(file); + assertTrue(storeDirectory.getPendingDeletions().contains(file)); + } } assertEquals(0, storeDirectory.listAll().length); diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java index 65969cc65359e..6f3387a935c03 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java @@ -16,6 +16,7 @@ import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.SimpleFSLockFactory; import org.apache.lucene.tests.util.LuceneTestCase; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.Version; import org.junit.After; import org.junit.Before; @@ -52,6 +53,7 @@ public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase { @Before public void init() { + assumeFalse("Awaiting Windows fix https://github.com/opensearch-project/OpenSearch/issues/5396", Constants.WINDOWS); transferManager = mock(TransferManager.class); lockFactory = SimpleFSLockFactory.INSTANCE; path = LuceneTestCase.createTempDir("OnDemandBlockSnapshotIndexInputTests");
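The assertions in testCalculateTargetIndexShardsNum above pin down the contract of TransportResizeAction.calculateTargetIndexShardsNum without showing its body. As a reading aid, here is a minimal sketch consistent with those assertions; the method name and signature are taken from the tests, while the internals are assumed and may differ from the production change:

```java
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.unit.ByteSizeValue;
import org.opensearch.index.store.StoreStats;

// Sketch only: pick the smallest factor of the source shard count whose
// per-shard share of the store stays within max_shard_size.
static int calculateTargetIndexShardsNum(ByteSizeValue maxShardSize, StoreStats sourceStoreStats, IndexMetadata sourceMetadata) {
    // missing or zero-sized inputs: fall back to a single target shard,
    // matching the null/zero assertions above
    if (maxShardSize == null || sourceStoreStats == null || maxShardSize.getBytes() == 0 || sourceStoreStats.getSizeInBytes() == 0) {
        return 1;
    }
    final int sourceShards = sourceMetadata.getNumberOfShards();
    final long totalBytes = sourceStoreStats.getSizeInBytes();
    for (int candidate = 1; candidate <= sourceShards; candidate++) {
        // shrink requires the target shard count to evenly divide the source shard count;
        // ceil-divide so a shard is never credited with less than its actual share
        if (sourceShards % candidate == 0 && (totalBytes + candidate - 1) / candidate <= maxShardSize.getBytes()) {
            return candidate;
        }
    }
    // even a one-to-one mapping exceeds the cap, so keep the source layout
    return sourceShards;
}
```

Run against the fixtures above (10 source shards, 100 bytes of store), this yields 5 for a 25-byte cap, 1 for a 1000-byte cap, and 10 for a 1-byte cap, which also matches testShrinkWithMaxShardSize.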