Skip to content

Commit

Permalink
[DOCS] Reformat snippets to use two-space indents (#59973) (#59994)
Browse files Browse the repository at this point in the history
  • Loading branch information
jrodewig authored Jul 21, 2020
1 parent 606b7ea commit b302b09
Show file tree
Hide file tree
Showing 160 changed files with 5,050 additions and 5,049 deletions.
70 changes: 35 additions & 35 deletions README.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -45,23 +45,23 @@ Let's try to index some Twitter-like information. First, let's index some tweet
----
curl -XPUT 'http://localhost:9200/twitter/_doc/1?pretty' -H 'Content-Type: application/json' -d '
{
"user": "kimchy",
"post_date": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
"user": "kimchy",
"post_date": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
}'
curl -XPUT 'http://localhost:9200/twitter/_doc/2?pretty' -H 'Content-Type: application/json' -d '
{
"user": "kimchy",
"post_date": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
"user": "kimchy",
"post_date": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
}'
curl -XPUT 'http://localhost:9200/twitter/_doc/3?pretty' -H 'Content-Type: application/json' -d '
{
"user": "elastic",
"post_date": "2010-01-15T01:46:38",
"message": "Building the site, should be kewl"
"user": "elastic",
"post_date": "2010-01-15T01:46:38",
"message": "Building the site, should be kewl"
}'
----

Expand All @@ -87,9 +87,9 @@ We can also use the JSON query language Elasticsearch provides instead of a quer
----
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -H 'Content-Type: application/json' -d '
{
"query" : {
"match" : { "user": "kimchy" }
}
"query" : {
"match" : { "user": "kimchy" }
}
}'
----

Expand All @@ -98,9 +98,9 @@ Just for kicks, let's get all the documents stored (we should see the tweet from
----
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -H 'Content-Type: application/json' -d '
{
"query" : {
"match_all" : {}
}
"query" : {
"match_all" : {}
}
}'
----

Expand All @@ -109,11 +109,11 @@ We can also do range search (the `post_date` was automatically identified as dat
----
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -H 'Content-Type: application/json' -d '
{
"query" : {
"range" : {
"post_date" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
}
"query" : {
"range" : {
"post_date" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
}
}
}'
----

Expand All @@ -130,16 +130,16 @@ Another way to define our simple twitter system is to have a different index per
----
curl -XPUT 'http://localhost:9200/kimchy/_doc/1?pretty' -H 'Content-Type: application/json' -d '
{
"user": "kimchy",
"post_date": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
"user": "kimchy",
"post_date": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
}'
curl -XPUT 'http://localhost:9200/kimchy/_doc/2?pretty' -H 'Content-Type: application/json' -d '
{
"user": "kimchy",
"post_date": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
"user": "kimchy",
"post_date": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
}'
----

Expand All @@ -150,10 +150,10 @@ Complete control at the index level is allowed. As an example, in the above case
----
curl -XPUT http://localhost:9200/another_user?pretty -H 'Content-Type: application/json' -d '
{
"settings" : {
"index.number_of_shards" : 2,
"index.number_of_replicas" : 1
}
"settings" : {
"index.number_of_shards" : 2,
"index.number_of_replicas" : 1
}
}'
----

Expand All @@ -163,9 +163,9 @@ index (twitter user), for example:
----
curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -H 'Content-Type: application/json' -d '
{
"query" : {
"match_all" : {}
}
"query" : {
"match_all" : {}
}
}'
----

Expand All @@ -174,9 +174,9 @@ Or on all the indices:
----
curl -XGET 'http://localhost:9200/_search?pretty=true' -H 'Content-Type: application/json' -d '
{
"query" : {
"match_all" : {}
}
"query" : {
"match_all" : {}
}
}'
----

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -145,16 +145,16 @@ The following <<indices-create-index,create index API>> request uses the
----
PUT sample_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"my_sample_analyzer" : {
"tokenizer" : "standard",
"filter" : ["sample"]
}
}
"settings": {
"analysis": {
"analyzer": {
"my_sample_analyzer": {
"tokenizer": "standard",
"filter": [ "sample" ]
}
}
}
}
}
----
// TEST[skip: REMOVE THIS COMMENT.]
Expand Down Expand Up @@ -212,22 +212,22 @@ For example, the following request creates a custom `sample` filter with
----
PUT sample_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"my_custom_analyzer" : {
"tokenizer" : "whitespace",
"filter" : ["my_custom_sample_token_filter"]
}
},
"filter" : {
"my_custom_sample_token_filter" : {
"type" : "sample",
"foo" : true
}
}
"settings": {
"analysis": {
"analyzer": {
"my_custom_analyzer": {
"tokenizer": "whitespace",
"filter": [ "my_custom_sample_token_filter" ]
}
},
"filter": {
"my_custom_sample_token_filter": {
"type": "sample",
"foo": true
}
}
}
}
}
----
// TEST[skip: REMOVE THIS COMMENT.]
Original file line number Diff line number Diff line change
Expand Up @@ -77,15 +77,15 @@ apostrophe token filter to configure a new
--------------------------------------------------
PUT /apostrophe_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"standard_apostrophe" : {
"tokenizer" : "standard",
"filter" : ["apostrophe"]
}
}
"settings": {
"analysis": {
"analyzer": {
"standard_apostrophe": {
"tokenizer": "standard",
"filter": [ "apostrophe" ]
}
}
}
}
}
--------------------------------------------------
Original file line number Diff line number Diff line change
Expand Up @@ -83,16 +83,16 @@ The following <<indices-create-index,create index API>> request uses the
--------------------------------------------------
PUT /asciifold_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"standard_asciifolding" : {
"tokenizer" : "standard",
"filter" : ["asciifolding"]
}
}
"settings": {
"analysis": {
"analyzer": {
"standard_asciifolding": {
"tokenizer": "standard",
"filter": [ "asciifolding" ]
}
}
}
}
}
--------------------------------------------------

Expand All @@ -118,21 +118,21 @@ For example, the following request creates a custom `asciifolding` filter with
--------------------------------------------------
PUT /asciifold_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"standard_asciifolding" : {
"tokenizer" : "standard",
"filter" : ["my_ascii_folding"]
}
},
"filter" : {
"my_ascii_folding" : {
"type" : "asciifolding",
"preserve_original" : true
}
}
"settings": {
"analysis": {
"analyzer": {
"standard_asciifolding": {
"tokenizer": "standard",
"filter": [ "my_ascii_folding" ]
}
},
"filter": {
"my_ascii_folding": {
"type": "asciifolding",
"preserve_original": true
}
}
}
}
}
--------------------------------------------------
Original file line number Diff line number Diff line change
Expand Up @@ -126,16 +126,16 @@ CJK bigram token filter to configure a new
--------------------------------------------------
PUT /cjk_bigram_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"standard_cjk_bigram" : {
"tokenizer" : "standard",
"filter" : ["cjk_bigram"]
}
}
"settings": {
"analysis": {
"analyzer": {
"standard_cjk_bigram": {
"tokenizer": "standard",
"filter": [ "cjk_bigram" ]
}
}
}
}
}
--------------------------------------------------

Expand Down Expand Up @@ -176,26 +176,26 @@ parameters.
--------------------------------------------------
PUT /cjk_bigram_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"han_bigrams" : {
"tokenizer" : "standard",
"filter" : ["han_bigrams_filter"]
}
},
"filter" : {
"han_bigrams_filter" : {
"type" : "cjk_bigram",
"ignored_scripts": [
"hangul",
"hiragana",
"katakana"
],
"output_unigrams" : true
}
}
"settings": {
"analysis": {
"analyzer": {
"han_bigrams": {
"tokenizer": "standard",
"filter": [ "han_bigrams_filter" ]
}
},
"filter": {
"han_bigrams_filter": {
"type": "cjk_bigram",
"ignored_scripts": [
"hangul",
"hiragana",
"katakana"
],
"output_unigrams": true
}
}
}
}
}
--------------------------------------------------
Original file line number Diff line number Diff line change
Expand Up @@ -69,15 +69,15 @@ CJK width token filter to configure a new
--------------------------------------------------
PUT /cjk_width_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"standard_cjk_width" : {
"tokenizer" : "standard",
"filter" : ["cjk_width"]
}
}
"settings": {
"analysis": {
"analyzer": {
"standard_cjk_width": {
"tokenizer": "standard",
"filter": [ "cjk_width" ]
}
}
}
}
}
--------------------------------------------------
Loading

0 comments on commit b302b09

Please sign in to comment.