Commit

Fix type in tests

olivere committed Oct 28, 2017
1 parent 844c7cb commit 217c9db
Showing 128 changed files with 1,597 additions and 646 deletions.
2 changes: 1 addition & 1 deletion .travis.yml.off
@@ -12,5 +12,5 @@ services:
- docker
before_install:
- sudo sysctl -w vm.max_map_count=262144
- - docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:6.0.0-beta2 elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
+ - docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:6.0.0-rc1 elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
- sleep 30
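
The command above boots a single dockerized Elasticsearch 6.0.0-rc1 node on http://127.0.0.1:9200 with X-Pack security disabled, which is what the integration tests talk to. As a rough illustration (not part of this commit, and assuming the usual imports and client setup from the README), a test or example program can reach that node like this; sniffing is disabled because the cluster is a single node inside a container:

```go
// Connect to the single-node cluster started by the docker run command above.
// A minimal sketch; elastic.SetSniff(false) avoids node discovery against the
// container-internal addresses of a one-node Docker setup.
client, err := elastic.NewClient(
	elastic.SetURL("http://127.0.0.1:9200"),
	elastic.SetSniff(false),
)
if err != nil {
	// The node may still be starting; the .travis.yml above simply sleeps 30s before this point.
	panic(err)
}

// Ping reports the server version, which should be 6.0.0-rc1 for this build.
info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background())
if err != nil {
	panic(err)
}
fmt.Printf("Elasticsearch version %s (HTTP status %d)\n", info.Version.Number, code)
```
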
34 changes: 3 additions & 31 deletions CHANGELOG-6.0.md
@@ -12,35 +12,7 @@ Only use `true` or `false` for boolean values, not `0` or `1` or `on` or `off`.

## Single Type Indices

Notice that 6.0 will default to single type indices, i.e. you may not use multiple
types when e.g. adding an index with a mapping.

To enable multiple indices, specify index.mapping.single_type : false. Example:

```
{
"settings":{
"number_of_shards":1,
"number_of_replicas":0,
"index.mapping.single_type" : false
},
"mappings":{
"tweet":{
"properties":{
...
}
},
"comment":{
"_parent": {
"type": "tweet"
}
},
"order":{
"properties":{
...
}
}
}
}
```
Notice that 6.0 and future versions will default to single type indices, i.e. you may not use multiple types when e.g. adding an index with a mapping.

See [here for details](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_what_are_mapping_types).
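
To illustrate the note above (this sketch is not part of the changelog): with single-type indices the `mappings` section contains exactly one type. Assuming a `client` and `ctx` set up as in the README example, and made-up index, type, and field names, creating such an index with this client could look like:

```go
// One index, one mapping type — the 6.0 default described above.
mapping := `{
  "settings": {
    "number_of_shards": 1,
    "number_of_replicas": 0
  },
  "mappings": {
    "doc": {
      "properties": {
        "user":    { "type": "keyword" },
        "message": { "type": "text" }
      }
    }
  }
}`
_, err := client.CreateIndex("tweets").BodyString(mapping).Do(ctx)
if err != nil {
	// Handle error
	panic(err)
}
```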

2 changes: 2 additions & 0 deletions CONTRIBUTORS
@@ -20,6 +20,7 @@ Andrew Dunham [@andrew-d](https://github.com/andrew-d)
Andrew Gaul [@andrewgaul](https://github.com/andrewgaul)
Andy Walker [@alaska](https://github.com/alaska)
Arquivei [@arquivei](https://github.com/arquivei)
arthurgustin [@arthurgustin](https://github.com/arthurgustin)
Benjamin Fernandes [@LotharSee](https://github.com/LotharSee)
Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
@@ -109,4 +110,5 @@ Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
zakthomas [@zakthomas](https://github.com/zakthomas)
singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
@林 [@zplzpl](https://github.com/zplzpl)
Roman Colohanin [@zuzmic](https://github.com/zuzmic)
96 changes: 2 additions & 94 deletions README.md
@@ -121,101 +121,9 @@ The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
You typically create one client for your app. Here's a complete example of
creating a client, creating an index, adding a document, executing a search etc.

```go
// Create a context
ctx := context.Background()

// Create a client
client, err := elastic.NewClient()
if err != nil {
// Handle error
panic(err)
}

// Create an index
_, err = client.CreateIndex("twitter").Do(ctx)
if err != nil {
// Handle error
panic(err)
}

// Add a document to the index
tweet := Tweet{User: "olivere", Message: "Take Five"}
_, err = client.Index().
Index("twitter").
Type("tweet").
Id("1").
BodyJson(tweet).
Refresh("true").
Do(ctx)
if err != nil {
// Handle error
panic(err)
}

// Search with a term query
termQuery := elastic.NewTermQuery("user", "olivere")
searchResult, err := client.Search().
Index("twitter"). // search in index "twitter"
Query(termQuery). // specify the query
Sort("user", true). // sort by "user" field, ascending
From(0).Size(10). // take documents 0-9
Pretty(true). // pretty print request and response JSON
Do(ctx) // execute
if err != nil {
// Handle error
panic(err)
}

// searchResult is of type SearchResult and returns hits, suggestions,
// and all kinds of other information from Elasticsearch.
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)

// Each is a convenience function that iterates over hits in a search result.
// It makes sure you don't need to check for nil values in the response.
// However, it ignores errors in serialization. If you want full control
// over iterating the hits, see below.
var ttyp Tweet
for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
if t, ok := item.(Tweet); ok {
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
}
}
// TotalHits is another convenience function that works even when something goes wrong.
fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())

// Here's how you iterate through results with full control over each step.
if searchResult.Hits.TotalHits > 0 {
fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)

// Iterate through results
for _, hit := range searchResult.Hits.Hits {
// hit.Index contains the name of the index

// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
var t Tweet
err := json.Unmarshal(*hit.Source, &t)
if err != nil {
// Deserialization failed
}

// Work with tweet
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
}
} else {
// No hits
fmt.Print("Found no tweets\n")
}

// Delete the index again
_, err = client.DeleteIndex("twitter").Do(ctx)
if err != nil {
// Handle error
panic(err)
}
```
An example is available [here](https://olivere.github.io/elastic/).

Here's a [link to a complete working example](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
Here's a [link to a complete working example for v3](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).

See the [wiki](https://github.com/olivere/elastic/wiki) for more details.

14 changes: 11 additions & 3 deletions bulk.go
@@ -234,7 +234,13 @@ func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
}

// Get response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ ContentType: "application/x-ndjson",
+ })
if err != nil {
return nil, err
}
@@ -304,10 +310,12 @@ type BulkResponseItem struct {
Type string `json:"_type,omitempty"`
Id string `json:"_id,omitempty"`
Version int64 `json:"_version,omitempty"`
- Status int `json:"status,omitempty"`
Result string `json:"result,omitempty"`
Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
ForcedRefresh bool `json:"forced_refresh,omitempty"`
Found bool `json:"found,omitempty"`
Error *ErrorDetails `json:"error,omitempty"`
}
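
For context on the two changes above (the bulk body is now sent with an explicit `application/x-ndjson` content type, and each response item carries `_seq_no`/`_primary_term`), here is a rough sketch of the bulk API from the caller's side. It is not part of this commit; it assumes a `client` and `ctx` as in the README example, and the index/type names are just placeholders:

```go
// Queue one index and one delete operation in a single bulk call.
index1 := elastic.NewBulkIndexRequest().Index("twitter").Type("doc").Id("1").
	Doc(map[string]interface{}{"user": "olivere", "message": "Take Five"})
delete2 := elastic.NewBulkDeleteRequest().Index("twitter").Type("doc").Id("2")

bulkResp, err := client.Bulk().Add(index1, delete2).Do(ctx)
if err != nil {
	// Handle error
	panic(err)
}

// Every item reports its own outcome; Failed() collects the ones that went wrong,
// and each item exposes the BulkResponseItem fields shown above (Status, SeqNo, ...).
if bulkResp.Errors {
	for _, item := range bulkResp.Failed() {
		fmt.Printf("bulk item %s failed with status %d: %v\n", item.Id, item.Status, item.Error)
	}
}
```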

14 changes: 7 additions & 7 deletions bulk_delete_request_test.go
@@ -15,23 +15,23 @@ func TestBulkDeleteRequestSerialization(t *testing.T) {
}{
// #0
{
- Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1"),
Expected: []string{
- `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"delete":{"_id":"1","_index":"index1","_type":"doc"}}`,
},
},
// #1
{
- Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Parent("2"),
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Parent("2"),
Expected: []string{
- `{"delete":{"_id":"1","_index":"index1","_parent":"2","_type":"tweet"}}`,
+ `{"delete":{"_id":"1","_index":"index1","_parent":"2","_type":"doc"}}`,
},
},
// #2
{
- Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Routing("3"),
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Routing("3"),
Expected: []string{
- `{"delete":{"_id":"1","_index":"index1","_routing":"3","_type":"tweet"}}`,
+ `{"delete":{"_id":"1","_index":"index1","_routing":"3","_type":"doc"}}`,
},
},
}
@@ -58,7 +58,7 @@ func TestBulkDeleteRequestSerialization(t *testing.T) {
var bulkDeleteRequestSerializationResult string

func BenchmarkBulkDeleteRequestSerialization(b *testing.B) {
- r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
var s string
for n := 0; n < b.N; n++ {
s = r.String()
26 changes: 13 additions & 13 deletions bulk_index_request_test.go
@@ -16,55 +16,55 @@ func TestBulkIndexRequestSerialization(t *testing.T) {
}{
// #0
{
- Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
+ Request: NewBulkIndexRequest().Index("index1").Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"index":{"_id":"1","_index":"index1","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #1
{
- Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
+ Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"create":{"_id":"1","_index":"index1","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #2
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"index":{"_id":"1","_index":"index1","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #3
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").RetryOnConflict(42).
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").RetryOnConflict(42).
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_retry_on_conflict":42,"_type":"tweet"}}`,
+ `{"index":{"_id":"1","_index":"index1","_retry_on_conflict":42,"_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #4
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").Pipeline("my_pipeline").
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").Pipeline("my_pipeline").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_type":"tweet","pipeline":"my_pipeline"}}`,
+ `{"index":{"_id":"1","_index":"index1","_type":"doc","pipeline":"my_pipeline"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #5
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").TTL("1m").
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").TTL("1m").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_ttl":"1m","_type":"tweet"}}`,
+ `{"index":{"_id":"1","_index":"index1","_ttl":"1m","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
@@ -92,7 +92,7 @@ func TestBulkIndexRequestSerialization(t *testing.T) {
var bulkIndexRequestSerializationResult string

func BenchmarkBulkIndexRequestSerialization(b *testing.B) {
- r := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").
+ r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
var s string
for n := 0; n < b.N; n++ {
8 changes: 4 additions & 4 deletions bulk_processor_test.go
@@ -126,7 +126,7 @@ func TestBulkProcessorBasedOnFlushInterval(t *testing.T) {

for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}

@@ -209,7 +209,7 @@ func TestBulkProcessorClose(t *testing.T) {

for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}

@@ -275,7 +275,7 @@ func TestBulkProcessorFlush(t *testing.T) {

for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}

@@ -356,7 +356,7 @@ func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) {

for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. %s", i, randomString(1+rand.Intn(63)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}
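
All of the tests above push documents through a BulkProcessor, which batches requests in the background. Roughly, the setup they exercise looks like the sketch below; it is not part of this commit, assumes a `client` and `ctx` as in the README example, and the thresholds are illustrative:

```go
// Start a background processor that commits batches by count, size, or time.
p, err := client.BulkProcessor().
	Name("example-bulk-processor").
	Workers(2).                      // goroutines committing batches
	BulkActions(1000).               // flush after 1000 queued requests ...
	BulkSize(2 << 20).               // ... or ~2 MB of request data ...
	FlushInterval(30 * time.Second). // ... or every 30 seconds, whichever comes first
	Do(ctx)
if err != nil {
	// Handle error
	panic(err)
}
defer p.Close()

// Queue a document; the processor decides when it is actually sent.
doc := map[string]interface{}{"user": "olivere", "message": "bulk me"}
p.Add(elastic.NewBulkIndexRequest().Index("twitter").Type("doc").Id("1").Doc(doc))

// Force any queued requests out, e.g. before querying the index in a test.
if err := p.Flush(); err != nil {
	panic(err)
}
```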

(Diff truncated; the remaining changed files in this commit are not shown.)
