diff --git a/Gopkg.lock b/Gopkg.lock
index 289aae1..dc59244 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -32,12 +32,46 @@
]
revision = "f24972fa72facf59d05c91c848b65eac38815915"
+[[projects]]
+ branch = "master"
+ name = "github.com/gin-contrib/sse"
+ packages = ["."]
+ revision = "22d885f9ecc78bf4ee5d72b937e4bbcdc58e8cae"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/gin-gonic/contrib"
+ packages = ["static"]
+ revision = "39cfb9727134fef3120d2458fce5fab14265a46c"
+
+[[projects]]
+ name = "github.com/gin-gonic/gin"
+ packages = [
+ ".",
+ "binding",
+ "render"
+ ]
+ revision = "d459835d2b077e44f7c9b453505ee29881d5d12d"
+ version = "v1.2"
+
+[[projects]]
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+ version = "v1.1.0"
+
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
+[[projects]]
+ name = "github.com/mattn/go-isatty"
+ packages = ["."]
+ revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
+ version = "v0.0.3"
+
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
@@ -62,6 +96,12 @@
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
+[[projects]]
+ name = "github.com/ugorji/go"
+ packages = ["codec"]
+ revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
+ version = "v1.1.1"
+
[[projects]]
branch = "master"
name = "github.com/vjeantet/jodaTime"
@@ -77,9 +117,27 @@
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ revision = "0ffbfd41fbef8ffcf9b62b0b0aa3a5873ed7a4fe"
+
+[[projects]]
+ name = "gopkg.in/go-playground/validator.v8"
+ packages = ["."]
+ revision = "5f1438d3fca68893a817e4a66806cea46a9e4ebf"
+ version = "v8.18.2"
+
+[[projects]]
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+ version = "v2.2.1"
+
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "09a674309a4c2d020e5d507b31b3c3855a150458e345c2d6e543003d6a36287a"
+ inputs-digest = "a435b62a6991c825a8a6bfde146f0cff531222abc9be68a97ebc370c1d01bda3"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/vendor/github.com/gin-contrib/sse/.travis.yml b/vendor/github.com/gin-contrib/sse/.travis.yml
new file mode 100644
index 0000000..a556ac0
--- /dev/null
+++ b/vendor/github.com/gin-contrib/sse/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+sudo: false
+go:
+ - 1.6.4
+ - 1.7.4
+ - tip
+
+git:
+ depth: 3
+
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
\ No newline at end of file
diff --git a/vendor/github.com/gin-contrib/sse/LICENSE b/vendor/github.com/gin-contrib/sse/LICENSE
new file mode 100644
index 0000000..1ff7f37
--- /dev/null
+++ b/vendor/github.com/gin-contrib/sse/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Manuel Martínez-Almeida
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/gin-contrib/sse/README.md b/vendor/github.com/gin-contrib/sse/README.md
new file mode 100644
index 0000000..c9c49cf
--- /dev/null
+++ b/vendor/github.com/gin-contrib/sse/README.md
@@ -0,0 +1,58 @@
+# Server-Sent Events
+
+[![GoDoc](https://godoc.org/github.com/gin-contrib/sse?status.svg)](https://godoc.org/github.com/gin-contrib/sse)
+[![Build Status](https://travis-ci.org/gin-contrib/sse.svg)](https://travis-ci.org/gin-contrib/sse)
+[![codecov](https://codecov.io/gh/gin-contrib/sse/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-contrib/sse)
+[![Go Report Card](https://goreportcard.com/badge/github.com/gin-contrib/sse)](https://goreportcard.com/report/github.com/gin-contrib/sse)
+
+Server-sent events (SSE) is a technology in which a browser receives automatic updates from a server over an HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5 by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/).
+
+- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/)
+- [Browser support](http://caniuse.com/#feat=eventsource)
+
+## Sample code
+
+```go
+import "github.com/gin-contrib/sse"
+
+func httpHandler(w http.ResponseWriter, req *http.Request) {
+ // data can be a primitive like a string, an integer or a float
+ sse.Encode(w, sse.Event{
+ Event: "message",
+ Data: "some data\nmore data",
+ })
+
+ // also a complex type, like a map, a struct or a slice
+ sse.Encode(w, sse.Event{
+ Id: "124",
+ Event: "message",
+ Data: map[string]interface{}{
+ "user": "manu",
+ "date": time.Now().Unix(),
+ "content": "hi!",
+ },
+ })
+}
+```
+```
+event: message
+data: some data\ndata: more data
+
+id: 124
+event: message
+data: {"content":"hi!","date":1431540810,"user":"manu"}
+
+```
+
+## Content-Type
+
+```go
+fmt.Println(sse.ContentType)
+```
+```
+text/event-stream
+```
+
+## Decoding support
+
+There is a client-side implementation of SSE coming soon.
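+
+In the meantime, the package already ships a `Decode` helper that parses a raw event stream (see `sse-decoder.go` in this package). A minimal usage sketch, added here for illustration only; the stream literal is a made-up example of the wire format shown above:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/gin-contrib/sse"
+)
+
+func main() {
+	// A raw stream in the format produced by Encode above.
+	stream := "id:124\nevent:message\ndata:hi!\n\n"
+
+	events, err := sse.Decode(strings.NewReader(stream))
+	if err != nil {
+		panic(err)
+	}
+	for _, e := range events {
+		fmt.Printf("id=%s event=%s data=%v\n", e.Id, e.Event, e.Data)
+	}
+}
+```
+
+Each decoded `Event` carries the `Id`, `Event` and `Data` fields populated from the corresponding fields of the stream.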
diff --git a/vendor/github.com/gin-contrib/sse/sse-decoder.go b/vendor/github.com/gin-contrib/sse/sse-decoder.go
new file mode 100644
index 0000000..fd49b9c
--- /dev/null
+++ b/vendor/github.com/gin-contrib/sse/sse-decoder.go
@@ -0,0 +1,116 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package sse
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"strconv"
+)
+
+type decoder struct {
+ events []Event
+}
+
+func Decode(r io.Reader) ([]Event, error) {
+ var dec decoder
+ return dec.decode(r)
+}
+
+func (d *decoder) dispatchEvent(event Event, data string) {
+ dataLength := len(data)
+ if dataLength > 0 {
+ //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer.
+ data = data[:dataLength-1]
+ dataLength--
+ }
+ if dataLength == 0 && event.Event == "" {
+ return
+ }
+ if event.Event == "" {
+ event.Event = "message"
+ }
+ event.Data = data
+ d.events = append(d.events, event)
+}
+
+func (d *decoder) decode(r io.Reader) ([]Event, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ var currentEvent Event
+ var dataBuffer *bytes.Buffer = new(bytes.Buffer)
+ // TODO (and unit tests)
+ // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair,
+ // a single U+000A LINE FEED (LF) character,
+ // or a single U+000D CARRIAGE RETURN (CR) character.
+ lines := bytes.Split(buf, []byte{'\n'})
+ for _, line := range lines {
+ if len(line) == 0 {
+ // If the line is empty (a blank line). Dispatch the event.
+ d.dispatchEvent(currentEvent, dataBuffer.String())
+
+ // reset current event and data buffer
+ currentEvent = Event{}
+ dataBuffer.Reset()
+ continue
+ }
+ if line[0] == byte(':') {
+ // If the line starts with a U+003A COLON character (:), ignore the line.
+ continue
+ }
+
+ var field, value []byte
+ colonIndex := bytes.IndexRune(line, ':')
+ if colonIndex != -1 {
+			// If the line contains a U+003A COLON character (:)
+ // Collect the characters on the line before the first U+003A COLON character (:),
+ // and let field be that string.
+ field = line[:colonIndex]
+ // Collect the characters on the line after the first U+003A COLON character (:),
+ // and let value be that string.
+ value = line[colonIndex+1:]
+ // If value starts with a single U+0020 SPACE character, remove it from value.
+ if len(value) > 0 && value[0] == ' ' {
+ value = value[1:]
+ }
+ } else {
+			// Otherwise, the string is not empty but does not contain a U+003A COLON character (:)
+ // Use the whole line as the field name, and the empty string as the field value.
+ field = line
+ value = []byte{}
+ }
+ // The steps to process the field given a field name and a field value depend on the field name,
+ // as given in the following list. Field names must be compared literally,
+ // with no case folding performed.
+ switch string(field) {
+ case "event":
+ // Set the event name buffer to field value.
+ currentEvent.Event = string(value)
+ case "id":
+ // Set the event stream's last event ID to the field value.
+ currentEvent.Id = string(value)
+		case "retry":
+			// If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9),
+			// then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer.
+			// Otherwise, ignore the field.
+			if retry, err := strconv.ParseUint(string(value), 10, 64); err == nil {
+				currentEvent.Retry = uint(retry)
+			}
+ case "data":
+ // Append the field value to the data buffer,
+ dataBuffer.Write(value)
+ // then append a single U+000A LINE FEED (LF) character to the data buffer.
+ dataBuffer.WriteString("\n")
+ default:
+ //Otherwise. The field is ignored.
+ continue
+ }
+ }
+ // Once the end of the file is reached, the user agent must dispatch the event one final time.
+ d.dispatchEvent(currentEvent, dataBuffer.String())
+
+ return d.events, nil
+}
diff --git a/vendor/github.com/gin-contrib/sse/sse-encoder.go b/vendor/github.com/gin-contrib/sse/sse-encoder.go
new file mode 100644
index 0000000..f9c8087
--- /dev/null
+++ b/vendor/github.com/gin-contrib/sse/sse-encoder.go
@@ -0,0 +1,110 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package sse
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Server-Sent Events
+// W3C Working Draft 29 October 2009
+// http://www.w3.org/TR/2009/WD-eventsource-20091029/
+
+const ContentType = "text/event-stream"
+
+var contentType = []string{ContentType}
+var noCache = []string{"no-cache"}
+
+var fieldReplacer = strings.NewReplacer(
+ "\n", "\\n",
+ "\r", "\\r")
+
+var dataReplacer = strings.NewReplacer(
+ "\n", "\ndata:",
+ "\r", "\\r")
+
+type Event struct {
+ Event string
+ Id string
+ Retry uint
+ Data interface{}
+}
+
+func Encode(writer io.Writer, event Event) error {
+ w := checkWriter(writer)
+ writeId(w, event.Id)
+ writeEvent(w, event.Event)
+ writeRetry(w, event.Retry)
+ return writeData(w, event.Data)
+}
+
+func writeId(w stringWriter, id string) {
+ if len(id) > 0 {
+ w.WriteString("id:")
+ fieldReplacer.WriteString(w, id)
+ w.WriteString("\n")
+ }
+}
+
+func writeEvent(w stringWriter, event string) {
+ if len(event) > 0 {
+ w.WriteString("event:")
+ fieldReplacer.WriteString(w, event)
+ w.WriteString("\n")
+ }
+}
+
+func writeRetry(w stringWriter, retry uint) {
+ if retry > 0 {
+ w.WriteString("retry:")
+ w.WriteString(strconv.FormatUint(uint64(retry), 10))
+ w.WriteString("\n")
+ }
+}
+
+func writeData(w stringWriter, data interface{}) error {
+ w.WriteString("data:")
+ switch kindOfData(data) {
+ case reflect.Struct, reflect.Slice, reflect.Map:
+ err := json.NewEncoder(w).Encode(data)
+ if err != nil {
+ return err
+ }
+ w.WriteString("\n")
+ default:
+ dataReplacer.WriteString(w, fmt.Sprint(data))
+ w.WriteString("\n\n")
+ }
+ return nil
+}
+
+func (r Event) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ return Encode(w, r)
+}
+
+func (r Event) WriteContentType(w http.ResponseWriter) {
+ header := w.Header()
+ header["Content-Type"] = contentType
+
+ if _, exist := header["Cache-Control"]; !exist {
+ header["Cache-Control"] = noCache
+ }
+}
+
+func kindOfData(data interface{}) reflect.Kind {
+ value := reflect.ValueOf(data)
+ valueType := value.Kind()
+ if valueType == reflect.Ptr {
+ valueType = value.Elem().Kind()
+ }
+ return valueType
+}
diff --git a/vendor/github.com/gin-contrib/sse/writer.go b/vendor/github.com/gin-contrib/sse/writer.go
new file mode 100644
index 0000000..6f9806c
--- /dev/null
+++ b/vendor/github.com/gin-contrib/sse/writer.go
@@ -0,0 +1,24 @@
+package sse
+
+import "io"
+
+type stringWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+}
+
+type stringWrapper struct {
+ io.Writer
+}
+
+func (w stringWrapper) WriteString(str string) (int, error) {
+ return w.Writer.Write([]byte(str))
+}
+
+func checkWriter(writer io.Writer) stringWriter {
+ if w, ok := writer.(stringWriter); ok {
+ return w
+ } else {
+ return stringWrapper{writer}
+ }
+}
diff --git a/vendor/github.com/gin-gonic/contrib/static/README.md b/vendor/github.com/gin-gonic/contrib/static/README.md
new file mode 100644
index 0000000..7d0ae0b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/contrib/static/README.md
@@ -0,0 +1,5 @@
+# static
+
+## EOL-warning
+
+**This package was abandoned on 2016-12-13. Please use [gin-contrib/static](https://github.com/gin-contrib/static) instead.**
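+
+For reference, the copy vendored here exposes `Serve`, `ServeRoot` and `LocalFile` (see `static.go` below). A minimal sketch of how an application might wire it up, for illustration only; the `./public` directory and the port are assumptions:
+
+```go
+package main
+
+import (
+	"github.com/gin-gonic/contrib/static"
+	"github.com/gin-gonic/gin"
+)
+
+func main() {
+	r := gin.Default()
+
+	// Serve files from ./public at the site root; directory listings disabled.
+	r.Use(static.ServeRoot("/", "./public"))
+
+	// Or mount an explicit filesystem under a prefix.
+	r.Use(static.Serve("/assets", static.LocalFile("./public", false)))
+
+	r.Run(":8080")
+}
+```
+
+`Serve` only handles paths that exist on the filesystem and aborts the handler chain for them; anything else falls through to the regular Gin routes.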
diff --git a/vendor/github.com/gin-gonic/contrib/static/static.go b/vendor/github.com/gin-gonic/contrib/static/static.go
new file mode 100644
index 0000000..6e4050e
--- /dev/null
+++ b/vendor/github.com/gin-gonic/contrib/static/static.go
@@ -0,0 +1,62 @@
+package static
+
+import (
+ "net/http"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/gin-gonic/gin"
+)
+
+type ServeFileSystem interface {
+ http.FileSystem
+ Exists(prefix string, path string) bool
+}
+
+type localFileSystem struct {
+ http.FileSystem
+ root string
+ indexes bool
+}
+
+func LocalFile(root string, indexes bool) *localFileSystem {
+ return &localFileSystem{
+ FileSystem: gin.Dir(root, indexes),
+ root: root,
+ indexes: indexes,
+ }
+}
+
+func (l *localFileSystem) Exists(prefix string, filepath string) bool {
+ if p := strings.TrimPrefix(filepath, prefix); len(p) < len(filepath) {
+ name := path.Join(l.root, p)
+ stats, err := os.Stat(name)
+ if err != nil {
+ return false
+ }
+ if !l.indexes && stats.IsDir() {
+ return false
+ }
+ return true
+ }
+ return false
+}
+
+func ServeRoot(urlPrefix, root string) gin.HandlerFunc {
+ return Serve(urlPrefix, LocalFile(root, false))
+}
+
+// Serve returns a middleware handler that serves static files in the given directory.
+func Serve(urlPrefix string, fs ServeFileSystem) gin.HandlerFunc {
+ fileserver := http.FileServer(fs)
+ if urlPrefix != "" {
+ fileserver = http.StripPrefix(urlPrefix, fileserver)
+ }
+ return func(c *gin.Context) {
+ if fs.Exists(urlPrefix, c.Request.URL.Path) {
+ fileserver.ServeHTTP(c.Writer, c.Request)
+ c.Abort()
+ }
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/.gitignore b/vendor/github.com/gin-gonic/gin/.gitignore
new file mode 100644
index 0000000..f3b636d
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/.gitignore
@@ -0,0 +1,4 @@
+vendor/*
+!vendor/vendor.json
+coverage.out
+count.out
diff --git a/vendor/github.com/gin-gonic/gin/.travis.yml b/vendor/github.com/gin-gonic/gin/.travis.yml
new file mode 100644
index 0000000..6532a33
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/.travis.yml
@@ -0,0 +1,31 @@
+language: go
+sudo: false
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - master
+
+git:
+ depth: 3
+
+install:
+ - make install
+
+script:
+ - make vet
+ - make fmt-check
+ - make embedmd
+ - make misspell-check
+ - make test
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/7f95bf605c4d356372f4
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: false # default: false
diff --git a/vendor/github.com/gin-gonic/gin/AUTHORS.md b/vendor/github.com/gin-gonic/gin/AUTHORS.md
new file mode 100644
index 0000000..7ab7213
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/AUTHORS.md
@@ -0,0 +1,227 @@
+List of all the awesome people working to make Gin the best Web Framework in Go.
+
+## gin 0.x series authors
+
+**Maintainers:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho)
+
+People and companies, who have contributed, in alphabetical order.
+
+**@858806258 (杰哥)**
+- Fix typo in example
+
+
+**@achedeuzot (Klemen Sever)**
+- Fix newline debug printing
+
+
+**@adammck (Adam Mckaig)**
+- Add MIT license
+
+
+**@AlexanderChen1989 (Alexander)**
+- Typos in README
+
+
+**@alexanderdidenko (Aleksandr Didenko)**
+- Add support multipart/form-data
+
+
+**@alexandernyquist (Alexander Nyquist)**
+- Using template.Must to fix multiple return issue
+- ★ Added support for OPTIONS verb
+- ★ Setting response headers before calling WriteHeader
+- Improved documentation for model binding
+- ★ Added Content.Redirect()
+- ★ Added tons of Unit tests
+
+
+**@austinheap (Austin Heap)**
+- Added travis CI integration
+
+
+**@andredublin (Andre Dublin)**
+- Fix typo in comment
+
+
+**@bredov (Ludwig Valda Vasquez)**
+- Fix html templating in debug mode
+
+
+**@bluele (Jun Kimura)**
+- Fixes code examples in README
+
+
+**@chad-russell**
+- ★ Support for serializing gin.H into XML
+
+
+**@dickeyxxx (Jeff Dickey)**
+- Typos in README
+- Add example about serving static files
+
+
+**@donileo (Adonis)**
+- Add NoMethod handler
+
+
+**@dutchcoders (DutchCoders)**
+- ★ Fix security bug that allows client to spoof ip
+- Fix typo. r.HTMLTemplates -> SetHTMLTemplate
+
+
+**@el3ctro- (Joshua Loper)**
+- Fix typo in example
+
+
+**@ethankan (Ethan Kan)**
+- Unsigned integers in binding
+
+
+**(Evgeny Persienko)**
+- Validate sub structures
+
+
+**@frankbille (Frank Bille)**
+- Add support for HTTP Realm Auth
+
+
+**@fmd (Fareed Dudhia)**
+- Fix typo. SetHTTPTemplate -> SetHTMLTemplate
+
+
+**@ironiridis (Christopher Harrington)**
+- Remove old reference
+
+
+**@jammie-stackhouse (Jamie Stackhouse)**
+- Add more shortcuts for router methods
+
+
+**@jasonrhansen**
+- Fix spelling and grammar errors in documentation
+
+
+**@JasonSoft (Jason Lee)**
+- Fix typo in comment
+
+
+**@joiggama (Ignacio Galindo)**
+- Add utf-8 charset header on renders
+
+
+**@julienschmidt (Julien Schmidt)**
+- gofmt the code examples
+
+
+**@kelcecil (Kel Cecil)**
+- Fix readme typo
+
+
+**@kyledinh (Kyle Dinh)**
+- Adds RunTLS()
+
+
+**@LinusU (Linus Unnebäck)**
+- Small fixes in README
+
+
+**@loongmxbt (Saint Asky)**
+- Fix typo in example
+
+
+**@lucas-clemente (Lucas Clemente)**
+- ★ work around path.Join removing trailing slashes from routes
+
+
+**@mattn (Yasuhiro Matsumoto)**
+- Improve color logger
+
+
+**@mdigger (Dmitry Sedykh)**
+- Fixes Form binding when content-type is x-www-form-urlencoded
+- No repeat call c.Writer.Status() in gin.Logger
+- Fixes Content-Type for json render
+
+
+**@mirzac (Mirza Ceric)**
+- Fix debug printing
+
+
+**@mopemope (Yutaka Matsubara)**
+- ★ Adds Godep support (Dependencies Manager)
+- Fix variadic parameter in the flexible render API
+- Fix Corrupted plain render
+- Add Pluggable View Renderer Example
+
+
+**@msemenistyi (Mykyta Semenistyi)**
+- update Readme.md. Add code to String method
+
+
+**@msoedov (Sasha Myasoedov)**
+- ★ Adds tons of unit tests.
+
+
+**@ngerakines (Nick Gerakines)**
+- ★ Improves API, c.GET() doesn't panic
+- Adds MustGet() method
+
+
+**@r8k (Rajiv Kilaparti)**
+- Fix Port usage in README.
+
+
+**@rayrod2030 (Ray Rodriguez)**
+- Fix typo in example
+
+
+**@rns**
+- Fix typo in example
+
+
+**@RobAWilkinson (Robert Wilkinson)**
+- Add example of forms and params
+
+
+**@rogierlommers (Rogier Lommers)**
+- Add updated static serve example
+
+
+**@se77en (Damon Zhao)**
+- Improve color logging
+
+
+**@silasb (Silas Baronda)**
+- Fixing quotes in README
+
+
+**@SkuliOskarsson (Skuli Oskarsson)**
+- Fixes some texts in README II
+
+
+**@slimmy (Jimmy Pettersson)**
+- Added messages for required bindings
+
+
+**@smira (Andrey Smirnov)**
+- Add support for ignored/unexported fields in binding
+
+
+**@superalsrk (SRK.Lyu)**
+- Update httprouter godeps
+
+
+**@tebeka (Miki Tebeka)**
+- Use net/http constants instead of numeric values
+
+
+**@techjanitor**
+- Update context.go reserved IPs
+
+
+**@yosssi (Keiji Yoshida)**
+- Fix link in README
+
+
+**@yuyabee**
+- Fixed README
diff --git a/vendor/github.com/gin-gonic/gin/BENCHMARKS.md b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md
new file mode 100644
index 0000000..6efe3ca
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md
@@ -0,0 +1,298 @@
+**Machine:** Intel i7 (Ivy Bridge), quad-core, 8 GB RAM.
+**Date:** June 4th, 2015
+[https://github.com/gin-gonic/go-http-routing-benchmark](https://github.com/gin-gonic/go-http-routing-benchmark)
+
+```
+BenchmarkAce_Param 5000000 372 ns/op 32 B/op 1 allocs/op
+BenchmarkBear_Param 1000000 1165 ns/op 424 B/op 5 allocs/op
+BenchmarkBeego_Param 1000000 2440 ns/op 720 B/op 10 allocs/op
+BenchmarkBone_Param 1000000 1067 ns/op 384 B/op 3 allocs/op
+BenchmarkDenco_Param 5000000 240 ns/op 32 B/op 1 allocs/op
+BenchmarkEcho_Param 10000000 130 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_Param 10000000 133 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_Param 1000000 1826 ns/op 656 B/op 9 allocs/op
+BenchmarkGoji_Param 2000000 957 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_Param 1000000 2021 ns/op 657 B/op 14 allocs/op
+BenchmarkGoRestful_Param 200000 8825 ns/op 2496 B/op 31 allocs/op
+BenchmarkGorillaMux_Param 500000 3340 ns/op 784 B/op 9 allocs/op
+BenchmarkHttpRouter_Param 10000000 152 ns/op 32 B/op 1 allocs/op
+BenchmarkHttpTreeMux_Param 2000000 717 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_Param 3000000 423 ns/op 56 B/op 3 allocs/op
+BenchmarkMacaron_Param 1000000 3410 ns/op 1104 B/op 11 allocs/op
+BenchmarkMartini_Param 200000 7101 ns/op 1152 B/op 12 allocs/op
+BenchmarkPat_Param 1000000 2040 ns/op 656 B/op 14 allocs/op
+BenchmarkPossum_Param 1000000 2048 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_Param 1000000 1144 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_Param 200000 6725 ns/op 1672 B/op 28 allocs/op
+BenchmarkRivet_Param 1000000 1121 ns/op 464 B/op 5 allocs/op
+BenchmarkTango_Param 1000000 1479 ns/op 256 B/op 10 allocs/op
+BenchmarkTigerTonic_Param 1000000 3393 ns/op 992 B/op 19 allocs/op
+BenchmarkTraffic_Param 300000 5525 ns/op 1984 B/op 23 allocs/op
+BenchmarkVulcan_Param 2000000 924 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_Param 1000000 1084 ns/op 368 B/op 3 allocs/op
+BenchmarkAce_Param5 3000000 614 ns/op 160 B/op 1 allocs/op
+BenchmarkBear_Param5 1000000 1617 ns/op 469 B/op 5 allocs/op
+BenchmarkBeego_Param5 1000000 3373 ns/op 992 B/op 13 allocs/op
+BenchmarkBone_Param5 1000000 1478 ns/op 432 B/op 3 allocs/op
+BenchmarkDenco_Param5 3000000 570 ns/op 160 B/op 1 allocs/op
+BenchmarkEcho_Param5 5000000 256 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_Param5 10000000 222 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_Param5 1000000 2789 ns/op 928 B/op 12 allocs/op
+BenchmarkGoji_Param5 1000000 1287 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_Param5 1000000 3670 ns/op 1105 B/op 17 allocs/op
+BenchmarkGoRestful_Param5 200000 10756 ns/op 2672 B/op 31 allocs/op
+BenchmarkGorillaMux_Param5 300000 5543 ns/op 912 B/op 9 allocs/op
+BenchmarkHttpRouter_Param5 5000000 403 ns/op 160 B/op 1 allocs/op
+BenchmarkHttpTreeMux_Param5 1000000 1089 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_Param5 1000000 1682 ns/op 440 B/op 10 allocs/op
+BenchmarkMacaron_Param5 300000 4596 ns/op 1376 B/op 14 allocs/op
+BenchmarkMartini_Param5 100000 15703 ns/op 1280 B/op 12 allocs/op
+BenchmarkPat_Param5 300000 5320 ns/op 1008 B/op 42 allocs/op
+BenchmarkPossum_Param5 1000000 2155 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_Param5 1000000 1559 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_Param5 200000 8184 ns/op 2024 B/op 35 allocs/op
+BenchmarkRivet_Param5 1000000 1914 ns/op 528 B/op 9 allocs/op
+BenchmarkTango_Param5 1000000 3280 ns/op 944 B/op 18 allocs/op
+BenchmarkTigerTonic_Param5 200000 11638 ns/op 2519 B/op 53 allocs/op
+BenchmarkTraffic_Param5 200000 8941 ns/op 2280 B/op 31 allocs/op
+BenchmarkVulcan_Param5 1000000 1279 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_Param5 1000000 1574 ns/op 416 B/op 3 allocs/op
+BenchmarkAce_Param20 1000000 1528 ns/op 640 B/op 1 allocs/op
+BenchmarkBear_Param20 300000 4906 ns/op 1633 B/op 5 allocs/op
+BenchmarkBeego_Param20 200000 10529 ns/op 3868 B/op 17 allocs/op
+BenchmarkBone_Param20 300000 7362 ns/op 2539 B/op 5 allocs/op
+BenchmarkDenco_Param20 1000000 1884 ns/op 640 B/op 1 allocs/op
+BenchmarkEcho_Param20 2000000 689 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_Param20 3000000 545 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_Param20 200000 9437 ns/op 3804 B/op 16 allocs/op
+BenchmarkGoji_Param20 500000 3987 ns/op 1246 B/op 2 allocs/op
+BenchmarkGoJsonRest_Param20 100000 12799 ns/op 4492 B/op 21 allocs/op
+BenchmarkGoRestful_Param20 100000 19451 ns/op 5244 B/op 33 allocs/op
+BenchmarkGorillaMux_Param20 100000 12456 ns/op 3275 B/op 11 allocs/op
+BenchmarkHttpRouter_Param20 1000000 1333 ns/op 640 B/op 1 allocs/op
+BenchmarkHttpTreeMux_Param20 300000 6490 ns/op 2187 B/op 4 allocs/op
+BenchmarkKocha_Param20 300000 5335 ns/op 1808 B/op 27 allocs/op
+BenchmarkMacaron_Param20 200000 11325 ns/op 4252 B/op 18 allocs/op
+BenchmarkMartini_Param20 20000 64419 ns/op 3644 B/op 14 allocs/op
+BenchmarkPat_Param20 50000 24672 ns/op 4888 B/op 151 allocs/op
+BenchmarkPossum_Param20 1000000 2085 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_Param20 300000 6809 ns/op 2283 B/op 8 allocs/op
+BenchmarkRevel_Param20 100000 16600 ns/op 5551 B/op 54 allocs/op
+BenchmarkRivet_Param20 200000 8428 ns/op 2620 B/op 26 allocs/op
+BenchmarkTango_Param20 100000 16302 ns/op 8224 B/op 48 allocs/op
+BenchmarkTigerTonic_Param20 30000 46828 ns/op 10538 B/op 178 allocs/op
+BenchmarkTraffic_Param20 50000 28871 ns/op 7998 B/op 66 allocs/op
+BenchmarkVulcan_Param20 1000000 2267 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_Param20 300000 6828 ns/op 2507 B/op 5 allocs/op
+BenchmarkAce_ParamWrite 3000000 502 ns/op 40 B/op 2 allocs/op
+BenchmarkBear_ParamWrite 1000000 1303 ns/op 424 B/op 5 allocs/op
+BenchmarkBeego_ParamWrite 1000000 2489 ns/op 728 B/op 11 allocs/op
+BenchmarkBone_ParamWrite 1000000 1181 ns/op 384 B/op 3 allocs/op
+BenchmarkDenco_ParamWrite 5000000 315 ns/op 32 B/op 1 allocs/op
+BenchmarkEcho_ParamWrite 10000000 237 ns/op 8 B/op 1 allocs/op
+BenchmarkGin_ParamWrite 5000000 336 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_ParamWrite 1000000 2079 ns/op 664 B/op 10 allocs/op
+BenchmarkGoji_ParamWrite 1000000 1092 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_ParamWrite 1000000 3329 ns/op 1136 B/op 19 allocs/op
+BenchmarkGoRestful_ParamWrite 200000 9273 ns/op 2504 B/op 32 allocs/op
+BenchmarkGorillaMux_ParamWrite 500000 3919 ns/op 792 B/op 10 allocs/op
+BenchmarkHttpRouter_ParamWrite 10000000 223 ns/op 32 B/op 1 allocs/op
+BenchmarkHttpTreeMux_ParamWrite 2000000 788 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_ParamWrite 3000000 549 ns/op 56 B/op 3 allocs/op
+BenchmarkMacaron_ParamWrite 500000 4558 ns/op 1216 B/op 16 allocs/op
+BenchmarkMartini_ParamWrite 200000 8850 ns/op 1256 B/op 16 allocs/op
+BenchmarkPat_ParamWrite 500000 3679 ns/op 1088 B/op 19 allocs/op
+BenchmarkPossum_ParamWrite 1000000 2114 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_ParamWrite 1000000 1320 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_ParamWrite 200000 8048 ns/op 2128 B/op 33 allocs/op
+BenchmarkRivet_ParamWrite 1000000 1393 ns/op 472 B/op 6 allocs/op
+BenchmarkTango_ParamWrite 2000000 819 ns/op 136 B/op 5 allocs/op
+BenchmarkTigerTonic_ParamWrite 300000 5860 ns/op 1440 B/op 25 allocs/op
+BenchmarkTraffic_ParamWrite 200000 7429 ns/op 2400 B/op 27 allocs/op
+BenchmarkVulcan_ParamWrite 2000000 972 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_ParamWrite 1000000 1226 ns/op 368 B/op 3 allocs/op
+BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op
+BenchmarkBear_GithubStatic 3000000 575 ns/op 88 B/op 3 allocs/op
+BenchmarkBeego_GithubStatic 1000000 1561 ns/op 368 B/op 7 allocs/op
+BenchmarkBone_GithubStatic 200000 12301 ns/op 2880 B/op 60 allocs/op
+BenchmarkDenco_GithubStatic 20000000 74.6 ns/op 0 B/op 0 allocs/op
+BenchmarkEcho_GithubStatic 10000000 176 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GithubStatic 10000000 159 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GithubStatic 1000000 1116 ns/op 304 B/op 6 allocs/op
+BenchmarkGoji_GithubStatic 5000000 413 ns/op 0 B/op 0 allocs/op
+BenchmarkGoRestful_GithubStatic 30000 55200 ns/op 3520 B/op 36 allocs/op
+BenchmarkGoJsonRest_GithubStatic 1000000 1504 ns/op 337 B/op 12 allocs/op
+BenchmarkGorillaMux_GithubStatic 100000 23620 ns/op 464 B/op 8 allocs/op
+BenchmarkHttpRouter_GithubStatic 20000000 78.3 ns/op 0 B/op 0 allocs/op
+BenchmarkHttpTreeMux_GithubStatic 20000000 84.9 ns/op 0 B/op 0 allocs/op
+BenchmarkKocha_GithubStatic 20000000 111 ns/op 0 B/op 0 allocs/op
+BenchmarkMacaron_GithubStatic 1000000 2686 ns/op 752 B/op 8 allocs/op
+BenchmarkMartini_GithubStatic 100000 22244 ns/op 832 B/op 11 allocs/op
+BenchmarkPat_GithubStatic 100000 13278 ns/op 3648 B/op 76 allocs/op
+BenchmarkPossum_GithubStatic 1000000 1429 ns/op 480 B/op 4 allocs/op
+BenchmarkR2router_GithubStatic 2000000 726 ns/op 144 B/op 5 allocs/op
+BenchmarkRevel_GithubStatic 300000 6271 ns/op 1288 B/op 25 allocs/op
+BenchmarkRivet_GithubStatic 3000000 474 ns/op 112 B/op 2 allocs/op
+BenchmarkTango_GithubStatic 1000000 1842 ns/op 256 B/op 10 allocs/op
+BenchmarkTigerTonic_GithubStatic 5000000 361 ns/op 48 B/op 1 allocs/op
+BenchmarkTraffic_GithubStatic 30000 47197 ns/op 18920 B/op 149 allocs/op
+BenchmarkVulcan_GithubStatic 1000000 1415 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GithubStatic 1000000 2522 ns/op 512 B/op 11 allocs/op
+BenchmarkAce_GithubParam 3000000 578 ns/op 96 B/op 1 allocs/op
+BenchmarkBear_GithubParam 1000000 1592 ns/op 464 B/op 5 allocs/op
+BenchmarkBeego_GithubParam 1000000 2891 ns/op 784 B/op 11 allocs/op
+BenchmarkBone_GithubParam 300000 6440 ns/op 1456 B/op 16 allocs/op
+BenchmarkDenco_GithubParam 3000000 514 ns/op 128 B/op 1 allocs/op
+BenchmarkEcho_GithubParam 5000000 292 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GithubParam 10000000 242 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GithubParam 1000000 2343 ns/op 720 B/op 10 allocs/op
+BenchmarkGoji_GithubParam 1000000 1566 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_GithubParam 1000000 2828 ns/op 721 B/op 15 allocs/op
+BenchmarkGoRestful_GithubParam 10000 177711 ns/op 2816 B/op 35 allocs/op
+BenchmarkGorillaMux_GithubParam 100000 13591 ns/op 816 B/op 9 allocs/op
+BenchmarkHttpRouter_GithubParam 5000000 352 ns/op 96 B/op 1 allocs/op
+BenchmarkHttpTreeMux_GithubParam 2000000 973 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_GithubParam 2000000 889 ns/op 128 B/op 5 allocs/op
+BenchmarkMacaron_GithubParam 500000 4047 ns/op 1168 B/op 12 allocs/op
+BenchmarkMartini_GithubParam 50000 28982 ns/op 1184 B/op 12 allocs/op
+BenchmarkPat_GithubParam 200000 8747 ns/op 2480 B/op 56 allocs/op
+BenchmarkPossum_GithubParam 1000000 2158 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_GithubParam 1000000 1352 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_GithubParam 200000 7673 ns/op 1784 B/op 30 allocs/op
+BenchmarkRivet_GithubParam 1000000 1573 ns/op 480 B/op 6 allocs/op
+BenchmarkTango_GithubParam 1000000 2418 ns/op 480 B/op 13 allocs/op
+BenchmarkTigerTonic_GithubParam 300000 6048 ns/op 1440 B/op 28 allocs/op
+BenchmarkTraffic_GithubParam 100000 20143 ns/op 6024 B/op 55 allocs/op
+BenchmarkVulcan_GithubParam 1000000 2224 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GithubParam 500000 4156 ns/op 1312 B/op 12 allocs/op
+BenchmarkAce_GithubAll 10000 109482 ns/op 13792 B/op 167 allocs/op
+BenchmarkBear_GithubAll 10000 287490 ns/op 79952 B/op 943 allocs/op
+BenchmarkBeego_GithubAll 3000 562184 ns/op 146272 B/op 2092 allocs/op
+BenchmarkBone_GithubAll 500 2578716 ns/op 648016 B/op 8119 allocs/op
+BenchmarkDenco_GithubAll 20000 94955 ns/op 20224 B/op 167 allocs/op
+BenchmarkEcho_GithubAll 30000 58705 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GithubAll 30000 50991 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GithubAll 5000 449648 ns/op 133280 B/op 1889 allocs/op
+BenchmarkGoji_GithubAll 2000 689748 ns/op 56113 B/op 334 allocs/op
+BenchmarkGoJsonRest_GithubAll 5000 537769 ns/op 135995 B/op 2940 allocs/op
+BenchmarkGoRestful_GithubAll 100 18410628 ns/op 797236 B/op 7725 allocs/op
+BenchmarkGorillaMux_GithubAll 200 8036360 ns/op 153137 B/op 1791 allocs/op
+BenchmarkHttpRouter_GithubAll 20000 63506 ns/op 13792 B/op 167 allocs/op
+BenchmarkHttpTreeMux_GithubAll 10000 165927 ns/op 56112 B/op 334 allocs/op
+BenchmarkKocha_GithubAll 10000 171362 ns/op 23304 B/op 843 allocs/op
+BenchmarkMacaron_GithubAll 2000 817008 ns/op 224960 B/op 2315 allocs/op
+BenchmarkMartini_GithubAll 100 12609209 ns/op 237952 B/op 2686 allocs/op
+BenchmarkPat_GithubAll 300 4830398 ns/op 1504101 B/op 32222 allocs/op
+BenchmarkPossum_GithubAll 10000 301716 ns/op 97440 B/op 812 allocs/op
+BenchmarkR2router_GithubAll 10000 270691 ns/op 77328 B/op 1182 allocs/op
+BenchmarkRevel_GithubAll 1000 1491919 ns/op 345553 B/op 5918 allocs/op
+BenchmarkRivet_GithubAll 10000 283860 ns/op 84272 B/op 1079 allocs/op
+BenchmarkTango_GithubAll 5000 473821 ns/op 87078 B/op 2470 allocs/op
+BenchmarkTigerTonic_GithubAll 2000 1120131 ns/op 241088 B/op 6052 allocs/op
+BenchmarkTraffic_GithubAll 200 8708979 ns/op 2664762 B/op 22390 allocs/op
+BenchmarkVulcan_GithubAll 5000 353392 ns/op 19894 B/op 609 allocs/op
+BenchmarkZeus_GithubAll 2000 944234 ns/op 300688 B/op 2648 allocs/op
+BenchmarkAce_GPlusStatic 5000000 251 ns/op 0 B/op 0 allocs/op
+BenchmarkBear_GPlusStatic 3000000 415 ns/op 72 B/op 3 allocs/op
+BenchmarkBeego_GPlusStatic 1000000 1416 ns/op 352 B/op 7 allocs/op
+BenchmarkBone_GPlusStatic 10000000 192 ns/op 32 B/op 1 allocs/op
+BenchmarkDenco_GPlusStatic 30000000 47.6 ns/op 0 B/op 0 allocs/op
+BenchmarkEcho_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlusStatic 1000000 1035 ns/op 288 B/op 6 allocs/op
+BenchmarkGoji_GPlusStatic 5000000 304 ns/op 0 B/op 0 allocs/op
+BenchmarkGoJsonRest_GPlusStatic 1000000 1286 ns/op 337 B/op 12 allocs/op
+BenchmarkGoRestful_GPlusStatic 200000 9649 ns/op 2160 B/op 30 allocs/op
+BenchmarkGorillaMux_GPlusStatic 1000000 2346 ns/op 464 B/op 8 allocs/op
+BenchmarkHttpRouter_GPlusStatic 30000000 42.7 ns/op 0 B/op 0 allocs/op
+BenchmarkHttpTreeMux_GPlusStatic 30000000 49.5 ns/op 0 B/op 0 allocs/op
+BenchmarkKocha_GPlusStatic 20000000 74.8 ns/op 0 B/op 0 allocs/op
+BenchmarkMacaron_GPlusStatic 1000000 2520 ns/op 736 B/op 8 allocs/op
+BenchmarkMartini_GPlusStatic 300000 5310 ns/op 832 B/op 11 allocs/op
+BenchmarkPat_GPlusStatic 5000000 398 ns/op 96 B/op 2 allocs/op
+BenchmarkPossum_GPlusStatic 1000000 1434 ns/op 480 B/op 4 allocs/op
+BenchmarkR2router_GPlusStatic 2000000 646 ns/op 144 B/op 5 allocs/op
+BenchmarkRevel_GPlusStatic 300000 6172 ns/op 1272 B/op 25 allocs/op
+BenchmarkRivet_GPlusStatic 3000000 444 ns/op 112 B/op 2 allocs/op
+BenchmarkTango_GPlusStatic 1000000 1400 ns/op 208 B/op 10 allocs/op
+BenchmarkTigerTonic_GPlusStatic 10000000 213 ns/op 32 B/op 1 allocs/op
+BenchmarkTraffic_GPlusStatic 1000000 3091 ns/op 1208 B/op 16 allocs/op
+BenchmarkVulcan_GPlusStatic 2000000 863 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GPlusStatic 10000000 237 ns/op 16 B/op 1 allocs/op
+BenchmarkAce_GPlusParam 3000000 435 ns/op 64 B/op 1 allocs/op
+BenchmarkBear_GPlusParam 1000000 1205 ns/op 448 B/op 5 allocs/op
+BenchmarkBeego_GPlusParam 1000000 2494 ns/op 720 B/op 10 allocs/op
+BenchmarkBone_GPlusParam 1000000 1126 ns/op 384 B/op 3 allocs/op
+BenchmarkDenco_GPlusParam 5000000 325 ns/op 64 B/op 1 allocs/op
+BenchmarkEcho_GPlusParam 10000000 168 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlusParam 10000000 170 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlusParam 1000000 1895 ns/op 656 B/op 9 allocs/op
+BenchmarkGoji_GPlusParam 1000000 1071 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_GPlusParam 1000000 2282 ns/op 657 B/op 14 allocs/op
+BenchmarkGoRestful_GPlusParam 100000 19400 ns/op 2560 B/op 33 allocs/op
+BenchmarkGorillaMux_GPlusParam 500000 5001 ns/op 784 B/op 9 allocs/op
+BenchmarkHttpRouter_GPlusParam 10000000 240 ns/op 64 B/op 1 allocs/op
+BenchmarkHttpTreeMux_GPlusParam 2000000 797 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_GPlusParam 3000000 505 ns/op 56 B/op 3 allocs/op
+BenchmarkMacaron_GPlusParam 1000000 3668 ns/op 1104 B/op 11 allocs/op
+BenchmarkMartini_GPlusParam 200000 10672 ns/op 1152 B/op 12 allocs/op
+BenchmarkPat_GPlusParam 1000000 2376 ns/op 704 B/op 14 allocs/op
+BenchmarkPossum_GPlusParam 1000000 2090 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_GPlusParam 1000000 1233 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_GPlusParam 200000 6778 ns/op 1704 B/op 28 allocs/op
+BenchmarkRivet_GPlusParam 1000000 1279 ns/op 464 B/op 5 allocs/op
+BenchmarkTango_GPlusParam 1000000 1981 ns/op 272 B/op 10 allocs/op
+BenchmarkTigerTonic_GPlusParam 500000 3893 ns/op 1064 B/op 19 allocs/op
+BenchmarkTraffic_GPlusParam 200000 6585 ns/op 2000 B/op 23 allocs/op
+BenchmarkVulcan_GPlusParam 1000000 1233 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GPlusParam 1000000 1350 ns/op 368 B/op 3 allocs/op
+BenchmarkAce_GPlus2Params 3000000 512 ns/op 64 B/op 1 allocs/op
+BenchmarkBear_GPlus2Params 1000000 1564 ns/op 464 B/op 5 allocs/op
+BenchmarkBeego_GPlus2Params 1000000 3043 ns/op 784 B/op 11 allocs/op
+BenchmarkBone_GPlus2Params 1000000 3152 ns/op 736 B/op 7 allocs/op
+BenchmarkDenco_GPlus2Params 3000000 431 ns/op 64 B/op 1 allocs/op
+BenchmarkEcho_GPlus2Params 5000000 247 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlus2Params 10000000 219 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlus2Params 1000000 2363 ns/op 720 B/op 10 allocs/op
+BenchmarkGoji_GPlus2Params 1000000 1540 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_GPlus2Params 1000000 2872 ns/op 721 B/op 15 allocs/op
+BenchmarkGoRestful_GPlus2Params 100000 23030 ns/op 2720 B/op 35 allocs/op
+BenchmarkGorillaMux_GPlus2Params 200000 10516 ns/op 816 B/op 9 allocs/op
+BenchmarkHttpRouter_GPlus2Params 5000000 273 ns/op 64 B/op 1 allocs/op
+BenchmarkHttpTreeMux_GPlus2Params 2000000 939 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_GPlus2Params 2000000 844 ns/op 128 B/op 5 allocs/op
+BenchmarkMacaron_GPlus2Params 500000 3914 ns/op 1168 B/op 12 allocs/op
+BenchmarkMartini_GPlus2Params 50000 35759 ns/op 1280 B/op 16 allocs/op
+BenchmarkPat_GPlus2Params 200000 7089 ns/op 2304 B/op 41 allocs/op
+BenchmarkPossum_GPlus2Params 1000000 2093 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_GPlus2Params 1000000 1320 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_GPlus2Params 200000 7351 ns/op 1800 B/op 30 allocs/op
+BenchmarkRivet_GPlus2Params 1000000 1485 ns/op 480 B/op 6 allocs/op
+BenchmarkTango_GPlus2Params 1000000 2111 ns/op 448 B/op 12 allocs/op
+BenchmarkTigerTonic_GPlus2Params 300000 6271 ns/op 1528 B/op 28 allocs/op
+BenchmarkTraffic_GPlus2Params 100000 14886 ns/op 3312 B/op 34 allocs/op
+BenchmarkVulcan_GPlus2Params 1000000 1883 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GPlus2Params 1000000 2686 ns/op 784 B/op 6 allocs/op
+BenchmarkAce_GPlusAll 300000 5912 ns/op 640 B/op 11 allocs/op
+BenchmarkBear_GPlusAll 100000 16448 ns/op 5072 B/op 61 allocs/op
+BenchmarkBeego_GPlusAll 50000 32916 ns/op 8976 B/op 129 allocs/op
+BenchmarkBone_GPlusAll 50000 25836 ns/op 6992 B/op 76 allocs/op
+BenchmarkDenco_GPlusAll 500000 4462 ns/op 672 B/op 11 allocs/op
+BenchmarkEcho_GPlusAll 500000 2806 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlusAll 500000 2579 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlusAll 50000 25223 ns/op 8144 B/op 116 allocs/op
+BenchmarkGoji_GPlusAll 100000 14237 ns/op 3696 B/op 22 allocs/op
+BenchmarkGoJsonRest_GPlusAll 50000 29227 ns/op 8221 B/op 183 allocs/op
+BenchmarkGoRestful_GPlusAll 10000 203144 ns/op 36064 B/op 441 allocs/op
+BenchmarkGorillaMux_GPlusAll 20000 80906 ns/op 9712 B/op 115 allocs/op
+BenchmarkHttpRouter_GPlusAll 500000 3040 ns/op 640 B/op 11 allocs/op
+BenchmarkHttpTreeMux_GPlusAll 200000 9627 ns/op 3696 B/op 22 allocs/op
+BenchmarkKocha_GPlusAll 200000 8108 ns/op 976 B/op 43 allocs/op
+BenchmarkMacaron_GPlusAll 30000 48083 ns/op 13968 B/op 142 allocs/op
+BenchmarkMartini_GPlusAll 10000 196978 ns/op 15072 B/op 178 allocs/op
+BenchmarkPat_GPlusAll 30000 58865 ns/op 16880 B/op 343 allocs/op
+BenchmarkPossum_GPlusAll 100000 19685 ns/op 6240 B/op 52 allocs/op
+BenchmarkR2router_GPlusAll 100000 16251 ns/op 5040 B/op 76 allocs/op
+BenchmarkRevel_GPlusAll 20000 93489 ns/op 21656 B/op 368 allocs/op
+BenchmarkRivet_GPlusAll 100000 16907 ns/op 5408 B/op 64 allocs/op
+```
diff --git a/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/vendor/github.com/gin-gonic/gin/CHANGELOG.md
new file mode 100644
index 0000000..ee485ec
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/CHANGELOG.md
@@ -0,0 +1,191 @@
+# CHANGELOG
+
+### Gin 1.2
+
+- [NEW] Switch from godeps to govendor
+- [NEW] Add support for Let's Encrypt via gin-gonic/autotls
+- [NEW] Improve README examples and add extra ones in the examples folder
+- [NEW] Improved support for App Engine
+- [NEW] Add custom template delimiters, see #860
+- [NEW] Add Template Func Maps, see #962
+- [NEW] Add \*context.Handler(), see #928
+- [NEW] Add \*context.GetRawData()
+- [NEW] Add \*context.GetHeader() (request)
+- [NEW] Add \*context.AbortWithStatusJSON() (JSON content type)
+- [NEW] Add \*context.Keys type cast helpers
+- [NEW] Add \*context.ShouldBindWith()
+- [NEW] Add \*context.MustBindWith()
+- [NEW] Add \*engine.SetFuncMap()
+- [DEPRECATE] On next release: \*context.BindWith(), see #855
+- [FIX] Refactor render
+- [FIX] Reworked tests
+- [FIX] logger now supports cygwin
+- [FIX] Use X-Forwarded-For before X-Real-Ip
+- [FIX] time.Time binding (#904)
+
+### Gin 1.1.4
+
+- [NEW] Support google appengine for IsTerminal func
+
+### Gin 1.1.3
+
+- [FIX] Reverted Logger: skip ANSI color commands
+
+### Gin 1.1
+
+- [NEW] Implement QueryArray and PostArray methods
+- [NEW] Refactor GetQuery and GetPostForm
+- [NEW] Add contribution guide
+- [FIX] Corrected typos in README
+- [FIX] Removed additional Iota
+- [FIX] Changed imports to gopkg instead of github in README (#733)
+- [FIX] Logger: skip ANSI color commands if output is not a tty
+
+### Gin 1.0rc2 (...)
+
+- [PERFORMANCE] Fast path for writing Content-Type.
+- [PERFORMANCE] Much faster 404 routing
+- [PERFORMANCE] Allocation optimizations
+- [PERFORMANCE] Faster root tree lookup
+- [PERFORMANCE] Zero overhead, String() and JSON() rendering.
+- [PERFORMANCE] Faster ClientIP parsing
+- [PERFORMANCE] Much faster SSE implementation
+- [NEW] Benchmarks suite
+- [NEW] Bind validation can be disabled and replaced with custom validators.
+- [NEW] More flexible HTML render
+- [NEW] Multipart and PostForm bindings
+- [NEW] Adds method to return all the registered routes
+- [NEW] Context.HandlerName() returns the main handler's name
+- [NEW] Adds Error.IsType() helper
+- [FIX] Binding multipart form
+- [FIX] Integration tests
+- [FIX] Crash when binding non struct object in Context.
+- [FIX] RunTLS() implementation
+- [FIX] Logger() unit tests
+- [FIX] Adds SetHTMLTemplate() warning
+- [FIX] Context.IsAborted()
+- [FIX] More unit tests
+- [FIX] JSON, XML, HTML renders accept custom content-types
+- [FIX] gin.AbortIndex is unexported
+- [FIX] Better approach to avoid directory listing in StaticFS()
+- [FIX] Context.ClientIP() always returns the IP with trimmed spaces.
+- [FIX] Better warning when running in debug mode.
+- [FIX] Google App Engine integration. debugPrint does not use os.Stdout
+- [FIX] Fixes integer overflow in error type
+- [FIX] Error implements the json.Marshaller interface
+- [FIX] MIT license in every file
+
+
+### Gin 1.0rc1 (May 22, 2015)
+
+- [PERFORMANCE] Zero allocation router
+- [PERFORMANCE] Faster JSON, XML and text rendering
+- [PERFORMANCE] Custom hand optimized HttpRouter for Gin
+- [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations
+- [NEW] Built-in support for golang.org/x/net/context
+- [NEW] Any(path, handler). Create a route that matches any path
+- [NEW] Refactored rendering pipeline (faster and statically typed)
+- [NEW] Refactored errors API
+- [NEW] IndentedJSON() prints pretty JSON
+- [NEW] Added gin.DefaultWriter
+- [NEW] UNIX socket support
+- [NEW] RouterGroup.BasePath is exposed
+- [NEW] JSON validation using go-validate-yourself (very powerful options)
+- [NEW] Completed suite of unit tests
+- [NEW] HTTP streaming with c.Stream()
+- [NEW] StaticFile() creates a router for serving just one file.
+- [NEW] StaticFS() has an option to disable directory listing.
+- [NEW] StaticFS() for serving static files through virtual filesystems
+- [NEW] Server-Sent Events native support
+- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler
+- [NEW] Added LoggerWithWriter() middleware
+- [NEW] Added RecoveryWithWriter() middleware
+- [NEW] Added DefaultPostFormValue()
+- [NEW] Added DefaultFormValue()
+- [NEW] Added DefaultParamValue()
+- [FIX] BasicAuth() when using custom realm
+- [FIX] Bug when serving static files in nested routing group
+- [FIX] Redirect using built-in http.Redirect()
+- [FIX] Logger when printing the requested path
+- [FIX] Documentation typos
+- [FIX] Context.Engine renamed to Context.engine
+- [FIX] Better debugging messages
+- [FIX] ErrorLogger
+- [FIX] Debug HTTP render
+- [FIX] Refactored binding and render modules
+- [FIX] Refactored Context initialization
+- [FIX] Refactored BasicAuth()
+- [FIX] NoMethod/NoRoute handlers
+- [FIX] Hijacking http
+- [FIX] Better support for Google App Engine (using log instead of fmt)
+
+
+### Gin 0.6 (Mar 9, 2015)
+
+- [NEW] Support multipart/form-data
+- [NEW] NoMethod handler
+- [NEW] Validate sub structures
+- [NEW] Support for HTTP Realm Auth
+- [FIX] Unsigned integers in binding
+- [FIX] Improve color logger
+
+
+### Gin 0.5 (Feb 7, 2015)
+
+- [NEW] Content Negotiation
+- [FIX] Solved a security bug that allowed a client to spoof the IP
+- [FIX] Fix unexported/ignored fields in binding
+
+
+### Gin 0.4 (Aug 21, 2014)
+
+- [NEW] Development mode
+- [NEW] Unit tests
+- [NEW] Add Content.Redirect()
+- [FIX] Deferring WriteHeader()
+- [FIX] Improved documentation for model binding
+
+
+### Gin 0.3 (Jul 18, 2014)
+
+- [PERFORMANCE] Normal log and error log are printed in the same call.
+- [PERFORMANCE] Improve performance of NoRouter()
+- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults.
+- [NEW] Flexible rendering API
+- [NEW] Add Context.File()
+- [NEW] Add shortcut RunTLS() for http.ListenAndServeTLS
+- [FIX] Rename NotFound404() to NoRoute()
+- [FIX] Errors in context are purged
+- [FIX] Adds HEAD method in Static file serving
+- [FIX] Refactors Static() file serving
+- [FIX] Using keyed initialization to fix app-engine integration
+- [FIX] Can't unmarshal JSON array, #63
+- [FIX] Renaming Context.Req to Context.Request
+- [FIX] Check application/x-www-form-urlencoded when parsing form
+
+
+### Gin 0.2b (Jul 08, 2014)
+- [PERFORMANCE] Using sync.Pool to reduce allocation/GC overhead
+- [NEW] Travis CI integration
+- [NEW] Completely new logger
+- [NEW] New API for serving static files. gin.Static()
+- [NEW] gin.H() can be serialized into XML
+- [NEW] Typed errors. Errors can be typed: internal/external/custom.
+- [NEW] Support for Godeps
+- [NEW] Travis/Godocs badges in README
+- [NEW] New Bind() and BindWith() methods for parsing request body.
+- [NEW] Add Content.Copy()
+- [NEW] Add context.LastError()
+- [NEW] Add shortcut for OPTIONS HTTP method
+- [FIX] Tons of README fixes
+- [FIX] Header is written before body
+- [FIX] BasicAuth() and changes API a little bit
+- [FIX] Recovery() middleware only prints panics
+- [FIX] Context.Get() does not panic anymore. Use MustGet() instead.
+- [FIX] Multiple http.WriteHeader() in NotFound handlers
+- [FIX] Engine.Run() panics if the http server can't be set up
+- [FIX] Crash when route path doesn't start with '/'
+- [FIX] Do not update header when status code is negative
+- [FIX] Setting response headers before calling WriteHeader in context.String()
+- [FIX] Add MIT license
+- [FIX] Changes behaviour of ErrorLogger() and Logger()
diff --git a/vendor/github.com/gin-gonic/gin/LICENSE b/vendor/github.com/gin-gonic/gin/LICENSE
new file mode 100644
index 0000000..1ff7f37
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Manuel Martínez-Almeida
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/gin-gonic/gin/Makefile b/vendor/github.com/gin-gonic/gin/Makefile
new file mode 100644
index 0000000..9ba475a
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/Makefile
@@ -0,0 +1,61 @@
+GOFMT ?= gofmt "-s"
+PACKAGES ?= $(shell go list ./... | grep -v /vendor/)
+GOFILES := $(shell find . -name "*.go" -type f -not -path "./vendor/*")
+
+all: build
+
+install: deps
+ govendor sync
+
+.PHONY: test
+test:
+ go test -v -covermode=count -coverprofile=coverage.out
+
+.PHONY: fmt
+fmt:
+ $(GOFMT) -w $(GOFILES)
+
+.PHONY: fmt-check
+fmt-check:
+ # get all go files and run go fmt on them
+ @diff=$$($(GOFMT) -d $(GOFILES)); \
+ if [ -n "$$diff" ]; then \
+ echo "Please run 'make fmt' and commit the result:"; \
+ echo "$${diff}"; \
+ exit 1; \
+ fi;
+
+vet:
+ go vet $(PACKAGES)
+
+deps:
+ @hash govendor > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+ go get -u github.com/kardianos/govendor; \
+ fi
+ @hash embedmd > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+ go get -u github.com/campoy/embedmd; \
+ fi
+
+embedmd:
+ embedmd -d *.md
+
+.PHONY: lint
+lint:
+ @hash golint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+ go get -u github.com/golang/lint/golint; \
+ fi
+ for PKG in $(PACKAGES); do golint -set_exit_status $$PKG || exit 1; done;
+
+.PHONY: misspell-check
+misspell-check:
+ @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+ go get -u github.com/client9/misspell/cmd/misspell; \
+ fi
+ misspell -error $(GOFILES)
+
+.PHONY: misspell
+misspell:
+ @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+ go get -u github.com/client9/misspell/cmd/misspell; \
+ fi
+ misspell -w $(GOFILES)
diff --git a/vendor/github.com/gin-gonic/gin/README.md b/vendor/github.com/gin-gonic/gin/README.md
new file mode 100644
index 0000000..029606b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/README.md
@@ -0,0 +1,977 @@
+# Gin Web Framework
+
+
+
+[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin)
+ [![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin)
+ [![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin)
+ [![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin)
+ [![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+Gin is a web framework written in Go (Golang). It features a martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin.
+
+![Gin console logger](https://gin-gonic.github.io/gin/other/console.png)
+
+```sh
+$ cat test.go
+```
+
+```go
+package main
+
+import "github.com/gin-gonic/gin"
+
+func main() {
+ r := gin.Default()
+ r.GET("/ping", func(c *gin.Context) {
+ c.JSON(200, gin.H{
+ "message": "pong",
+ })
+ })
+ r.Run() // listen and serve on 0.0.0.0:8080
+}
+```
+
+## Benchmarks
+
+Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter)
+
+[See all benchmarks](/BENCHMARKS.md)
+
+
+Benchmark name | (1) | (2) | (3) | (4)
+--------------------------------|----------:|----------:|----------:|------:
+BenchmarkAce_GithubAll | 10000 | 109482 | 13792 | 167
+BenchmarkBear_GithubAll | 10000 | 287490 | 79952 | 943
+BenchmarkBeego_GithubAll | 3000 | 562184 | 146272 | 2092
+BenchmarkBone_GithubAll | 500 | 2578716 | 648016 | 8119
+BenchmarkDenco_GithubAll | 20000 | 94955 | 20224 | 167
+BenchmarkEcho_GithubAll | 30000 | 58705 | 0 | 0
+**BenchmarkGin_GithubAll** | **30000** | **50991** | **0** | **0**
+BenchmarkGocraftWeb_GithubAll | 5000 | 449648 | 133280 | 1889
+BenchmarkGoji_GithubAll | 2000 | 689748 | 56113 | 334
+BenchmarkGoJsonRest_GithubAll | 5000 | 537769 | 135995 | 2940
+BenchmarkGoRestful_GithubAll | 100 | 18410628 | 797236 | 7725
+BenchmarkGorillaMux_GithubAll | 200 | 8036360 | 153137 | 1791
+BenchmarkHttpRouter_GithubAll | 20000 | 63506 | 13792 | 167
+BenchmarkHttpTreeMux_GithubAll | 10000 | 165927 | 56112 | 334
+BenchmarkKocha_GithubAll | 10000 | 171362 | 23304 | 843
+BenchmarkMacaron_GithubAll | 2000 | 817008 | 224960 | 2315
+BenchmarkMartini_GithubAll | 100 | 12609209 | 237952 | 2686
+BenchmarkPat_GithubAll | 300 | 4830398 | 1504101 | 32222
+BenchmarkPossum_GithubAll | 10000 | 301716 | 97440 | 812
+BenchmarkR2router_GithubAll | 10000 | 270691 | 77328 | 1182
+BenchmarkRevel_GithubAll | 1000 | 1491919 | 345553 | 5918
+BenchmarkRivet_GithubAll | 10000 | 283860 | 84272 | 1079
+BenchmarkTango_GithubAll | 5000 | 473821 | 87078 | 2470
+BenchmarkTigerTonic_GithubAll | 2000 | 1120131 | 241088 | 6052
+BenchmarkTraffic_GithubAll | 200 | 8708979 | 2664762 | 22390
+BenchmarkVulcan_GithubAll | 5000 | 353392 | 19894 | 609
+BenchmarkZeus_GithubAll | 2000 | 944234 | 300688 | 2648
+
+(1): Total Repetitions
+(2): Single Repetition Duration (ns/op)
+(3): Heap Memory (B/op)
+(4): Average Allocations per Repetition (allocs/op)
+
+## Gin v1 stable
+
+- [x] Zero allocation router.
+- [x] Still the fastest http router and framework. From routing to writing.
+- [x] Complete suite of unit tests
+- [x] Battle tested
+- [x] API frozen, new releases will not break your code.
+
+
+## Start using it
+
+1. Download and install it:
+
+```sh
+$ go get github.com/gin-gonic/gin
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/gin-gonic/gin"
+```
+
+3. (Optional) Import `net/http`. This is required, for example, if you use constants such as `http.StatusOK`.
+
+```go
+import "net/http"
+```
+
+## API Examples
+
+### Using GET, POST, PUT, PATCH, DELETE and OPTIONS
+
+```go
+func main() {
+ // Disable Console Color
+ // gin.DisableConsoleColor()
+
+ // Creates a gin router with default middleware:
+ // logger and recovery (crash-free) middleware
+ router := gin.Default()
+
+ router.GET("/someGet", getting)
+ router.POST("/somePost", posting)
+ router.PUT("/somePut", putting)
+ router.DELETE("/someDelete", deleting)
+ router.PATCH("/somePatch", patching)
+ router.HEAD("/someHead", head)
+ router.OPTIONS("/someOptions", options)
+
+ // By default it serves on :8080 unless a
+ // PORT environment variable was defined.
+ router.Run()
+ // router.Run(":3000") for a hard coded port
+}
+```
+
+### Parameters in path
+
+```go
+func main() {
+ router := gin.Default()
+
+	// This handler will match /user/john but will not match /user/ or /user
+ router.GET("/user/:name", func(c *gin.Context) {
+ name := c.Param("name")
+ c.String(http.StatusOK, "Hello %s", name)
+ })
+
+ // However, this one will match /user/john/ and also /user/john/send
+	// If no other routes match /user/john, it will redirect to /user/john/
+ router.GET("/user/:name/*action", func(c *gin.Context) {
+ name := c.Param("name")
+ action := c.Param("action")
+ message := name + " is " + action
+ c.String(http.StatusOK, message)
+ })
+
+ router.Run(":8080")
+}
+```
+
+### Querystring parameters
+
+```go
+func main() {
+ router := gin.Default()
+
+ // Query string parameters are parsed using the existing underlying request object.
+	// The request responds to a URL matching: /welcome?firstname=Jane&lastname=Doe
+ router.GET("/welcome", func(c *gin.Context) {
+ firstname := c.DefaultQuery("firstname", "Guest")
+ lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname")
+
+ c.String(http.StatusOK, "Hello %s %s", firstname, lastname)
+ })
+ router.Run(":8080")
+}
+```
+
+### Multipart/Urlencoded Form
+
+```go
+func main() {
+ router := gin.Default()
+
+ router.POST("/form_post", func(c *gin.Context) {
+ message := c.PostForm("message")
+ nick := c.DefaultPostForm("nick", "anonymous")
+
+ c.JSON(200, gin.H{
+ "status": "posted",
+ "message": message,
+ "nick": nick,
+ })
+ })
+ router.Run(":8080")
+}
+```
+
+### Another example: query + post form
+
+```
+POST /post?id=1234&page=1 HTTP/1.1
+Content-Type: application/x-www-form-urlencoded
+
+name=manu&message=this_is_great
+```
+
+```go
+func main() {
+ router := gin.Default()
+
+ router.POST("/post", func(c *gin.Context) {
+
+ id := c.Query("id")
+ page := c.DefaultQuery("page", "0")
+ name := c.PostForm("name")
+ message := c.PostForm("message")
+
+ fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message)
+ })
+ router.Run(":8080")
+}
+```
+
+```
+id: 1234; page: 1; name: manu; message: this_is_great
+```
+
+### Upload files
+
+#### Single file
+
+See issue [#774](https://github.com/gin-gonic/gin/issues/774) and the detailed [example code](examples/upload-file/single).
+
+```go
+func main() {
+ router := gin.Default()
+ router.POST("/upload", func(c *gin.Context) {
+ // single file
+ file, _ := c.FormFile("file")
+ log.Println(file.Filename)
+
+		c.String(http.StatusOK, fmt.Sprintf("'%s' uploaded!", file.Filename))
+ })
+ router.Run(":8080")
+}
+```
+
+How to `curl`:
+
+```bash
+curl -X POST http://localhost:8080/upload \
+ -F "file=@/Users/appleboy/test.zip" \
+ -H "Content-Type: multipart/form-data"
+```
+
+#### Multiple files
+
+See the detailed [example code](examples/upload-file/multiple).
+
+```go
+func main() {
+ router := gin.Default()
+ router.POST("/upload", func(c *gin.Context) {
+ // Multipart form
+ form, _ := c.MultipartForm()
+ files := form.File["upload[]"]
+
+ for _, file := range files {
+ log.Println(file.Filename)
+ }
+		c.String(http.StatusOK, fmt.Sprintf("%d files uploaded!", len(files)))
+ })
+ router.Run(":8080")
+}
+```
+
+How to `curl`:
+
+```bash
+curl -X POST http://localhost:8080/upload \
+ -F "upload[]=@/Users/appleboy/test1.zip" \
+ -F "upload[]=@/Users/appleboy/test2.zip" \
+ -H "Content-Type: multipart/form-data"
+```
+
+### Grouping routes
+
+```go
+func main() {
+ router := gin.Default()
+
+ // Simple group: v1
+ v1 := router.Group("/v1")
+ {
+ v1.POST("/login", loginEndpoint)
+ v1.POST("/submit", submitEndpoint)
+ v1.POST("/read", readEndpoint)
+ }
+
+ // Simple group: v2
+ v2 := router.Group("/v2")
+ {
+ v2.POST("/login", loginEndpoint)
+ v2.POST("/submit", submitEndpoint)
+ v2.POST("/read", readEndpoint)
+ }
+
+ router.Run(":8080")
+}
+```
+
+### Blank Gin without middleware by default
+
+Use
+
+```go
+r := gin.New()
+```
+
+instead of
+
+```go
+r := gin.Default()
+```
+
+
+### Using middleware
+```go
+func main() {
+ // Creates a router without any middleware by default
+ r := gin.New()
+
+ // Global middleware
+ r.Use(gin.Logger())
+ r.Use(gin.Recovery())
+
+ // Per route middleware, you can add as many as you desire.
+ r.GET("/benchmark", MyBenchLogger(), benchEndpoint)
+
+ // Authorization group
+ // authorized := r.Group("/", AuthRequired())
+ // exactly the same as:
+ authorized := r.Group("/")
+	// Per-group middleware! In this case we use the custom
+	// AuthRequired() middleware only in the "authorized" group.
+ authorized.Use(AuthRequired())
+ {
+ authorized.POST("/login", loginEndpoint)
+ authorized.POST("/submit", submitEndpoint)
+ authorized.POST("/read", readEndpoint)
+
+ // nested group
+ testing := authorized.Group("testing")
+ testing.GET("/analytics", analyticsEndpoint)
+ }
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Model binding and validation
+
+To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz).
+
+Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`.
+
+When using the Bind method, Gin tries to infer the binder from the Content-Type header. If you are sure what you are binding, you can use `MustBindWith` to select a binding engine explicitly.
+
+You can also specify that certain fields are required. If a field is decorated with `binding:"required"` and has an empty value when binding, the current request will fail with an error.
+
+```go
+// Binding from JSON
+type Login struct {
+ User string `form:"user" json:"user" binding:"required"`
+ Password string `form:"password" json:"password" binding:"required"`
+}
+
+func main() {
+ router := gin.Default()
+
+ // Example for binding JSON ({"user": "manu", "password": "123"})
+ router.POST("/loginJSON", func(c *gin.Context) {
+ var json Login
+ if c.BindJSON(&json) == nil {
+ if json.User == "manu" && json.Password == "123" {
+ c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
+ } else {
+ c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
+ }
+ }
+ })
+
+ // Example for binding a HTML form (user=manu&password=123)
+ router.POST("/loginForm", func(c *gin.Context) {
+ var form Login
+ // This will infer what binder to use depending on the content-type header.
+ if c.Bind(&form) == nil {
+ if form.User == "manu" && form.Password == "123" {
+ c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
+ } else {
+ c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
+ }
+ }
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ router.Run(":8080")
+}
+```
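+
+If you want to choose the binding engine explicitly instead of relying on Content-Type inference, a minimal sketch could look like this. It reuses the `Login` struct above, assumes `import "github.com/gin-gonic/gin/binding"`, and the route name is made up for illustration:
+
+```go
+func main() {
+	router := gin.Default()
+
+	// Hypothetical route: force the form binder regardless of the Content-Type header.
+	router.POST("/loginFormExplicit", func(c *gin.Context) {
+		var form Login
+		if err := c.MustBindWith(&form, binding.Form); err == nil {
+			c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
+		}
+	})
+
+	router.Run(":8080")
+}
+```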
+
+### Bind Query String
+
+See the [detailed information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292).
+
+```go
+package main
+
+import "log"
+import "github.com/gin-gonic/gin"
+
+type Person struct {
+ Name string `form:"name"`
+ Address string `form:"address"`
+}
+
+func main() {
+ route := gin.Default()
+ route.GET("/testing", startPage)
+ route.Run(":8085")
+}
+
+func startPage(c *gin.Context) {
+ var person Person
+	// If `GET`, only the `Form` binding engine (`query`) is used.
+	// If `POST`, it first checks the `content-type` for `JSON` or `XML`, then falls back to `Form` (`form-data`).
+	// See more at https://github.com/gin-gonic/gin/blob/develop/binding/binding.go#L45
+ if c.Bind(&person) == nil {
+ log.Println(person.Name)
+ log.Println(person.Address)
+ }
+
+ c.String(200, "Success")
+}
+```
+
+### Multipart/Urlencoded binding
+
+```go
+package main
+
+import (
+ "github.com/gin-gonic/gin"
+)
+
+type LoginForm struct {
+ User string `form:"user" binding:"required"`
+ Password string `form:"password" binding:"required"`
+}
+
+func main() {
+ router := gin.Default()
+ router.POST("/login", func(c *gin.Context) {
+ // you can bind multipart form with explicit binding declaration:
+ // c.MustBindWith(&form, binding.Form)
+		// or you can simply use auto-binding with the Bind method:
+ var form LoginForm
+ // in this case proper binding will be automatically selected
+ if c.Bind(&form) == nil {
+ if form.User == "user" && form.Password == "password" {
+ c.JSON(200, gin.H{"status": "you are logged in"})
+ } else {
+ c.JSON(401, gin.H{"status": "unauthorized"})
+ }
+ }
+ })
+ router.Run(":8080")
+}
+```
+
+Test it with:
+```sh
+$ curl -v --form user=user --form password=password http://localhost:8080/login
+```
+
+### XML, JSON and YAML rendering
+
+```go
+func main() {
+ r := gin.Default()
+
+ // gin.H is a shortcut for map[string]interface{}
+ r.GET("/someJSON", func(c *gin.Context) {
+ c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
+ })
+
+ r.GET("/moreJSON", func(c *gin.Context) {
+		// You can also use a struct
+ var msg struct {
+ Name string `json:"user"`
+ Message string
+ Number int
+ }
+ msg.Name = "Lena"
+ msg.Message = "hey"
+ msg.Number = 123
+ // Note that msg.Name becomes "user" in the JSON
+		// Will output: {"user": "Lena", "Message": "hey", "Number": 123}
+ c.JSON(http.StatusOK, msg)
+ })
+
+ r.GET("/someXML", func(c *gin.Context) {
+ c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
+ })
+
+ r.GET("/someYAML", func(c *gin.Context) {
+ c.YAML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Serving static files
+
+```go
+func main() {
+ router := gin.Default()
+ router.Static("/assets", "./assets")
+ router.StaticFS("/more_static", http.Dir("my_file_system"))
+ router.StaticFile("/favicon.ico", "./resources/favicon.ico")
+
+ // Listen and serve on 0.0.0.0:8080
+ router.Run(":8080")
+}
+```
+
+### HTML rendering
+
+Using LoadHTMLGlob() or LoadHTMLFiles()
+
+```go
+func main() {
+ router := gin.Default()
+ router.LoadHTMLGlob("templates/*")
+ //router.LoadHTMLFiles("templates/template1.html", "templates/template2.html")
+ router.GET("/index", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "index.tmpl", gin.H{
+ "title": "Main website",
+ })
+ })
+ router.Run(":8080")
+}
+```
+
+templates/index.tmpl
+
+```html
+<html>
+	<h1>
+		{{ .title }}
+	</h1>
+</html>
+```
+
+Using templates with the same name in different directories
+
+```go
+func main() {
+ router := gin.Default()
+ router.LoadHTMLGlob("templates/**/*")
+ router.GET("/posts/index", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{
+ "title": "Posts",
+ })
+ })
+ router.GET("/users/index", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "users/index.tmpl", gin.H{
+ "title": "Users",
+ })
+ })
+ router.Run(":8080")
+}
+```
+
+templates/posts/index.tmpl
+
+```html
+{{ define "posts/index.tmpl" }}
+<html><h1>
+	{{ .title }}
+</h1>
+<p>Using posts/index.tmpl</p>
+</html>
+{{ end }}
+```
+
+templates/users/index.tmpl
+
+```html
+{{ define "users/index.tmpl" }}
+<html><h1>
+	{{ .title }}
+</h1>
+<p>Using users/index.tmpl</p>
+</html>
+{{ end }}
+```
+
+You can also use your own HTML template render:
+
+```go
+import "html/template"
+
+func main() {
+ router := gin.Default()
+ html := template.Must(template.ParseFiles("file1", "file2"))
+ router.SetHTMLTemplate(html)
+ router.Run(":8080")
+}
+```
+
+You may use custom delimiters:
+
+```go
+ r := gin.Default()
+ r.Delims("{[{", "}]}")
+	r.LoadHTMLGlob("/path/to/templates")
+```
+
+#### Add custom template funcs
+
+main.go
+
+```go
+ ...
+
+ func formatAsDate(t time.Time) string {
+ year, month, day := t.Date()
+ return fmt.Sprintf("%d/%02d/%02d", year, month, day)
+ }
+
+ ...
+
+ router.SetFuncMap(template.FuncMap{
+ "formatAsDate": formatAsDate,
+ })
+
+ ...
+
+	router.GET("/raw", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{
+ "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC),
+ })
+ })
+
+ ...
+```
+
+raw.tmpl
+
+```html
+Date: {[{.now | formatAsDate}]}
+```
+
+Result:
+```
+Date: 2017/07/01
+```
+
+### Multitemplate
+
+By default, Gin allows using only one html.Template. Check out [a multitemplate render](https://github.com/gin-contrib/multitemplate) for features like Go 1.6 `block template`. A minimal sketch is shown below.
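+
+A minimal sketch, assuming the `multitemplate.New()` / `AddFromFiles` API of gin-contrib/multitemplate, an `import "github.com/gin-contrib/multitemplate"`, and illustrative file names:
+
+```go
+func main() {
+	router := gin.Default()
+
+	// Each named entry is its own template set, so files can reuse block/define names.
+	r := multitemplate.New()
+	r.AddFromFiles("index", "templates/base.html", "templates/index.html")
+	r.AddFromFiles("about", "templates/base.html", "templates/about.html")
+	router.HTMLRender = r
+
+	router.GET("/", func(c *gin.Context) {
+		c.HTML(http.StatusOK, "index", gin.H{"title": "Index"})
+	})
+	router.Run(":8080")
+}
+```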
+
+### Redirects
+
+Issuing an HTTP redirect is easy:
+
+```go
+r.GET("/test", func(c *gin.Context) {
+ c.Redirect(http.StatusMovedPermanently, "http://www.google.com/")
+})
+```
+Both internal and external locations are supported.
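+
+For instance, an internal (relative) location works the same way; the path below is just an illustration:
+
+```go
+r.GET("/internal", func(c *gin.Context) {
+	// Relative location: the client is redirected within the same host.
+	c.Redirect(http.StatusFound, "/test")
+})
+```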
+
+
+### Custom Middleware
+
+```go
+func Logger() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ t := time.Now()
+
+ // Set example variable
+ c.Set("example", "12345")
+
+ // before request
+
+ c.Next()
+
+ // after request
+ latency := time.Since(t)
+ log.Print(latency)
+
+ // access the status we are sending
+ status := c.Writer.Status()
+ log.Println(status)
+ }
+}
+
+func main() {
+ r := gin.New()
+ r.Use(Logger())
+
+ r.GET("/test", func(c *gin.Context) {
+ example := c.MustGet("example").(string)
+
+ // it would print: "12345"
+ log.Println(example)
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Using BasicAuth() middleware
+
+```go
+// simulate some private data
+var secrets = gin.H{
+ "foo": gin.H{"email": "foo@bar.com", "phone": "123433"},
+ "austin": gin.H{"email": "austin@example.com", "phone": "666"},
+ "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"},
+}
+
+func main() {
+ r := gin.Default()
+
+ // Group using gin.BasicAuth() middleware
+ // gin.Accounts is a shortcut for map[string]string
+ authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{
+ "foo": "bar",
+ "austin": "1234",
+ "lena": "hello2",
+ "manu": "4321",
+ }))
+
+ // /admin/secrets endpoint
+	// hit "localhost:8080/admin/secrets"
+ authorized.GET("/secrets", func(c *gin.Context) {
+ // get user, it was set by the BasicAuth middleware
+ user := c.MustGet(gin.AuthUserKey).(string)
+ if secret, ok := secrets[user]; ok {
+ c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret})
+ } else {
+ c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("})
+ }
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Goroutines inside a middleware
+
+When starting new goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside them; use a read-only copy instead.
+
+```go
+func main() {
+ r := gin.Default()
+
+ r.GET("/long_async", func(c *gin.Context) {
+ // create copy to be used inside the goroutine
+ cCp := c.Copy()
+ go func() {
+ // simulate a long task with time.Sleep(). 5 seconds
+ time.Sleep(5 * time.Second)
+
+ // note that you are using the copied context "cCp", IMPORTANT
+ log.Println("Done! in path " + cCp.Request.URL.Path)
+ }()
+ })
+
+ r.GET("/long_sync", func(c *gin.Context) {
+ // simulate a long task with time.Sleep(). 5 seconds
+ time.Sleep(5 * time.Second)
+
+ // since we are NOT using a goroutine, we do not have to copy the context
+ log.Println("Done! in path " + c.Request.URL.Path)
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Custom HTTP configuration
+
+Use `http.ListenAndServe()` directly, like this:
+
+```go
+func main() {
+ router := gin.Default()
+ http.ListenAndServe(":8080", router)
+}
+```
+or
+
+```go
+func main() {
+ router := gin.Default()
+
+ s := &http.Server{
+ Addr: ":8080",
+ Handler: router,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ s.ListenAndServe()
+}
+```
+
+### Support Let's Encrypt
+
+Example of a one-line LetsEncrypt HTTPS server.
+
+[embedmd]:# (examples/auto-tls/example1.go go)
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/gin-gonic/autotls"
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+ r := gin.Default()
+
+ // Ping handler
+ r.GET("/ping", func(c *gin.Context) {
+ c.String(200, "pong")
+ })
+
+ log.Fatal(autotls.Run(r, "example1.com", "example2.com"))
+}
+```
+
+Example of a custom autocert manager.
+
+[embedmd]:# (examples/auto-tls/example2.go go)
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/gin-gonic/autotls"
+ "github.com/gin-gonic/gin"
+ "golang.org/x/crypto/acme/autocert"
+)
+
+func main() {
+ r := gin.Default()
+
+ // Ping handler
+ r.GET("/ping", func(c *gin.Context) {
+ c.String(200, "pong")
+ })
+
+ m := autocert.Manager{
+ Prompt: autocert.AcceptTOS,
+ HostPolicy: autocert.HostWhitelist("example1.com", "example2.com"),
+ Cache: autocert.DirCache("/var/www/.cache"),
+ }
+
+ log.Fatal(autotls.RunWithManager(r, m))
+}
+```
+
+### Graceful restart or stop
+
+Do you want to gracefully restart or stop your web server?
+There are several ways this can be done.
+
+We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer to issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details.
+
+```go
+router := gin.Default()
+router.GET("/", handler)
+// [...]
+endless.ListenAndServe(":4242", router)
+```
+
+Alternatives to endless:
+
+* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully.
+* [graceful](https://github.com/tylerb/graceful): Graceful is a Go package enabling graceful shutdown of an http.Handler server.
+* [grace](https://github.com/facebookgo/grace): Graceful restart & zero downtime deploy for Go servers.
+
+If you are using Go 1.8, you may not need to use this library! Consider using http.Server's built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. See the full [graceful-shutdown](./examples/graceful-shutdown) example with gin.
+
+[embedmd]:# (examples/graceful-shutdown/graceful-shutdown/server.go go)
+```go
+// +build go1.8
+
+package main
+
+import (
+ "context"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "time"
+
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+ router := gin.Default()
+ router.GET("/", func(c *gin.Context) {
+ time.Sleep(5 * time.Second)
+ c.String(http.StatusOK, "Welcome Gin Server")
+ })
+
+ srv := &http.Server{
+ Addr: ":8080",
+ Handler: router,
+ }
+
+ go func() {
+ // service connections
+ if err := srv.ListenAndServe(); err != nil {
+ log.Printf("listen: %s\n", err)
+ }
+ }()
+
+ // Wait for interrupt signal to gracefully shutdown the server with
+ // a timeout of 5 seconds.
+	quit := make(chan os.Signal, 1) // buffered so the notifier never blocks
+ signal.Notify(quit, os.Interrupt)
+ <-quit
+ log.Println("Shutdown Server ...")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if err := srv.Shutdown(ctx); err != nil {
+ log.Fatal("Server Shutdown:", err)
+ }
+	log.Println("Server exiting")
+}
+```
+
+## Contributing
+
+- With issues:
+ - Use the search tool before opening a new issue.
+  - Please provide source code and the commit SHA if you found a bug.
+ - Review existing issues and provide feedback or react to them.
+- With pull requests:
+  - Open your pull request against the `develop` branch.
+  - Your pull request should have no more than two commits; if it has more, squash them.
+  - It should pass all tests in the available continuous integration systems, such as TravisCI.
+ - You should add/modify tests to cover your proposed code changes.
+ - If your pull request contains a new feature, please document it on the README.
+
+## Users
+
+A list of awesome projects using the [Gin](https://github.com/gin-gonic/gin) web framework:
+
+* [drone](https://github.com/drone/drone): Drone is a Continuous Delivery platform built on Docker, written in Go
+* [gorush](https://github.com/appleboy/gorush): A push notification server written in Go.
diff --git a/vendor/github.com/gin-gonic/gin/auth.go b/vendor/github.com/gin-gonic/gin/auth.go
new file mode 100644
index 0000000..125e659
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/auth.go
@@ -0,0 +1,92 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "crypto/subtle"
+ "encoding/base64"
+ "strconv"
+)
+
+const AuthUserKey = "user"
+
+type (
+ Accounts map[string]string
+ authPair struct {
+ Value string
+ User string
+ }
+ authPairs []authPair
+)
+
+func (a authPairs) searchCredential(authValue string) (string, bool) {
+ if len(authValue) == 0 {
+ return "", false
+ }
+ for _, pair := range a {
+ if pair.Value == authValue {
+ return pair.User, true
+ }
+ }
+ return "", false
+}
+
+// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where
+// the key is the user name and the value is the password, as well as the name of the Realm.
+// If the realm is empty, "Authorization Required" will be used by default.
+// (see http://tools.ietf.org/html/rfc2617#section-1.2)
+func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc {
+ if realm == "" {
+ realm = "Authorization Required"
+ }
+ realm = "Basic realm=" + strconv.Quote(realm)
+ pairs := processAccounts(accounts)
+ return func(c *Context) {
+ // Search user in the slice of allowed credentials
+ user, found := pairs.searchCredential(c.Request.Header.Get("Authorization"))
+ if !found {
+			// Credentials don't match; return 401 and abort the handler chain.
+ c.Header("WWW-Authenticate", realm)
+ c.AbortWithStatus(401)
+ } else {
+			// The user credentials were found; set the user's id under AuthUserKey in this context.
+			// The user id can be read later using c.MustGet(gin.AuthUserKey).
+ c.Set(AuthUserKey, user)
+ }
+ }
+}
+
+// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where
+// the key is the user name and the value is the password.
+func BasicAuth(accounts Accounts) HandlerFunc {
+ return BasicAuthForRealm(accounts, "")
+}
+
+func processAccounts(accounts Accounts) authPairs {
+ assert1(len(accounts) > 0, "Empty list of authorized credentials")
+ pairs := make(authPairs, 0, len(accounts))
+ for user, password := range accounts {
+ assert1(len(user) > 0, "User can not be empty")
+ value := authorizationHeader(user, password)
+ pairs = append(pairs, authPair{
+ Value: value,
+ User: user,
+ })
+ }
+ return pairs
+}
+
+func authorizationHeader(user, password string) string {
+ base := user + ":" + password
+ return "Basic " + base64.StdEncoding.EncodeToString([]byte(base))
+}
+
+func secureCompare(given, actual string) bool {
+ if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 {
+ return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1
+ }
+ /* Securely compare actual to itself to keep constant time, but always return false */
+ return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/binding.go b/vendor/github.com/gin-gonic/gin/binding/binding.go
new file mode 100644
index 0000000..1dbf246
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/binding.go
@@ -0,0 +1,72 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import "net/http"
+
+const (
+ MIMEJSON = "application/json"
+ MIMEHTML = "text/html"
+ MIMEXML = "application/xml"
+ MIMEXML2 = "text/xml"
+ MIMEPlain = "text/plain"
+ MIMEPOSTForm = "application/x-www-form-urlencoded"
+ MIMEMultipartPOSTForm = "multipart/form-data"
+ MIMEPROTOBUF = "application/x-protobuf"
+ MIMEMSGPACK = "application/x-msgpack"
+ MIMEMSGPACK2 = "application/msgpack"
+)
+
+type Binding interface {
+ Name() string
+ Bind(*http.Request, interface{}) error
+}
+
+type StructValidator interface {
+ // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right.
+ // If the received type is not a struct, any validation should be skipped and nil must be returned.
+ // If the received type is a struct or pointer to a struct, the validation should be performed.
+ // If the struct is not valid or the validation itself fails, a descriptive error should be returned.
+ // Otherwise nil must be returned.
+ ValidateStruct(interface{}) error
+}
+
+var Validator StructValidator = &defaultValidator{}
+
+var (
+ JSON = jsonBinding{}
+ XML = xmlBinding{}
+ Form = formBinding{}
+ FormPost = formPostBinding{}
+ FormMultipart = formMultipartBinding{}
+ ProtoBuf = protobufBinding{}
+ MsgPack = msgpackBinding{}
+)
+
+func Default(method, contentType string) Binding {
+ if method == "GET" {
+ return Form
+ }
+
+ switch contentType {
+ case MIMEJSON:
+ return JSON
+ case MIMEXML, MIMEXML2:
+ return XML
+ case MIMEPROTOBUF:
+ return ProtoBuf
+ case MIMEMSGPACK, MIMEMSGPACK2:
+ return MsgPack
+ default: //case MIMEPOSTForm, MIMEMultipartPOSTForm:
+ return Form
+ }
+}
+
+func validate(obj interface{}) error {
+ if Validator == nil {
+ return nil
+ }
+ return Validator.ValidateStruct(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/vendor/github.com/gin-gonic/gin/binding/default_validator.go
new file mode 100644
index 0000000..19885f1
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/default_validator.go
@@ -0,0 +1,45 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "reflect"
+ "sync"
+
+ "gopkg.in/go-playground/validator.v8"
+)
+
+type defaultValidator struct {
+ once sync.Once
+ validate *validator.Validate
+}
+
+var _ StructValidator = &defaultValidator{}
+
+func (v *defaultValidator) ValidateStruct(obj interface{}) error {
+ if kindOfData(obj) == reflect.Struct {
+ v.lazyinit()
+ if err := v.validate.Struct(obj); err != nil {
+ return error(err)
+ }
+ }
+ return nil
+}
+
+func (v *defaultValidator) lazyinit() {
+ v.once.Do(func() {
+ config := &validator.Config{TagName: "binding"}
+ v.validate = validator.New(config)
+ })
+}
+
+func kindOfData(data interface{}) reflect.Kind {
+ value := reflect.ValueOf(data)
+ valueType := value.Kind()
+ if valueType == reflect.Ptr {
+ valueType = value.Elem().Kind()
+ }
+ return valueType
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/form.go b/vendor/github.com/gin-gonic/gin/binding/form.go
new file mode 100644
index 0000000..557333e
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/form.go
@@ -0,0 +1,54 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import "net/http"
+
+type formBinding struct{}
+type formPostBinding struct{}
+type formMultipartBinding struct{}
+
+func (formBinding) Name() string {
+ return "form"
+}
+
+func (formBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseForm(); err != nil {
+ return err
+ }
+	req.ParseMultipartForm(32 << 10) // 32 KB
+ if err := mapForm(obj, req.Form); err != nil {
+ return err
+ }
+ return validate(obj)
+}
+
+func (formPostBinding) Name() string {
+ return "form-urlencoded"
+}
+
+func (formPostBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseForm(); err != nil {
+ return err
+ }
+ if err := mapForm(obj, req.PostForm); err != nil {
+ return err
+ }
+ return validate(obj)
+}
+
+func (formMultipartBinding) Name() string {
+ return "multipart/form-data"
+}
+
+func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseMultipartForm(32 << 10); err != nil {
+ return err
+ }
+ if err := mapForm(obj, req.MultipartForm.Value); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go
new file mode 100644
index 0000000..34f1267
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go
@@ -0,0 +1,182 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "errors"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+func mapForm(ptr interface{}, form map[string][]string) error {
+ typ := reflect.TypeOf(ptr).Elem()
+ val := reflect.ValueOf(ptr).Elem()
+ for i := 0; i < typ.NumField(); i++ {
+ typeField := typ.Field(i)
+ structField := val.Field(i)
+ if !structField.CanSet() {
+ continue
+ }
+
+ structFieldKind := structField.Kind()
+ inputFieldName := typeField.Tag.Get("form")
+ if inputFieldName == "" {
+ inputFieldName = typeField.Name
+
+			// If the "form" tag is empty, we check whether the field is a struct.
+			// This would not make sense for JSON parsing, but it does for a form,
+			// since form data is flattened.
+ if structFieldKind == reflect.Struct {
+ err := mapForm(structField.Addr().Interface(), form)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ }
+ inputValue, exists := form[inputFieldName]
+ if !exists {
+ continue
+ }
+
+ numElems := len(inputValue)
+ if structFieldKind == reflect.Slice && numElems > 0 {
+ sliceOf := structField.Type().Elem().Kind()
+ slice := reflect.MakeSlice(structField.Type(), numElems, numElems)
+ for i := 0; i < numElems; i++ {
+ if err := setWithProperType(sliceOf, inputValue[i], slice.Index(i)); err != nil {
+ return err
+ }
+ }
+ val.Field(i).Set(slice)
+ } else {
+ if _, isTime := structField.Interface().(time.Time); isTime {
+ if err := setTimeField(inputValue[0], typeField, structField); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error {
+ switch valueKind {
+ case reflect.Int:
+ return setIntField(val, 0, structField)
+ case reflect.Int8:
+ return setIntField(val, 8, structField)
+ case reflect.Int16:
+ return setIntField(val, 16, structField)
+ case reflect.Int32:
+ return setIntField(val, 32, structField)
+ case reflect.Int64:
+ return setIntField(val, 64, structField)
+ case reflect.Uint:
+ return setUintField(val, 0, structField)
+ case reflect.Uint8:
+ return setUintField(val, 8, structField)
+ case reflect.Uint16:
+ return setUintField(val, 16, structField)
+ case reflect.Uint32:
+ return setUintField(val, 32, structField)
+ case reflect.Uint64:
+ return setUintField(val, 64, structField)
+ case reflect.Bool:
+ return setBoolField(val, structField)
+ case reflect.Float32:
+ return setFloatField(val, 32, structField)
+ case reflect.Float64:
+ return setFloatField(val, 64, structField)
+ case reflect.String:
+ structField.SetString(val)
+ default:
+ return errors.New("Unknown type")
+ }
+ return nil
+}
+
+func setIntField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0"
+ }
+ intVal, err := strconv.ParseInt(val, 10, bitSize)
+ if err == nil {
+ field.SetInt(intVal)
+ }
+ return err
+}
+
+func setUintField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0"
+ }
+ uintVal, err := strconv.ParseUint(val, 10, bitSize)
+ if err == nil {
+ field.SetUint(uintVal)
+ }
+ return err
+}
+
+func setBoolField(val string, field reflect.Value) error {
+ if val == "" {
+ val = "false"
+ }
+ boolVal, err := strconv.ParseBool(val)
+ if err == nil {
+ field.SetBool(boolVal)
+ }
+	return err
+}
+
+func setFloatField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0.0"
+ }
+ floatVal, err := strconv.ParseFloat(val, bitSize)
+ if err == nil {
+ field.SetFloat(floatVal)
+ }
+ return err
+}
+
+func setTimeField(val string, structField reflect.StructField, value reflect.Value) error {
+ timeFormat := structField.Tag.Get("time_format")
+ if timeFormat == "" {
+ return errors.New("Blank time format")
+ }
+
+ if val == "" {
+ value.Set(reflect.ValueOf(time.Time{}))
+ return nil
+ }
+
+ l := time.Local
+ if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC {
+ l = time.UTC
+ }
+
+ t, err := time.ParseInLocation(timeFormat, val, l)
+ if err != nil {
+ return err
+ }
+
+ value.Set(reflect.ValueOf(t))
+ return nil
+}
+
+// Don't pass in pointers to bind to. Can lead to bugs. See:
+// https://github.com/codegangsta/martini-contrib/issues/40
+// https://github.com/codegangsta/martini-contrib/pull/34#issuecomment-29683659
+func ensureNotPointer(obj interface{}) {
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr {
+ panic("Pointers are not accepted as binding models")
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/json.go b/vendor/github.com/gin-gonic/gin/binding/json.go
new file mode 100644
index 0000000..486b973
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/json.go
@@ -0,0 +1,24 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+type jsonBinding struct{}
+
+func (jsonBinding) Name() string {
+ return "json"
+}
+
+func (jsonBinding) Bind(req *http.Request, obj interface{}) error {
+ decoder := json.NewDecoder(req.Body)
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/msgpack.go b/vendor/github.com/gin-gonic/gin/binding/msgpack.go
new file mode 100644
index 0000000..6936717
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/msgpack.go
@@ -0,0 +1,28 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "net/http"
+
+ "github.com/ugorji/go/codec"
+)
+
+type msgpackBinding struct{}
+
+func (msgpackBinding) Name() string {
+ return "msgpack"
+}
+
+func (msgpackBinding) Bind(req *http.Request, obj interface{}) error {
+
+ if err := codec.NewDecoder(req.Body, new(codec.MsgpackHandle)).Decode(&obj); err != nil {
+ //var decoder *codec.Decoder = codec.NewDecoder(req.Body, &codec.MsgpackHandle)
+ //if err := decoder.Decode(&obj); err != nil {
+ return err
+ }
+ return validate(obj)
+
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/vendor/github.com/gin-gonic/gin/binding/protobuf.go
new file mode 100644
index 0000000..c7eb84e
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/protobuf.go
@@ -0,0 +1,35 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "io/ioutil"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+)
+
+type protobufBinding struct{}
+
+func (protobufBinding) Name() string {
+ return "protobuf"
+}
+
+func (protobufBinding) Bind(req *http.Request, obj interface{}) error {
+
+ buf, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+
+ if err = proto.Unmarshal(buf, obj.(proto.Message)); err != nil {
+ return err
+ }
+
+	// It would be the same to return validate(obj), but for now we can't add `binding:""` tags
+	// to structs that are automatically generated by gen-proto.
+ return nil
+ //return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/xml.go b/vendor/github.com/gin-gonic/gin/binding/xml.go
new file mode 100644
index 0000000..f84a6b7
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/xml.go
@@ -0,0 +1,24 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
+type xmlBinding struct{}
+
+func (xmlBinding) Name() string {
+ return "xml"
+}
+
+func (xmlBinding) Bind(req *http.Request, obj interface{}) error {
+ decoder := xml.NewDecoder(req.Body)
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/codecov.yml b/vendor/github.com/gin-gonic/gin/codecov.yml
new file mode 100644
index 0000000..c9c9a52
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/codecov.yml
@@ -0,0 +1,5 @@
+coverage:
+ notify:
+ gitter:
+ default:
+ url: https://webhooks.gitter.im/e/d90dcdeeab2f1e357165
diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go
new file mode 100644
index 0000000..5c4d27d
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/context.go
@@ -0,0 +1,766 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "errors"
+ "io"
+ "io/ioutil"
+ "math"
+ "mime/multipart"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/gin-contrib/sse"
+ "github.com/gin-gonic/gin/binding"
+ "github.com/gin-gonic/gin/render"
+)
+
+// Content-Type MIME of the most common data formats
+const (
+ MIMEJSON = binding.MIMEJSON
+ MIMEHTML = binding.MIMEHTML
+ MIMEXML = binding.MIMEXML
+ MIMEXML2 = binding.MIMEXML2
+ MIMEPlain = binding.MIMEPlain
+ MIMEPOSTForm = binding.MIMEPOSTForm
+ MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm
+)
+
+const (
+ defaultMemory = 32 << 20 // 32 MB
+ abortIndex int8 = math.MaxInt8 / 2
+)
+
+// Context is the most important part of gin. It allows us to pass variables between middleware,
+// manage the flow, validate the JSON of a request and render a JSON response for example.
+type Context struct {
+ writermem responseWriter
+ Request *http.Request
+ Writer ResponseWriter
+
+ Params Params
+ handlers HandlersChain
+ index int8
+
+ engine *Engine
+ Keys map[string]interface{}
+ Errors errorMsgs
+ Accepted []string
+}
+
+/************************************/
+/********** CONTEXT CREATION ********/
+/************************************/
+
+func (c *Context) reset() {
+ c.Writer = &c.writermem
+ c.Params = c.Params[0:0]
+ c.handlers = nil
+ c.index = -1
+ c.Keys = nil
+ c.Errors = c.Errors[0:0]
+ c.Accepted = nil
+}
+
+// Copy returns a copy of the current context that can be safely used outside the request's scope.
+// This has to be used when the context has to be passed to a goroutine.
+func (c *Context) Copy() *Context {
+ var cp = *c
+ cp.writermem.ResponseWriter = nil
+ cp.Writer = &cp.writermem
+ cp.index = abortIndex
+ cp.handlers = nil
+ return &cp
+}
+
+// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", this
+// function will return "main.handleGetUsers"
+func (c *Context) HandlerName() string {
+ return nameOfFunction(c.handlers.Last())
+}
+
+// Handler returns the main handler.
+func (c *Context) Handler() HandlerFunc {
+ return c.handlers.Last()
+}
+
+/************************************/
+/*********** FLOW CONTROL ***********/
+/************************************/
+
+// Next should be used only inside middleware.
+// It executes the pending handlers in the chain inside the calling handler.
+// See example in GitHub.
+func (c *Context) Next() {
+ c.index++
+ s := int8(len(c.handlers))
+ for ; c.index < s; c.index++ {
+ c.handlers[c.index](c)
+ }
+}
+
+// IsAborted returns true if the current context was aborted.
+func (c *Context) IsAborted() bool {
+ return c.index >= abortIndex
+}
+
+// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
+// Let's say you have an authorization middleware that validates that the current request is authorized. If the
+// authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
+// for this request are not called.
+func (c *Context) Abort() {
+ c.index = abortIndex
+}
+
+// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
+// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
+func (c *Context) AbortWithStatus(code int) {
+ c.Status(code)
+ c.Writer.WriteHeaderNow()
+ c.Abort()
+}
+
+// AbortWithStatusJSON calls `Abort()` and then `JSON` internally. This method stops the chain, writes the status code and returns a JSON body.
+// It also sets the Content-Type as "application/json".
+func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) {
+ c.Abort()
+ c.JSON(code, jsonObj)
+}
+
+// AbortWithError calls `AbortWithStatus()` and `Error()` internally. This method stops the chain, writes the status code and
+// pushes the specified error to `c.Errors`.
+// See Context.Error() for more details.
+func (c *Context) AbortWithError(code int, err error) *Error {
+ c.AbortWithStatus(code)
+ return c.Error(err)
+}
+
+/************************************/
+/********* ERROR MANAGEMENT *********/
+/************************************/
+
+// Error attaches an error to the current context. The error is pushed to a list of errors.
+// It's a good idea to call Error for each error that occurred during the resolution of a request.
+// A middleware can be used to collect all the errors and push them to a database together,
+// print a log, or append them to the HTTP response.
+func (c *Context) Error(err error) *Error {
+ var parsedError *Error
+ switch err.(type) {
+ case *Error:
+ parsedError = err.(*Error)
+ default:
+ parsedError = &Error{
+ Err: err,
+ Type: ErrorTypePrivate,
+ }
+ }
+ c.Errors = append(c.Errors, parsedError)
+ return parsedError
+}
+
+/************************************/
+/******** METADATA MANAGEMENT********/
+/************************************/
+
+// Set is used to store a new key/value pair exclusively for this context.
+// It also lazy initializes c.Keys if it was not used previously.
+func (c *Context) Set(key string, value interface{}) {
+ if c.Keys == nil {
+ c.Keys = make(map[string]interface{})
+ }
+ c.Keys[key] = value
+}
+
+// Get returns the value for the given key, i.e. (value, true).
+// If the value does not exist, it returns (nil, false).
+func (c *Context) Get(key string) (value interface{}, exists bool) {
+ value, exists = c.Keys[key]
+ return
+}
+
+// MustGet returns the value for the given key if it exists, otherwise it panics.
+func (c *Context) MustGet(key string) interface{} {
+ if value, exists := c.Get(key); exists {
+ return value
+ }
+ panic("Key \"" + key + "\" does not exist")
+}
+
+// GetString returns the value associated with the key as a string.
+func (c *Context) GetString(key string) (s string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ s, _ = val.(string)
+ }
+ return
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func (c *Context) GetBool(key string) (b bool) {
+ if val, ok := c.Get(key); ok && val != nil {
+ b, _ = val.(bool)
+ }
+ return
+}
+
+// GetInt returns the value associated with the key as an integer.
+func (c *Context) GetInt(key string) (i int) {
+ if val, ok := c.Get(key); ok && val != nil {
+ i, _ = val.(int)
+ }
+ return
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func (c *Context) GetInt64(key string) (i64 int64) {
+ if val, ok := c.Get(key); ok && val != nil {
+ i64, _ = val.(int64)
+ }
+ return
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func (c *Context) GetFloat64(key string) (f64 float64) {
+ if val, ok := c.Get(key); ok && val != nil {
+ f64, _ = val.(float64)
+ }
+ return
+}
+
+// GetTime returns the value associated with the key as time.
+func (c *Context) GetTime(key string) (t time.Time) {
+ if val, ok := c.Get(key); ok && val != nil {
+ t, _ = val.(time.Time)
+ }
+ return
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func (c *Context) GetDuration(key string) (d time.Duration) {
+ if val, ok := c.Get(key); ok && val != nil {
+ d, _ = val.(time.Duration)
+ }
+ return
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func (c *Context) GetStringSlice(key string) (ss []string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ ss, _ = val.([]string)
+ }
+ return
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func (c *Context) GetStringMap(key string) (sm map[string]interface{}) {
+ if val, ok := c.Get(key); ok && val != nil {
+ sm, _ = val.(map[string]interface{})
+ }
+ return
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func (c *Context) GetStringMapString(key string) (sms map[string]string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ sms, _ = val.(map[string]string)
+ }
+ return
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ smss, _ = val.(map[string][]string)
+ }
+ return
+}
+
+/************************************/
+/************ INPUT DATA ************/
+/************************************/
+
+// Param returns the value of the URL param.
+// It is a shortcut for c.Params.ByName(key)
+// router.GET("/user/:id", func(c *gin.Context) {
+// // a GET request to /user/john
+// id := c.Param("id") // id == "john"
+// })
+func (c *Context) Param(key string) string {
+ return c.Params.ByName(key)
+}
+
+// Query returns the keyed url query value if it exists,
+// otherwise it returns an empty string `("")`.
+// It is shortcut for `c.Request.URL.Query().Get(key)`
+// GET /path?id=1234&name=Manu&value=
+// c.Query("id") == "1234"
+// c.Query("name") == "Manu"
+// c.Query("value") == ""
+// c.Query("wtf") == ""
+func (c *Context) Query(key string) string {
+ value, _ := c.GetQuery(key)
+ return value
+}
+
+// DefaultQuery returns the keyed url query value if it exists,
+// otherwise it returns the specified defaultValue string.
+// See: Query() and GetQuery() for further information.
+// GET /?name=Manu&lastname=
+// c.DefaultQuery("name", "unknown") == "Manu"
+// c.DefaultQuery("id", "none") == "none"
+// c.DefaultQuery("lastname", "none") == ""
+func (c *Context) DefaultQuery(key, defaultValue string) string {
+ if value, ok := c.GetQuery(key); ok {
+ return value
+ }
+ return defaultValue
+}
+
+// GetQuery is like Query(), it returns the keyed url query value
+// if it exists `(value, true)` (even when the value is an empty string),
+// otherwise it returns `("", false)`.
+// It is shortcut for `c.Request.URL.Query().Get(key)`
+// GET /?name=Manu&lastname=
+// ("Manu", true) == c.GetQuery("name")
+// ("", false) == c.GetQuery("id")
+// ("", true) == c.GetQuery("lastname")
+func (c *Context) GetQuery(key string) (string, bool) {
+ if values, ok := c.GetQueryArray(key); ok {
+ return values[0], ok
+ }
+ return "", false
+}
+
+// QueryArray returns a slice of strings for a given query key.
+// The length of the slice depends on the number of params with the given key.
+func (c *Context) QueryArray(key string) []string {
+ values, _ := c.GetQueryArray(key)
+ return values
+}
+
+// GetQueryArray returns a slice of strings for a given query key, plus
+// a boolean value whether at least one value exists for the given key.
+func (c *Context) GetQueryArray(key string) ([]string, bool) {
+ req := c.Request
+ if values, ok := req.URL.Query()[key]; ok && len(values) > 0 {
+ return values, true
+ }
+ return []string{}, false
+}
+
+// PostForm returns the specified key from a POST urlencoded form or multipart form
+// when it exists, otherwise it returns an empty string `("")`.
+func (c *Context) PostForm(key string) string {
+ value, _ := c.GetPostForm(key)
+ return value
+}
+
+// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form
+// when it exists, otherwise it returns the specified defaultValue string.
+// See: PostForm() and GetPostForm() for further information.
+func (c *Context) DefaultPostForm(key, defaultValue string) string {
+ if value, ok := c.GetPostForm(key); ok {
+ return value
+ }
+ return defaultValue
+}
+
+// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded
+// form or multipart form when it exists `(value, true)` (even when the value is an empty string),
+// otherwise it returns ("", false).
+// For example, during a PATCH request to update the user's email:
+// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com"
+// email= --> ("", true) := GetPostForm("email") // set email to ""
+// --> ("", false) := GetPostForm("email") // do nothing with email
+func (c *Context) GetPostForm(key string) (string, bool) {
+ if values, ok := c.GetPostFormArray(key); ok {
+ return values[0], ok
+ }
+ return "", false
+}
+
+// PostFormArray returns a slice of strings for a given form key.
+// The length of the slice depends on the number of params with the given key.
+func (c *Context) PostFormArray(key string) []string {
+ values, _ := c.GetPostFormArray(key)
+ return values
+}
+
+// GetPostFormArray returns a slice of strings for a given form key, plus
+// a boolean value whether at least one value exists for the given key.
+func (c *Context) GetPostFormArray(key string) ([]string, bool) {
+ req := c.Request
+ req.ParseForm()
+ req.ParseMultipartForm(defaultMemory)
+ if values := req.PostForm[key]; len(values) > 0 {
+ return values, true
+ }
+ if req.MultipartForm != nil && req.MultipartForm.File != nil {
+ if values := req.MultipartForm.Value[key]; len(values) > 0 {
+ return values, true
+ }
+ }
+ return []string{}, false
+}
+
+// FormFile returns the first file for the provided form key.
+func (c *Context) FormFile(name string) (*multipart.FileHeader, error) {
+ _, fh, err := c.Request.FormFile(name)
+ return fh, err
+}
+
+// MultipartForm is the parsed multipart form, including file uploads.
+func (c *Context) MultipartForm() (*multipart.Form, error) {
+ err := c.Request.ParseMultipartForm(defaultMemory)
+ return c.Request.MultipartForm, err
+}
+
+// Bind checks the Content-Type to select a binding engine automatically.
+// Depending on the "Content-Type" header, different bindings are used:
+//     "application/json" --> JSON binding
+//     "application/xml"  --> XML binding
+//     otherwise          --> Form binding (see binding.Default)
+// It decodes the payload into the struct passed as a pointer.
+// Unlike ShouldBindWith(), this method also writes a 400 error and aborts the
+// request if the input is not valid.
+func (c *Context) Bind(obj interface{}) error {
+ b := binding.Default(c.Request.Method, c.ContentType())
+ return c.MustBindWith(obj, b)
+}
+
+// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON)
+func (c *Context) BindJSON(obj interface{}) error {
+ return c.MustBindWith(obj, binding.JSON)
+}
+
+// MustBindWith binds the passed struct pointer using the specified binding
+// engine. It will abort the request with HTTP 400 if any error occurs.
+// See the binding package.
+func (c *Context) MustBindWith(obj interface{}, b binding.Binding) (err error) {
+ if err = c.ShouldBindWith(obj, b); err != nil {
+ c.AbortWithError(400, err).SetType(ErrorTypeBind)
+ }
+
+ return
+}
+
+// ShouldBindWith binds the passed struct pointer using the specified binding
+// engine.
+// See the binding package.
+func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
+ return b.Bind(c.Request, obj)
+}
+
+// ClientIP implements a best-effort algorithm to return the real client IP. It parses
+// X-Real-IP and X-Forwarded-For in order to work properly with reverse proxies such as nginx or haproxy.
+// X-Forwarded-For is checked before X-Real-Ip because nginx sets X-Real-Ip to the proxy's IP.
+func (c *Context) ClientIP() string {
+ if c.engine.ForwardedByClientIP {
+ clientIP := c.requestHeader("X-Forwarded-For")
+ if index := strings.IndexByte(clientIP, ','); index >= 0 {
+ clientIP = clientIP[0:index]
+ }
+ clientIP = strings.TrimSpace(clientIP)
+ if len(clientIP) > 0 {
+ return clientIP
+ }
+ clientIP = strings.TrimSpace(c.requestHeader("X-Real-Ip"))
+ if len(clientIP) > 0 {
+ return clientIP
+ }
+ }
+
+ if c.engine.AppEngine {
+ if addr := c.Request.Header.Get("X-Appengine-Remote-Addr"); addr != "" {
+ return addr
+ }
+ }
+
+ if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil {
+ return ip
+ }
+
+ return ""
+}
+
+// ContentType returns the Content-Type header of the request.
+func (c *Context) ContentType() string {
+ return filterFlags(c.requestHeader("Content-Type"))
+}
+
+// IsWebsocket returns true if the request headers indicate that a websocket
+// handshake is being initiated by the client.
+func (c *Context) IsWebsocket() bool {
+ if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") &&
+ strings.ToLower(c.requestHeader("Upgrade")) == "websocket" {
+ return true
+ }
+ return false
+}
+
+func (c *Context) requestHeader(key string) string {
+ if values, _ := c.Request.Header[key]; len(values) > 0 {
+ return values[0]
+ }
+ return ""
+}
+
+/************************************/
+/******** RESPONSE RENDERING ********/
+/************************************/
+
+// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *Context) Status(code int) {
+ c.writermem.WriteHeader(code)
+}
+
+// Header is an intelligent shortcut for c.Writer.Header().Set(key, value).
+// It writes a header in the response.
+// If value == "", this method removes the header `c.Writer.Header().Del(key)`
+func (c *Context) Header(key, value string) {
+ if len(value) == 0 {
+ c.Writer.Header().Del(key)
+ } else {
+ c.Writer.Header().Set(key, value)
+ }
+}
+
+// GetHeader returns value from request headers
+func (c *Context) GetHeader(key string) string {
+ return c.requestHeader(key)
+}
+
+// GetRawData returns the raw request body data.
+func (c *Context) GetRawData() ([]byte, error) {
+ return ioutil.ReadAll(c.Request.Body)
+}
+
+func (c *Context) SetCookie(
+ name string,
+ value string,
+ maxAge int,
+ path string,
+ domain string,
+ secure bool,
+ httpOnly bool,
+) {
+ if path == "" {
+ path = "/"
+ }
+ http.SetCookie(c.Writer, &http.Cookie{
+ Name: name,
+ Value: url.QueryEscape(value),
+ MaxAge: maxAge,
+ Path: path,
+ Domain: domain,
+ Secure: secure,
+ HttpOnly: httpOnly,
+ })
+}
+
+func (c *Context) Cookie(name string) (string, error) {
+ cookie, err := c.Request.Cookie(name)
+ if err != nil {
+ return "", err
+ }
+ val, _ := url.QueryUnescape(cookie.Value)
+ return val, nil
+}
+
+func (c *Context) Render(code int, r render.Render) {
+ c.Status(code)
+
+ if !bodyAllowedForStatus(code) {
+ r.WriteContentType(c.Writer)
+ c.Writer.WriteHeaderNow()
+ return
+ }
+
+ if err := r.Render(c.Writer); err != nil {
+ panic(err)
+ }
+}
+
+// HTML renders the HTTP template specified by its file name.
+// It also updates the HTTP code and sets the Content-Type as "text/html".
+// See http://golang.org/doc/articles/wiki/
+func (c *Context) HTML(code int, name string, obj interface{}) {
+ instance := c.engine.HTMLRender.Instance(name, obj)
+ c.Render(code, instance)
+}
+
+// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body.
+// It also sets the Content-Type as "application/json".
+// WARNING: we recommend using this only for development purposes since printing pretty JSON
+// consumes more CPU and bandwidth. Use Context.JSON() instead.
+func (c *Context) IndentedJSON(code int, obj interface{}) {
+ c.Render(code, render.IndentedJSON{Data: obj})
+}
+
+// JSON serializes the given struct as JSON into the response body.
+// It also sets the Content-Type as "application/json".
+func (c *Context) JSON(code int, obj interface{}) {
+ c.Render(code, render.JSON{Data: obj})
+}
+
+// XML serializes the given struct as XML into the response body.
+// It also sets the Content-Type as "application/xml".
+func (c *Context) XML(code int, obj interface{}) {
+ c.Render(code, render.XML{Data: obj})
+}
+
+// YAML serializes the given struct as YAML into the response body.
+func (c *Context) YAML(code int, obj interface{}) {
+ c.Render(code, render.YAML{Data: obj})
+}
+
+// String writes the given string into the response body.
+func (c *Context) String(code int, format string, values ...interface{}) {
+ c.Render(code, render.String{Format: format, Data: values})
+}
+
+// Redirect returns an HTTP redirect to the specified location.
+func (c *Context) Redirect(code int, location string) {
+ c.Render(-1, render.Redirect{
+ Code: code,
+ Location: location,
+ Request: c.Request,
+ })
+}
+
+// Data writes some data into the body stream and updates the HTTP code.
+func (c *Context) Data(code int, contentType string, data []byte) {
+ c.Render(code, render.Data{
+ ContentType: contentType,
+ Data: data,
+ })
+}
+
+// File writes the specified file into the body stream in an efficient way.
+func (c *Context) File(filepath string) {
+ http.ServeFile(c.Writer, c.Request, filepath)
+}
+
+// SSEvent writes a Server-Sent Event into the body stream.
+func (c *Context) SSEvent(name string, message interface{}) {
+ c.Render(-1, sse.Event{
+ Event: name,
+ Data: message,
+ })
+}
+
+func (c *Context) Stream(step func(w io.Writer) bool) {
+ w := c.Writer
+ clientGone := w.CloseNotify()
+ for {
+ select {
+ case <-clientGone:
+ return
+ default:
+ keepOpen := step(w)
+ w.Flush()
+ if !keepOpen {
+ return
+ }
+ }
+ }
+}
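+
+// Illustrative usage (not from the upstream source; the event name and payload
+// are placeholders): Stream keeps the connection open until the client
+// disconnects or the step function returns false, flushing after every step.
+// Combined with SSEvent it can drive a simple Server-Sent Events endpoint:
+//
+//	c.Stream(func(w io.Writer) bool {
+//		c.SSEvent("tick", time.Now().Unix())
+//		return true // keep streaming
+//	})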
+
+/************************************/
+/******** CONTENT NEGOTIATION *******/
+/************************************/
+
+type Negotiate struct {
+ Offered []string
+ HTMLName string
+ HTMLData interface{}
+ JSONData interface{}
+ XMLData interface{}
+ Data interface{}
+}
+
+func (c *Context) Negotiate(code int, config Negotiate) {
+ switch c.NegotiateFormat(config.Offered...) {
+ case binding.MIMEJSON:
+ data := chooseData(config.JSONData, config.Data)
+ c.JSON(code, data)
+
+ case binding.MIMEHTML:
+ data := chooseData(config.HTMLData, config.Data)
+ c.HTML(code, config.HTMLName, data)
+
+ case binding.MIMEXML:
+ data := chooseData(config.XMLData, config.Data)
+ c.XML(code, data)
+
+ default:
+ c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server"))
+ }
+}
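+
+// Illustrative usage (not from the upstream source; the payload is a placeholder):
+// a handler offering JSON and XML lets Negotiate pick the representation from the
+// request's Accept header:
+//
+//	c.Negotiate(http.StatusOK, gin.Negotiate{
+//		Offered: []string{binding.MIMEJSON, binding.MIMEXML},
+//		Data:    gin.H{"status": "ok"},
+//	})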
+
+func (c *Context) NegotiateFormat(offered ...string) string {
+ assert1(len(offered) > 0, "you must provide at least one offer")
+
+ if c.Accepted == nil {
+ c.Accepted = parseAccept(c.requestHeader("Accept"))
+ }
+ if len(c.Accepted) == 0 {
+ return offered[0]
+ }
+ for _, accepted := range c.Accepted {
+ for _, offert := range offered {
+ if accepted == offert {
+ return offert
+ }
+ }
+ }
+ return ""
+}
+
+func (c *Context) SetAccepted(formats ...string) {
+ c.Accepted = formats
+}
+
+/************************************/
+/***** GOLANG.ORG/X/NET/CONTEXT *****/
+/************************************/
+
+func (c *Context) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (c *Context) Done() <-chan struct{} {
+ return nil
+}
+
+func (c *Context) Err() error {
+ return nil
+}
+
+func (c *Context) Value(key interface{}) interface{} {
+ if key == 0 {
+ return c.Request
+ }
+ if keyAsString, ok := key.(string); ok {
+ val, _ := c.Get(keyAsString)
+ return val
+ }
+ return nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/context_appengine.go b/vendor/github.com/gin-gonic/gin/context_appengine.go
new file mode 100644
index 0000000..38c189a
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/context_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+func init() {
+ defaultAppEngine = true
+}
diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go
new file mode 100644
index 0000000..a121591
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/debug.go
@@ -0,0 +1,71 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "html/template"
+ "log"
+)
+
+func init() {
+ log.SetFlags(0)
+}
+
+// IsDebugging returns true if the framework is running in debug mode.
+// Use SetMode(gin.ReleaseMode) to disable debug mode.
+func IsDebugging() bool {
+ return ginMode == debugCode
+}
+
+func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) {
+ if IsDebugging() {
+ nuHandlers := len(handlers)
+ handlerName := nameOfFunction(handlers.Last())
+ debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers)
+ }
+}
+
+func debugPrintLoadTemplate(tmpl *template.Template) {
+ if IsDebugging() {
+ var buf bytes.Buffer
+ for _, tmpl := range tmpl.Templates() {
+ buf.WriteString("\t- ")
+ buf.WriteString(tmpl.Name())
+ buf.WriteString("\n")
+ }
+ debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String())
+ }
+}
+
+func debugPrint(format string, values ...interface{}) {
+ if IsDebugging() {
+ log.Printf("[GIN-debug] "+format, values...)
+ }
+}
+
+func debugPrintWARNINGNew() {
+ debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production.
+ - using env: export GIN_MODE=release
+ - using code: gin.SetMode(gin.ReleaseMode)
+
+`)
+}
+
+func debugPrintWARNINGSetHTMLTemplate() {
+	debugPrint(`[WARNING] SetHTMLTemplate() is NOT thread-safe. It should only be called
+at initialization, i.e. before any route is registered or the router is listening on a socket:
+
+ router := gin.Default()
+ router.SetHTMLTemplate(template) // << good place
+
+`)
+}
+
+func debugPrintError(err error) {
+ if err != nil {
+ debugPrint("[ERROR] %v\n", err)
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/deprecated.go b/vendor/github.com/gin-gonic/gin/deprecated.go
new file mode 100644
index 0000000..27e8f55
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/deprecated.go
@@ -0,0 +1,25 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "github.com/gin-gonic/gin/binding"
+ "log"
+)
+
+func (c *Context) GetCookie(name string) (string, error) {
+ log.Println("GetCookie() method is deprecated. Use Cookie() instead.")
+ return c.Cookie(name)
+}
+
+// BindWith binds the passed struct pointer using the specified binding engine.
+// See the binding package.
+func (c *Context) BindWith(obj interface{}, b binding.Binding) error {
+	log.Println(`BindWith("interface{}, binding.Binding") error is going to
+	be deprecated, please check issue #662 and either use MustBindWith() if you
+	want HTTP 400 to be automatically returned if any error occurs, or use
+	ShouldBindWith() if you need to manage the error.`)
+ return c.MustBindWith(obj, b)
+}
diff --git a/vendor/github.com/gin-gonic/gin/errors.go b/vendor/github.com/gin-gonic/gin/errors.go
new file mode 100644
index 0000000..896af6f
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/errors.go
@@ -0,0 +1,159 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+type ErrorType uint64
+
+const (
+ ErrorTypeBind ErrorType = 1 << 63 // used when c.Bind() fails
+ ErrorTypeRender ErrorType = 1 << 62 // used when c.Render() fails
+ ErrorTypePrivate ErrorType = 1 << 0
+ ErrorTypePublic ErrorType = 1 << 1
+
+ ErrorTypeAny ErrorType = 1<<64 - 1
+ ErrorTypeNu = 2
+)
+
+type (
+ Error struct {
+ Err error
+ Type ErrorType
+ Meta interface{}
+ }
+
+ errorMsgs []*Error
+)
+
+var _ error = &Error{}
+
+func (msg *Error) SetType(flags ErrorType) *Error {
+ msg.Type = flags
+ return msg
+}
+
+func (msg *Error) SetMeta(data interface{}) *Error {
+ msg.Meta = data
+ return msg
+}
+
+func (msg *Error) JSON() interface{} {
+ json := H{}
+ if msg.Meta != nil {
+ value := reflect.ValueOf(msg.Meta)
+ switch value.Kind() {
+ case reflect.Struct:
+ return msg.Meta
+ case reflect.Map:
+ for _, key := range value.MapKeys() {
+ json[key.String()] = value.MapIndex(key).Interface()
+ }
+ default:
+ json["meta"] = msg.Meta
+ }
+ }
+ if _, ok := json["error"]; !ok {
+ json["error"] = msg.Error()
+ }
+ return json
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (msg *Error) MarshalJSON() ([]byte, error) {
+ return json.Marshal(msg.JSON())
+}
+
+// Implements the error interface
+func (msg Error) Error() string {
+ return msg.Err.Error()
+}
+
+func (msg *Error) IsType(flags ErrorType) bool {
+ return (msg.Type & flags) > 0
+}
+
+// ByType returns a readonly copy filtered by the given error type.
+// i.e. ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic.
+func (a errorMsgs) ByType(typ ErrorType) errorMsgs {
+ if len(a) == 0 {
+ return nil
+ }
+ if typ == ErrorTypeAny {
+ return a
+ }
+ var result errorMsgs
+ for _, msg := range a {
+ if msg.IsType(typ) {
+ result = append(result, msg)
+ }
+ }
+ return result
+}
+
+// Last returns the last error in the slice. It returns nil if the slice is empty.
+// Shortcut for errors[len(errors)-1].
+func (a errorMsgs) Last() *Error {
+ length := len(a)
+ if length > 0 {
+ return a[length-1]
+ }
+ return nil
+}
+
+// Errors returns an array with all the error messages.
+// Example:
+// c.Error(errors.New("first"))
+// c.Error(errors.New("second"))
+// c.Error(errors.New("third"))
+// c.Errors.Errors() // == []string{"first", "second", "third"}
+func (a errorMsgs) Errors() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ errorStrings := make([]string, len(a))
+ for i, err := range a {
+ errorStrings[i] = err.Error()
+ }
+ return errorStrings
+}
+
+func (a errorMsgs) JSON() interface{} {
+ switch len(a) {
+ case 0:
+ return nil
+ case 1:
+ return a.Last().JSON()
+ default:
+ json := make([]interface{}, len(a))
+ for i, err := range a {
+ json[i] = err.JSON()
+ }
+ return json
+ }
+}
+
+func (a errorMsgs) MarshalJSON() ([]byte, error) {
+ return json.Marshal(a.JSON())
+}
+
+func (a errorMsgs) String() string {
+ if len(a) == 0 {
+ return ""
+ }
+ var buffer bytes.Buffer
+ for i, msg := range a {
+ fmt.Fprintf(&buffer, "Error #%02d: %s\n", (i + 1), msg.Err)
+ if msg.Meta != nil {
+ fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta)
+ }
+ }
+ return buffer.String()
+}
diff --git a/vendor/github.com/gin-gonic/gin/fs.go b/vendor/github.com/gin-gonic/gin/fs.go
new file mode 100644
index 0000000..1264582
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/fs.go
@@ -0,0 +1,46 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "net/http"
+ "os"
+)
+
+type (
+ onlyfilesFS struct {
+ fs http.FileSystem
+ }
+ neuteredReaddirFile struct {
+ http.File
+ }
+)
+
+// Dir returns an http.FileSystem that can be used by http.FileServer(). It is used internally
+// in router.Static().
+// If listDirectory == true, it works the same as http.Dir(); otherwise it returns
+// a filesystem that prevents http.FileServer() from listing the directory files.
+func Dir(root string, listDirectory bool) http.FileSystem {
+ fs := http.Dir(root)
+ if listDirectory {
+ return fs
+ }
+ return &onlyfilesFS{fs}
+}
+
+// Open conforms to http.FileSystem.
+func (fs onlyfilesFS) Open(name string) (http.File, error) {
+ f, err := fs.fs.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return neuteredReaddirFile{f}, nil
+}
+
+// Overrides the http.File default implementation
+func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
+ // this disables directory listing
+ return nil, nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go
new file mode 100644
index 0000000..c4118a4
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/gin.go
@@ -0,0 +1,418 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "html/template"
+ "net"
+ "net/http"
+ "os"
+ "sync"
+
+ "github.com/gin-gonic/gin/render"
+)
+
+// Version is the framework's version.
+const Version = "v1.2"
+
+var default404Body = []byte("404 page not found")
+var default405Body = []byte("405 method not allowed")
+var defaultAppEngine bool
+
+type HandlerFunc func(*Context)
+type HandlersChain []HandlerFunc
+
+// Last returns the last handler in the chain, i.e. the last handler is the main one.
+func (c HandlersChain) Last() HandlerFunc {
+ length := len(c)
+ if length > 0 {
+ return c[length-1]
+ }
+ return nil
+}
+
+type (
+ RoutesInfo []RouteInfo
+ RouteInfo struct {
+ Method string
+ Path string
+ Handler string
+ }
+
+	// Engine is the framework's instance; it contains the muxer, middleware and configuration settings.
+	// Create an instance of Engine by using New() or Default().
+ Engine struct {
+ RouterGroup
+ delims render.Delims
+ HTMLRender render.HTMLRender
+ FuncMap template.FuncMap
+ allNoRoute HandlersChain
+ allNoMethod HandlersChain
+ noRoute HandlersChain
+ noMethod HandlersChain
+ pool sync.Pool
+ trees methodTrees
+
+ // Enables automatic redirection if the current route can't be matched but a
+ // handler for the path with (without) the trailing slash exists.
+ // For example if /foo/ is requested but a route only exists for /foo, the
+ // client is redirected to /foo with http status code 301 for GET requests
+ // and 307 for all other request methods.
+ RedirectTrailingSlash bool
+
+ // If enabled, the router tries to fix the current request path, if no
+ // handle is registered for it.
+ // First superfluous path elements like ../ or // are removed.
+ // Afterwards the router does a case-insensitive lookup of the cleaned path.
+ // If a handle can be found for this route, the router makes a redirection
+ // to the corrected path with status code 301 for GET requests and 307 for
+ // all other request methods.
+ // For example /FOO and /..//Foo could be redirected to /foo.
+ // RedirectTrailingSlash is independent of this option.
+ RedirectFixedPath bool
+
+ // If enabled, the router checks if another method is allowed for the
+ // current route, if the current request can not be routed.
+ // If this is the case, the request is answered with 'Method Not Allowed'
+ // and HTTP status code 405.
+ // If no other Method is allowed, the request is delegated to the NotFound
+ // handler.
+ HandleMethodNotAllowed bool
+ ForwardedByClientIP bool
+
+	// #726 #755 If enabled, it will trust some headers starting with
+	// 'X-AppEngine...' for better integration with that PaaS.
+ AppEngine bool
+
+ // If enabled, the url.RawPath will be used to find parameters.
+ UseRawPath bool
+ // If true, the path value will be unescaped.
+	// If UseRawPath is false (the default), UnescapePathValues is effectively true,
+	// as url.Path will be used, which is already unescaped.
+ UnescapePathValues bool
+ }
+)
+
+var _ IRouter = &Engine{}
+
+// New returns a new blank Engine instance without any middleware attached.
+// By default the configuration is:
+// - RedirectTrailingSlash: true
+// - RedirectFixedPath: false
+// - HandleMethodNotAllowed: false
+// - ForwardedByClientIP: true
+// - UseRawPath: false
+// - UnescapePathValues: true
+func New() *Engine {
+ debugPrintWARNINGNew()
+ engine := &Engine{
+ RouterGroup: RouterGroup{
+ Handlers: nil,
+ basePath: "/",
+ root: true,
+ },
+ FuncMap: template.FuncMap{},
+ RedirectTrailingSlash: true,
+ RedirectFixedPath: false,
+ HandleMethodNotAllowed: false,
+ ForwardedByClientIP: true,
+ AppEngine: defaultAppEngine,
+ UseRawPath: false,
+ UnescapePathValues: true,
+ trees: make(methodTrees, 0, 9),
+ delims: render.Delims{"{{", "}}"},
+ }
+ engine.RouterGroup.engine = engine
+ engine.pool.New = func() interface{} {
+ return engine.allocateContext()
+ }
+ return engine
+}
+
+// Default returns an Engine instance with the Logger and Recovery middleware already attached.
+func Default() *Engine {
+ engine := New()
+ engine.Use(Logger(), Recovery())
+ return engine
+}
+
+func (engine *Engine) allocateContext() *Context {
+ return &Context{engine: engine}
+}
+
+func (engine *Engine) Delims(left, right string) *Engine {
+ engine.delims = render.Delims{left, right}
+ return engine
+}
+
+func (engine *Engine) LoadHTMLGlob(pattern string) {
+ if IsDebugging() {
+ debugPrintLoadTemplate(template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseGlob(pattern)))
+ engine.HTMLRender = render.HTMLDebug{Glob: pattern, FuncMap: engine.FuncMap, Delims: engine.delims}
+ } else {
+ templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseGlob(pattern))
+ engine.SetHTMLTemplate(templ)
+ }
+}
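+
+// Illustrative usage (not from the upstream source; the glob pattern, template
+// name and route are placeholders): templates loaded with LoadHTMLGlob are
+// rendered with c.HTML:
+//
+//	router := gin.Default()
+//	router.LoadHTMLGlob("templates/*")
+//	router.GET("/index", func(c *gin.Context) {
+//		c.HTML(http.StatusOK, "index.tmpl", gin.H{"title": "Hello"})
+//	})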
+
+func (engine *Engine) LoadHTMLFiles(files ...string) {
+ if IsDebugging() {
+ engine.HTMLRender = render.HTMLDebug{Files: files, FuncMap: engine.FuncMap, Delims: engine.delims}
+ } else {
+ templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...))
+ engine.SetHTMLTemplate(templ)
+ }
+}
+
+func (engine *Engine) SetHTMLTemplate(templ *template.Template) {
+ if len(engine.trees) > 0 {
+ debugPrintWARNINGSetHTMLTemplate()
+ }
+
+ engine.HTMLRender = render.HTMLProduction{Template: templ.Funcs(engine.FuncMap)}
+}
+
+func (engine *Engine) SetFuncMap(funcMap template.FuncMap) {
+ engine.FuncMap = funcMap
+}
+
+// NoRoute adds handlers for NoRoute. It returns a 404 code by default.
+func (engine *Engine) NoRoute(handlers ...HandlerFunc) {
+ engine.noRoute = handlers
+ engine.rebuild404Handlers()
+}
+
+// NoMethod sets the handlers called when the request method is not allowed (HTTP 405).
+func (engine *Engine) NoMethod(handlers ...HandlerFunc) {
+ engine.noMethod = handlers
+ engine.rebuild405Handlers()
+}
+
+// Use attaches global middleware to the router, i.e. the middleware attached through Use() will be
+// included in the handler chain for every single request, even 404, 405 and static files.
+// For example, this is the right place for a logger or error-management middleware.
+func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes {
+ engine.RouterGroup.Use(middleware...)
+ engine.rebuild404Handlers()
+ engine.rebuild405Handlers()
+ return engine
+}
+
+func (engine *Engine) rebuild404Handlers() {
+ engine.allNoRoute = engine.combineHandlers(engine.noRoute)
+}
+
+func (engine *Engine) rebuild405Handlers() {
+ engine.allNoMethod = engine.combineHandlers(engine.noMethod)
+}
+
+func (engine *Engine) addRoute(method, path string, handlers HandlersChain) {
+ assert1(path[0] == '/', "path must begin with '/'")
+ assert1(len(method) > 0, "HTTP method can not be empty")
+ assert1(len(handlers) > 0, "there must be at least one handler")
+
+ debugPrintRoute(method, path, handlers)
+ root := engine.trees.get(method)
+ if root == nil {
+ root = new(node)
+ engine.trees = append(engine.trees, methodTree{method: method, root: root})
+ }
+ root.addRoute(path, handlers)
+}
+
+// Routes returns a slice of registered routes, including some useful information, such as:
+// the http method, path and the handler name.
+func (engine *Engine) Routes() (routes RoutesInfo) {
+ for _, tree := range engine.trees {
+ routes = iterate("", tree.method, routes, tree.root)
+ }
+ return routes
+}
+
+func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo {
+ path += root.path
+ if len(root.handlers) > 0 {
+ routes = append(routes, RouteInfo{
+ Method: method,
+ Path: path,
+ Handler: nameOfFunction(root.handlers.Last()),
+ })
+ }
+ for _, child := range root.children {
+ routes = iterate(path, method, routes, child)
+ }
+ return routes
+}
+
+// Run attaches the router to an http.Server and starts listening and serving HTTP requests.
+// It is a shortcut for http.ListenAndServe(addr, router)
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) Run(addr ...string) (err error) {
+ defer func() { debugPrintError(err) }()
+
+ address := resolveAddress(addr)
+ debugPrint("Listening and serving HTTP on %s\n", address)
+ err = http.ListenAndServe(address, engine)
+ return
+}
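+
+// Illustrative usage (not from the upstream source; the route and address are
+// placeholders): a minimal server built on Default() and Run():
+//
+//	router := gin.Default()
+//	router.GET("/ping", func(c *gin.Context) {
+//		c.String(http.StatusOK, "pong")
+//	})
+//	router.Run(":8080")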
+
+// RunTLS attaches the router to an http.Server and starts listening and serving HTTPS (secure) requests.
+// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router)
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunTLS(addr string, certFile string, keyFile string) (err error) {
+ debugPrint("Listening and serving HTTPS on %s\n", addr)
+ defer func() { debugPrintError(err) }()
+
+ err = http.ListenAndServeTLS(addr, certFile, keyFile, engine)
+ return
+}
+
+// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests
+// through the specified unix socket (ie. a file).
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunUnix(file string) (err error) {
+ debugPrint("Listening and serving HTTP on unix:/%s", file)
+ defer func() { debugPrintError(err) }()
+
+ os.Remove(file)
+ listener, err := net.Listen("unix", file)
+ if err != nil {
+ return
+ }
+ defer listener.Close()
+ err = http.Serve(listener, engine)
+ return
+}
+
+// ServeHTTP conforms to the http.Handler interface.
+func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ c := engine.pool.Get().(*Context)
+ c.writermem.reset(w)
+ c.Request = req
+ c.reset()
+
+ engine.handleHTTPRequest(c)
+
+ engine.pool.Put(c)
+}
+
+// HandleContext re-enters a context that has been rewritten.
+// This can be done by setting c.Request.URL.Path to your new target.
+// Disclaimer: you can loop yourself to death with this; use wisely.
+func (engine *Engine) HandleContext(c *Context) {
+ c.reset()
+ engine.handleHTTPRequest(c)
+ engine.pool.Put(c)
+}
+
+func (engine *Engine) handleHTTPRequest(context *Context) {
+ httpMethod := context.Request.Method
+ var path string
+ var unescape bool
+ if engine.UseRawPath && len(context.Request.URL.RawPath) > 0 {
+ path = context.Request.URL.RawPath
+ unescape = engine.UnescapePathValues
+ } else {
+ path = context.Request.URL.Path
+ unescape = false
+ }
+
+ // Find root of the tree for the given HTTP method
+ t := engine.trees
+ for i, tl := 0, len(t); i < tl; i++ {
+ if t[i].method == httpMethod {
+ root := t[i].root
+ // Find route in tree
+ handlers, params, tsr := root.getValue(path, context.Params, unescape)
+ if handlers != nil {
+ context.handlers = handlers
+ context.Params = params
+ context.Next()
+ context.writermem.WriteHeaderNow()
+ return
+ }
+ if httpMethod != "CONNECT" && path != "/" {
+ if tsr && engine.RedirectTrailingSlash {
+ redirectTrailingSlash(context)
+ return
+ }
+ if engine.RedirectFixedPath && redirectFixedPath(context, root, engine.RedirectFixedPath) {
+ return
+ }
+ }
+ break
+ }
+ }
+
+ // TODO: unit test
+ if engine.HandleMethodNotAllowed {
+ for _, tree := range engine.trees {
+ if tree.method != httpMethod {
+ if handlers, _, _ := tree.root.getValue(path, nil, unescape); handlers != nil {
+ context.handlers = engine.allNoMethod
+ serveError(context, 405, default405Body)
+ return
+ }
+ }
+ }
+ }
+ context.handlers = engine.allNoRoute
+ serveError(context, 404, default404Body)
+}
+
+var mimePlain = []string{MIMEPlain}
+
+func serveError(c *Context, code int, defaultMessage []byte) {
+ c.writermem.status = code
+ c.Next()
+ if !c.writermem.Written() {
+ if c.writermem.Status() == code {
+ c.writermem.Header()["Content-Type"] = mimePlain
+ c.Writer.Write(defaultMessage)
+ } else {
+ c.writermem.WriteHeaderNow()
+ }
+ }
+}
+
+func redirectTrailingSlash(c *Context) {
+ req := c.Request
+ path := req.URL.Path
+ code := 301 // Permanent redirect, request with GET method
+ if req.Method != "GET" {
+ code = 307
+ }
+
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ req.URL.Path = path[:len(path)-1]
+ } else {
+ req.URL.Path = path + "/"
+ }
+ debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String())
+ http.Redirect(c.Writer, req, req.URL.String(), code)
+ c.writermem.WriteHeaderNow()
+}
+
+func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool {
+ req := c.Request
+ path := req.URL.Path
+
+ fixedPath, found := root.findCaseInsensitivePath(
+ cleanPath(path),
+ trailingSlash,
+ )
+ if found {
+ code := 301 // Permanent redirect, request with GET method
+ if req.Method != "GET" {
+ code = 307
+ }
+ req.URL.Path = string(fixedPath)
+ debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String())
+ http.Redirect(c.Writer, req, req.URL.String(), code)
+ c.writermem.WriteHeaderNow()
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/gin-gonic/gin/logger.go b/vendor/github.com/gin-gonic/gin/logger.go
new file mode 100644
index 0000000..dc6f141
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/logger.go
@@ -0,0 +1,142 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/mattn/go-isatty"
+)
+
+var (
+ green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
+ white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
+ yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
+ red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
+ blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
+ magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
+ cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
+ reset = string([]byte{27, 91, 48, 109})
+ disableColor = false
+)
+
+func DisableConsoleColor() {
+ disableColor = true
+}
+
+func ErrorLogger() HandlerFunc {
+ return ErrorLoggerT(ErrorTypeAny)
+}
+
+func ErrorLoggerT(typ ErrorType) HandlerFunc {
+ return func(c *Context) {
+ c.Next()
+ errors := c.Errors.ByType(typ)
+ if len(errors) > 0 {
+ c.JSON(-1, errors)
+ }
+ }
+}
+
+// Logger instantiates a Logger middleware that writes the logs to gin.DefaultWriter.
+// By default, gin.DefaultWriter = os.Stdout.
+func Logger() HandlerFunc {
+ return LoggerWithWriter(DefaultWriter)
+}
+
+// LoggerWithWriter instantiates a Logger middleware with the specified writer.
+// Example: os.Stdout, a file opened in write mode, a socket...
+func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
+ isTerm := true
+
+ if w, ok := out.(*os.File); !ok ||
+ (os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd()))) ||
+ disableColor {
+ isTerm = false
+ }
+
+ var skip map[string]struct{}
+
+ if length := len(notlogged); length > 0 {
+ skip = make(map[string]struct{}, length)
+
+ for _, path := range notlogged {
+ skip[path] = struct{}{}
+ }
+ }
+
+ return func(c *Context) {
+ // Start timer
+ start := time.Now()
+ path := c.Request.URL.Path
+
+ // Process request
+ c.Next()
+
+ // Log only when path is not being skipped
+ if _, ok := skip[path]; !ok {
+ // Stop timer
+ end := time.Now()
+ latency := end.Sub(start)
+
+ clientIP := c.ClientIP()
+ method := c.Request.Method
+ statusCode := c.Writer.Status()
+ var statusColor, methodColor string
+ if isTerm {
+ statusColor = colorForStatus(statusCode)
+ methodColor = colorForMethod(method)
+ }
+ comment := c.Errors.ByType(ErrorTypePrivate).String()
+
+ fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %15s |%s %s %-7s %s\n%s",
+ end.Format("2006/01/02 - 15:04:05"),
+ statusColor, statusCode, reset,
+ latency,
+ clientIP,
+ methodColor, method, reset,
+ path,
+ comment,
+ )
+ }
+ }
+}
+
+func colorForStatus(code int) string {
+ switch {
+ case code >= 200 && code < 300:
+ return green
+ case code >= 300 && code < 400:
+ return white
+ case code >= 400 && code < 500:
+ return yellow
+ default:
+ return red
+ }
+}
+
+func colorForMethod(method string) string {
+ switch method {
+ case "GET":
+ return blue
+ case "POST":
+ return cyan
+ case "PUT":
+ return yellow
+ case "DELETE":
+ return red
+ case "PATCH":
+ return green
+ case "HEAD":
+ return magenta
+ case "OPTIONS":
+ return white
+ default:
+ return reset
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/logo.jpg b/vendor/github.com/gin-gonic/gin/logo.jpg
new file mode 100644
index 0000000..bb51852
Binary files /dev/null and b/vendor/github.com/gin-gonic/gin/logo.jpg differ
diff --git a/vendor/github.com/gin-gonic/gin/mode.go b/vendor/github.com/gin-gonic/gin/mode.go
new file mode 100644
index 0000000..e24dbdc
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/mode.go
@@ -0,0 +1,69 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "io"
+ "os"
+
+ "github.com/gin-gonic/gin/binding"
+)
+
+const ENV_GIN_MODE = "GIN_MODE"
+
+const (
+ DebugMode string = "debug"
+ ReleaseMode string = "release"
+ TestMode string = "test"
+)
+const (
+ debugCode = iota
+ releaseCode
+ testCode
+)
+
+// DefaultWriter is the default io.Writer used by Gin for debug output and
+// middleware output like Logger() or Recovery().
+// Note that both Logger and Recovery provide custom ways to configure their
+// output io.Writer.
+// To support coloring in Windows use:
+// import "github.com/mattn/go-colorable"
+// gin.DefaultWriter = colorable.NewColorableStdout()
+var DefaultWriter io.Writer = os.Stdout
+var DefaultErrorWriter io.Writer = os.Stderr
+
+var ginMode = debugCode
+var modeName = DebugMode
+
+func init() {
+ mode := os.Getenv(ENV_GIN_MODE)
+ if len(mode) == 0 {
+ SetMode(DebugMode)
+ } else {
+ SetMode(mode)
+ }
+}
+
+func SetMode(value string) {
+ switch value {
+ case DebugMode:
+ ginMode = debugCode
+ case ReleaseMode:
+ ginMode = releaseCode
+ case TestMode:
+ ginMode = testCode
+ default:
+ panic("gin mode unknown: " + value)
+ }
+ modeName = value
+}
+
+func DisableBindValidation() {
+ binding.Validator = nil
+}
+
+func Mode() string {
+ return modeName
+}
diff --git a/vendor/github.com/gin-gonic/gin/path.go b/vendor/github.com/gin-gonic/gin/path.go
new file mode 100644
index 0000000..d7e7458
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/path.go
@@ -0,0 +1,123 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Based on the path package, Copyright 2009 The Go Authors.
+// Use of this source code is governed by a BSD-style license that can be found
+// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE.
+
+package gin
+
+// cleanPath is the URL version of path.Clean; it returns a canonical URL path
+// for p, eliminating . and .. elements.
+//
+// The following rules are applied iteratively until no further processing can
+// be done:
+// 1. Replace multiple slashes with a single slash.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path.
+//
+// If the result of this process is an empty string, "/" is returned
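+//
+// Illustrative examples (not from the upstream source):
+//	cleanPath("")            == "/"
+//	cleanPath("a/b/./c")     == "/a/b/c"
+//	cleanPath("/a//b/../c/") == "/a/c/"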
+func cleanPath(p string) string {
+ // Turn empty string into "/"
+ if p == "" {
+ return "/"
+ }
+
+ n := len(p)
+ var buf []byte
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+
+ // path must start with '/'
+ r := 1
+ w := 1
+
+ if p[0] != '/' {
+ r = 0
+ buf = make([]byte, n+1)
+ buf[0] = '/'
+ }
+
+ trailing := n > 2 && p[n-1] == '/'
+
+ // A bit more clunky without a 'lazybuf' like the path package, but the loop
+ // gets completely inlined (bufApp). So in contrast to the path package this
+ // loop has no expensive function calls (except 1x make)
+
+ for r < n {
+ switch {
+ case p[r] == '/':
+ // empty path element, trailing slash is added after the end
+ r++
+
+ case p[r] == '.' && r+1 == n:
+ trailing = true
+ r++
+
+ case p[r] == '.' && p[r+1] == '/':
+ // . element
+ r++
+
+ case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
+ // .. element: remove to last /
+ r += 2
+
+ if w > 1 {
+ // can backtrack
+ w--
+
+ if buf == nil {
+ for w > 1 && p[w] != '/' {
+ w--
+ }
+ } else {
+ for w > 1 && buf[w] != '/' {
+ w--
+ }
+ }
+ }
+
+ default:
+ // real path element.
+ // add slash if needed
+ if w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ // copy element
+ for r < n && p[r] != '/' {
+ bufApp(&buf, p, w, p[r])
+ w++
+ r++
+ }
+ }
+ }
+
+ // re-append trailing slash
+ if trailing && w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ if buf == nil {
+ return p[:w]
+ }
+ return string(buf[:w])
+}
+
+// internal helper to lazily create a buffer if necessary
+func bufApp(buf *[]byte, s string, w int, c byte) {
+ if *buf == nil {
+ if s[w] == c {
+ return
+ }
+
+ *buf = make([]byte, len(s))
+ copy(*buf, s[:w])
+ }
+ (*buf)[w] = c
+}
diff --git a/vendor/github.com/gin-gonic/gin/recovery.go b/vendor/github.com/gin-gonic/gin/recovery.go
new file mode 100644
index 0000000..c502f35
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/recovery.go
@@ -0,0 +1,108 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http/httputil"
+ "runtime"
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+ slash = []byte("/")
+)
+
+// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
+func Recovery() HandlerFunc {
+ return RecoveryWithWriter(DefaultErrorWriter)
+}
+
+func RecoveryWithWriter(out io.Writer) HandlerFunc {
+ var logger *log.Logger
+ if out != nil {
+ logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags)
+ }
+ return func(c *Context) {
+ defer func() {
+ if err := recover(); err != nil {
+ if logger != nil {
+ stack := stack(3)
+ httprequest, _ := httputil.DumpRequest(c.Request, false)
+ logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset)
+ }
+ c.AbortWithStatus(500)
+ }
+ }()
+ c.Next()
+ }
+}
+
+// stack returns a nicely formatted stack frame, skipping skip frames.
+func stack(skip int) []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := skip; ; i++ { // Skip the expected number of frames
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFile = file
+ }
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.TrimSpace(lines[n])
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+	// Also the package path might contain dots (e.g. code.google.com/...),
+	// so first eliminate the path prefix
+ if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+ name = name[lastslash+1:]
+ }
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/data.go b/vendor/github.com/gin-gonic/gin/render/data.go
new file mode 100644
index 0000000..c296042
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/data.go
@@ -0,0 +1,23 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import "net/http"
+
+type Data struct {
+ ContentType string
+ Data []byte
+}
+
+// Render (Data) writes data with custom ContentType
+func (r Data) Render(w http.ResponseWriter) (err error) {
+ r.WriteContentType(w)
+ _, err = w.Write(r.Data)
+ return
+}
+
+func (r Data) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, []string{r.ContentType})
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/html.go b/vendor/github.com/gin-gonic/gin/render/html.go
new file mode 100644
index 0000000..cf91219
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/html.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "html/template"
+ "net/http"
+)
+
+type (
+ Delims struct {
+ Left string
+ Right string
+ }
+
+ HTMLRender interface {
+ Instance(string, interface{}) Render
+ }
+
+ HTMLProduction struct {
+ Template *template.Template
+ Delims Delims
+ }
+
+ HTMLDebug struct {
+ Files []string
+ Glob string
+ Delims Delims
+ FuncMap template.FuncMap
+ }
+
+ HTML struct {
+ Template *template.Template
+ Name string
+ Data interface{}
+ }
+)
+
+var htmlContentType = []string{"text/html; charset=utf-8"}
+
+func (r HTMLProduction) Instance(name string, data interface{}) Render {
+ return HTML{
+ Template: r.Template,
+ Name: name,
+ Data: data,
+ }
+}
+
+func (r HTMLDebug) Instance(name string, data interface{}) Render {
+ return HTML{
+ Template: r.loadTemplate(),
+ Name: name,
+ Data: data,
+ }
+}
+func (r HTMLDebug) loadTemplate() *template.Template {
+ if r.FuncMap == nil {
+ r.FuncMap = template.FuncMap{}
+ }
+ if len(r.Files) > 0 {
+ return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseFiles(r.Files...))
+ }
+ if len(r.Glob) > 0 {
+ return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseGlob(r.Glob))
+ }
+ panic("the HTML debug render was created without files or glob pattern")
+}
+
+func (r HTML) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+
+ if len(r.Name) == 0 {
+ return r.Template.Execute(w, r.Data)
+ }
+ return r.Template.ExecuteTemplate(w, r.Name, r.Data)
+}
+
+func (r HTML) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, htmlContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/json.go b/vendor/github.com/gin-gonic/gin/render/json.go
new file mode 100644
index 0000000..3ee8b13
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/json.go
@@ -0,0 +1,57 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+type (
+ JSON struct {
+ Data interface{}
+ }
+
+ IndentedJSON struct {
+ Data interface{}
+ }
+)
+
+var jsonContentType = []string{"application/json; charset=utf-8"}
+
+func (r JSON) Render(w http.ResponseWriter) (err error) {
+ if err = WriteJSON(w, r.Data); err != nil {
+ panic(err)
+ }
+ return
+}
+
+func (r JSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonContentType)
+}
+
+func WriteJSON(w http.ResponseWriter, obj interface{}) error {
+ writeContentType(w, jsonContentType)
+ jsonBytes, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+ w.Write(jsonBytes)
+ return nil
+}
+
+func (r IndentedJSON) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ jsonBytes, err := json.MarshalIndent(r.Data, "", " ")
+ if err != nil {
+ return err
+ }
+ w.Write(jsonBytes)
+ return nil
+}
+
+func (r IndentedJSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/msgpack.go b/vendor/github.com/gin-gonic/gin/render/msgpack.go
new file mode 100644
index 0000000..e6c13e5
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/msgpack.go
@@ -0,0 +1,31 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "net/http"
+
+ "github.com/ugorji/go/codec"
+)
+
+type MsgPack struct {
+ Data interface{}
+}
+
+var msgpackContentType = []string{"application/msgpack; charset=utf-8"}
+
+func (r MsgPack) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, msgpackContentType)
+}
+
+func (r MsgPack) Render(w http.ResponseWriter) error {
+ return WriteMsgPack(w, r.Data)
+}
+
+func WriteMsgPack(w http.ResponseWriter, obj interface{}) error {
+ writeContentType(w, msgpackContentType)
+ var h codec.Handle = new(codec.MsgpackHandle)
+ return codec.NewEncoder(w, h).Encode(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/redirect.go b/vendor/github.com/gin-gonic/gin/render/redirect.go
new file mode 100644
index 0000000..f874a35
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/redirect.go
@@ -0,0 +1,26 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "fmt"
+ "net/http"
+)
+
+type Redirect struct {
+ Code int
+ Request *http.Request
+ Location string
+}
+
+func (r Redirect) Render(w http.ResponseWriter) error {
+ if (r.Code < 300 || r.Code > 308) && r.Code != 201 {
+ panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code))
+ }
+ http.Redirect(w, r.Request, r.Location, r.Code)
+ return nil
+}
+
+func (r Redirect) WriteContentType(http.ResponseWriter) {}
diff --git a/vendor/github.com/gin-gonic/gin/render/render.go b/vendor/github.com/gin-gonic/gin/render/render.go
new file mode 100644
index 0000000..4629142
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/render.go
@@ -0,0 +1,34 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import "net/http"
+
+type Render interface {
+ Render(http.ResponseWriter) error
+ WriteContentType(w http.ResponseWriter)
+}
+
+var (
+ _ Render = JSON{}
+ _ Render = IndentedJSON{}
+ _ Render = XML{}
+ _ Render = String{}
+ _ Render = Redirect{}
+ _ Render = Data{}
+ _ Render = HTML{}
+ _ HTMLRender = HTMLDebug{}
+ _ HTMLRender = HTMLProduction{}
+ _ Render = YAML{}
+	_ Render     = MsgPack{}
+)
+
+func writeContentType(w http.ResponseWriter, value []string) {
+ header := w.Header()
+ if val := header["Content-Type"]; len(val) == 0 {
+ header["Content-Type"] = value
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/text.go b/vendor/github.com/gin-gonic/gin/render/text.go
new file mode 100644
index 0000000..74cd26b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/text.go
@@ -0,0 +1,36 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+)
+
+type String struct {
+ Format string
+ Data []interface{}
+}
+
+var plainContentType = []string{"text/plain; charset=utf-8"}
+
+func (r String) Render(w http.ResponseWriter) error {
+ WriteString(w, r.Format, r.Data)
+ return nil
+}
+
+func (r String) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, plainContentType)
+}
+
+func WriteString(w http.ResponseWriter, format string, data []interface{}) {
+ writeContentType(w, plainContentType)
+ if len(data) > 0 {
+ fmt.Fprintf(w, format, data...)
+ } else {
+ io.WriteString(w, format)
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/xml.go b/vendor/github.com/gin-gonic/gin/render/xml.go
new file mode 100644
index 0000000..cff1ac3
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/xml.go
@@ -0,0 +1,25 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
+type XML struct {
+ Data interface{}
+}
+
+var xmlContentType = []string{"application/xml; charset=utf-8"}
+
+func (r XML) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ return xml.NewEncoder(w).Encode(r.Data)
+}
+
+func (r XML) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, xmlContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/yaml.go b/vendor/github.com/gin-gonic/gin/render/yaml.go
new file mode 100644
index 0000000..25d0ebd
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/yaml.go
@@ -0,0 +1,33 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "net/http"
+
+ "gopkg.in/yaml.v2"
+)
+
+type YAML struct {
+ Data interface{}
+}
+
+var yamlContentType = []string{"application/x-yaml; charset=utf-8"}
+
+func (r YAML) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+
+ bytes, err := yaml.Marshal(r.Data)
+ if err != nil {
+ return err
+ }
+
+ w.Write(bytes)
+ return nil
+}
+
+func (r YAML) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, yamlContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/response_writer.go b/vendor/github.com/gin-gonic/gin/response_writer.go
new file mode 100644
index 0000000..fcbe230
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/response_writer.go
@@ -0,0 +1,116 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ noWritten = -1
+ defaultStatus = 200
+)
+
+type (
+ ResponseWriter interface {
+ http.ResponseWriter
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+
+ // Returns the HTTP response status code of the current request.
+ Status() int
+
+ // Returns the number of bytes already written into the response http body.
+ // See Written()
+ Size() int
+
+ // Writes the string into the response body.
+ WriteString(string) (int, error)
+
+ // Returns true if the response body was already written.
+ Written() bool
+
+ // Forces to write the http header (status code + headers).
+ WriteHeaderNow()
+ }
+
+ responseWriter struct {
+ http.ResponseWriter
+ size int
+ status int
+ }
+)
+
+var _ ResponseWriter = &responseWriter{}
+
+func (w *responseWriter) reset(writer http.ResponseWriter) {
+ w.ResponseWriter = writer
+ w.size = noWritten
+ w.status = defaultStatus
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ if code > 0 && w.status != code {
+ if w.Written() {
+ debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code)
+ }
+ w.status = code
+ }
+}
+
+func (w *responseWriter) WriteHeaderNow() {
+ if !w.Written() {
+ w.size = 0
+ w.ResponseWriter.WriteHeader(w.status)
+ }
+}
+
+func (w *responseWriter) Write(data []byte) (n int, err error) {
+ w.WriteHeaderNow()
+ n, err = w.ResponseWriter.Write(data)
+ w.size += n
+ return
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ w.WriteHeaderNow()
+ n, err = io.WriteString(w.ResponseWriter, s)
+ w.size += n
+ return
+}
+
+func (w *responseWriter) Status() int {
+ return w.status
+}
+
+func (w *responseWriter) Size() int {
+ return w.size
+}
+
+func (w *responseWriter) Written() bool {
+ return w.size != noWritten
+}
+
+// Implements the http.Hijacker interface
+func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if w.size < 0 {
+ w.size = 0
+ }
+ return w.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+// CloseNotify implements the http.CloseNotifier interface.
+func (w *responseWriter) CloseNotify() <-chan bool {
+ return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Flush implements the http.Flusher interface.
+func (w *responseWriter) Flush() {
+ w.ResponseWriter.(http.Flusher).Flush()
+}
diff --git a/vendor/github.com/gin-gonic/gin/routergroup.go b/vendor/github.com/gin-gonic/gin/routergroup.go
new file mode 100644
index 0000000..f22729b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/routergroup.go
@@ -0,0 +1,215 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "net/http"
+ "path"
+ "regexp"
+ "strings"
+)
+
+type (
+ IRouter interface {
+ IRoutes
+ Group(string, ...HandlerFunc) *RouterGroup
+ }
+
+ IRoutes interface {
+ Use(...HandlerFunc) IRoutes
+
+ Handle(string, string, ...HandlerFunc) IRoutes
+ Any(string, ...HandlerFunc) IRoutes
+ GET(string, ...HandlerFunc) IRoutes
+ POST(string, ...HandlerFunc) IRoutes
+ DELETE(string, ...HandlerFunc) IRoutes
+ PATCH(string, ...HandlerFunc) IRoutes
+ PUT(string, ...HandlerFunc) IRoutes
+ OPTIONS(string, ...HandlerFunc) IRoutes
+ HEAD(string, ...HandlerFunc) IRoutes
+
+ StaticFile(string, string) IRoutes
+ Static(string, string) IRoutes
+ StaticFS(string, http.FileSystem) IRoutes
+ }
+
+	// RouterGroup is used internally to configure the router. A RouterGroup is associated with a prefix
+	// and an array of handlers (middleware).
+ RouterGroup struct {
+ Handlers HandlersChain
+ basePath string
+ engine *Engine
+ root bool
+ }
+)
+
+var _ IRouter = &RouterGroup{}
+
+// Use adds middleware to the group; see the example code on GitHub.
+func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes {
+ group.Handlers = append(group.Handlers, middleware...)
+ return group.returnObj()
+}
+
+// Group creates a new router group. You should add all the routes that have common middlewares or the same path prefix.
+// For example, all the routes that use a common middleware for authorization could be grouped.
+func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup {
+ return &RouterGroup{
+ Handlers: group.combineHandlers(handlers),
+ basePath: group.calculateAbsolutePath(relativePath),
+ engine: group.engine,
+ }
+}
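+
+// Illustrative usage (not from the upstream source; authRequired and listUsers are
+// hypothetical handlers): routes sharing a prefix and middleware can be grouped:
+//
+//	v1 := router.Group("/v1", authRequired)
+//	v1.GET("/users", listUsers) // registers GET /v1/users behind authRequired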
+
+func (group *RouterGroup) BasePath() string {
+ return group.basePath
+}
+
+func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes {
+ absolutePath := group.calculateAbsolutePath(relativePath)
+ handlers = group.combineHandlers(handlers)
+ group.engine.addRoute(httpMethod, absolutePath, handlers)
+ return group.returnObj()
+}
+
+// Handle registers a new request handler and middleware with the given path and method.
+// The last handler should be the real handler, the others should be middleware that can and should be shared among different routes.
+// See the example code on GitHub.
+//
+// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
+// functions can be used.
+//
+// This function is intended for bulk loading and to allow the usage of less
+// frequently used, non-standardized or custom methods (e.g. for internal
+// communication with a proxy).
+func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes {
+ if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil {
+ panic("http method " + httpMethod + " is not valid")
+ }
+ return group.handle(httpMethod, relativePath, handlers)
+}
+
+// POST is a shortcut for router.Handle("POST", path, handle)
+func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("POST", relativePath, handlers)
+}
+
+// GET is a shortcut for router.Handle("GET", path, handle)
+func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("GET", relativePath, handlers)
+}
+
+// DELETE is a shortcut for router.Handle("DELETE", path, handle)
+func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("DELETE", relativePath, handlers)
+}
+
+// PATCH is a shortcut for router.Handle("PATCH", path, handle)
+func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("PATCH", relativePath, handlers)
+}
+
+// PUT is a shortcut for router.Handle("PUT", path, handle)
+func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("PUT", relativePath, handlers)
+}
+
+// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle)
+func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("OPTIONS", relativePath, handlers)
+}
+
+// HEAD is a shortcut for router.Handle("HEAD", path, handle)
+func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("HEAD", relativePath, handlers)
+}
+
+// Any registers a route that matches all the HTTP methods.
+// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE
+func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes {
+ group.handle("GET", relativePath, handlers)
+ group.handle("POST", relativePath, handlers)
+ group.handle("PUT", relativePath, handlers)
+ group.handle("PATCH", relativePath, handlers)
+ group.handle("HEAD", relativePath, handlers)
+ group.handle("OPTIONS", relativePath, handlers)
+ group.handle("DELETE", relativePath, handlers)
+ group.handle("CONNECT", relativePath, handlers)
+ group.handle("TRACE", relativePath, handlers)
+ return group.returnObj()
+}
+
+// StaticFile registers a single route in order to serve a single file from the local filesystem.
+// router.StaticFile("favicon.ico", "./resources/favicon.ico")
+func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes {
+ if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
+ panic("URL parameters can not be used when serving a static file")
+ }
+ handler := func(c *Context) {
+ c.File(filepath)
+ }
+ group.GET(relativePath, handler)
+ group.HEAD(relativePath, handler)
+ return group.returnObj()
+}
+
+// Static serves files from the given file system root.
+// Internally an http.FileServer is used, therefore http.NotFound is used instead
+// of the Router's NotFound handler.
+// To use the operating system's file system implementation, use:
+//     router.Static("/static", "/var/www")
+func (group *RouterGroup) Static(relativePath, root string) IRoutes {
+ return group.StaticFS(relativePath, Dir(root, false))
+}
+
+// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead.
+// Gin uses `gin.Dir()` by default.
+func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes {
+ if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
+ panic("URL parameters can not be used when serving a static folder")
+ }
+ handler := group.createStaticHandler(relativePath, fs)
+ urlPattern := path.Join(relativePath, "/*filepath")
+
+ // Register GET and HEAD handlers
+ group.GET(urlPattern, handler)
+ group.HEAD(urlPattern, handler)
+ return group.returnObj()
+}
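+
+// Illustrative usage (not from the upstream source; the paths are placeholders):
+//
+//	router.Static("/assets", "./assets")                // no directory listing
+//	router.StaticFS("/more", gin.Dir("./public", true)) // custom FS, listing enabled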
+
+func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc {
+ absolutePath := group.calculateAbsolutePath(relativePath)
+ fileServer := http.StripPrefix(absolutePath, http.FileServer(fs))
+ _, nolisting := fs.(*onlyfilesFS)
+ return func(c *Context) {
+ if nolisting {
+ c.Writer.WriteHeader(404)
+ }
+ fileServer.ServeHTTP(c.Writer, c.Request)
+ }
+}
+
+func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain {
+ finalSize := len(group.Handlers) + len(handlers)
+ if finalSize >= int(abortIndex) {
+ panic("too many handlers")
+ }
+ mergedHandlers := make(HandlersChain, finalSize)
+ copy(mergedHandlers, group.Handlers)
+ copy(mergedHandlers[len(group.Handlers):], handlers)
+ return mergedHandlers
+}
+
+func (group *RouterGroup) calculateAbsolutePath(relativePath string) string {
+ return joinPaths(group.basePath, relativePath)
+}
+
+func (group *RouterGroup) returnObj() IRoutes {
+ if group.root {
+ return group.engine
+ }
+ return group
+}
diff --git a/vendor/github.com/gin-gonic/gin/test_helpers.go b/vendor/github.com/gin-gonic/gin/test_helpers.go
new file mode 100644
index 0000000..e7dd55f
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/test_helpers.go
@@ -0,0 +1,17 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "net/http"
+)
+
+func CreateTestContext(w http.ResponseWriter) (c *Context, r *Engine) {
+ r = New()
+ c = r.allocateContext()
+ c.reset()
+ c.writermem.reset(w)
+ return
+}
diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go
new file mode 100644
index 0000000..a39f43b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/tree.go
@@ -0,0 +1,622 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE
+
+package gin
+
+import (
+ "net/url"
+ "strings"
+ "unicode"
+)
+
+// Param is a single URL parameter, consisting of a key and a value.
+type Param struct {
+ Key string
+ Value string
+}
+
+// Params is a Param-slice, as returned by the router.
+// The slice is ordered: the first URL parameter is also the first slice value.
+// It is therefore safe to read values by the index.
+type Params []Param
+
+// Get returns the value of the first Param whose key matches the given name.
+// If no matching Param is found, an empty string and false are returned.
+func (ps Params) Get(name string) (string, bool) {
+ for _, entry := range ps {
+ if entry.Key == name {
+ return entry.Value, true
+ }
+ }
+ return "", false
+}
+
+// ByName returns the value of the first Param whose key matches the given name.
+// If no matching Param is found, an empty string is returned.
+func (ps Params) ByName(name string) (va string) {
+ va, _ = ps.Get(name)
+ return
+}
+
+type methodTree struct {
+ method string
+ root *node
+}
+
+type methodTrees []methodTree
+
+func (trees methodTrees) get(method string) *node {
+ for _, tree := range trees {
+ if tree.method == method {
+ return tree.root
+ }
+ }
+ return nil
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+func countParams(path string) uint8 {
+ var n uint
+ for i := 0; i < len(path); i++ {
+ if path[i] != ':' && path[i] != '*' {
+ continue
+ }
+ n++
+ }
+ if n >= 255 {
+ return 255
+ }
+ return uint8(n)
+}
+
+type nodeType uint8
+
+const (
+ static nodeType = iota // default
+ root
+ param
+ catchAll
+)
+
+type node struct {
+ path string
+ wildChild bool
+ nType nodeType
+ maxParams uint8
+ indices string
+ children []*node
+ handlers HandlersChain
+ priority uint32
+}
+
+// increments priority of the given child and reorders if necessary
+func (n *node) incrementChildPrio(pos int) int {
+ n.children[pos].priority++
+ prio := n.children[pos].priority
+
+ // adjust position (move to front)
+ newPos := pos
+ for newPos > 0 && n.children[newPos-1].priority < prio {
+ // swap node positions
+ tmpN := n.children[newPos-1]
+ n.children[newPos-1] = n.children[newPos]
+ n.children[newPos] = tmpN
+
+ newPos--
+ }
+
+ // build new index char string
+ if newPos != pos {
+ n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
+ n.indices[pos:pos+1] + // the index char we move
+ n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
+ }
+
+ return newPos
+}
+
+// addRoute adds a node with the given handle to the path.
+// Not concurrency-safe!
+func (n *node) addRoute(path string, handlers HandlersChain) {
+ fullPath := path
+ n.priority++
+ numParams := countParams(path)
+
+ // non-empty tree
+ if len(n.path) > 0 || len(n.children) > 0 {
+ walk:
+ for {
+ // Update maxParams of the current node
+ if numParams > n.maxParams {
+ n.maxParams = numParams
+ }
+
+ // Find the longest common prefix.
+ // This also implies that the common prefix contains no ':' or '*'
+ // since the existing key can't contain those chars.
+ i := 0
+ max := min(len(path), len(n.path))
+ for i < max && path[i] == n.path[i] {
+ i++
+ }
+
+ // Split edge
+ if i < len(n.path) {
+ child := node{
+ path: n.path[i:],
+ wildChild: n.wildChild,
+ indices: n.indices,
+ children: n.children,
+ handlers: n.handlers,
+ priority: n.priority - 1,
+ }
+
+ // Update maxParams (max of all children)
+ for i := range child.children {
+ if child.children[i].maxParams > child.maxParams {
+ child.maxParams = child.children[i].maxParams
+ }
+ }
+
+ n.children = []*node{&child}
+ // []byte for proper unicode char conversion, see #65
+ n.indices = string([]byte{n.path[i]})
+ n.path = path[:i]
+ n.handlers = nil
+ n.wildChild = false
+ }
+
+ // Make new node a child of this node
+ if i < len(path) {
+ path = path[i:]
+
+ if n.wildChild {
+ n = n.children[0]
+ n.priority++
+
+ // Update maxParams of the child node
+ if numParams > n.maxParams {
+ n.maxParams = numParams
+ }
+ numParams--
+
+ // Check if the wildcard matches
+ if len(path) >= len(n.path) && n.path == path[:len(n.path)] {
+ // check for longer wildcard, e.g. :name and :names
+ if len(n.path) >= len(path) || path[len(n.path)] == '/' {
+ continue walk
+ }
+ }
+
+ panic("path segment '" + path +
+ "' conflicts with existing wildcard '" + n.path +
+ "' in path '" + fullPath + "'")
+ }
+
+ c := path[0]
+
+ // slash after param
+ if n.nType == param && c == '/' && len(n.children) == 1 {
+ n = n.children[0]
+ n.priority++
+ continue walk
+ }
+
+ // Check if a child with the next path byte exists
+ for i := 0; i < len(n.indices); i++ {
+ if c == n.indices[i] {
+ i = n.incrementChildPrio(i)
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Otherwise insert it
+ if c != ':' && c != '*' {
+ // []byte for proper unicode char conversion, see #65
+ n.indices += string([]byte{c})
+ child := &node{
+ maxParams: numParams,
+ }
+ n.children = append(n.children, child)
+ n.incrementChildPrio(len(n.indices) - 1)
+ n = child
+ }
+ n.insertChild(numParams, path, fullPath, handlers)
+ return
+
+ } else if i == len(path) { // Make node a (in-path) leaf
+ if n.handlers != nil {
+ panic("handlers are already registered for path '" + fullPath + "'")
+ }
+ n.handlers = handlers
+ }
+ return
+ }
+ } else { // Empty tree
+ n.insertChild(numParams, path, fullPath, handlers)
+ n.nType = root
+ }
+}
+
+func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) {
+ var offset int // already handled bytes of the path
+
+ // find prefix until first wildcard (beginning with ':' or '*')
+ for i, max := 0, len(path); numParams > 0; i++ {
+ c := path[i]
+ if c != ':' && c != '*' {
+ continue
+ }
+
+ // find wildcard end (either '/' or path end)
+ end := i + 1
+ for end < max && path[end] != '/' {
+ switch path[end] {
+ // the wildcard name must not contain ':' and '*'
+ case ':', '*':
+ panic("only one wildcard per path segment is allowed, has: '" +
+ path[i:] + "' in path '" + fullPath + "'")
+ default:
+ end++
+ }
+ }
+
+ // check if this node has existing children which would be
+ // unreachable if we insert the wildcard here
+ if len(n.children) > 0 {
+ panic("wildcard route '" + path[i:end] +
+ "' conflicts with existing children in path '" + fullPath + "'")
+ }
+
+ // check if the wildcard has a name
+ if end-i < 2 {
+ panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
+ }
+
+ if c == ':' { // param
+ // split path at the beginning of the wildcard
+ if i > 0 {
+ n.path = path[offset:i]
+ offset = i
+ }
+
+ child := &node{
+ nType: param,
+ maxParams: numParams,
+ }
+ n.children = []*node{child}
+ n.wildChild = true
+ n = child
+ n.priority++
+ numParams--
+
+ // if the path doesn't end with the wildcard, then there
+ // will be another non-wildcard subpath starting with '/'
+ if end < max {
+ n.path = path[offset:end]
+ offset = end
+
+ child := &node{
+ maxParams: numParams,
+ priority: 1,
+ }
+ n.children = []*node{child}
+ n = child
+ }
+
+ } else { // catchAll
+ if end != max || numParams > 1 {
+ panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
+ }
+
+ if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
+ panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
+ }
+
+ // currently fixed width 1 for '/'
+ i--
+ if path[i] != '/' {
+ panic("no / before catch-all in path '" + fullPath + "'")
+ }
+
+ n.path = path[offset:i]
+
+ // first node: catchAll node with empty path
+ child := &node{
+ wildChild: true,
+ nType: catchAll,
+ maxParams: 1,
+ }
+ n.children = []*node{child}
+ n.indices = string(path[i])
+ n = child
+ n.priority++
+
+ // second node: node holding the variable
+ child = &node{
+ path: path[i:],
+ nType: catchAll,
+ maxParams: 1,
+ handlers: handlers,
+ priority: 1,
+ }
+ n.children = []*node{child}
+
+ return
+ }
+ }
+
+ // insert remaining path part and handle to the leaf
+ n.path = path[offset:]
+ n.handlers = handlers
+}
+
+// getValue returns the handlers registered with the given path (key). The
+// values of wildcards are saved to the Params slice.
+// If no handle can be found, a TSR (trailing slash redirect) recommendation is
+// made if a handle exists with an extra (or without the) trailing slash for
+// the given path.
+func (n *node) getValue(path string, po Params, unescape bool) (handlers HandlersChain, p Params, tsr bool) {
+ p = po
+walk: // Outer loop for walking the tree
+ for {
+ if len(path) > len(n.path) {
+ if path[:len(n.path)] == n.path {
+ path = path[len(n.path):]
+ // If this node does not have a wildcard (param or catchAll)
+ // child, we can just look up the next child node and continue
+ // to walk down the tree
+ if !n.wildChild {
+ c := path[0]
+ for i := 0; i < len(n.indices); i++ {
+ if c == n.indices[i] {
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Nothing found.
+ // We can recommend to redirect to the same URL without a
+ // trailing slash if a leaf exists for that path.
+ tsr = (path == "/" && n.handlers != nil)
+ return
+ }
+
+ // handle wildcard child
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // find param end (either '/' or path end)
+ end := 0
+ for end < len(path) && path[end] != '/' {
+ end++
+ }
+
+ // save param value
+ if cap(p) < int(n.maxParams) {
+ p = make(Params, 0, n.maxParams)
+ }
+ i := len(p)
+ p = p[:i+1] // expand slice within preallocated capacity
+ p[i].Key = n.path[1:]
+ val := path[:end]
+ if unescape {
+ var err error
+ if p[i].Value, err = url.QueryUnescape(val); err != nil {
+ p[i].Value = val // fallback, in case of error
+ }
+ } else {
+ p[i].Value = val
+ }
+
+ // we need to go deeper!
+ if end < len(path) {
+ if len(n.children) > 0 {
+ path = path[end:]
+ n = n.children[0]
+ continue walk
+ }
+
+ // ... but we can't
+ tsr = (len(path) == end+1)
+ return
+ }
+
+ if handlers = n.handlers; handlers != nil {
+ return
+ }
+ if len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for TSR recommendation
+ n = n.children[0]
+ tsr = (n.path == "/" && n.handlers != nil)
+ }
+
+ return
+
+ case catchAll:
+ // save param value
+ if cap(p) < int(n.maxParams) {
+ p = make(Params, 0, n.maxParams)
+ }
+ i := len(p)
+ p = p[:i+1] // expand slice within preallocated capacity
+ p[i].Key = n.path[2:]
+ if unescape {
+ var err error
+ if p[i].Value, err = url.QueryUnescape(path); err != nil {
+ p[i].Value = path // fallback, in case of error
+ }
+ } else {
+ p[i].Value = path
+ }
+
+ handlers = n.handlers
+ return
+
+ default:
+ panic("invalid node type")
+ }
+ }
+ } else if path == n.path {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if handlers = n.handlers; handlers != nil {
+ return
+ }
+
+ if path == "/" && n.wildChild && n.nType != root {
+ tsr = true
+ return
+ }
+
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for trailing slash recommendation
+ for i := 0; i < len(n.indices); i++ {
+ if n.indices[i] == '/' {
+ n = n.children[i]
+ tsr = (len(n.path) == 1 && n.handlers != nil) ||
+ (n.nType == catchAll && n.children[0].handlers != nil)
+ return
+ }
+ }
+
+ return
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL with an
+ // extra trailing slash if a leaf exists for that path
+ tsr = (path == "/") ||
+ (len(n.path) == len(path)+1 && n.path[len(path)] == '/' &&
+ path == n.path[:len(n.path)-1] && n.handlers != nil)
+ return
+ }
+}
+
+// Makes a case-insensitive lookup of the given path and tries to find a handler.
+// It can optionally also fix trailing slashes.
+// It returns the case-corrected path and a bool indicating whether the lookup
+// was successful.
+func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
+ ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory
+
+ // Outer loop for walking the tree
+ for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) {
+ path = path[len(n.path):]
+ ciPath = append(ciPath, n.path...)
+
+ if len(path) > 0 {
+ // If this node does not have a wildcard (param or catchAll) child,
+ // we can just look up the next child node and continue to walk down
+ // the tree
+ if !n.wildChild {
+ r := unicode.ToLower(rune(path[0]))
+ for i, index := range n.indices {
+ // must use recursive approach since both index and
+ // ToLower(index) could exist. We must check both.
+ if r == unicode.ToLower(index) {
+ out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash)
+ if found {
+ return append(ciPath, out...), true
+ }
+ }
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL
+ // without a trailing slash if a leaf exists for that path
+ found = (fixTrailingSlash && path == "/" && n.handlers != nil)
+ return
+ }
+
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // find param end (either '/' or path end)
+ k := 0
+ for k < len(path) && path[k] != '/' {
+ k++
+ }
+
+ // add param value to case insensitive path
+ ciPath = append(ciPath, path[:k]...)
+
+ // we need to go deeper!
+ if k < len(path) {
+ if len(n.children) > 0 {
+ path = path[k:]
+ n = n.children[0]
+ continue
+ }
+
+ // ... but we can't
+ if fixTrailingSlash && len(path) == k+1 {
+ return ciPath, true
+ }
+ return
+ }
+
+ if n.handlers != nil {
+ return ciPath, true
+ } else if fixTrailingSlash && len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists
+ n = n.children[0]
+ if n.path == "/" && n.handlers != nil {
+ return append(ciPath, '/'), true
+ }
+ }
+ return
+
+ case catchAll:
+ return append(ciPath, path...), true
+
+ default:
+ panic("invalid node type")
+ }
+ } else {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if n.handlers != nil {
+ return ciPath, true
+ }
+
+ // No handle found.
+ // Try to fix the path by adding a trailing slash
+ if fixTrailingSlash {
+ for i := 0; i < len(n.indices); i++ {
+ if n.indices[i] == '/' {
+ n = n.children[i]
+ if (len(n.path) == 1 && n.handlers != nil) ||
+ (n.nType == catchAll && n.children[0].handlers != nil) {
+ return append(ciPath, '/'), true
+ }
+ return
+ }
+ }
+ }
+ return
+ }
+ }
+
+ // Nothing found.
+ // Try to fix the path by adding / removing a trailing slash
+ if fixTrailingSlash {
+ if path == "/" {
+ return ciPath, true
+ }
+ if len(path)+1 == len(n.path) && n.path[len(path)] == '/' &&
+ strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) &&
+ n.handlers != nil {
+ return append(ciPath, n.path...), true
+ }
+ }
+ return
+}
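
The radix tree above turns ":name" segments into param nodes and "*filepath" into a catchAll node; the captured values land in the Params slice and are read back through Context.Param. An illustrative sketch follows; the routes and parameter names are invented for the example.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.New()

	// ":name" becomes a param node; its captured value is fetched by key.
	r.GET("/users/:name", func(c *gin.Context) {
		c.String(http.StatusOK, "hello %s", c.Param("name"))
	})

	// "*filepath" becomes a catchAll node and captures the rest of the path,
	// including the leading slash.
	r.GET("/files/*filepath", func(c *gin.Context) {
		c.String(http.StatusOK, "requested %s", c.Param("filepath"))
	})

	r.Run(":8080")
}
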
diff --git a/vendor/github.com/gin-gonic/gin/utils.go b/vendor/github.com/gin-gonic/gin/utils.go
new file mode 100644
index 0000000..18064fb
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/utils.go
@@ -0,0 +1,154 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "encoding/xml"
+ "net/http"
+ "os"
+ "path"
+ "reflect"
+ "runtime"
+ "strings"
+)
+
+const BindKey = "_gin-gonic/gin/bindkey"
+
+func Bind(val interface{}) HandlerFunc {
+ value := reflect.ValueOf(val)
+ if value.Kind() == reflect.Ptr {
+ panic(`Bind struct can not be a pointer. Example:
+ Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{})
+`)
+ }
+ typ := value.Type()
+
+ return func(c *Context) {
+ obj := reflect.New(typ).Interface()
+ if c.Bind(obj) == nil {
+ c.Set(BindKey, obj)
+ }
+ }
+}
+
+func WrapF(f http.HandlerFunc) HandlerFunc {
+ return func(c *Context) {
+ f(c.Writer, c.Request)
+ }
+}
+
+func WrapH(h http.Handler) HandlerFunc {
+ return func(c *Context) {
+ h.ServeHTTP(c.Writer, c.Request)
+ }
+}
+
+type H map[string]interface{}
+
+// MarshalXML allows type H to be used with xml.Marshal
+func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ start.Name = xml.Name{
+ Space: "",
+ Local: "map",
+ }
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+ for key, value := range h {
+ elem := xml.StartElement{
+ Name: xml.Name{Space: "", Local: key},
+ Attr: []xml.Attr{},
+ }
+ if err := e.EncodeElement(value, elem); err != nil {
+ return err
+ }
+ }
+ if err := e.EncodeToken(xml.EndElement{Name: start.Name}); err != nil {
+ return err
+ }
+ return nil
+}
+
+func assert1(guard bool, text string) {
+ if !guard {
+ panic(text)
+ }
+}
+
+func filterFlags(content string) string {
+ for i, char := range content {
+ if char == ' ' || char == ';' {
+ return content[:i]
+ }
+ }
+ return content
+}
+
+func chooseData(custom, wildcard interface{}) interface{} {
+ if custom == nil {
+ if wildcard == nil {
+ panic("negotiation config is invalid")
+ }
+ return wildcard
+ }
+ return custom
+}
+
+func parseAccept(acceptHeader string) []string {
+ parts := strings.Split(acceptHeader, ",")
+ out := make([]string, 0, len(parts))
+ for _, part := range parts {
+ index := strings.IndexByte(part, ';')
+ if index >= 0 {
+ part = part[0:index]
+ }
+ part = strings.TrimSpace(part)
+ if len(part) > 0 {
+ out = append(out, part)
+ }
+ }
+ return out
+}
+
+func lastChar(str string) uint8 {
+ size := len(str)
+ if size == 0 {
+ panic("The length of the string can't be 0")
+ }
+ return str[size-1]
+}
+
+func nameOfFunction(f interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
+}
+
+func joinPaths(absolutePath, relativePath string) string {
+ if len(relativePath) == 0 {
+ return absolutePath
+ }
+
+ finalPath := path.Join(absolutePath, relativePath)
+ appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/'
+ if appendSlash {
+ return finalPath + "/"
+ }
+ return finalPath
+}
+
+func resolveAddress(addr []string) string {
+ switch len(addr) {
+ case 0:
+ if port := os.Getenv("PORT"); len(port) > 0 {
+ debugPrint("Environment variable PORT=\"%s\"", port)
+ return ":" + port
+ }
+ debugPrint("Environment variable PORT is undefined. Using port :8080 by default")
+ return ":8080"
+ case 1:
+ return addr[0]
+ default:
+ panic("too many parameters")
+ }
+}
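
utils.go supplies the glue between gin and plain net/http: WrapF and WrapH adapt standard handlers to gin's HandlerFunc, and H is the map shorthand consumed by the render helpers. A small sketch; the routes and the ./docs directory are made up for the example.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()

	// WrapF lifts a plain http.HandlerFunc into a gin HandlerFunc.
	r.GET("/legacy", gin.WrapF(func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("plain net/http handler"))
	}))

	// WrapH does the same for any http.Handler, e.g. a file server.
	r.GET("/docs/*any", gin.WrapH(http.StripPrefix("/docs/", http.FileServer(http.Dir("./docs")))))

	// H is just map[string]interface{}, convenient with the JSON renderer.
	r.GET("/status", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"ok": true})
	})

	r.Run(":8080")
}
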
diff --git a/vendor/github.com/gin-gonic/gin/wercker.yml b/vendor/github.com/gin-gonic/gin/wercker.yml
new file mode 100644
index 0000000..3ab8084
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/wercker.yml
@@ -0,0 +1 @@
+box: wercker/default
\ No newline at end of file
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method would conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
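
Clone and Merge work on any proto.Message via reflection. The sketch below uses a hand-written Person struct purely as a stand-in for a protoc-gen-go generated type, to illustrate the deep-copy and append semantics described above.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Person is a hand-written stand-in for a generated message; real code would
// use a type produced by protoc-gen-go.
type Person struct {
	Name             *string  `protobuf:"bytes,1,opt,name=name"`
	Emails           []string `protobuf:"bytes,2,rep,name=emails"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Person) Reset()         { *m = Person{} }
func (m *Person) String() string { return proto.CompactTextString(m) }
func (*Person) ProtoMessage()    {}

func main() {
	name := "alice"
	src := &Person{Name: &name, Emails: []string{"a@example.com"}}

	// Clone produces a deep copy; mutating the copy leaves src untouched.
	dup := proto.Clone(src).(*Person)
	dup.Emails[0] = "b@example.com"
	fmt.Println(src.Emails[0], dup.Emails[0]) // a@example.com b@example.com

	// Merge sets scalar fields that are set in src and appends repeated fields.
	dst := &Person{Emails: []string{"old@example.com"}}
	proto.Merge(dst, src)
	fmt.Println(*dst.Name, dst.Emails) // alice [old@example.com a@example.com]
}
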
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..d9aa3c4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,428 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check if we can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
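
The Buffer decoders can be driven directly on raw wire bytes. A short sketch: 0xAC 0x02 is the varint encoding of 300, and fixed32 values are read little-endian.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Varint: seven value bits per byte, least-significant group first, high
	// bit set on every byte except the last.
	v, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2

	// The same bytes read through a Buffer, followed by a little-endian
	// fixed32 value of 1 (the fixed32/sfixed32/float wire format).
	b := proto.NewBuffer([]byte{0xAC, 0x02, 0x01, 0x00, 0x00, 0x00})
	x, _ := b.DecodeVarint()
	f, _ := b.DecodeFixed32()
	fmt.Println(x, f) // 300 1
}
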
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to an interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to an interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
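
DiscardUnknown clears the bytes that Unmarshal preserved in XXX_unrecognized so they are not re-emitted by a later Marshal. The sketch below fakes that state on a hand-written stand-in message; a generated type would normally be used.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Note is a hand-written stand-in for a generated message whose
// XXX_unrecognized field would normally be filled in by Unmarshal.
type Note struct {
	Text             *string `protobuf:"bytes,1,opt,name=text"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Note) Reset()         { *m = Note{} }
func (m *Note) String() string { return proto.CompactTextString(m) }
func (*Note) ProtoMessage()    {}

func main() {
	text := "hi"
	n := &Note{
		Text:             &text,
		XXX_unrecognized: []byte{0x10, 0x01}, // pretend leftover bytes of an unknown field
	}

	// DiscardUnknown walks the message (recursively, for embedded messages)
	// and nils out the preserved unknown-field bytes.
	proto.DiscardUnknown(n)
	fmt.Println(len(n.XXX_unrecognized)) // 0
}
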
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..c27d35f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,221 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
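
Illustration only, not part of the vendored patch: a minimal sketch of driving the exported low-level encoders above directly. It assumes nothing beyond the proto API shown in this file; the byte values in the comments follow from the varint and zigzag definitions.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // 300 encodes as 0xac 0x02: low 7 bits first, high bit marks continuation.
        fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
        fmt.Println(proto.SizeVarint(300))           // 2

        // A Buffer accumulates wire-format bytes; EncodeZigzag64 maps -1 to 1,
        // and EncodeRawBytes writes a varint length prefix followed by the payload.
        buf := proto.NewBuffer(nil)
        v := int64(-1)
        _ = buf.EncodeZigzag64(uint64(v))    // 01
        _ = buf.EncodeRawBytes([]byte("hi")) // 02 68 69
        fmt.Printf("% x\n", buf.Bytes())     // 01 02 68 69
    }
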
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ return false
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
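
Illustration only, not part of the vendored patch: a small sketch of the equality rules above, reusing the generated Test message and the "./example.pb" import path from this package's documentation (both assumed here).

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // generated Test message from the package documentation
    )

    func main() {
        a := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
        b := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
        c := &pb.Test{Label: proto.String("x")}                        // Reps unset
        d := &pb.Test{Label: proto.String("x"), Type: proto.Int32(77)} // Type explicitly set

        fmt.Println(proto.Equal(a, b)) // true: corresponding fields are equal
        fmt.Println(proto.Equal(a, c)) // false: repeated field lengths differ
        fmt.Println(proto.Equal(c, d)) // false: set/unset mismatch, even though 77 is the default
    }
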
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..816a3b9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,543 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
+ }
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, err := extendable(base)
+ if err != nil {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, err := extendable(pb)
+ if err != nil {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok := extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate space to store the pointer/slice.
+ value := reflect.New(t).Elem()
+
+ var err error
+ for {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
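
Illustration only, not part of the vendored patch: a hedged sketch of the extension accessors defined above. pb.Base and pb.E_Note are hypothetical generated names (an extendable proto2 message and a string-typed extension descriptor), not part of this package.

    package main

    import (
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical: extendable message Base, extension descriptor E_Note (*string)
    )

    func main() {
        m := &pb.Base{}

        // SetExtension requires the value's type to match the descriptor's
        // ExtensionType exactly; here that is assumed to be *string.
        if err := proto.SetExtension(m, pb.E_Note, proto.String("hello")); err != nil {
            log.Fatal(err)
        }

        if proto.HasExtension(m, pb.E_Note) {
            v, err := proto.GetExtension(m, pb.E_Note)
            if err != nil {
                log.Fatal(err)
            }
            log.Println(*v.(*string)) // "hello"
        }

        proto.ClearExtension(m, pb.E_Note) // removes the extension again
    }
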
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..0e2191b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,921 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a slice of the fields,
+ // with its scalar fields set to their proto-declared non-zero default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{vs: vs}
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
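
Illustration only, not part of the vendored patch: a minimal sketch that ties the pieces of this file together (Buffer reuse, deterministic output, and SetDefaults), again borrowing the generated Test message from the package documentation above.

    package main

    import (
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // generated Test message from the package documentation
    )

    func main() {
        msg := &pb.Test{
            Label: proto.String("hello"),
            Reps:  []int64{1, 2, 3},
        }

        // A Buffer may be reused across calls to cut allocations;
        // SetDeterministic(true) sorts map entries by key during marshaling.
        buf := proto.NewBuffer(nil)
        buf.SetDeterministic(true)
        if err := buf.Marshal(msg); err != nil {
            log.Fatal(err)
        }

        out := &pb.Test{}
        if err := proto.Unmarshal(buf.Bytes(), out); err != nil {
            log.Fatal(err)
        }

        // Test.Type declares [default=77]; SetDefaults fills the unset pointer,
        // so out.Type is now non-nil and holds 77.
        proto.SetDefaults(out)
        log.Println(out.GetType(), out.Type != nil) // 77 true
    }
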
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..3b6ca41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements MarshalMessageSet above, with an option to turn deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ var u marshalInfo
+ siz := u.sizeMessageSet(exts)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, exts, deterministic)
+
+ case map[int32]Extension:
+ // This is an old-style extension map.
+ // Wrap it in a new-style XXX_InternalExtensions.
+ ie := XXX_InternalExtensions{
+ p: &struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }{
+ extensionMap: exts,
+ },
+ }
+
+ var u marshalInfo
+ siz := u.sizeMessageSet(&ie)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, &ie, deterministic)
+
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
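+
+// For illustration of the tag reconstruction above: for type_id 100 the tag
+// varint encodes 100<<3|WireBytes = 802, i.e. the bytes 0xa2 0x06, which are
+// then followed by the length varint and the message bytes. (The type ID is a
+// hypothetical example value.)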
+
+// MarshalMessageSetJSON encodes the extension map represented by exts in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ var mu sync.Locker
+ m, mu = exts.extensionsRead()
+ if m != nil {
+ // Keep the extensions map locked until we're done marshaling to prevent
+ // races between marshaling and unmarshaling the lazily-{en,de}coded
+ // values.
+ mu.Lock()
+ defer mu.Unlock()
+ }
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+
+ if i > 0 && b.Len() > 1 {
+ b.WriteByte(',')
+ }
+
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
+}
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
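+
+// For illustration (with a hypothetical enum declared as "type E int32"):
+// reflect can convert a *int32 to *E because the pointed-to types share the
+// same underlying type, so &v above may be converted before being stored,
+// whereas a **int32 cannot be converted to **E; that is why the conversion is
+// done at the single-pointer level.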
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+ return
+ }
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
+ }
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ }
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
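+
+// For illustration: the code above relies on an interface value being laid
+// out as two words, a type word followed by a data word, so index [1] of the
+// *[2]unsafe.Pointer view is the data word and ptrSize past the start is its
+// address. That layout is an assumption about the Go runtime, which is why
+// the reflect-based fallback in pointer_reflect.go exists.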
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid, however calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..f710ada
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for describing the properties of protocol buffer struct fields,
+ * used when encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
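+
+// For illustration: put(3, 7) grows fastTags to length 4 and stores 7 at
+// index 3, while put(100000, 7) lands in the slowTags map because the tag is
+// at or above tagMapFastLimit. (The values are hypothetical examples.)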
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ }
+ }
+}
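+
+// For illustration, Parse("bytes,49,opt,name=foo,def=hello!") (the example
+// tag from the comment above) leaves p with Wire="bytes", WireType=WireBytes,
+// Tag=49, Optional=true, OrigName="foo", HasDefault=true and Default="hello!".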
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ switch t1 := typ; t1.Kind() {
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+
+ case reflect.Slice:
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
+ }
+
+ case reflect.Map:
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
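+
+// For illustration, a caller holding a hypothetical generated *SomeMessage
+// would pass the struct type, e.g. GetProperties(reflect.TypeOf(msg).Elem()),
+// since passing the pointer type itself would panic.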
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ _, _, _, oots = om.XXX_OneofFuncs()
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or a nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..0f212b3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2681 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errreq error
+ // The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required && errreq == nil {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ errreq = &RequiredNotSetError{f.name}
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errreq
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse protobuf tag of the field.
+ // tag has format of "bytes,49,opt,name=foo,def=hello!"
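+ // For the example above, tags[0] is "bytes" (the wire encoding), tags[1]
+ // is "49" (the field number) and tags[2] is "opt" (the label).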
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
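+// For example, field number 49 with wire type WireBytes (2) yields
+// wiretag 49<<3|2 = 394.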
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
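+// For instance, an optional (pointer) uint32 field with encoding "fixed32"
+// resolves to sizeFixed32Ptr/appendFixed32Ptr, while a packed repeated one
+// resolves to sizeFixed32PackedSlice/appendFixed32PackedSlice.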
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // Oneof bytes field may also have "proto3" tag.
+ // We want to marshal it as a oneof field. Do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
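+// Naming convention: the Value variants handle a plain (non-pointer) field,
+// the ValueNoZero variants additionally skip the zero value, Ptr handles an
+// optional pointer field, Slice a repeated field, and PackedSlice a packed
+// repeated field written as a single length-delimited record.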
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
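+// The zigzag sizers and appenders below use the standard zigzag mapping
+// (v<<1)^(v>>31), or >>63 for 64-bit, so 0, -1, 1, -2, 2 become 0, 1, 2, 3, 4
+// and small negative values stay short on the wire.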
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
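+// For example, the value 300 (0b100101100) is appended as the two bytes
+// 0xAC, 0x02: seven payload bits plus a continuation bit, then the rest.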
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+ // have a non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
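+// Groups are delimited by a start-group and an end-group tag instead of a
+// length prefix, which is why the sizer adds 2*tagsize and the marshaler
+// writes the wire tag twice.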
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err, errreq error
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, errreq
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err, errreq error
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, errreq
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
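+ // Each map entry is marshaled as an embedded message whose key is field 1
+ // and whose value is field 2, which is how protobuf represents map<K,V>
+ // on the wire.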
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != nil && err != ErrNil { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nil
+ }
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for an XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals an XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+
+// sizeMessageSet computes the size of encoded data for an XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals an XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if err != nil {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
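
The file above wires the new reflection-table marshaler into the package's public entry points (Size, Marshal, and Buffer.Marshal). A minimal usage sketch of those entry points follows; it assumes a hypothetical protoc-gen-go generated package with a Person message, and only proto.Size, proto.Marshal, and proto.String are taken from the vendored API:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/golang/protobuf/proto"
    	pb "example.com/project/personpb" // hypothetical generated package
    )

    func main() {
    	// pb.Person is a hypothetical protoc-gen-go generated message.
    	msg := &pb.Person{Name: proto.String("alice")}

    	// Size uses the generated XXX_Size method when present,
    	// otherwise it falls back to InternalMessageInfo.Size.
    	fmt.Println("encoded size:", proto.Size(msg))

    	// Marshal pre-sizes the output buffer with XXX_Size and then
    	// calls XXX_Marshal to append the wire-format bytes.
    	data, err := proto.Marshal(msg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%x\n", data)
    }
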
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+ // since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
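
table_merge.go above supplies the merge table used by proto.Merge: scalar fields set in src overwrite dst, repeated fields are appended, and byte slices, maps, and sub-messages are deep-copied. A short sketch of those semantics, again assuming the hypothetical generated pb.Person type (here with a repeated Emails field):

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	pb "example.com/project/personpb" // hypothetical generated package, as above
    )

    func main() {
    	dst := &pb.Person{Name: proto.String("alice"), Emails: []string{"a@example.com"}}
    	src := &pb.Person{Emails: []string{"b@example.com"}}

    	// Merge leaves dst.Name alone (unset in src), appends the repeated
    	// Emails field, and deep-copies sub-messages and byte slices.
    	proto.Merge(dst, src)
    	fmt.Println(dst.GetName(), dst.Emails) // alice [a@example.com b@example.com]
    }
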
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..55f0340
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,1967 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all the required fields
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
+ extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
+ isMessageSet bool // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// A unmarshalFieldInfo contains the information to unmarshal a specific field of a message.
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data. We have a separate function
+ // per struct field, distinguished by the struct field's type.
+ unmarshal unmarshaler
+
+ // if the field is required, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+// This function is necessarily called in a thread-safe way.
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ rnse = r
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if rnse != nil {
+ // A required field of a submessage/group is missing. Return that error.
+ return rnse
+ }
+ if reqMask != u.reqMask {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ return &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return nil
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask)
+ }
+
+ // Find any types associated with oneof fields.
+ // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+ if fn.IsValid() {
+ res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+ for i := res.Len() - 1; i >= 0; i-- {
+ v := res.Index(i) // interface{}
+ tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
+ tag, err := strconv.Atoi(tagstr)
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagstr)
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(tag, of.field, unmarshal, 0)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0)
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if err == nil {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nil
+ }
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// float64 Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nil
+ }
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) <= 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
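
encodeVarint and decodeVarint above implement the protocol buffer base-128 varint wire format: each byte carries seven payload bits, least-significant group first, and the high bit marks continuation. decodeVarint unrolls its read loop for speed; the standalone sketch below (helper names made up for illustration, not part of the vendored file) shows the equivalent loop form and a round trip.

```go
package main

import "fmt"

// appendVarint mirrors encodeVarint above: emit seven bits per byte,
// least-significant group first, setting the high bit on every byte
// except the last.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// readVarint is the loop form of decodeVarint; the vendored version
// unrolls this loop. It is simplified and does not reject overlong
// ten-byte encodings as strictly as the original.
func readVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input or too many continuation bytes
}

func main() {
	buf := appendVarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02
	v, n := readVarint(buf)
	fmt.Println(v, n) // 300 2
}
```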
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..2205fda
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+ // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
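
The exported entry points above (MarshalText, MarshalTextString, CompactText, CompactTextString) are what callers normally use; the TextMarshaler struct is only needed for the Compact/ExpandAny options. A minimal usage sketch, assuming a hypothetical generated message type pb.Person with Name and Id fields:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/project/examplepb" // hypothetical generated package
)

func main() {
	// msg is a hypothetical generated message with *string/*int32 fields.
	msg := &pb.Person{Name: proto.String("alice"), Id: proto.Int32(7)}

	// Multi-line text format, one "name: value" pair per line.
	fmt.Print(proto.MarshalTextString(msg))

	// The same message rendered on a single line.
	fmt.Println(proto.CompactTextString(msg))
}
```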
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..0685bae
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
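
UnmarshalText is the inverse of the text marshaler in text.go: it resets pb and parses the "name: value" text form back into the message. A short sketch, again assuming a hypothetical generated pb.Person message:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/project/examplepb" // hypothetical generated package
)

func main() {
	// Text-format input as produced by MarshalTextString (or written by hand).
	const in = `name: "alice"
id: 7
`
	msg := &pb.Person{}
	if err := proto.UnmarshalText(in, msg); err != nil {
		log.Fatalf("UnmarshalText: %v", err)
	}
	fmt.Println(msg.GetName(), msg.GetId()) // alice 7
}
```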
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
new file mode 100644
index 0000000..b9f8b23
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+ - tip
+
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 0000000..65dc692
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 0000000..1e69004
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/mattn/go-isatty"
+ "os"
+)
+
+func main() {
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Terminal")
+ } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Cygwin/MSYS2 Terminal")
+ } else {
+ fmt.Println("Is Not Terminal")
+ }
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+ https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 0000000..17d4f90
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty.
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 0000000..9584a98
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on App Engine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal, which is also always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 0000000..42f2514
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 0000000..7384cf9
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine,!ppc64,!ppc64le
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
new file mode 100644
index 0000000..44e5d21
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
@@ -0,0 +1,19 @@
+// +build linux
+// +build ppc64 ppc64le
+
+package isatty
+
+import (
+ "unsafe"
+
+ syscall "golang.org/x/sys/unix"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 0000000..ff4de3d
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,10 @@
+// +build !windows
+// +build !appengine
+
+package isatty
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal. This is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 0000000..1f0c6bf
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+ var termio unix.Termio
+ err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+ return err == nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 0000000..af51cbc
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,94 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+ "strings"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ fileNameInfo uintptr = 2
+ fileTypePipe = 3
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+ procGetFileType = kernel32.NewProc("GetFileType")
+)
+
+func init() {
+ // Check if GetFileInformationByHandleEx is available.
+ if procGetFileInformationByHandleEx.Find() != nil {
+ procGetFileInformationByHandleEx = nil
+ }
+}
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// isCygwinPipeName reports whether the pipe name is one used by a Cygwin/MSYS2 pty.
+// Cygwin/MSYS2 PTY has a name like:
+// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+ token := strings.Split(name, "-")
+ if len(token) < 5 {
+ return false
+ }
+
+ if token[0] != `\msys` && token[0] != `\cygwin` {
+ return false
+ }
+
+ if token[1] == "" {
+ return false
+ }
+
+ if !strings.HasPrefix(token[2], "pty") {
+ return false
+ }
+
+ if token[3] != `from` && token[3] != `to` {
+ return false
+ }
+
+ if token[4] != "master" {
+ return false
+ }
+
+ return true
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+ if procGetFileInformationByHandleEx == nil {
+ return false
+ }
+
+ // Cygwin/msys's pty is a pipe.
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+ if ft != fileTypePipe || e != 0 {
+ return false
+ }
+
+ var buf [2 + syscall.MAX_PATH]uint16
+ r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+ 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+ uintptr(len(buf)*2), 0, 0)
+ if r == 0 || e != 0 {
+ return false
+ }
+
+ l := *(*uint32)(unsafe.Pointer(&buf))
+ return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+}
diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE
new file mode 100644
index 0000000..95a0f05
--- /dev/null
+++ b/vendor/github.com/ugorji/go/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2015 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go
new file mode 100644
index 0000000..b61a818
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/0doc.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+Package codec provides a
+High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library
+for binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package will carefully use 'unsafe' for performance reasons in specific places.
+You can build without unsafe use by passing the safe or appengine tag,
+i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
+Go SDK versions, e.g. if the current Go release is go 1.9, unsafe use is only supported
+from go 1.7 onward. This is because supporting unsafe requires knowledge of implementation details.
+
+For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Support for go1.4 and above, while selectively using newer APIs for later releases
+ - Excellent code coverage ( > 90% )
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Carefully selected use of 'unsafe' for targeted performance gains.
+ A 100% safe mode exists where 'unsafe' is not used at all.
+ - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+ - Coerce types where appropriate
+ e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Support IsZero() bool to determine if a value is a zero value.
+ Analogous to time.Time.IsZero() bool.
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Mapping a non-interface type to an interface, so we can decode appropriately
+ into any interface type with a correctly configured non-interface value.
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Option to encode struct keys as numbers (instead of strings)
+ (to support structured streams with fields encoded as numeric codes)
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BisSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
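+
+As a minimal sketch (the uuidExt helper and the tag value 78 are purely
+illustrative), the UUID type above could be registered on a handle that
+supports SetBytesExt, e.g. BincHandle:
+
+ type uuidExt struct{}
+
+ // WriteExt encodes the UUID value as its raw bytes.
+ func (uuidExt) WriteExt(v interface{}) []byte {
+     switch t := v.(type) {
+     case UUID:
+         return []byte(t)
+     case *UUID:
+         return []byte(*t)
+     }
+     return nil
+ }
+
+ // ReadExt decodes the raw bytes back into the UUID value.
+ func (uuidExt) ReadExt(dst interface{}, src []byte) {
+     *(dst.(*UUID)) = UUID(src)
+ }
+
+ var bh codec.BincHandle
+ bh.SetBytesExt(reflect.TypeOf(UUID("")), 78, uuidExt{})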
+
+Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves.
+We determine how to encode or decode by walking this decision tree:
+
+ - is type a codec.Selfer?
+ - is there an extension registered for the type?
+ - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
+ - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
+ - is format text-based, and type an encoding.TextMarshaler?
+ - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry
+(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
+then that type doesn't satisfy the check and we will continue walking down the
+decision tree.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST not be used concurrently
+ - To re-use an Encoder/Decoder, call Reset(...) on it first.
+ This allows you to use state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+Running Tests
+
+To run tests, use the following:
+
+ go test
+
+To run the full suite of tests, use the following:
+
+ go test -tags alltests -run Suite
+
+You can run the tag 'safe' to run tests or build in safe mode. e.g.
+
+ go test -tags safe -run Json
+ go test -tags "alltests safe" -run Suite
+
+Running Benchmarks
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+Caveats
+
+Struct fields matching the following are ignored during encoding and decoding
+ - struct tag value set to -
+ - func, complex numbers, unsafe pointers
+ - unexported and not embedded
+ - unexported and embedded and not struct kind
+ - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct,
+with some caveats. See Encode documentation.
+
+*/
+package codec
+
+// TODO:
+// - For Go 1.11, when mid-stack inlining is enabled,
+// we should use committed functions for writeXXX and readXXX calls.
+// This involves uncommenting the methods for decReaderSwitch and encWriterSwitch
+// and using those (decReaderSwitch and encWriterSwitch) in all handles
+// instead of encWriter and decReader.
+// The benefit is that, for the (En|De)coder over []byte, the encWriter/decReader
+// will be inlined, giving a performance bump for that typical case.
+// However, it will only be inlined if mid-stack inlining is enabled,
+// as we call panic to raise errors, and panic currently prevents inlining.
+//
+// PUNTED:
+// - To make Handle comparable, make extHandle in BasicHandle a non-embedded pointer,
+// and use overlay methods on *BasicHandle to call through to extHandle after initializing
+// the "xh *extHandle" to point to a real slice.
+//
+// BEFORE EACH RELEASE:
+// - Look through and fix padding for each type, to eliminate false sharing
+// - critical shared objects that are read many times
+// TypeInfos
+// - pooled objects:
+// decNaked, decNakedContainers, codecFner, typeInfoLoadArray,
+// - small objects allocated independently, that we read/use much across threads:
+// codecFn, typeInfo
+// - Objects allocated independently and used a lot
+// Decoder, Encoder,
+// xxxHandle, xxxEncDriver, xxxDecDriver (xxx = json, msgpack, cbor, binc, simple)
+// - In all above, arrange values modified together to be close to each other.
+//
+// For all of these, either ensure that they occupy full cache lines,
+// or ensure that the things just past the cache line boundary are hardly read/written
+// e.g. JsonHandle.RawBytesExt - which is copied into json(En|De)cDriver at init
+//
+// Occupying full cache lines means they occupy 8*N words (where N is an integer).
+// Check this out by running: ./run.sh -z
+// - look at those tagged ****, meaning they are not occupying full cache lines
+// - look at those tagged <<<<, meaning they are larger than 32 words (something to watch)
+// - Run "golint -min_confidence 0.81"
diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md
new file mode 100644
index 0000000..50d65e5
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/README.md
@@ -0,0 +1,206 @@
+# Codec
+
+High Performance, Feature-Rich Idiomatic Go codec/encoding library for
+binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+This package will carefully use 'unsafe' for performance reasons in specific places.
+You can build without unsafe use by passing the safe or appengine tag,
+i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
+Go SDK versions, e.g. if the current Go release is go 1.9, unsafe use is only supported
+from go 1.7 onward. This is because supporting unsafe requires knowledge of implementation details.
+
+ - Online documentation: http://godoc.org/github.com/ugorji/go/codec
+ - Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Support for go1.4 and above, while selectively using newer APIs for later releases
+ - Excellent code coverage ( > 90% )
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Carefully selected use of 'unsafe' for targeted performance gains.
+ A 100% safe mode exists where 'unsafe' is not used at all.
+ - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+ - Coerce types where appropriate
+ e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Support IsZero() bool to determine if a value is a zero value.
+ Analogous to time.Time.IsZero() bool.
+ - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Mapping a non-interface type to an interface, so we can decode appropriately
+ into any interface type with a correctly configured non-interface value.
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Option to encode struct keys as numbers (instead of strings)
+ (to support structured streams with fields encoded as numeric codes)
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance.
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BisSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
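+
+For example, registration on the Binc handle might look like this (the tag
+value 78 is illustrative, and myUUIDExt is assumed to be a value implementing
+codec.BytesExt for the UUID type above):
+
+ var bh codec.BincHandle
+ bh.SetBytesExt(reflect.TypeOf(UUID("")), 78, myUUIDExt)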
+
+## Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves.
+We determine how to encode or decode by walking this decision tree:
+
+ - is type a codec.Selfer?
+ - is there an extension registered for the type?
+ - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
+ - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
+ - is format text-based, and type an encoding.TextMarshaler?
+ - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry
+(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
+then that type doesn't satisfy the check and we will continue walking down the
+decision tree.
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+## Usage
+
+Typical usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+## Running Tests
+
+To run tests, use the following:
+
+ go test
+
+To run the full suite of tests, use the following:
+
+ go test -tags alltests -run Suite
+
+You can run the tag 'safe' to run tests or build in safe mode. e.g.
+
+ go test -tags safe -run Json
+ go test -tags "alltests safe" -run Suite
+
+## Running Benchmarks
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+## Caveats
+
+Struct fields matching the following are ignored during encoding and decoding
+
+ - struct tag value set to -
+ - func, complex numbers, unsafe pointers
+ - unexported and not embedded
+ - unexported and embedded and not struct kind
+ - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct,
+with some caveats. See Encode documentation.
diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go
new file mode 100644
index 0000000..a3c96fe
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/binc.go
@@ -0,0 +1,1168 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
+
+// vd as low 4 bits (there are 16 slots)
+const (
+ bincVdSpecial byte = iota
+ bincVdPosInt
+ bincVdNegInt
+ bincVdFloat
+
+ bincVdString
+ bincVdByteArray
+ bincVdArray
+ bincVdMap
+
+ bincVdTimestamp
+ bincVdSmallInt
+ bincVdUnicodeOther
+ bincVdSymbol
+
+ bincVdDecimal
+ _ // open slot
+ _ // open slot
+ bincVdCustomExt = 0x0f
+)
+
+const (
+ bincSpNil byte = iota
+ bincSpFalse
+ bincSpTrue
+ bincSpNan
+ bincSpPosInf
+ bincSpNegInf
+ bincSpZeroFloat
+ bincSpZero
+ bincSpNegOne
+)
+
+const (
+ bincFlBin16 byte = iota
+ bincFlBin32
+ _ // bincFlBin32e
+ bincFlBin64
+ _ // bincFlBin64e
+ // others not currently supported
+)
+
+func bincdesc(vd, vs byte) string {
+ switch vd {
+ case bincVdSpecial:
+ switch vs {
+ case bincSpNil:
+ return "nil"
+ case bincSpFalse:
+ return "false"
+ case bincSpTrue:
+ return "true"
+ case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat:
+ return "float"
+ case bincSpZero:
+ return "uint"
+ case bincSpNegOne:
+ return "int"
+ default:
+ return "unknown"
+ }
+ case bincVdSmallInt, bincVdPosInt:
+ return "uint"
+ case bincVdNegInt:
+ return "int"
+ case bincVdFloat:
+ return "float"
+ case bincVdSymbol:
+ return "string"
+ case bincVdString:
+ return "string"
+ case bincVdByteArray:
+ return "bytes"
+ case bincVdTimestamp:
+ return "time"
+ case bincVdCustomExt:
+ return "ext"
+ case bincVdArray:
+ return "array"
+ case bincVdMap:
+ return "map"
+ default:
+ return "unknown"
+ }
+}
+
+type bincEncDriver struct {
+ e *Encoder
+ h *BincHandle
+ w encWriter
+ m map[string]uint16 // symbols
+ b [16]byte // scratch, used for encoding numbers - bigendian style
+ s uint16 // symbols sequencer
+ // c containerState
+ encDriverTrackContainerWriter
+ noBuiltInTypes
+ // encNoSeparator
+}
+
+func (e *bincEncDriver) EncodeNil() {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNil)
+}
+
+func (e *bincEncDriver) EncodeTime(t time.Time) {
+ if t.IsZero() {
+ e.EncodeNil()
+ } else {
+ bs := bincEncodeTime(t)
+ e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+ e.w.writeb(bs)
+ }
+}
+
+func (e *bincEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
+ } else {
+ e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
+ }
+}
+
+func (e *bincEncDriver) EncodeFloat32(f float32) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *bincEncDriver) EncodeFloat64(f float64) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ bigen.PutUint64(e.b[:8], math.Float64bits(f))
+ if bincDoPrune {
+ i := 7
+ for ; i >= 0 && (e.b[i] == 0); i-- {
+ }
+ i++
+ if i <= 6 {
+ e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+ e.w.writen1(byte(i))
+ e.w.writeb(e.b[:i])
+ return
+ }
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin64)
+ e.w.writeb(e.b[:8])
+}
+
+func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
+ if lim == 4 {
+ bigen.PutUint32(e.b[:lim], uint32(v))
+ } else {
+ bigen.PutUint64(e.b[:lim], v)
+ }
+ if bincDoPrune {
+ i := pruneSignExt(e.b[:lim], pos)
+ e.w.writen1(bd | lim - 1 - byte(i))
+ e.w.writeb(e.b[i:lim])
+ } else {
+ e.w.writen1(bd | lim - 1)
+ e.w.writeb(e.b[:lim])
+ }
+}
+
+func (e *bincEncDriver) EncodeInt(v int64) {
+ const nbd byte = bincVdNegInt << 4
+ if v >= 0 {
+ e.encUint(bincVdPosInt<<4, true, uint64(v))
+ } else if v == -1 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
+ } else {
+ e.encUint(bincVdNegInt<<4, false, uint64(-v))
+ }
+}
+
+func (e *bincEncDriver) EncodeUint(v uint64) {
+ e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+ if v == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZero)
+ } else if pos && v >= 1 && v <= 16 {
+ e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd|0x0, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.encIntegerPrune(bd, pos, v, 4)
+ } else {
+ e.encIntegerPrune(bd, pos, v, 8)
+ }
+}
+
+func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(bincVdCustomExt<<4, uint64(length))
+ e.w.writen1(xtag)
+}
+
+func (e *bincEncDriver) WriteArrayStart(length int) {
+ e.encLen(bincVdArray<<4, uint64(length))
+ e.c = containerArrayStart
+}
+
+func (e *bincEncDriver) WriteMapStart(length int) {
+ e.encLen(bincVdMap<<4, uint64(length))
+ e.c = containerMapStart
+}
+
+func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
+ if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) {
+ e.EncodeSymbol(v)
+ return
+ }
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeSymbol(v string) {
+ // if WriteSymbolsNoRefs {
+ // e.encodeString(cUTF8, v)
+ // return
+ // }
+
+ //symbols only offer benefit when string length > 1.
+ //This is because strings with length 1 take only 2 bytes to store
+ //(bd with embedded length, and single byte for string val).
+
+ l := len(v)
+ if l == 0 {
+ e.encBytesLen(cUTF8, 0)
+ return
+ } else if l == 1 {
+ e.encBytesLen(cUTF8, 1)
+ e.w.writen1(v[0])
+ return
+ }
+ if e.m == nil {
+ e.m = make(map[string]uint16, 16)
+ }
+ ui, ok := e.m[v]
+ if ok {
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ } else {
+ e.s++
+ ui = e.s
+ //ui = uint16(atomic.AddUint32(&e.s, 1))
+ e.m[v] = ui
+ var lenprec uint8
+ if l <= math.MaxUint8 {
+ // lenprec = 0
+ } else if l <= math.MaxUint16 {
+ lenprec = 1
+ } else if int64(l) <= math.MaxUint32 {
+ lenprec = 2
+ } else {
+ lenprec = 3
+ }
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+ bigenHelper{e.b[:2], e.w}.writeUint16(ui)
+ }
+ if lenprec == 0 {
+ e.w.writen1(byte(l))
+ } else if lenprec == 1 {
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
+ } else if lenprec == 2 {
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
+ } else {
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
+ }
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ if v == nil {
+ e.EncodeNil()
+ return
+ }
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writeb(v)
+ }
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+ //TODO: support bincUnicodeOther (for now, just use string or bytearray)
+ if c == cRAW {
+ e.encLen(bincVdByteArray<<4, length)
+ } else {
+ e.encLen(bincVdString<<4, length)
+ }
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+ if l < 12 {
+ e.w.writen1(bd | uint8(l+4))
+ } else {
+ e.encLenNumber(bd, l)
+ }
+}
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd | 0x01)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd | 0x02)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else {
+ e.w.writen1(bd | 0x03)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
+ }
+}
+
+//------------------------------------
+
+type bincDecSymbol struct {
+ s string
+ b []byte
+ i uint16
+}
+
+type bincDecDriver struct {
+ decDriverNoopContainerReader
+ noBuiltInTypes
+
+ d *Decoder
+ h *BincHandle
+ r decReader
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ vd byte
+ vs byte
+ _ [3]byte // padding
+ // linear searching on this slice is ok,
+ // because we typically expect < 32 symbols in each stream.
+ s []bincDecSymbol
+
+ // noStreamingCodec
+ // decNoSeparator
+
+ b [8 * 8]byte // scratch
+}
+
+func (d *bincDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.vd = d.bd >> 4
+ d.vs = d.bd & 0x0f
+ d.bdRead = true
+}
+
+func (d *bincDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd == bincVdSpecial && d.vs == bincSpNil {
+ return valueTypeNil
+ } else if d.vd == bincVdByteArray {
+ return valueTypeBytes
+ } else if d.vd == bincVdString {
+ return valueTypeString
+ } else if d.vd == bincVdArray {
+ return valueTypeArray
+ } else if d.vd == bincVdMap {
+ return valueTypeMap
+ }
+ // else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // }
+ return valueTypeUnset
+}
+
+func (d *bincDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *bincDecDriver) DecodeTime() (t time.Time) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return
+ }
+ if d.vd != bincVdTimestamp {
+ d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ t, err := bincDecodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
+ if vs&0x8 == 0 {
+ d.r.readb(d.b[0:defaultLen])
+ } else {
+ l := d.r.readn1()
+ if l > 8 {
+ d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l)
+ return
+ }
+ for i := l; i < 8; i++ {
+ d.b[i] = 0
+ }
+ d.r.readb(d.b[0:l])
+ }
+}
+
+func (d *bincDecDriver) decFloat() (f float64) {
+ //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
+ if x := d.vs & 0x7; x == bincFlBin32 {
+ d.decFloatPre(d.vs, 4)
+ f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
+ } else if x == bincFlBin64 {
+ d.decFloatPre(d.vs, 8)
+ f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
+ } else {
+ d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+ // need to inline the code (interface conversion and type assertion expensive)
+ switch d.vs {
+ case 0:
+ v = uint64(d.r.readn1())
+ case 1:
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ case 2:
+ d.b[4] = 0
+ d.r.readb(d.b[5:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 3:
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ case 4, 5, 6:
+ lim := int(7 - d.vs)
+ d.r.readb(d.b[lim:8])
+ for i := 0; i < lim; i++ {
+ d.b[i] = 0
+ }
+ v = uint64(bigen.Uint64(d.b[:8]))
+ case 7:
+ d.r.readb(d.b[:8])
+ v = uint64(bigen.Uint64(d.b[:8]))
+ default:
+ d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdPosInt {
+ ui = d.decUint()
+ } else if vd == bincVdNegInt {
+ ui = d.decUint()
+ neg = true
+ } else if vd == bincVdSmallInt {
+ ui = uint64(d.vs) + 1
+ } else if vd == bincVdSpecial {
+ if vs == bincSpZero {
+ //i = 0
+ } else if vs == bincSpNegOne {
+ neg = true
+ ui = 1
+ } else {
+ d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s",
+ d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ } else {
+ d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+ return
+ }
+ return
+}
+
+func (d *bincDecDriver) DecodeInt64() (i int64) {
+ ui, neg := d.decCheckInteger()
+ i = chkOvf.SignedIntV(ui)
+ if neg {
+ i = -i
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeUint64() (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("assigning negative signed value to unsigned integer type")
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeFloat64() (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ vd, vs := d.vd, d.vs
+ if vd == bincVdSpecial {
+ d.bdRead = false
+ if vs == bincSpNan {
+ return math.NaN()
+ } else if vs == bincSpPosInf {
+ return math.Inf(1)
+ } else if vs == bincSpZeroFloat || vs == bincSpZero {
+ return
+ } else if vs == bincSpNegInf {
+ return math.Inf(-1)
+ } else {
+ d.d.errorf("float - invalid special value from descriptor %x-%x/%s",
+ d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ } else if vd == bincVdFloat {
+ f = d.decFloat()
+ } else {
+ f = float64(d.DecodeInt64())
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
+ // b = false
+ } else if bd == (bincVdSpecial | bincSpTrue) {
+ b = true
+ } else {
+ d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadMapStart() (length int) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd != bincVdMap {
+ d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) ReadArrayStart() (length int) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd != bincVdArray {
+ d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decLen() int {
+ if d.vs > 3 {
+ return int(d.vs - 4)
+ }
+ return int(d.decLenNumber())
+}
+
+func (d *bincDecDriver) decLenNumber() (v uint64) {
+ if x := d.vs; x == 0 {
+ v = uint64(d.r.readn1())
+ } else if x == 1 {
+ d.r.readb(d.b[6:8])
+ v = uint64(bigen.Uint16(d.b[6:8]))
+ } else if x == 2 {
+ d.r.readb(d.b[4:8])
+ v = uint64(bigen.Uint32(d.b[4:8]))
+ } else {
+ d.r.readb(d.b[:8])
+ v = bigen.Uint64(d.b[:8])
+ }
+ return
+}
+
+func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (
+ bs2 []byte, s string) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return
+ }
+ var slen = -1
+ // var ok bool
+ switch d.vd {
+ case bincVdString, bincVdByteArray:
+ slen = d.decLen()
+ if zerocopy {
+ if d.br {
+ bs2 = d.r.readx(slen)
+ } else if len(bs) == 0 {
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
+ } else {
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
+ }
+ } else {
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
+ }
+ if withString {
+ s = string(bs2)
+ }
+ case bincVdSymbol:
+ // zerocopy doesn't apply for symbols,
+ // as the values must be stored in a table for later use.
+ //
+ //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
+ //extract symbol
+ //if containsStringVal, read it and put in map
+ //else look in map for string value
+ var symbol uint16
+ vs := d.vs
+ if vs&0x8 == 0 {
+ symbol = uint16(d.r.readn1())
+ } else {
+ symbol = uint16(bigen.Uint16(d.r.readx(2)))
+ }
+ if d.s == nil {
+ d.s = make([]bincDecSymbol, 0, 16)
+ }
+
+ if vs&0x4 == 0 {
+ for i := range d.s {
+ j := &d.s[i]
+ if j.i == symbol {
+ bs2 = j.b
+ if withString {
+ if j.s == "" && bs2 != nil {
+ j.s = string(bs2)
+ }
+ s = j.s
+ }
+ break
+ }
+ }
+ } else {
+ switch vs & 0x3 {
+ case 0:
+ slen = int(d.r.readn1())
+ case 1:
+ slen = int(bigen.Uint16(d.r.readx(2)))
+ case 2:
+ slen = int(bigen.Uint32(d.r.readx(4)))
+ case 3:
+ slen = int(bigen.Uint64(d.r.readx(8)))
+ }
+ // since using symbols, do not store any part of
+ // the parameter bs in the map, as it might be a shared buffer.
+ // bs2 = decByteSlice(d.r, slen, bs)
+ bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
+ if withString {
+ s = string(bs2)
+ }
+ d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
+ }
+ default:
+ d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeString() (s string) {
+ // DecodeBytes does not accommodate symbols, whose impl stores string version in map.
+ // Use decStringAndBytes directly.
+ // return string(d.DecodeBytes(d.b[:], true, true))
+ _, s = d.decStringAndBytes(d.b[:], true, true)
+ return
+}
+
+func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) {
+ s, _ = d.decStringAndBytes(d.b[:], false, true)
+ return
+}
+
+func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return nil
+ }
+ // check if an "array" of uint8's (see ContainerType for how to infer if an array)
+ if d.vd == bincVdArray {
+ bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+ return
+ }
+ var clen int
+ if d.vd == bincVdString || d.vd == bincVdByteArray {
+ clen = d.decLen()
+ } else {
+ d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.vd == bincVdCustomExt {
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ } else if d.vd == bincVdByteArray {
+ xbs = d.DecodeBytes(nil, true)
+ } else {
+ d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := d.d.n
+ var decodeFurther bool
+
+ switch d.vd {
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpNil:
+ n.v = valueTypeNil
+ case bincSpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case bincSpTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case bincSpNan:
+ n.v = valueTypeFloat
+ n.f = math.NaN()
+ case bincSpPosInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(1)
+ case bincSpNegInf:
+ n.v = valueTypeFloat
+ n.f = math.Inf(-1)
+ case bincSpZeroFloat:
+ n.v = valueTypeFloat
+ n.f = float64(0)
+ case bincSpZero:
+ n.v = valueTypeUint
+ n.u = uint64(0) // int8(0)
+ case bincSpNegOne:
+ n.v = valueTypeInt
+ n.i = int64(-1) // int8(-1)
+ default:
+ d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
+ }
+ case bincVdSmallInt:
+ n.v = valueTypeUint
+ n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+ case bincVdPosInt:
+ n.v = valueTypeUint
+ n.u = d.decUint()
+ case bincVdNegInt:
+ n.v = valueTypeInt
+ n.i = -(int64(d.decUint()))
+ case bincVdFloat:
+ n.v = valueTypeFloat
+ n.f = d.decFloat()
+ case bincVdSymbol:
+ n.v = valueTypeSymbol
+ n.s = d.DecodeString()
+ case bincVdString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case bincVdByteArray:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false)
+ case bincVdTimestamp:
+ n.v = valueTypeTime
+ tt, err := bincDecodeTime(d.r.readx(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ n.t = tt
+ case bincVdCustomExt:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case bincVdArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bincVdMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+//------------------------------------
+
+//BincHandle is a Handle for the Binc Schema-Free Encoding Format
+//defined at https://github.com/ugorji/binc .
+//
+//BincHandle currently supports all Binc features with the following EXCEPTIONS:
+// - only integers up to 64 bits of precision are supported.
+// big integers are unsupported.
+// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
+// extended precision and decimal IEEE 754 floats are unsupported.
+// - Only UTF-8 strings supported.
+// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
+type BincHandle struct {
+ BasicHandle
+ binaryEncodingType
+ noElemSeparators
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase when using symbols, because string comparisons have a clear cost.
+ //
+ // Values:
+ // - 0: default: library uses best judgement
+ // - 1: use symbols
+ // - 2: do not use symbols
+ AsSymbols uint8
+
+ // AsSymbols: may later on introduce more options ...
+ // - m: map keys
+ // - s: struct fields
+ // - n: none
+ // - a: all: same as m, s, ...
+
+ // _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: binc
+func (h *BincHandle) Name() string { return "binc" }
+
+// SetBytesExt sets an extension
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
+ return &bincEncDriver{e: e, h: h, w: e.w}
+}
+
+func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
+ return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *bincEncDriver) reset() {
+ e.w = e.e.w
+ e.s = 0
+ e.c = 0
+ e.m = nil
+}
+
+func (d *bincDecDriver) reset() {
+ d.r, d.br = d.d.r, d.d.bytes
+ d.s = nil
+ d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
+}
+
+// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+
+// bincEncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+// and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+// A: Is secs component encoded? 1 = true
+// B: Is nsecs component encoded? 1 = true
+// C: Is tz component encoded? 1 = true
+// DDD: Number of extra bytes for secs (range 0-7).
+// If A = 1, secs encoded in DDD+1 bytes.
+// If A = 0, secs is not encoded, and is assumed to be 0.
+// If A = 1, then we need at least 1 byte to encode secs.
+// DDD says the number of extra bytes beyond that 1.
+// E.g. if DDD=0, then secs is represented in 1 byte.
+// if DDD=2, then secs is represented in 3 bytes.
+// EE: Number of extra bytes for nsecs (range 0-3).
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+// secs component encoded in `DDD + 1` bytes (if A == 1)
+// nsecs component encoded in `EE + 1` bytes (if B == 1)
+// tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a BigEndian
+// 2's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// Least significant bit 0 are described below:
+//
+// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+// Bit 15 = have\_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
+func bincEncodeTime(t time.Time) []byte {
+ //t := rv.Interface().(time.Time)
+ tsecs, tnsecs := t.Unix(), t.Nanosecond()
+ var (
+ bd byte
+ btmp [8]byte
+ bs [16]byte
+ i int = 1
+ )
+ l := t.Location()
+ if l == time.UTC {
+ l = nil
+ }
+ if tsecs != 0 {
+ bd = bd | 0x80
+ bigen.PutUint64(btmp[:], uint64(tsecs))
+ f := pruneSignExt(btmp[:], tsecs >= 0)
+ bd = bd | (byte(7-f) << 2)
+ copy(bs[i:], btmp[f:])
+ i = i + (8 - f)
+ }
+ if tnsecs != 0 {
+ bd = bd | 0x40
+ bigen.PutUint32(btmp[:4], uint32(tnsecs))
+ f := pruneSignExt(btmp[:4], true)
+ bd = bd | byte(3-f)
+ copy(bs[i:], btmp[f:4])
+ i = i + (4 - f)
+ }
+ if l != nil {
+ bd = bd | 0x20
+ // Note that Go Libs do not give access to dst flag.
+ _, zoneOffset := t.Zone()
+ //zoneName, zoneOffset := t.Zone()
+ zoneOffset /= 60
+ z := uint16(zoneOffset)
+ bigen.PutUint16(btmp[:2], z)
+ // clear dst flags
+ bs[i] = btmp[0] & 0x3f
+ bs[i+1] = btmp[1]
+ i = i + 2
+ }
+ bs[0] = bd
+ return bs[0:i]
+}
+
+// bincDecodeTime decodes a []byte into a time.Time.
+func bincDecodeTime(bs []byte) (tt time.Time, err error) {
+ bd := bs[0]
+ var (
+ tsec int64
+ tnsec uint32
+ tz uint16
+ i byte = 1
+ i2 byte
+ n byte
+ )
+ if bd&(1<<7) != 0 {
+ var btmp [8]byte
+ n = ((bd >> 2) & 0x7) + 1
+ i2 = i + n
+ copy(btmp[8-n:], bs[i:i2])
+ //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+ if bs[i]&(1<<7) != 0 {
+ copy(btmp[0:8-n], bsAll0xff)
+ //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+ }
+ i = i2
+ tsec = int64(bigen.Uint64(btmp[:]))
+ }
+ if bd&(1<<6) != 0 {
+ var btmp [4]byte
+ n = (bd & 0x3) + 1
+ i2 = i + n
+ copy(btmp[4-n:], bs[i:i2])
+ i = i2
+ tnsec = bigen.Uint32(btmp[:])
+ }
+ if bd&(1<<5) == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ return
+ }
+// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as the zone name.
+// However, we need the name here, so it can be shown when the time is printed.
+ // Zone name is in form: UTC-08:00.
+ // Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+ i2 = i + 2
+ tz = bigen.Uint16(bs[i:i2])
+ // i = i2
+ // sign extend sign bit into top 2 MSB (which were dst bits):
+ if tz&(1<<13) == 0 { // positive
+ tz = tz & 0x3fff //clear 2 MSBs: dst bits
+ } else { // negative
+ tz = tz | 0xc000 //set 2 MSBs: dst bits
+ }
+ tzint := int16(tz)
+ if tzint == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ } else {
+ // For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+ // The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+ // var zoneName = timeLocUTCName(tzint)
+ tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+ }
+ return
+}
+
+var _ decDriver = (*bincDecDriver)(nil)
+var _ encDriver = (*bincEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go
new file mode 100644
index 0000000..7633c04
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/cbor.go
@@ -0,0 +1,756 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+const (
+ cborMajorUint byte = iota
+ cborMajorNegInt
+ cborMajorBytes
+ cborMajorText
+ cborMajorArray
+ cborMajorMap
+ cborMajorTag
+ cborMajorOther
+)
+
+const (
+ cborBdFalse byte = 0xf4 + iota
+ cborBdTrue
+ cborBdNil
+ cborBdUndefined
+ cborBdExt
+ cborBdFloat16
+ cborBdFloat32
+ cborBdFloat64
+)
+
+const (
+ cborBdIndefiniteBytes byte = 0x5f
+ cborBdIndefiniteString = 0x7f
+ cborBdIndefiniteArray = 0x9f
+ cborBdIndefiniteMap = 0xbf
+ cborBdBreak = 0xff
+)
+
+// These define some in-stream descriptors for
+// manual encoding, e.g. when doing explicit indefinite-length encoding.
+const (
+ CborStreamBytes byte = 0x5f
+ CborStreamString = 0x7f
+ CborStreamArray = 0x9f
+ CborStreamMap = 0xbf
+ CborStreamBreak = 0xff
+)
+
+const (
+ cborBaseUint byte = 0x00
+ cborBaseNegInt = 0x20
+ cborBaseBytes = 0x40
+ cborBaseString = 0x60
+ cborBaseArray = 0x80
+ cborBaseMap = 0xa0
+ cborBaseTag = 0xc0
+ cborBaseSimple = 0xe0
+)
+
+func cbordesc(bd byte) string {
+ switch bd {
+ case cborBdNil:
+ return "nil"
+ case cborBdFalse:
+ return "false"
+ case cborBdTrue:
+ return "true"
+ case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+ return "float"
+ case cborBdIndefiniteBytes:
+ return "bytes*"
+ case cborBdIndefiniteString:
+ return "string*"
+ case cborBdIndefiniteArray:
+ return "array*"
+ case cborBdIndefiniteMap:
+ return "map*"
+ default:
+ switch {
+ case bd >= cborBaseUint && bd < cborBaseNegInt:
+ return "(u)int"
+ case bd >= cborBaseNegInt && bd < cborBaseBytes:
+ return "int"
+ case bd >= cborBaseBytes && bd < cborBaseString:
+ return "bytes"
+ case bd >= cborBaseString && bd < cborBaseArray:
+ return "string"
+ case bd >= cborBaseArray && bd < cborBaseMap:
+ return "array"
+ case bd >= cborBaseMap && bd < cborBaseTag:
+ return "map"
+ case bd >= cborBaseTag && bd < cborBaseSimple:
+ return "ext"
+ default:
+ return "unknown"
+ }
+ }
+}
+
+// -------------------
+
+type cborEncDriver struct {
+ noBuiltInTypes
+ encDriverNoopContainerWriter
+ // encNoSeparator
+ e *Encoder
+ w encWriter
+ h *CborHandle
+ x [8]byte
+ _ [3]uint64 // padding
+}
+
+func (e *cborEncDriver) EncodeNil() {
+ e.w.writen1(cborBdNil)
+}
+
+func (e *cborEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(cborBdTrue)
+ } else {
+ e.w.writen1(cborBdFalse)
+ }
+}
+
+func (e *cborEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(cborBdFloat32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *cborEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(cborBdFloat64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *cborEncDriver) encUint(v uint64, bd byte) {
+ if v <= 0x17 {
+ e.w.writen1(byte(v) + bd)
+ } else if v <= math.MaxUint8 {
+ e.w.writen2(bd+0x18, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 0x19)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 0x1a)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 0x1b)
+ bigenHelper{e.x[:8], e.w}.writeUint64(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-1-v), cborBaseNegInt)
+ } else {
+ e.encUint(uint64(v), cborBaseUint)
+ }
+}
+
+func (e *cborEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, cborBaseUint)
+}
+
+func (e *cborEncDriver) encLen(bd byte, length int) {
+ e.encUint(uint64(length), bd)
+}
+
+func (e *cborEncDriver) EncodeTime(t time.Time) {
+ if t.IsZero() {
+ e.EncodeNil()
+ } else if e.h.TimeRFC3339 {
+ e.encUint(0, cborBaseTag)
+ e.EncodeString(cUTF8, t.Format(time.RFC3339Nano))
+ } else {
+ e.encUint(1, cborBaseTag)
+ t = t.UTC().Round(time.Microsecond)
+ sec, nsec := t.Unix(), uint64(t.Nanosecond())
+ if nsec == 0 {
+ e.EncodeInt(sec)
+ } else {
+ e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
+ }
+ }
+}
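+
+// For example (illustrative, per the branches above): with TimeRFC3339 unset,
+// a whole-second time is written as tag 1 (0xc1 = cborBaseTag+1) followed by an
+// integer of epoch seconds, while a time with sub-second precision is written
+// as tag 1 followed by a float64 of fractional epoch seconds.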
+
+func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ e.encUint(uint64(xtag), cborBaseTag)
+ if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ e.encUint(uint64(re.Tag), cborBaseTag)
+ if false && re.Data != nil {
+ en.encode(re.Data)
+ } else if re.Value != nil {
+ en.encode(re.Value)
+ } else {
+ e.EncodeNil()
+ }
+}
+
+func (e *cborEncDriver) WriteArrayStart(length int) {
+ if e.h.IndefiniteLength {
+ e.w.writen1(cborBdIndefiniteArray)
+ } else {
+ e.encLen(cborBaseArray, length)
+ }
+}
+
+func (e *cborEncDriver) WriteMapStart(length int) {
+ if e.h.IndefiniteLength {
+ e.w.writen1(cborBdIndefiniteMap)
+ } else {
+ e.encLen(cborBaseMap, length)
+ }
+}
+
+func (e *cborEncDriver) WriteMapEnd() {
+ if e.h.IndefiniteLength {
+ e.w.writen1(cborBdBreak)
+ }
+}
+
+func (e *cborEncDriver) WriteArrayEnd() {
+ if e.h.IndefiniteLength {
+ e.w.writen1(cborBdBreak)
+ }
+}
+
+func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
+ e.encStringBytesS(cborBaseString, v)
+}
+
+func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ if v == nil {
+ e.EncodeNil()
+ } else if c == cRAW {
+ e.encStringBytesS(cborBaseBytes, stringView(v))
+ } else {
+ e.encStringBytesS(cborBaseString, stringView(v))
+ }
+}
+
+func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
+ if e.h.IndefiniteLength {
+ if bb == cborBaseBytes {
+ e.w.writen1(cborBdIndefiniteBytes)
+ } else {
+ e.w.writen1(cborBdIndefiniteString)
+ }
+ blen := len(v) / 4
+ if blen == 0 {
+ blen = 64
+ } else if blen > 1024 {
+ blen = 1024
+ }
+ for i := 0; i < len(v); {
+ var v2 string
+ i2 := i + blen
+ if i2 < len(v) {
+ v2 = v[i:i2]
+ } else {
+ v2 = v[i:]
+ }
+ e.encLen(bb, len(v2))
+ e.w.writestr(v2)
+ i = i2
+ }
+ e.w.writen1(cborBdBreak)
+ } else {
+ e.encLen(bb, len(v))
+ e.w.writestr(v)
+ }
+}
+
+// ----------------------
+
+type cborDecDriver struct {
+ d *Decoder
+ h *CborHandle
+ r decReader
+ // b [scratchByteArrayLen]byte
+ br bool // bytes reader
+ bdRead bool
+ bd byte
+ noBuiltInTypes
+ // decNoSeparator
+ decDriverNoopContainerReader
+ _ [3]uint64 // padding
+}
+
+func (d *cborDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *cborDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil {
+ return valueTypeNil
+ } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
+ return valueTypeBytes
+ } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
+ return valueTypeString
+ } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+ return valueTypeArray
+ } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
+ return valueTypeMap
+ }
+ // else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // }
+ return valueTypeUnset
+}
+
+func (d *cborDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ // treat Nil and Undefined as nil values
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) CheckBreak() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdBreak {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *cborDecDriver) decUint() (ui uint64) {
+ v := d.bd & 0x1f
+ if v <= 0x17 {
+ ui = uint64(v)
+ } else {
+ if v == 0x18 {
+ ui = uint64(d.r.readn1())
+ } else if v == 0x19 {
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ } else if v == 0x1a {
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ } else if v == 0x1b {
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ } else {
+ d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
+ return
+ }
+ }
+ return
+}
+
+func (d *cborDecDriver) decCheckInteger() (neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ major := d.bd >> 5
+ if major == cborMajorUint {
+ } else if major == cborMajorNegInt {
+ neg = true
+ } else {
+ d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
+ return
+ }
+ return
+}
+
+func (d *cborDecDriver) DecodeInt64() (i int64) {
+ neg := d.decCheckInteger()
+ ui := d.decUint()
+ // check if this number can be converted to an int without overflow
+ if neg {
+ i = -(chkOvf.SignedIntV(ui + 1))
+ } else {
+ i = chkOvf.SignedIntV(ui)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeUint64() (ui uint64) {
+ if d.decCheckInteger() {
+ d.d.errorf("assigning negative signed value to unsigned type")
+ return
+ }
+ ui = d.decUint()
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeFloat64() (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdFloat16 {
+ f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
+ } else if bd == cborBdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if bd == cborBdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else if bd >= cborBaseUint && bd < cborBaseBytes {
+ f = float64(d.DecodeInt64())
+ } else {
+ d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd))
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *cborDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if bd := d.bd; bd == cborBdTrue {
+ b = true
+ } else if bd == cborBdFalse {
+ } else {
+ d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) ReadMapStart() (length int) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteMap {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) ReadArrayStart() (length int) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteArray {
+ return -1
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) decLen() int {
+ return int(d.decUint())
+}
+
+func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
+ d.bdRead = false
+ for {
+ if d.CheckBreak() {
+ break
+ }
+ if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
+ d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+
+ " got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd))
+ return nil
+ }
+ n := d.decLen()
+ oldLen := len(bs)
+ newLen := oldLen + n
+ if newLen > cap(bs) {
+ bs2 := make([]byte, newLen, 2*cap(bs)+n)
+ copy(bs2, bs)
+ bs = bs2
+ } else {
+ bs = bs[:newLen]
+ }
+ d.r.readb(bs[oldLen:newLen])
+ // bs = append(bs, d.r.readn()...)
+ d.bdRead = false
+ }
+ d.bdRead = false
+ return bs
+}
+
+func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return nil
+ }
+ if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+ d.bdRead = false
+ if bs == nil {
+ if zerocopy {
+ return d.decAppendIndefiniteBytes(d.d.b[:0])
+ }
+ return d.decAppendIndefiniteBytes(zeroByteSlice)
+ }
+ return d.decAppendIndefiniteBytes(bs[:0])
+ }
+ // check if an "array" of uint8's (see ContainerType for how to infer if an array)
+ if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
+ bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+ return
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
+}
+
+func (d *cborDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.d.b[:], true))
+}
+
+func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
+ return d.DecodeBytes(d.d.b[:], true)
+}
+
+func (d *cborDecDriver) DecodeTime() (t time.Time) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return
+ }
+ xtag := d.decUint()
+ d.bdRead = false
+ return d.decodeTime(xtag)
+}
+
+func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch xtag {
+ case 0:
+ var err error
+ if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
+ d.d.errorv(err)
+ }
+ case 1:
+ // decode an int64 or a float, and infer time.Time from there.
+ // for floats, round to microseconds, as that is what is guaranteed to fit well.
+ switch {
+ case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
+ f1, f2 := math.Modf(d.DecodeFloat64())
+ t = time.Unix(int64(f1), int64(f2*1e9))
+ case d.bd == cborBdFloat64:
+ f1, f2 := math.Modf(d.DecodeFloat64())
+ t = time.Unix(int64(f1), int64(f2*1e9))
+ case d.bd >= cborBaseUint && d.bd < cborBaseNegInt,
+ d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+ t = time.Unix(d.DecodeInt64(), 0)
+ default:
+ d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
+ }
+ default:
+ d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
+ }
+ t = t.UTC().Round(time.Microsecond)
+ return
+}
+
+func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ u := d.decUint()
+ d.bdRead = false
+ realxtag = u
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ d.d.decode(&re.Value)
+ } else if xtag != realxtag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
+ return
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case cborBdNil:
+ n.v = valueTypeNil
+ case cborBdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case cborBdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat64()
+ case cborBdIndefiniteBytes:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false)
+ case cborBdIndefiniteString:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case cborBdIndefiniteArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case cborBdIndefiniteMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ switch {
+ case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt64()
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint64()
+ }
+ case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt64()
+ case d.bd >= cborBaseBytes && d.bd < cborBaseString:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false)
+ case d.bd >= cborBaseString && d.bd < cborBaseArray:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case d.bd >= cborBaseArray && d.bd < cborBaseMap:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case d.bd >= cborBaseMap && d.bd < cborBaseTag:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
+ n.v = valueTypeExt
+ n.u = d.decUint()
+ n.l = nil
+ if n.u == 0 || n.u == 1 {
+ d.bdRead = false
+ n.v = valueTypeTime
+ n.t = d.decodeTime(n.u)
+ }
+ // d.bdRead = false
+ // d.d.decode(&re.Value) // handled by decode itself.
+ // decodeFurther = true
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ return
+ }
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+// - indefinite-length arrays/maps/bytes/strings
+// - (extension) tags in range 0..0xffff (0 .. 65535)
+// - half, single and double-precision floats
+// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+// - nil, true, false, ...
+// - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+// - timestamp, BigNum, BigFloat, Decimals,
+// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+type CborHandle struct {
+ binaryEncodingType
+ noElemSeparators
+ BasicHandle
+
+ // IndefiniteLength=true means that we encode using indefinite-length encoding.
+ IndefiniteLength bool
+
+ // TimeRFC3339 says to encode time.Time using RFC3339 format.
+ // If unset, we encode time.Time using seconds past epoch.
+ TimeRFC3339 bool
+
+ // _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: cbor
+func (h *CborHandle) Name() string { return "cbor" }
+
+// SetInterfaceExt registers an InterfaceExt for the given reflect.Type and tag.
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
+}
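+
+// A minimal usage sketch for SetInterfaceExt (the type point, the pointExt
+// helper and the tag value 100 below are hypothetical, chosen only to
+// illustrate the call; the extension maps the custom type to/from a string):
+//
+//    type point struct{ X, Y int }
+//
+//    type pointExt struct{}
+//
+//    func (pointExt) ConvertExt(v interface{}) interface{} {
+//        switch p := v.(type) {
+//        case point:
+//            return fmt.Sprintf("%d,%d", p.X, p.Y)
+//        case *point:
+//            return fmt.Sprintf("%d,%d", p.X, p.Y)
+//        }
+//        return nil
+//    }
+//
+//    func (pointExt) UpdateExt(dst interface{}, src interface{}) {
+//        p := dst.(*point)
+//        fmt.Sscanf(src.(string), "%d,%d", &p.X, &p.Y)
+//    }
+//
+//    var h codec.CborHandle
+//    if err := h.SetInterfaceExt(reflect.TypeOf(point{}), 100, pointExt{}); err != nil {
+//        panic(err)
+//    }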
+
+func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
+ return &cborEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
+ return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *cborEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+ d.r, d.br = d.d.r, d.d.bytes
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
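+
+// A minimal round-trip sketch for CborHandle as used by package consumers
+// (illustrative; the value v and the option settings are arbitrary):
+//
+//    var h codec.CborHandle
+//    h.TimeRFC3339 = true       // encode time.Time as an RFC3339 string (tag 0)
+//    h.IndefiniteLength = false // set true to emit indefinite-length containers
+//
+//    var b []byte
+//    v := map[string]interface{}{"name": "x", "count": 3}
+//    if err := codec.NewEncoderBytes(&b, &h).Encode(v); err != nil {
+//        panic(err)
+//    }
+//    var out map[string]interface{}
+//    if err := codec.NewDecoderBytes(b, &h).Decode(&out); err != nil {
+//        panic(err)
+//    }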
diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 0000000..1c0817a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2552 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// Some tagging information for error messages.
+const (
+ msgBadDesc = "unrecognized descriptor byte"
+ msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+const decDefSliceCap = 8
+const decDefChanCap = 64 // should be large, as cap cannot be expanded
+const decScratchByteArrayLen = cacheLineSize - 8
+
+var (
+ errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct"
+ errstrCannotDecodeIntoNil = "cannot decode into nil"
+
+ errmsgExpandSliceOverflow = "expand slice: slice overflow"
+ errmsgExpandSliceCannotChange = "expand slice: cannot change"
+
+ errDecoderNotInitialized = errors.New("Decoder not initialized")
+
+ errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read")
+ errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
+ errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown")
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+ unreadn1()
+
+ // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
+ // just return a view of the []byte being decoded from.
+ // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
+ readx(n int) []byte
+ readb([]byte)
+ readn1() uint8
+ numread() int // number of bytes read
+ track()
+ stopTrack() []byte
+
+ // skip will skip any byte that matches, and return the first non-matching byte
+ skip(accept *bitset256) (token byte)
+ // readTo will read any byte that matches, stopping once no-longer matching.
+ readTo(in []byte, accept *bitset256) (out []byte)
+ // readUntil will read, only stopping once it matches the 'stop' byte.
+ readUntil(in []byte, stop byte) (out []byte)
+}
+
+type decDriver interface {
+ // this will check if the next token is a break.
+ CheckBreak() bool
+ // Note: TryDecodeAsNil should be careful not to share any temporary []byte with
+ // the rest of the decDriver. This is because sometimes, we optimize by holding onto
+ // a transient []byte, and ensuring the only other call we make to the decDriver
+ // during that time is maybe a TryDecodeAsNil() call.
+ TryDecodeAsNil() bool
+ // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known.
+ ContainerType() (vt valueType)
+ // IsBuiltinType(rt uintptr) bool
+
+ // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+ // For maps and arrays, it will not do the decoding in-band, but will signal
+ // the decoder, so that is done later, by setting the decNaked.valueType field.
+ //
+ // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ // for extensions, DecodeNaked must read the tag and the []byte if it exists.
+ // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+ // that stores the subsequent value in-band, and complete reading the RawExt.
+ //
+ // extensions should also use readx to decode them, for efficiency.
+ // kInterface will extract the detached byte slice if it has to pass it outside its realm.
+ DecodeNaked()
+
+ // Deprecated: use DecodeInt64 and DecodeUint64 instead
+ // DecodeInt(bitsize uint8) (i int64)
+ // DecodeUint(bitsize uint8) (ui uint64)
+
+ DecodeInt64() (i int64)
+ DecodeUint64() (ui uint64)
+
+ DecodeFloat64() (f float64)
+ DecodeBool() (b bool)
+ // DecodeString can also decode symbols.
+ // It looks redundant as DecodeBytes is available.
+ // However, some codecs (e.g. binc) support symbols and can
+ // return a pre-stored string value, meaning that it can bypass
+ // the cost of []byte->string conversion.
+ DecodeString() (s string)
+ DecodeStringAsBytes() (v []byte)
+
+ // DecodeBytes may be called directly, without going through reflection.
+ // Consequently, it must be designed to handle possible nil.
+ DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
+ // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+ // decodeExt will decode into a *RawExt or into an extension.
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+ // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+
+ DecodeTime() (t time.Time)
+
+ ReadArrayStart() int
+ ReadArrayElem()
+ ReadArrayEnd()
+ ReadMapStart() int
+ ReadMapElemKey()
+ ReadMapElemValue()
+ ReadMapEnd()
+
+ reset()
+ uncacheRead()
+}
+
+type decDriverNoopContainerReader struct{}
+
+func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadArrayElem() {}
+func (x decDriverNoopContainerReader) ReadArrayEnd() {}
+func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadMapElemKey() {}
+func (x decDriverNoopContainerReader) ReadMapElemValue() {}
+func (x decDriverNoopContainerReader) ReadMapEnd() {}
+func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
+
+// func (x decNoSeparator) uncacheRead() {}
+
+// DecodeOptions captures configuration options during decode.
+type DecodeOptions struct {
+ // MapType specifies type to use during schema-less decoding of a map in the stream.
+ // If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true,
+ // else map[interface{}]interface{}.
+ MapType reflect.Type
+
+ // SliceType specifies type to use during schema-less decoding of an array in the stream.
+ // If nil (unset), we default to []interface{} for all formats.
+ SliceType reflect.Type
+
+ // MaxInitLen defines the maximum initial length that we "make" a collection
+ // (string, slice, map, chan). If 0 or negative, we default to a sensible value
+ // based on the size of an element in the collection.
+ //
+ // For example, when decoding, a stream may say that it has 2^64 elements.
+ // We should not automatically provision a slice of that size, as that could cause an out-of-memory crash.
+ // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+ MaxInitLen int
+
+ // ReaderBufferSize is the size of the buffer used when reading.
+ //
+ // if > 0, we use a smart buffer internally for performance purposes.
+ ReaderBufferSize int
+
+ // If ErrorIfNoField, return an error when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+
+ // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+ // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+ ErrorIfNoArrayExpand bool
+
+ // If SignedInteger, decode unsigned values in the stream into int64 (not uint64) during schema-less decoding.
+ SignedInteger bool
+
+ // MapValueReset controls how we decode into a map value.
+ //
+ // By default, we MAY retrieve the mapping for a key, and then decode into that.
+ // However, especially with big maps, that retrieval may be expensive and unnecessary
+ // if the stream already contains all that is necessary to recreate the value.
+ //
+ // If true, we will never retrieve the previous mapping,
+ // but rather decode into a new value and set that in the map.
+ //
+ // If false, we will retrieve the previous mapping if necessary e.g.
+ // the previous mapping is a pointer, or is a struct or array with pre-set state,
+ // or is an interface.
+ MapValueReset bool
+
+ // SliceElementReset: on decoding a slice, reset the element to a zero value first.
+ //
+ // concern: if the slice already contained some garbage, we will decode into that garbage.
+ SliceElementReset bool
+
+ // InterfaceReset controls how we decode into an interface.
+ //
+ // By default, when we see a field that is an interface{...},
+ // or a map with interface{...} value, we will attempt decoding into the
+ // "contained" value.
+ //
+ // However, this prevents us from reading a string into an interface{}
+ // that formerly contained a number.
+ //
+ // If true, we will decode into a new "blank" value, and set that in the interface.
+ // If false, we will decode into whatever is contained in the interface.
+ InterfaceReset bool
+
+ // InternString controls interning of strings during decoding.
+ //
+ // Some handles, e.g. json, typically will read map keys as strings.
+ // If the set of keys are finite, it may help reduce allocation to
+ // look them up from a map (than to allocate them afresh).
+ //
+ // Note: Handles will be smart when using the intern functionality;
+ // not every string should be interned.
+ // An excellent use-case for interning is struct field names,
+ // or map keys where key type is string.
+ InternString bool
+
+ // PreferArrayOverSlice controls whether to decode to an array or a slice.
+ //
+ // This only impacts decoding into a nil interface{}.
+ // Consequently, it has no effect on codecgen.
+ //
+ // *Note*: This only applies if using go1.5 and above,
+ // as it requires reflect.ArrayOf support which was absent before go1.5.
+ PreferArrayOverSlice bool
+
+ // DeleteOnNilMapValue controls how to decode a nil value in the stream.
+ //
+ // If true, we will delete the mapping of the key.
+ // Else, just set the mapping to the zero value of the type.
+ DeleteOnNilMapValue bool
+}
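+
+// A small configuration sketch (illustrative; it assumes, as in this package,
+// that DecodeOptions is embedded in a handle via BasicHandle, so the fields
+// are set directly on the handle value):
+//
+//    var h codec.CborHandle
+//    h.MapType = reflect.TypeOf(map[string]interface{}(nil))
+//    h.SliceType = reflect.TypeOf([]interface{}(nil))
+//    h.MaxInitLen = 1024     // cap initial allocations for length-prefixed collections
+//    h.ErrorIfNoField = true // error when a map key has no matching struct field
+//    h.SignedInteger = true  // schema-less unsigned values decode as int64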
+
+// ------------------------------------
+
+type bufioDecReader struct {
+ buf []byte
+ r io.Reader
+
+ c int // cursor
+ n int // num read
+ err error
+
+ tr []byte
+ trb bool
+ b [4]byte
+}
+
+func (z *bufioDecReader) reset(r io.Reader) {
+ z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+}
+
+func (z *bufioDecReader) Read(p []byte) (n int, err error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+ p0 := p
+ n = copy(p, z.buf[z.c:])
+ z.c += n
+ if z.c == len(z.buf) {
+ z.c = 0
+ }
+ z.n += n
+ if len(p) == n {
+ if z.c == 0 {
+ z.buf = z.buf[:1]
+ z.buf[0] = p[len(p)-1]
+ z.c = 1
+ }
+ if z.trb {
+ z.tr = append(z.tr, p0[:n]...)
+ }
+ return
+ }
+ p = p[n:]
+ var n2 int
+ // if we are here, then z.buf is all read
+ if len(p) > len(z.buf) {
+ n2, err = decReadFull(z.r, p)
+ n += n2
+ z.n += n2
+ z.err = err
+ // don't return EOF if some bytes were read. keep for next time.
+ if n > 0 && err == io.EOF {
+ err = nil
+ }
+ // always keep last byte in z.buf
+ z.buf = z.buf[:1]
+ z.buf[0] = p[len(p)-1]
+ z.c = 1
+ if z.trb {
+ z.tr = append(z.tr, p0[:n]...)
+ }
+ return
+ }
+ // z.c is now 0, and len(p) <= len(z.buf)
+ for len(p) > 0 && z.err == nil {
+ // println("len(p) loop starting ... ")
+ z.c = 0
+ z.buf = z.buf[0:cap(z.buf)]
+ n2, err = z.r.Read(z.buf)
+ if n2 > 0 {
+ if err == io.EOF {
+ err = nil
+ }
+ z.buf = z.buf[:n2]
+ n2 = copy(p, z.buf)
+ z.c = n2
+ n += n2
+ z.n += n2
+ p = p[n2:]
+ }
+ z.err = err
+ // println("... len(p) loop done")
+ }
+ if z.c == 0 {
+ z.buf = z.buf[:1]
+ z.buf[0] = p[len(p)-1]
+ z.c = 1
+ }
+ if z.trb {
+ z.tr = append(z.tr, p0[:n]...)
+ }
+ return
+}
+
+func (z *bufioDecReader) ReadByte() (b byte, err error) {
+ z.b[0] = 0
+ _, err = z.Read(z.b[:1])
+ b = z.b[0]
+ return
+}
+
+func (z *bufioDecReader) UnreadByte() (err error) {
+ if z.err != nil {
+ return z.err
+ }
+ if z.c > 0 {
+ z.c--
+ z.n--
+ if z.trb {
+ z.tr = z.tr[:len(z.tr)-1]
+ }
+ return
+ }
+ return errDecUnreadByteNothingToRead
+}
+
+func (z *bufioDecReader) numread() int {
+ return z.n
+}
+
+func (z *bufioDecReader) readx(n int) (bs []byte) {
+ if n <= 0 || z.err != nil {
+ return
+ }
+ if z.c+n <= len(z.buf) {
+ bs = z.buf[z.c : z.c+n]
+ z.n += n
+ z.c += n
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+ return
+ }
+ bs = make([]byte, n)
+ _, err := z.Read(bs)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+func (z *bufioDecReader) readb(bs []byte) {
+ _, err := z.Read(bs)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) {
+// b, err := z.ReadByte()
+// if err != nil {
+// if err == io.EOF {
+// eof = true
+// } else {
+// panic(err)
+// }
+// }
+// return
+// }
+
+func (z *bufioDecReader) readn1() (b uint8) {
+ b, err := z.ReadByte()
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) {
+ // flag: 1 (skip), 2 (readTo), 4 (readUntil)
+ if flag == 4 {
+ for i := z.c; i < len(z.buf); i++ {
+ if z.buf[i] == stop {
+ token = z.buf[i]
+ z.n = z.n + (i - z.c) - 1
+ i++
+ out = z.buf[z.c:i]
+ if z.trb {
+ z.tr = append(z.tr, z.buf[z.c:i]...)
+ }
+ z.c = i
+ return
+ }
+ }
+ } else {
+ for i := z.c; i < len(z.buf); i++ {
+ if !accept.isset(z.buf[i]) {
+ token = z.buf[i]
+ z.n = z.n + (i - z.c) - 1
+ if flag == 1 {
+ i++
+ } else {
+ out = z.buf[z.c:i]
+ }
+ if z.trb {
+ z.tr = append(z.tr, z.buf[z.c:i]...)
+ }
+ z.c = i
+ return
+ }
+ }
+ }
+ z.n += len(z.buf) - z.c
+ if flag != 1 {
+ out = append(in, z.buf[z.c:]...)
+ }
+ if z.trb {
+ z.tr = append(z.tr, z.buf[z.c:]...)
+ }
+ var n2 int
+ if z.err != nil {
+ return
+ }
+ for {
+ z.c = 0
+ z.buf = z.buf[0:cap(z.buf)]
+ n2, z.err = z.r.Read(z.buf)
+ if n2 > 0 && z.err != nil {
+ z.err = nil
+ }
+ z.buf = z.buf[:n2]
+ if flag == 4 {
+ for i := 0; i < n2; i++ {
+ if z.buf[i] == stop {
+ token = z.buf[i]
+ z.n += i - 1
+ i++
+ out = append(out, z.buf[z.c:i]...)
+ if z.trb {
+ z.tr = append(z.tr, z.buf[z.c:i]...)
+ }
+ z.c = i
+ return
+ }
+ }
+ } else {
+ for i := 0; i < n2; i++ {
+ if !accept.isset(z.buf[i]) {
+ token = z.buf[i]
+ z.n += i - 1
+ if flag == 1 {
+ i++
+ }
+ if flag != 1 {
+ out = append(out, z.buf[z.c:i]...)
+ }
+ if z.trb {
+ z.tr = append(z.tr, z.buf[z.c:i]...)
+ }
+ z.c = i
+ return
+ }
+ }
+ }
+ if flag != 1 {
+ out = append(out, z.buf[:n2]...)
+ }
+ z.n += n2
+ if z.err != nil {
+ return
+ }
+ if z.trb {
+ z.tr = append(z.tr, z.buf[:n2]...)
+ }
+ }
+}
+
+func (z *bufioDecReader) skip(accept *bitset256) (token byte) {
+ token, _ = z.search(nil, accept, 0, 1)
+ return
+}
+
+func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) {
+ _, out = z.search(in, accept, 0, 2)
+ return
+}
+
+func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) {
+ _, out = z.search(in, nil, stop, 4)
+ return
+}
+
+func (z *bufioDecReader) unreadn1() {
+ err := z.UnreadByte()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (z *bufioDecReader) track() {
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+ z.trb = true
+}
+
+func (z *bufioDecReader) stopTrack() (bs []byte) {
+ z.trb = false
+ return z.tr
+}
+
+// ioDecReader is a decReader that reads off an io.Reader.
+//
+// It also has a fallback implementation of ByteScanner if needed.
+type ioDecReader struct {
+ r io.Reader // the reader passed in
+
+ rr io.Reader
+ br io.ByteScanner
+
+ l byte // last byte
+ ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread
+ trb bool // tracking bytes turned on
+ _ bool
+ b [4]byte // tiny buffer for reading single bytes
+
+ x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc
+ n int // num read
+ tr []byte // tracking bytes read
+}
+
+func (z *ioDecReader) reset(r io.Reader) {
+ z.r = r
+ z.rr = r
+ z.l, z.ls, z.n, z.trb = 0, 0, 0, false
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+ var ok bool
+ if z.br, ok = r.(io.ByteScanner); !ok {
+ z.br = z
+ z.rr = z
+ }
+}
+
+func (z *ioDecReader) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return
+ }
+ var firstByte bool
+ if z.ls == 1 {
+ z.ls = 2
+ p[0] = z.l
+ if len(p) == 1 {
+ n = 1
+ return
+ }
+ firstByte = true
+ p = p[1:]
+ }
+ n, err = z.r.Read(p)
+ if n > 0 {
+ if err == io.EOF && n == len(p) {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ z.l = p[n-1]
+ z.ls = 2
+ }
+ if firstByte {
+ n++
+ }
+ return
+}
+
+func (z *ioDecReader) ReadByte() (c byte, err error) {
+ n, err := z.Read(z.b[:1])
+ if n == 1 {
+ c = z.b[0]
+ if err == io.EOF {
+ err = nil // read was successful, so postpone EOF (till next time)
+ }
+ }
+ return
+}
+
+func (z *ioDecReader) UnreadByte() (err error) {
+ switch z.ls {
+ case 2:
+ z.ls = 1
+ case 0:
+ err = errDecUnreadByteNothingToRead
+ case 1:
+ err = errDecUnreadByteLastByteNotRead
+ default:
+ err = errDecUnreadByteUnknown
+ }
+ return
+}
+
+func (z *ioDecReader) numread() int {
+ return z.n
+}
+
+func (z *ioDecReader) readx(n int) (bs []byte) {
+ if n <= 0 {
+ return
+ }
+ if n < len(z.x) {
+ bs = z.x[:n]
+ } else {
+ bs = make([]byte, n)
+ }
+ if _, err := decReadFull(z.rr, bs); err != nil {
+ panic(err)
+ }
+ z.n += len(bs)
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+ return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+ // if len(bs) == 0 {
+ // return
+ // }
+ if _, err := decReadFull(z.rr, bs); err != nil {
+ panic(err)
+ }
+ z.n += len(bs)
+ if z.trb {
+ z.tr = append(z.tr, bs...)
+ }
+}
+
+func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
+ b, err := z.br.ReadByte()
+ if err == nil {
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ } else if err == io.EOF {
+ eof = true
+ } else {
+ panic(err)
+ }
+ return
+}
+
+func (z *ioDecReader) readn1() (b uint8) {
+ var err error
+ if b, err = z.br.ReadByte(); err == nil {
+ z.n++
+ if z.trb {
+ z.tr = append(z.tr, b)
+ }
+ return
+ }
+ panic(err)
+}
+
+func (z *ioDecReader) skip(accept *bitset256) (token byte) {
+ for {
+ var eof bool
+ token, eof = z.readn1eof()
+ if eof {
+ return
+ }
+ if accept.isset(token) {
+ continue
+ }
+ return
+ }
+}
+
+func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) {
+ out = in
+ for {
+ token, eof := z.readn1eof()
+ if eof {
+ return
+ }
+ if accept.isset(token) {
+ out = append(out, token)
+ } else {
+ z.unreadn1()
+ return
+ }
+ }
+}
+
+func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) {
+ out = in
+ for {
+ token, eof := z.readn1eof()
+ if eof {
+ panic(io.EOF)
+ }
+ out = append(out, token)
+ if token == stop {
+ return
+ }
+ }
+}
+
+func (z *ioDecReader) unreadn1() {
+ err := z.br.UnreadByte()
+ if err != nil {
+ panic(err)
+ }
+ z.n--
+ if z.trb {
+ if l := len(z.tr) - 1; l >= 0 {
+ z.tr = z.tr[:l]
+ }
+ }
+}
+
+func (z *ioDecReader) track() {
+ if z.tr != nil {
+ z.tr = z.tr[:0]
+ }
+ z.trb = true
+}
+
+func (z *ioDecReader) stopTrack() (bs []byte) {
+ z.trb = false
+ return z.tr
+}
+
+// ------------------------------------
+
+var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read")
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+ b []byte // data
+ c int // cursor
+ a int // available
+ t int // track start
+}
+
+func (z *bytesDecReader) reset(in []byte) {
+ z.b = in
+ z.a = len(in)
+ z.c = 0
+ z.t = 0
+}
+
+func (z *bytesDecReader) numread() int {
+ return z.c
+}
+
+func (z *bytesDecReader) unreadn1() {
+ if z.c == 0 || len(z.b) == 0 {
+ panic(errBytesDecReaderCannotUnread)
+ }
+ z.c--
+ z.a++
+ return
+}
+
+func (z *bytesDecReader) readx(n int) (bs []byte) {
+ // slicing from a non-constant start position is more expensive,
+ // as more computation is required to decipher the pointer start position.
+ // However, we do it only once, and it's better than reslicing both z.b and return value.
+
+ if n <= 0 {
+ } else if z.a == 0 {
+ panic(io.EOF)
+ } else if n > z.a {
+ panic(io.ErrUnexpectedEOF)
+ } else {
+ c0 := z.c
+ z.c = c0 + n
+ z.a = z.a - n
+ bs = z.b[c0:z.c]
+ }
+ return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+ copy(bs, z.readx(len(bs)))
+}
+
+func (z *bytesDecReader) readn1() (v uint8) {
+ if z.a == 0 {
+ panic(io.EOF)
+ }
+ v = z.b[z.c]
+ z.c++
+ z.a--
+ return
+}
+
+// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
+// if z.a == 0 {
+// eof = true
+// return
+// }
+// v = z.b[z.c]
+// z.c++
+// z.a--
+// return
+// }
+
+func (z *bytesDecReader) skip(accept *bitset256) (token byte) {
+ if z.a == 0 {
+ return
+ }
+ blen := len(z.b)
+ for i := z.c; i < blen; i++ {
+ if !accept.isset(z.b[i]) {
+ token = z.b[i]
+ i++
+ z.a -= (i - z.c)
+ z.c = i
+ return
+ }
+ }
+ z.a, z.c = 0, blen
+ return
+}
+
+func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) {
+ if z.a == 0 {
+ return
+ }
+ blen := len(z.b)
+ for i := z.c; i < blen; i++ {
+ if !accept.isset(z.b[i]) {
+ out = z.b[z.c:i]
+ z.a -= (i - z.c)
+ z.c = i
+ return
+ }
+ }
+ out = z.b[z.c:]
+ z.a, z.c = 0, blen
+ return
+}
+
+func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) {
+ if z.a == 0 {
+ panic(io.EOF)
+ }
+ blen := len(z.b)
+ for i := z.c; i < blen; i++ {
+ if z.b[i] == stop {
+ i++
+ out = z.b[z.c:i]
+ z.a -= (i - z.c)
+ z.c = i
+ return
+ }
+ }
+ z.a, z.c = 0, blen
+ panic(io.EOF)
+}
+
+func (z *bytesDecReader) track() {
+ z.t = z.c
+}
+
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+ return z.b[z.t:z.c]
+}
+
+// ----------------------------------------
+
+// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) {
+// d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv))
+// }
+
+func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+ d.d.DecodeExt(rv2i(rv), 0, nil)
+}
+
+func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) {
+ d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn)
+}
+
+func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
+ rv2i(rv).(Selfer).CodecDecodeSelf(d)
+}
+
+func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
+ bm := rv2i(rv).(encoding.BinaryUnmarshaler)
+ xbs := d.d.DecodeBytes(nil, true)
+ if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
+ tm := rv2i(rv).(encoding.TextUnmarshaler)
+ fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
+ tm := rv2i(rv).(jsonUnmarshaler)
+ // bs := d.d.DecodeBytes(d.b[:], true, true)
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
+ d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+// var kIntfCtr uint64
+
+func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
+ // nil interface:
+ // use some heuristics to decode it appropriately
+ // based on the detected next value in the stream.
+ n := d.naked()
+ d.d.DecodeNaked()
+ if n.v == valueTypeNil {
+ return
+ }
+ // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+ if f.ti.numMeth > 0 {
+ d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+ return
+ }
+ // var useRvn bool
+ switch n.v {
+ case valueTypeMap:
+ // if json, default to a map type with string keys
+ mtid := d.mtid
+ if mtid == 0 {
+ if d.jsms {
+ mtid = mapStrIntfTypId
+ } else {
+ mtid = mapIntfIntfTypId
+ }
+ }
+ if mtid == mapIntfIntfTypId {
+ n.initContainers()
+ if n.lm < arrayCacheLen {
+ n.ma[n.lm] = nil
+ rvn = n.rma[n.lm]
+ n.lm++
+ d.decode(&n.ma[n.lm-1])
+ n.lm--
+ } else {
+ var v2 map[interface{}]interface{}
+ d.decode(&v2)
+ rvn = reflect.ValueOf(&v2).Elem()
+ }
+ } else if mtid == mapStrIntfTypId { // for json performance
+ n.initContainers()
+ if n.ln < arrayCacheLen {
+ n.na[n.ln] = nil
+ rvn = n.rna[n.ln]
+ n.ln++
+ d.decode(&n.na[n.ln-1])
+ n.ln--
+ } else {
+ var v2 map[string]interface{}
+ d.decode(&v2)
+ rvn = reflect.ValueOf(&v2).Elem()
+ }
+ } else {
+ if d.mtr {
+ rvn = reflect.New(d.h.MapType)
+ d.decode(rv2i(rvn))
+ rvn = rvn.Elem()
+ } else {
+ rvn = reflect.New(d.h.MapType).Elem()
+ d.decodeValue(rvn, nil, true)
+ }
+ }
+ case valueTypeArray:
+ if d.stid == 0 || d.stid == intfSliceTypId {
+ n.initContainers()
+ if n.ls < arrayCacheLen {
+ n.sa[n.ls] = nil
+ rvn = n.rsa[n.ls]
+ n.ls++
+ d.decode(&n.sa[n.ls-1])
+ n.ls--
+ } else {
+ var v2 []interface{}
+ d.decode(&v2)
+ rvn = reflect.ValueOf(&v2).Elem()
+ }
+ if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
+ rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem()
+ reflect.Copy(rvn2, rvn)
+ rvn = rvn2
+ }
+ } else {
+ if d.str {
+ rvn = reflect.New(d.h.SliceType)
+ d.decode(rv2i(rvn))
+ rvn = rvn.Elem()
+ } else {
+ rvn = reflect.New(d.h.SliceType).Elem()
+ d.decodeValue(rvn, nil, true)
+ }
+ }
+ case valueTypeExt:
+ var v interface{}
+ tag, bytes := n.u, n.l // calling decode below might taint the values
+ if bytes == nil {
+ n.initContainers()
+ if n.li < arrayCacheLen {
+ n.ia[n.li] = nil
+ n.li++
+ d.decode(&n.ia[n.li-1])
+ // v = *(&n.ia[l])
+ n.li--
+ v = n.ia[n.li]
+ n.ia[n.li] = nil
+ } else {
+ d.decode(&v)
+ }
+ }
+ bfn := d.h.getExtForTag(tag)
+ if bfn == nil {
+ var re RawExt
+ re.Tag = tag
+ re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+ re.Value = v
+ rvn = reflect.ValueOf(&re).Elem()
+ } else {
+ rvnA := reflect.New(bfn.rt)
+ if bytes != nil {
+ bfn.ext.ReadExt(rv2i(rvnA), bytes)
+ } else {
+ bfn.ext.UpdateExt(rv2i(rvnA), v)
+ }
+ rvn = rvnA.Elem()
+ }
+ case valueTypeNil:
+ // no-op
+ case valueTypeInt:
+ rvn = n.ri
+ case valueTypeUint:
+ rvn = n.ru
+ case valueTypeFloat:
+ rvn = n.rf
+ case valueTypeBool:
+ rvn = n.rb
+ case valueTypeString, valueTypeSymbol:
+ rvn = n.rs
+ case valueTypeBytes:
+ rvn = n.rl
+ case valueTypeTime:
+ rvn = n.rt
+ default:
+ panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v)
+ }
+ return
+}
+
+func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
+ // Note:
+ // A consequence of how kInterface works, is that
+ // if an interface already contains something, we try
+ // to decode into what was there before.
+ // We do not replace with a generic value (as got from decodeNaked).
+
+ // every interface passed here MUST be settable.
+ var rvn reflect.Value
+ if rv.IsNil() || d.h.InterfaceReset {
+ // check if mapping to a type: if so, initialize it and move on
+ rvn = d.h.intf2impl(f.ti.rtid)
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ } else {
+ rvn = d.kInterfaceNaked(f)
+ if rvn.IsValid() {
+ rv.Set(rvn)
+ } else if d.h.InterfaceReset {
+ // reset to zero value based on current type in there.
+ rv.Set(reflect.Zero(rv.Elem().Type()))
+ }
+ return
+ }
+ } else {
+ // now we have a non-nil interface value, meaning it contains a type
+ rvn = rv.Elem()
+ }
+ if d.d.TryDecodeAsNil() {
+ rv.Set(reflect.Zero(rvn.Type()))
+ return
+ }
+
+ // Note: interface{} is settable, but underlying type may not be.
+ // Consequently, we MAY have to create a decodable value out of the underlying value,
+ // decode into it, and reset the interface itself.
+ // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type())
+
+ rvn2, canDecode := isDecodeable(rvn)
+ if canDecode {
+ d.decodeValue(rvn2, nil, true)
+ return
+ }
+
+ rvn2 = reflect.New(rvn.Type()).Elem()
+ rvn2.Set(rvn)
+ d.decodeValue(rvn2, nil, true)
+ rv.Set(rvn2)
+}
+
+func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) {
+ // use if-else-if, not switch (which compiles to binary-search)
+ // since keyType is typically valueTypeString, branch prediction is pretty good.
+
+ if keyType == valueTypeString {
+ rvkencname = dd.DecodeStringAsBytes()
+ } else if keyType == valueTypeInt {
+ rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10)
+ } else if keyType == valueTypeUint {
+ rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10)
+ } else if keyType == valueTypeFloat {
+ rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64)
+ } else {
+ rvkencname = dd.DecodeStringAsBytes()
+ }
+ return rvkencname
+}
+
+func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
+ fti := f.ti
+ dd := d.d
+ elemsep := d.esep
+ sfn := structFieldNode{v: rv, update: true}
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeMap {
+ containerLen := dd.ReadMapStart()
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return
+ }
+ tisfi := fti.sfiSort
+ hasLen := containerLen >= 0
+
+ var rvkencname []byte
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if elemsep {
+ dd.ReadMapElemKey()
+ }
+ rvkencname = decStructFieldKey(dd, fti.keyType, &d.b)
+ if elemsep {
+ dd.ReadMapElemValue()
+ }
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ si := tisfi[k]
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(sfn.field(si), nil, true)
+ }
+ } else {
+ d.structFieldNotFound(-1, stringView(rvkencname))
+ }
+ // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop
+ }
+ dd.ReadMapEnd()
+ } else if ctyp == valueTypeArray {
+ containerLen := dd.ReadArrayStart()
+ if containerLen == 0 {
+ dd.ReadArrayEnd()
+ return
+ }
+ // Not much gain from doing it two ways for array.
+ // Arrays are not used as much for structs.
+ hasLen := containerLen >= 0
+ for j, si := range fti.sfiSrc {
+ if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) {
+ break
+ }
+ if elemsep {
+ dd.ReadArrayElem()
+ }
+ if dd.TryDecodeAsNil() {
+ si.setToZeroValue(rv)
+ } else {
+ d.decodeValue(sfn.field(si), nil, true)
+ }
+ }
+ if containerLen > len(fti.sfiSrc) {
+ // read remaining values and throw away
+ for j := len(fti.sfiSrc); j < containerLen; j++ {
+ if elemsep {
+ dd.ReadArrayElem()
+ }
+ d.structFieldNotFound(j, "")
+ }
+ }
+ dd.ReadArrayEnd()
+ } else {
+ d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct)
+ return
+ }
+}
+
+func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
+ // A slice can be set from a map or array in stream.
+ // This way, the order can be kept (as order is lost with map).
+ ti := f.ti
+ if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 {
+ d.errorf("receive-only channel cannot be decoded")
+ }
+ dd := d.d
+ rtelem0 := ti.elem
+ ctyp := dd.ContainerType()
+ if ctyp == valueTypeBytes || ctyp == valueTypeString {
+ // you can only decode bytes or string in the stream into a slice or array of bytes
+ if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
+ d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt)
+ }
+ if f.seq == seqTypeChan {
+ bs2 := dd.DecodeBytes(nil, true)
+ irv := rv2i(rv)
+ ch, ok := irv.(chan<- byte)
+ if !ok {
+ ch = irv.(chan byte)
+ }
+ for _, b := range bs2 {
+ ch <- b
+ }
+ } else {
+ rvbs := rv.Bytes()
+ bs2 := dd.DecodeBytes(rvbs, false)
+ // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
+ if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) {
+ if rv.CanSet() {
+ rv.SetBytes(bs2)
+ } else if len(rvbs) > 0 && len(bs2) > 0 {
+ copy(rvbs, bs2)
+ }
+ }
+ }
+ return
+ }
+
+ // array := f.seq == seqTypeChan
+
+ slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
+
+ // an array can never return a nil slice. so no need to check f.array here.
+ if containerLenS == 0 {
+ if rv.CanSet() {
+ if f.seq == seqTypeSlice {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
+ } else {
+ rv.SetLen(0)
+ }
+ } else if f.seq == seqTypeChan {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeChan(ti.rt, 0))
+ }
+ }
+ }
+ slh.End()
+ return
+ }
+
+ rtelem0Size := int(rtelem0.Size())
+ rtElem0Kind := rtelem0.Kind()
+ rtelem0Mut := !isImmutableKind(rtElem0Kind)
+ rtelem := rtelem0
+ rtelemkind := rtelem.Kind()
+ for rtelemkind == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ rtelemkind = rtelem.Kind()
+ }
+
+ var fn *codecFn
+
+ var rvCanset = rv.CanSet()
+ var rvChanged bool
+ var rv0 = rv
+ var rv9 reflect.Value
+
+ rvlen := rv.Len()
+ rvcap := rv.Cap()
+ hasLen := containerLenS > 0
+ if hasLen && f.seq == seqTypeSlice {
+ if containerLenS > rvcap {
+ oldRvlenGtZero := rvlen > 0
+ rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size()))
+ if rvlen <= rvcap {
+ if rvCanset {
+ rv.SetLen(rvlen)
+ }
+ } else if rvCanset {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvcap = rvlen
+ rvChanged = true
+ } else {
+ d.errorf("cannot decode into non-settable slice")
+ }
+ if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
+ reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
+ }
+ } else if containerLenS != rvlen {
+ rvlen = containerLenS
+ if rvCanset {
+ rv.SetLen(rvlen)
+ }
+ // else {
+ // rv = rv.Slice(0, rvlen)
+ // rvChanged = true
+ // d.errorf("cannot decode into non-settable slice")
+ // }
+ }
+ }
+
+ // consider creating new element once, and just decoding into it.
+ var rtelem0Zero reflect.Value
+ var rtelem0ZeroValid bool
+ var decodeAsNil bool
+ var j int
+ d.cfer()
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() {
+ if hasLen {
+ rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size)
+ } else if f.seq == seqTypeSlice {
+ rvlen = decDefSliceCap
+ } else {
+ rvlen = decDefChanCap
+ }
+ if rvCanset {
+ if f.seq == seqTypeSlice {
+ rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
+ rvChanged = true
+ } else { // chan
+ // xdebugf(">>>>>> haslen = %v, make chan of type '%v' with length: %v", hasLen, ti.rt, rvlen)
+ rv = reflect.MakeChan(ti.rt, rvlen)
+ rvChanged = true
+ }
+ } else {
+ d.errorf("cannot decode into non-settable slice")
+ }
+ }
+ slh.ElemContainerState(j)
+ decodeAsNil = dd.TryDecodeAsNil()
+ if f.seq == seqTypeChan {
+ if decodeAsNil {
+ rv.Send(reflect.Zero(rtelem0))
+ continue
+ }
+ if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) {
+ rv9 = reflect.New(rtelem0).Elem()
+ }
+ if fn == nil {
+ fn = d.cf.get(rtelem, true, true)
+ }
+ d.decodeValue(rv9, fn, true)
+ // xdebugf(">>>> rv9 sent on %v during decode: %v, with len=%v, cap=%v", rv.Type(), rv9, rv.Len(), rv.Cap())
+ rv.Send(rv9)
+ } else {
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= rvlen {
+ if f.seq == seqTypeArray {
+ d.arrayCannotExpand(rvlen, j+1)
+ decodeIntoBlank = true
+ } else { // if f.seq == seqTypeSlice
+ // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs
+ var rvcap2 int
+ var rvErrmsg2 string
+ rv9, rvcap2, rvChanged, rvErrmsg2 =
+ expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap)
+ if rvErrmsg2 != "" {
+ d.errorf(rvErrmsg2)
+ }
+ rvlen++
+ if rvChanged {
+ rv = rv9
+ rvcap = rvcap2
+ }
+ }
+ }
+ if decodeIntoBlank {
+ if !decodeAsNil {
+ d.swallow()
+ }
+ } else {
+ rv9 = rv.Index(j)
+ if d.h.SliceElementReset || decodeAsNil {
+ if !rtelem0ZeroValid {
+ rtelem0ZeroValid = true
+ rtelem0Zero = reflect.Zero(rtelem0)
+ }
+ rv9.Set(rtelem0Zero)
+ }
+ if decodeAsNil {
+ continue
+ }
+
+ if fn == nil {
+ fn = d.cf.get(rtelem, true, true)
+ }
+ d.decodeValue(rv9, fn, true)
+ }
+ }
+ }
+ if f.seq == seqTypeSlice {
+ if j < rvlen {
+ if rv.CanSet() {
+ rv.SetLen(j)
+ } else if rvCanset {
+ rv = rv.Slice(0, j)
+ rvChanged = true
+ } // else { d.errorf("kSlice: cannot change non-settable slice") }
+ rvlen = j
+ } else if j == 0 && rv.IsNil() {
+ if rvCanset {
+ rv = reflect.MakeSlice(ti.rt, 0, 0)
+ rvChanged = true
+ } // else { d.errorf("kSlice: cannot change non-settable slice") }
+ }
+ }
+ slh.End()
+
+ if rvChanged { // infers rvCanset=true, so it can be reset
+ rv0.Set(rv)
+ }
+}
+
+// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) {
+// // d.decodeValueFn(rv.Slice(0, rv.Len()))
+// f.kSlice(rv.Slice(0, rv.Len()))
+// }
+
+func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
+ dd := d.d
+ containerLen := dd.ReadMapStart()
+ elemsep := d.esep
+ ti := f.ti
+ if rv.IsNil() {
+ rv.Set(makeMapReflect(ti.rt, containerLen))
+ }
+
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return
+ }
+
+ ktype, vtype := ti.key, ti.elem
+ ktypeId := rt2id(ktype)
+ vtypeKind := vtype.Kind()
+
+ var keyFn, valFn *codecFn
+ var ktypeLo, vtypeLo reflect.Type
+
+ for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() {
+ }
+
+ for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
+ }
+
+ var mapGet, mapSet bool
+ rvvImmut := isImmutableKind(vtypeKind)
+ if !d.h.MapValueReset {
+ // if pointer, mapGet = true
+ // if interface, mapGet = true if !DecodeNakedAlways (else false)
+ // if builtin, mapGet = false
+ // else mapGet = true
+ if vtypeKind == reflect.Ptr {
+ mapGet = true
+ } else if vtypeKind == reflect.Interface {
+ if !d.h.InterfaceReset {
+ mapGet = true
+ }
+ } else if !rvvImmut {
+ mapGet = true
+ }
+ }
+
+ var rvk, rvkp, rvv, rvz reflect.Value
+ rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk.
+ ktypeIsString := ktypeId == stringTypId
+ ktypeIsIntf := ktypeId == intfTypId
+ hasLen := containerLen > 0
+ var kstrbs []byte
+ d.cfer()
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if rvkMut || !rvkp.IsValid() {
+ rvkp = reflect.New(ktype)
+ rvk = rvkp.Elem()
+ }
+ if elemsep {
+ dd.ReadMapElemKey()
+ }
+ if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block
+ // Previously, if a nil key, we just ignored the mapped value and continued.
+ // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil}
+ // to be an empty map.
+ // Instead, we treat a nil key as the zero value of the type.
+ rvk.Set(reflect.Zero(ktype))
+ } else if ktypeIsString {
+ kstrbs = dd.DecodeStringAsBytes()
+ rvk.SetString(stringView(kstrbs))
+ // NOTE: if doing an insert, you MUST use a real string (not stringview)
+ } else {
+ if keyFn == nil {
+ keyFn = d.cf.get(ktypeLo, true, true)
+ }
+ d.decodeValue(rvk, keyFn, true)
+ }
+ // special case if a byte array.
+ if ktypeIsIntf {
+ if rvk2 := rvk.Elem(); rvk2.IsValid() {
+ if rvk2.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk2.Bytes()))
+ } else {
+ rvk = rvk2
+ }
+ }
+ }
+
+ if elemsep {
+ dd.ReadMapElemValue()
+ }
+
+ // Brittle, but OK per TryDecodeAsNil() contract.
+ // i.e. TryDecodeAsNil never shares slices with other decDriver procedures
+ if dd.TryDecodeAsNil() {
+ if ktypeIsString {
+ rvk.SetString(d.string(kstrbs))
+ }
+ if d.h.DeleteOnNilMapValue {
+ rv.SetMapIndex(rvk, reflect.Value{})
+ } else {
+ rv.SetMapIndex(rvk, reflect.Zero(vtype))
+ }
+ continue
+ }
+
+ mapSet = true // set to false if we do a get and it's a non-nil pointer
+ if mapGet {
+ // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
+ rvv = rv.MapIndex(rvk)
+ if !rvv.IsValid() {
+ rvv = reflect.New(vtype).Elem()
+ } else if vtypeKind == reflect.Ptr {
+ if rvv.IsNil() {
+ rvv = reflect.New(vtype).Elem()
+ } else {
+ mapSet = false
+ }
+ } else if vtypeKind == reflect.Interface {
+ // not addressable, and thus not settable.
+ // we MUST create a settable/addressable variant
+ rvv2 := reflect.New(rvv.Type()).Elem()
+ if !rvv.IsNil() {
+ rvv2.Set(rvv)
+ }
+ rvv = rvv2
+ }
+ // else it is ~mutable, and we can just decode into it directly
+ } else if rvvImmut {
+ if !rvz.IsValid() {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ } else {
+ rvv = reflect.New(vtype).Elem()
+ }
+
+ // We MUST be done with the stringview of the key, before decoding the value
+ // so that we don't bastardize the reused byte array.
+ if mapSet && ktypeIsString {
+ rvk.SetString(d.string(kstrbs))
+ }
+ if valFn == nil {
+ valFn = d.cf.get(vtypeLo, true, true)
+ }
+ d.decodeValue(rvv, valFn, true)
+ // d.decodeValueFn(rvv, valFn)
+ if mapSet {
+ rv.SetMapIndex(rvk, rvv)
+ }
+ // if ktypeIsString {
+ // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop
+ // }
+ }
+
+ dd.ReadMapEnd()
+}
+
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+
+type decNakedContainers struct {
+ // array/stacks for reducing allocation
+ // keep arrays at the bottom? Chance is that they are not used much.
+ ia [arrayCacheLen]interface{}
+ ma [arrayCacheLen]map[interface{}]interface{}
+ na [arrayCacheLen]map[string]interface{}
+ sa [arrayCacheLen][]interface{}
+
+ // ria [arrayCacheLen]reflect.Value // not needed, as we decode directly into &ia[n]
+ rma, rna, rsa [arrayCacheLen]reflect.Value // reflect.Value mapping to above
+}
+
+func (n *decNakedContainers) init() {
+ for i := 0; i < arrayCacheLen; i++ {
+ // n.ria[i] = reflect.ValueOf(&(n.ia[i])).Elem()
+ n.rma[i] = reflect.ValueOf(&(n.ma[i])).Elem()
+ n.rna[i] = reflect.ValueOf(&(n.na[i])).Elem()
+ n.rsa[i] = reflect.ValueOf(&(n.sa[i])).Elem()
+ }
+}
+
+type decNaked struct {
+ // r RawExt // used for RawExt, uint, []byte.
+
+ // primitives below
+ u uint64
+ i int64
+ f float64
+ l []byte
+ s string
+
+ // ---- cpu cache line boundary?
+ t time.Time
+ b bool
+
+ // state
+ v valueType
+ li, lm, ln, ls int8
+ inited bool
+
+ *decNakedContainers
+
+ ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
+
+ // _ [6]uint64 // padding // no padding - rt goes into next cache line
+}
+
+func (n *decNaked) init() {
+ if n.inited {
+ return
+ }
+ n.ru = reflect.ValueOf(&n.u).Elem()
+ n.ri = reflect.ValueOf(&n.i).Elem()
+ n.rf = reflect.ValueOf(&n.f).Elem()
+ n.rl = reflect.ValueOf(&n.l).Elem()
+ n.rs = reflect.ValueOf(&n.s).Elem()
+ n.rt = reflect.ValueOf(&n.t).Elem()
+ n.rb = reflect.ValueOf(&n.b).Elem()
+
+ n.inited = true
+ // n.rr[] = reflect.ValueOf(&n.)
+}
+
+func (n *decNaked) initContainers() {
+ if n.decNakedContainers == nil {
+ n.decNakedContainers = new(decNakedContainers)
+ n.decNakedContainers.init()
+ }
+}
+
+func (n *decNaked) reset() {
+ if n == nil {
+ return
+ }
+ n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0
+}
+
+type rtid2rv struct {
+ rtid uintptr
+ rv reflect.Value
+}
+
+// --------------
+
+type decReaderSwitch struct {
+ rb bytesDecReader
+ // ---- cpu cache line boundary?
+ ri *ioDecReader
+ mtr, str bool // whether maptype or slicetype are known types
+
+ be bool // is binary encoding
+ bytes bool // is bytes reader
+ js bool // is json handle
+ jsms bool // is json handle, and MapKeyAsString
+ esep bool // has elem separators
+}
+
+// TODO: Uncomment after mid-stack inlining enabled in go 1.11
+//
+// func (z *decReaderSwitch) unreadn1() {
+// if z.bytes {
+// z.rb.unreadn1()
+// } else {
+// z.ri.unreadn1()
+// }
+// }
+// func (z *decReaderSwitch) readx(n int) []byte {
+// if z.bytes {
+// return z.rb.readx(n)
+// }
+// return z.ri.readx(n)
+// }
+// func (z *decReaderSwitch) readb(s []byte) {
+// if z.bytes {
+// z.rb.readb(s)
+// } else {
+// z.ri.readb(s)
+// }
+// }
+// func (z *decReaderSwitch) readn1() uint8 {
+// if z.bytes {
+// return z.rb.readn1()
+// }
+// return z.ri.readn1()
+// }
+// func (z *decReaderSwitch) numread() int {
+// if z.bytes {
+// return z.rb.numread()
+// }
+// return z.ri.numread()
+// }
+// func (z *decReaderSwitch) track() {
+// if z.bytes {
+// z.rb.track()
+// } else {
+// z.ri.track()
+// }
+// }
+// func (z *decReaderSwitch) stopTrack() []byte {
+// if z.bytes {
+// return z.rb.stopTrack()
+// }
+// return z.ri.stopTrack()
+// }
+// func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
+// if z.bytes {
+// return z.rb.skip(accept)
+// }
+// return z.ri.skip(accept)
+// }
+// func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
+// if z.bytes {
+// return z.rb.readTo(in, accept)
+// }
+// return z.ri.readTo(in, accept)
+// }
+// func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) {
+// if z.bytes {
+// return z.rb.readUntil(in, stop)
+// }
+// return z.ri.readUntil(in, stop)
+// }
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+ panicHdl
+ // hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+ // Try to put things that go together to fit within a cache line (8 words).
+
+ d decDriver
+ // NOTE: Decoder shouldn't call its read methods,
+ // as the handler MAY need to do some coordination.
+ r decReader
+ h *BasicHandle
+ bi *bufioDecReader
+ // cache the mapTypeId and sliceTypeId for faster comparisons
+ mtid uintptr
+ stid uintptr
+
+ // ---- cpu cache line boundary?
+ decReaderSwitch
+
+ // ---- cpu cache line boundary?
+ codecFnPooler
+ // cr containerStateRecv
+ n *decNaked
+ nsp *sync.Pool
+ err error
+
+ // ---- cpu cache line boundary?
+ b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxDecDrivers
+ is map[string]string // used for interning strings
+
+ // padding - false sharing help // modify 232 if Decoder struct changes.
+ // _ [cacheLineSize - 232%cacheLineSize]byte
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.Reset(r)
+ return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.ResetBytes(in)
+ return d
+}
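+
+// A minimal usage sketch for the two constructors above (illustrative only;
+// JsonHandle is one of the handles defined in this package, and the input
+// bytes shown are assumed sample data):
+//
+//	var h JsonHandle
+//	dec := NewDecoderBytes([]byte(`{"count": 3}`), &h)
+//	var v map[string]interface{}
+//	if err := dec.Decode(&v); err != nil {
+//		// handle the decode error
+//	}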
+
+var defaultDecNaked decNaked
+
+func newDecoder(h Handle) *Decoder {
+ d := &Decoder{h: h.getBasicHandle(), err: errDecoderNotInitialized}
+ d.hh = h
+ d.be = h.isBinary()
+ // NOTE: do not initialize d.n here. It is lazily initialized in d.naked()
+ var jh *JsonHandle
+ jh, d.js = h.(*JsonHandle)
+ if d.js {
+ d.jsms = jh.MapKeyAsString
+ }
+ d.esep = d.hh.hasElemSeparators()
+ if d.h.InternString {
+ d.is = make(map[string]string, 32)
+ }
+ d.d = h.newDecDriver(d)
+ // d.cr, _ = d.d.(containerStateRecv)
+ return d
+}
+
+func (d *Decoder) resetCommon() {
+ d.n.reset()
+ d.d.reset()
+ d.err = nil
+ // reset all things which were cached from the Handle, but could change
+ d.mtid, d.stid = 0, 0
+ d.mtr, d.str = false, false
+ if d.h.MapType != nil {
+ d.mtid = rt2id(d.h.MapType)
+ d.mtr = fastpathAV.index(d.mtid) != -1
+ }
+ if d.h.SliceType != nil {
+ d.stid = rt2id(d.h.SliceType)
+ d.str = fastpathAV.index(d.stid) != -1
+ }
+}
+
+// Reset the Decoder with a new Reader to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) Reset(r io.Reader) {
+ if r == nil {
+ return
+ }
+ if d.bi == nil {
+ d.bi = new(bufioDecReader)
+ }
+ d.bytes = false
+ if d.h.ReaderBufferSize > 0 {
+ d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize)
+ d.bi.reset(r)
+ d.r = d.bi
+ } else {
+ // d.ri.x = &d.b
+ // d.s = d.sa[:0]
+ if d.ri == nil {
+ d.ri = new(ioDecReader)
+ }
+ d.ri.reset(r)
+ d.r = d.ri
+ }
+ d.resetCommon()
+}
+
+// ResetBytes resets the Decoder with a new []byte to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) ResetBytes(in []byte) {
+ if in == nil {
+ return
+ }
+ d.bytes = true
+ d.rb.reset(in)
+ d.r = &d.rb
+ d.resetCommon()
+}
+
+// naked must be called before each call to .DecodeNaked,
+// as that call will use it.
+func (d *Decoder) naked() *decNaked {
+ if d.n == nil {
+ // consider one of:
+ // - get from sync.Pool (if GC is frequent, there's no value here)
+ // - new alloc (safest. only init'ed if a naked decode will be done)
+ // - field in Decoder (makes the Decoder struct very big)
+ // To support using a decoder where a DecodeNaked is not needed,
+ // we prefer the first two options (a sync.Pool or a fresh allocation) over a field in Decoder.
+ // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool
+ // d.n.init()
+ var v interface{}
+ d.nsp, v = pool.decNaked()
+ d.n = v.(*decNaked)
+ }
+ return d.n
+}
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+// // Decoding into a non-nil typed value
+// var f float32
+// err = codec.NewDecoder(r, handle).Decode(&f)
+//
+// // Decoding into nil interface
+// var v interface{}
+// dec := codec.NewDecoder(r, handle)
+// err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+// - Numbers are decoded as float64, int64 or uint64.
+// - Other values are decoded appropriately depending on the type:
+// bool, string, []byte, time.Time, etc
+// - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of encoding is based on the
+// type of the value. When a value is seen:
+// - If an extension is registered for it, call that extension function
+// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+// - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+// - A map can be decoded from a stream map, by updating matching keys.
+// - A slice can be decoded from a stream array,
+// by updating the first n elements, where n is the length of the stream array.
+// - A slice can be decoded from a stream map, by decoding as if
+// it contains a sequence of key-value pairs.
+// - A struct can be decoded from a stream map, by updating matching fields.
+// - A struct can be decoded from a stream array,
+// by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+// Note: we allow nil values in the stream anywhere except for map keys.
+// A nil value in the encoded stream where a map key is expected is treated as an error.
+func (d *Decoder) Decode(v interface{}) (err error) {
+ defer d.deferred(&err)
+ d.MustDecode(v)
+ return
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight to the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+ // TODO: Top-level: ensure that v is a pointer and not nil.
+ if d.err != nil {
+ panic(d.err)
+ }
+ if d.d.TryDecodeAsNil() {
+ setZero(v)
+ } else {
+ d.decode(v)
+ }
+ d.alwaysAtEnd()
+ // xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
+}
+
+func (d *Decoder) deferred(err1 *error) {
+ d.alwaysAtEnd()
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ panicValToErr(d, x, err1)
+ panicValToErr(d, x, &d.err)
+ }
+ }
+}
+
+func (d *Decoder) alwaysAtEnd() {
+ if d.n != nil {
+ // if n != nil, then nsp != nil (they are always set together)
+ d.nsp.Put(d.n)
+ d.n, d.nsp = nil, nil
+ }
+ d.codecFnPooler.alwaysAtEnd()
+}
+
+// // this is not a smart swallow, as it allocates objects and does unnecessary work.
+// func (d *Decoder) swallowViaHammer() {
+// var blank interface{}
+// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem())
+// }
+
+func (d *Decoder) swallow() {
+ // smarter decode that just swallows the content
+ dd := d.d
+ if dd.TryDecodeAsNil() {
+ return
+ }
+ elemsep := d.esep
+ switch dd.ContainerType() {
+ case valueTypeMap:
+ containerLen := dd.ReadMapStart()
+ hasLen := containerLen >= 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break}
+ if elemsep {
+ dd.ReadMapElemKey()
+ }
+ d.swallow()
+ if elemsep {
+ dd.ReadMapElemValue()
+ }
+ d.swallow()
+ }
+ dd.ReadMapEnd()
+ case valueTypeArray:
+ containerLen := dd.ReadArrayStart()
+ hasLen := containerLen >= 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if elemsep {
+ dd.ReadArrayElem()
+ }
+ d.swallow()
+ }
+ dd.ReadArrayEnd()
+ case valueTypeBytes:
+ dd.DecodeBytes(d.b[:], true)
+ case valueTypeString:
+ dd.DecodeStringAsBytes()
+ default:
+ // these are all primitives, which we can get from decodeNaked
+ // if it is a RawExt whose value follows in the stream, complete the processing here.
+ n := d.naked()
+ dd.DecodeNaked()
+ if n.v == valueTypeExt && n.l == nil {
+ n.initContainers()
+ if n.li < arrayCacheLen {
+ n.ia[n.li] = nil
+ n.li++
+ d.decode(&n.ia[n.li-1])
+ n.ia[n.li-1] = nil
+ n.li--
+ } else {
+ var v2 interface{}
+ d.decode(&v2)
+ }
+ }
+ }
+}
+
+func setZero(iv interface{}) {
+ if iv == nil || definitelyNil(iv) {
+ return
+ }
+ var canDecode bool
+ switch v := iv.(type) {
+ case *string:
+ *v = ""
+ case *bool:
+ *v = false
+ case *int:
+ *v = 0
+ case *int8:
+ *v = 0
+ case *int16:
+ *v = 0
+ case *int32:
+ *v = 0
+ case *int64:
+ *v = 0
+ case *uint:
+ *v = 0
+ case *uint8:
+ *v = 0
+ case *uint16:
+ *v = 0
+ case *uint32:
+ *v = 0
+ case *uint64:
+ *v = 0
+ case *float32:
+ *v = 0
+ case *float64:
+ *v = 0
+ case *[]uint8:
+ *v = nil
+ case *Raw:
+ *v = nil
+ case *time.Time:
+ *v = time.Time{}
+ case reflect.Value:
+ if v, canDecode = isDecodeable(v); canDecode && v.CanSet() {
+ v.Set(reflect.Zero(v.Type()))
+ } // TODO: else drain if chan, clear if map, set all to nil if slice???
+ default:
+ if !fastpathDecodeSetZeroTypeSwitch(iv) {
+ v := reflect.ValueOf(iv)
+ if v, canDecode = isDecodeable(v); canDecode && v.CanSet() {
+ v.Set(reflect.Zero(v.Type()))
+ } // TODO: else drain if chan, clear if map, set all to nil if slice???
+ }
+ }
+}
+
+func (d *Decoder) decode(iv interface{}) {
+ // check nil and interfaces explicitly,
+ // so that type switches just have a run of constant non-interface types.
+ if iv == nil {
+ d.errorstr(errstrCannotDecodeIntoNil)
+ return
+ }
+ if v, ok := iv.(Selfer); ok {
+ v.CodecDecodeSelf(d)
+ return
+ }
+
+ switch v := iv.(type) {
+ // case nil:
+ // case Selfer:
+
+ case reflect.Value:
+ v = d.ensureDecodeable(v)
+ d.decodeValue(v, nil, true)
+
+ case *string:
+ *v = d.d.DecodeString()
+ case *bool:
+ *v = d.d.DecodeBool()
+ case *int:
+ *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ case *int8:
+ *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
+ case *int16:
+ *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
+ case *int32:
+ *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ case *int64:
+ *v = d.d.DecodeInt64()
+ case *uint:
+ *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+ case *uint8:
+ *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+ case *uint16:
+ *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
+ case *uint32:
+ *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
+ case *uint64:
+ *v = d.d.DecodeUint64()
+ case *float32:
+ f64 := d.d.DecodeFloat64()
+ if chkOvf.Float32(f64) {
+ d.errorf("float32 overflow: %v", f64)
+ }
+ *v = float32(f64)
+ case *float64:
+ *v = d.d.DecodeFloat64()
+ case *[]uint8:
+ *v = d.d.DecodeBytes(*v, false)
+ case []uint8:
+ b := d.d.DecodeBytes(v, false)
+ if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) {
+ copy(v, b)
+ }
+ case *time.Time:
+ *v = d.d.DecodeTime()
+ case *Raw:
+ *v = d.rawBytes()
+
+ case *interface{}:
+ d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true)
+ // d.decodeValueNotNil(reflect.ValueOf(iv).Elem())
+
+ default:
+ if !fastpathDecodeTypeSwitch(iv, d) {
+ v := reflect.ValueOf(iv)
+ v = d.ensureDecodeable(v)
+ d.decodeValue(v, nil, false)
+ // d.decodeValueFallback(v)
+ }
+ }
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) {
+ // If stream is not containing a nil value, then we can deref to the base
+ // non-pointer value, and decode into that.
+ var rvp reflect.Value
+ var rvpValid bool
+ if rv.Kind() == reflect.Ptr {
+ rvpValid = true
+ for {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rvp = rv
+ rv = rv.Elem()
+ if rv.Kind() != reflect.Ptr {
+ break
+ }
+ }
+ }
+
+ if fn == nil {
+ // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+ fn = d.cfer().get(rv.Type(), chkAll, true) // chkAll, chkAll)
+ }
+ if fn.i.addrD {
+ if rvpValid {
+ fn.fd(d, &fn.i, rvp)
+ } else if rv.CanAddr() {
+ fn.fd(d, &fn.i, rv.Addr())
+ } else if !fn.i.addrF {
+ fn.fd(d, &fn.i, rv)
+ } else {
+ d.errorf("cannot decode into a non-pointer value")
+ }
+ } else {
+ fn.fd(d, &fn.i, rv)
+ }
+ // return rv
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+ // NOTE: rvkencname may be a stringView, so don't pass it to another function.
+ if d.h.ErrorIfNoField {
+ if index >= 0 {
+ d.errorf("no matching struct field found when decoding stream array at index %v", index)
+ return
+ } else if rvkencname != "" {
+ d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+ return
+ }
+ }
+ d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+ if d.h.ErrorIfNoArrayExpand {
+ d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+ }
+}
+
+func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
+ switch rv.Kind() {
+ case reflect.Array:
+ return rv, true
+ case reflect.Ptr:
+ if !rv.IsNil() {
+ return rv.Elem(), true
+ }
+ case reflect.Slice, reflect.Chan, reflect.Map:
+ if !rv.IsNil() {
+ return rv, true
+ }
+ }
+ return
+}
+
+func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
+ // decode can take any reflect.Value that is inherently addressable, i.e.
+ // - array
+ // - non-nil chan (we will SEND to it)
+ // - non-nil slice (we will set its elements)
+ // - non-nil map (we will put into it)
+ // - non-nil pointer (we can "update" it)
+ rv2, canDecode := isDecodeable(rv)
+ if canDecode {
+ return
+ }
+ if !rv.IsValid() {
+ d.errorstr(errstrCannotDecodeIntoNil)
+ return
+ }
+ if !rv.CanInterface() {
+ d.errorf("cannot decode into a value without an interface: %v", rv)
+ return
+ }
+ rvi := rv2i(rv)
+ rvk := rv.Kind()
+ d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi)
+ return
+}
+
+// Possibly get an interned version of a string
+//
+// This should mostly be used for map keys, where the key type is string.
+// This is because keys of a map/struct are typically reused across many objects.
+func (d *Decoder) string(v []byte) (s string) {
+ if d.is == nil {
+ return string(v) // don't return stringView, as we need a real string here.
+ }
+ s, ok := d.is[string(v)] // no allocation here, per go implementation
+ if !ok {
+ s = string(v) // new allocation here
+ d.is[s] = s
+ }
+ return s
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() (bs []byte) {
+ d.d.uncacheRead()
+ d.r.track()
+ d.swallow()
+ bs = d.r.stopTrack()
+ return
+}
+
+func (d *Decoder) rawBytes() []byte {
+ // ensure that this is not a view into the bytes
+ // i.e. make new copy always.
+ bs := d.nextValueBytes()
+ bs2 := make([]byte, len(bs))
+ copy(bs2, bs)
+ return bs2
+}
+
+func (d *Decoder) wrapErrstr(v interface{}, err *error) {
+ *err = fmt.Errorf("%s decode error [pos %d]: %v", d.hh.Name(), d.r.numread(), v)
+}
+
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
+type decSliceHelper struct {
+ d *Decoder
+ // ct valueType
+ array bool
+}
+
+func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
+ dd := d.d
+ ctyp := dd.ContainerType()
+ switch ctyp {
+ case valueTypeArray:
+ x.array = true
+ clen = dd.ReadArrayStart()
+ case valueTypeMap:
+ clen = dd.ReadMapStart() * 2
+ default:
+ d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
+ }
+ // x.ct = ctyp
+ x.d = d
+ return
+}
+
+func (x decSliceHelper) End() {
+ if x.array {
+ x.d.d.ReadArrayEnd()
+ } else {
+ x.d.d.ReadMapEnd()
+ }
+}
+
+func (x decSliceHelper) ElemContainerState(index int) {
+ if x.array {
+ x.d.d.ReadArrayElem()
+ } else if index%2 == 0 {
+ x.d.d.ReadMapElemKey()
+ } else {
+ x.d.d.ReadMapElemValue()
+ }
+}
+
+func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
+ if clen == 0 {
+ return zeroByteSlice
+ }
+ if len(bs) == clen {
+ bsOut = bs
+ r.readb(bsOut)
+ } else if cap(bs) >= clen {
+ bsOut = bs[:clen]
+ r.readb(bsOut)
+ } else {
+ // bsOut = make([]byte, clen)
+ len2 := decInferLen(clen, maxInitLen, 1)
+ bsOut = make([]byte, len2)
+ r.readb(bsOut)
+ for len2 < clen {
+ len3 := decInferLen(clen-len2, maxInitLen, 1)
+ bs3 := bsOut
+ bsOut = make([]byte, len2+len3)
+ copy(bsOut, bs3)
+ r.readb(bsOut[len2:])
+ len2 += len3
+ }
+ }
+ return
+}
+
+func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
+ if xlen := len(in); xlen > 0 {
+ if isBytesReader || xlen <= scratchByteArrayLen {
+ if cap(dest) >= xlen {
+ out = dest[:xlen]
+ } else {
+ out = make([]byte, xlen)
+ }
+ copy(out, in)
+ return
+ }
+ }
+ return in
+}
+
+// decInferLen will infer a sensible length, given the following:
+// - clen: length wanted.
+// - maxlen: max length to be returned.
+// if <= 0, it is unset, and we infer it based on the unit size
+// - unit: number of bytes for each element of the collection
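+//
+// For example (a worked case of the logic below): with clen=1000000, unit=1 and
+// maxlen unset, maxlen is inferred as 256*1024/1 = 262144, and since clen exceeds
+// that, 262144 is returned.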
+func decInferLen(clen, maxlen, unit int) (rvlen int) {
+ // handle when maxlen is not set i.e. <= 0
+ if clen <= 0 {
+ return
+ }
+ if unit == 0 {
+ return clen
+ }
+ if maxlen <= 0 {
+ // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items.
+ // maxlen = 256 * 1024 / unit
+ // if maxlen < (4 * 1024) {
+ // maxlen = 4 * 1024
+ // }
+ if unit < (256 / 4) {
+ maxlen = 256 * 1024 / unit
+ } else {
+ maxlen = 4 * 1024
+ }
+ }
+ if clen > maxlen {
+ rvlen = maxlen
+ } else {
+ rvlen = clen
+ }
+ return
+}
+
+func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) (
+ s2 reflect.Value, scap2 int, changed bool, err string) {
+ l1 := slen + num // new slice length
+ if l1 < slen {
+ err = errmsgExpandSliceOverflow
+ return
+ }
+ if l1 <= scap {
+ if s.CanSet() {
+ s.SetLen(l1)
+ } else if canChange {
+ s2 = s.Slice(0, l1)
+ scap2 = scap
+ changed = true
+ } else {
+ err = errmsgExpandSliceCannotChange
+ return
+ }
+ return
+ }
+ if !canChange {
+ err = errmsgExpandSliceCannotChange
+ return
+ }
+ scap2 = growCap(scap, stElemSize, num)
+ s2 = reflect.MakeSlice(st, l1, scap2)
+ changed = true
+ reflect.Copy(s2, s)
+ return
+}
+
+func decReadFull(r io.Reader, bs []byte) (n int, err error) {
+ var nn int
+ for n < len(bs) && err == nil {
+ nn, err = r.Read(bs[n:])
+ if nn > 0 {
+ if err == io.EOF {
+ // leave EOF for next time
+ err = nil
+ }
+ n += nn
+ }
+ }
+
+ // do not do this - it serves no purpose
+ // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF }
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go
new file mode 100644
index 0000000..ef46529
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/encode.go
@@ -0,0 +1,1375 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+)
+
+const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+
+var errEncoderNotInitialized = errors.New("Encoder not initialized")
+
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ EncodeNil()
+ EncodeInt(i int64)
+ EncodeUint(i uint64)
+ EncodeBool(b bool)
+ EncodeFloat32(f float32)
+ EncodeFloat64(f float64)
+ // encodeExtPreamble(xtag byte, length int)
+ EncodeRawExt(re *RawExt, e *Encoder)
+ EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+ EncodeString(c charEncoding, v string)
+ // EncodeSymbol(v string)
+ EncodeStringBytes(c charEncoding, v []byte)
+ EncodeTime(time.Time)
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+ WriteArrayStart(length int)
+ WriteArrayElem()
+ WriteArrayEnd()
+ WriteMapStart(length int)
+ WriteMapElemKey()
+ WriteMapElemValue()
+ WriteMapEnd()
+
+ reset()
+ atEndOfEncode()
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type encDriverAsis interface {
+ EncodeAsis(v []byte)
+}
+
+type encDriverNoopContainerWriter struct{}
+
+func (encDriverNoopContainerWriter) WriteArrayStart(length int) {}
+func (encDriverNoopContainerWriter) WriteArrayElem() {}
+func (encDriverNoopContainerWriter) WriteArrayEnd() {}
+func (encDriverNoopContainerWriter) WriteMapStart(length int) {}
+func (encDriverNoopContainerWriter) WriteMapElemKey() {}
+func (encDriverNoopContainerWriter) WriteMapElemValue() {}
+func (encDriverNoopContainerWriter) WriteMapEnd() {}
+func (encDriverNoopContainerWriter) atEndOfEncode() {}
+
+type encDriverTrackContainerWriter struct {
+ c containerState
+}
+
+func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart }
+func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem }
+func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd }
+func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart }
+func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey }
+func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue }
+func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd }
+func (e *encDriverTrackContainerWriter) atEndOfEncode() {}
+
+// type ioEncWriterWriter interface {
+// WriteByte(c byte) error
+// WriteString(s string) (n int, err error)
+// Write(p []byte) (n int, err error)
+// }
+
+// EncodeOptions captures configuration options during encode.
+type EncodeOptions struct {
+ // WriterBufferSize is the size of the buffer used when writing.
+ //
+ // if > 0, we use a smart buffer internally for performance purposes.
+ WriterBufferSize int
+
+ // ChanRecvTimeout is the timeout used when selecting from a chan.
+ //
+ // Configuring this controls how we receive from a chan during the encoding process.
+ // - If ==0, we only consume the elements currently available in the chan.
+ // - If <0, we consume until the chan is closed.
+ // - If >0, we consume until this timeout.
+ ChanRecvTimeout time.Duration
+
+ // StructToArray specifies that a struct should be encoded as an array, and not as a map
+ StructToArray bool
+
+ // Canonical representation means that encoding a value will always result in the same
+ // sequence of bytes.
+ //
+ // This only affects maps, as the iteration order for maps is random.
+ //
+ // The implementation MAY use the natural sort order for the map keys if possible:
+ //
+ // - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+ // then the map keys are first sorted in natural order and then written
+ // with corresponding map values to the stream.
+ // - If there is no natural sort order, then the map keys will first be
+ // encoded into []byte, and then sorted,
+ // before writing the sorted keys and the corresponding map values to the stream.
+ //
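+ // A minimal sketch of enabling this (JsonHandle is shown for illustration, and is
+ // assumed, as elsewhere in this package, to expose these options via BasicHandle):
+ //
+ //	var h JsonHandle
+ //	h.Canonical = true
+ //	var out []byte
+ //	_ = NewEncoderBytes(&out, &h).Encode(map[string]int{"b": 2, "a": 1})
+ //	// with Canonical set, the keys "a" and "b" are always written in sorted order
+ //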
+ Canonical bool
+
+ // CheckCircularRef controls whether we check for circular references
+ // and error fast during an encode.
+ //
+ // If enabled, an error is returned if a pointer to a struct
+ // references itself either directly or through one of its fields (iteratively).
+ //
+ // This is opt-in, as there may be a performance hit to checking circular references.
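+ //
+ // For example (a hypothetical sketch): with this option set, encoding a value like
+ //	type node struct{ next *node }
+ // whose next field has been pointed back at the value itself fails fast with a
+ // "circular reference found" error, instead of recursing indefinitely.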
+ CheckCircularRef bool
+
+ // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+ // when checking if a value is empty.
+ //
+ // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+ RecursiveEmptyCheck bool
+
+ // Raw controls whether we encode Raw values.
+ // This is a "dangerous" option and must be explicitly set.
+ // If set, we blindly encode Raw values as-is, without checking
+ // if they are a correct representation of a value in that format.
+ // If unset, we error out.
+ Raw bool
+
+ // // AsSymbols defines what should be encoded as symbols.
+ // //
+ // // Encoding as symbols can reduce the encoded size significantly.
+ // //
+ // // However, during decoding, each string to be encoded as a symbol must
+ // // be checked to see if it has been seen before. Consequently, encoding time
+ // // will increase if using symbols, because string comparisons has a clear cost.
+ // //
+ // // Sample values:
+ // // AsSymbolNone
+ // // AsSymbolAll
+ // // AsSymbolMapStringKeys
+ // // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+ // AsSymbols AsSymbolFlag
+}
+
+// ---------------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+ w io.Writer
+ ww io.Writer
+ bw io.ByteWriter
+ sw ioEncStringWriter
+ fw ioFlusher
+ b [8]byte
+}
+
+func (z *ioEncWriter) WriteByte(b byte) (err error) {
+ z.b[0] = b
+ _, err = z.w.Write(z.b[:1])
+ return
+}
+
+func (z *ioEncWriter) WriteString(s string) (n int, err error) {
+ return z.w.Write(bytesView(s))
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+ if _, err := z.ww.Write(bs); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writestr(s string) {
+ if _, err := z.sw.WriteString(s); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+ if err := z.bw.WriteByte(b); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen2(b1, b2 byte) {
+ var err error
+ if err = z.bw.WriteByte(b1); err == nil {
+ if err = z.bw.WriteByte(b2); err == nil {
+ return
+ }
+ }
+ panic(err)
+}
+
+// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
+// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
+// if _, err := z.ww.Write(z.b[:5]); err != nil {
+// panic(err)
+// }
+// }
+
+func (z *ioEncWriter) atEndOfEncode() {
+ if z.fw != nil {
+ if err := z.fw.Flush(); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// ---------------------------------------------
+
+// bytesEncAppender implements encWriter and can write to a byte slice.
+type bytesEncAppender struct {
+ b []byte
+ out *[]byte
+}
+
+func (z *bytesEncAppender) writeb(s []byte) {
+ z.b = append(z.b, s...)
+}
+func (z *bytesEncAppender) writestr(s string) {
+ z.b = append(z.b, s...)
+}
+func (z *bytesEncAppender) writen1(b1 byte) {
+ z.b = append(z.b, b1)
+}
+func (z *bytesEncAppender) writen2(b1, b2 byte) {
+ z.b = append(z.b, b1, b2)
+}
+func (z *bytesEncAppender) atEndOfEncode() {
+ *(z.out) = z.b
+}
+func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
+ z.b = in[:0]
+ z.out = out
+}
+
+// ---------------------------------------------
+
+func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeRawExt(rv2i(rv).(*RawExt), e)
+}
+
+func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e)
+}
+
+func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) {
+ rv2i(rv).(Selfer).CodecEncodeSelf(e)
+}
+
+func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) {
+ bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary()
+ e.marshal(bs, fnerr, false, cRAW)
+}
+
+func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) {
+ bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText()
+ e.marshal(bs, fnerr, false, cUTF8)
+}
+
+func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) {
+ bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON()
+ e.marshal(bs, fnerr, true, cUTF8)
+}
+
+func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) {
+ e.rawBytes(rv2i(rv).(Raw))
+}
+
+func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeNil()
+}
+
+func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) {
+ e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
+}
+
+func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
+ ti := f.ti
+ ee := e.e
+ // array may be non-addressable, so we have to manage with care
+ // (don't call rv.Bytes, rv.Slice, etc).
+ // E.g. type S struct{ B [2]byte };
+ // Encode(S{}) will bomb on "panic: slice of unaddressable array".
+ if f.seq != seqTypeArray {
+ if rv.IsNil() {
+ ee.EncodeNil()
+ return
+ }
+ // If in this method, then there was no extension function defined.
+ // So it's okay to treat as []byte.
+ if ti.rtid == uint8SliceTypId {
+ ee.EncodeStringBytes(cRAW, rv.Bytes())
+ return
+ }
+ }
+ if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 {
+ e.errorf("send-only channel cannot be encoded")
+ }
+ elemsep := e.esep
+ rtelem := ti.elem
+ rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8
+ var l int
+ // if a slice, array or chan of bytes, treat specially
+ if rtelemIsByte {
+ switch f.seq {
+ case seqTypeSlice:
+ ee.EncodeStringBytes(cRAW, rv.Bytes())
+ case seqTypeArray:
+ l = rv.Len()
+ if rv.CanAddr() {
+ ee.EncodeStringBytes(cRAW, rv.Slice(0, l).Bytes())
+ } else {
+ var bs []byte
+ if l <= cap(e.b) {
+ bs = e.b[:l]
+ } else {
+ bs = make([]byte, l)
+ }
+ reflect.Copy(reflect.ValueOf(bs), rv)
+ ee.EncodeStringBytes(cRAW, bs)
+ }
+ case seqTypeChan:
+ // do not use range, so that the number of elements encoded
+ // does not change, and encoding does not hang waiting on someone to close the chan.
+ // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) }
+ // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte.
+
+ if rv.IsNil() {
+ ee.EncodeNil()
+ break
+ }
+ bs := e.b[:0]
+ irv := rv2i(rv)
+ ch, ok := irv.(<-chan byte)
+ if !ok {
+ ch = irv.(chan byte)
+ }
+
+ L1:
+ switch timeout := e.h.ChanRecvTimeout; {
+ case timeout == 0: // only consume available
+ for {
+ select {
+ case b := <-ch:
+ bs = append(bs, b)
+ default:
+ break L1
+ }
+ }
+ case timeout > 0: // consume until timeout
+ tt := time.NewTimer(timeout)
+ for {
+ select {
+ case b := <-ch:
+ bs = append(bs, b)
+ case <-tt.C:
+ // close(tt.C)
+ break L1
+ }
+ }
+ default: // consume until close
+ for b := range ch {
+ bs = append(bs, b)
+ }
+ }
+
+ ee.EncodeStringBytes(cRAW, bs)
+ }
+ return
+ }
+
+ // if chan, consume chan into a slice, and work off that slice.
+ var rvcs reflect.Value
+ if f.seq == seqTypeChan {
+ rvcs = reflect.Zero(reflect.SliceOf(rtelem))
+ timeout := e.h.ChanRecvTimeout
+ if timeout < 0 { // consume until close
+ for {
+ recv, recvOk := rv.Recv()
+ if !recvOk {
+ break
+ }
+ rvcs = reflect.Append(rvcs, recv)
+ }
+ } else {
+ cases := make([]reflect.SelectCase, 2)
+ cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
+ if timeout == 0 {
+ cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
+ } else {
+ tt := time.NewTimer(timeout)
+ cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
+ }
+ for {
+ chosen, recv, recvOk := reflect.Select(cases)
+ if chosen == 1 || !recvOk {
+ break
+ }
+ rvcs = reflect.Append(rvcs, recv)
+ }
+ }
+ rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected
+ }
+
+ l = rv.Len()
+ if ti.mbs {
+ if l%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", l)
+ return
+ }
+ ee.WriteMapStart(l / 2)
+ } else {
+ ee.WriteArrayStart(l)
+ }
+
+ if l > 0 {
+ var fn *codecFn
+ for rtelem.Kind() == reflect.Ptr {
+ rtelem = rtelem.Elem()
+ }
+ // if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ if rtelem.Kind() != reflect.Interface {
+ fn = e.cfer().get(rtelem, true, true)
+ }
+ for j := 0; j < l; j++ {
+ if elemsep {
+ if ti.mbs {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ } else {
+ ee.WriteArrayElem()
+ }
+ }
+ e.encodeValue(rv.Index(j), fn, true)
+ }
+ }
+
+ if ti.mbs {
+ ee.WriteMapEnd()
+ } else {
+ ee.WriteArrayEnd()
+ }
+}
+
+func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) {
+ fti := f.ti
+ elemsep := e.esep
+ tisfi := fti.sfiSrc
+ toMap := !(fti.toArray || e.h.StructToArray)
+ if toMap {
+ tisfi = fti.sfiSort
+ }
+ ee := e.e
+
+ sfn := structFieldNode{v: rv, update: false}
+ if toMap {
+ ee.WriteMapStart(len(tisfi))
+ if elemsep {
+ for _, si := range tisfi {
+ ee.WriteMapElemKey()
+ // ee.EncodeString(cUTF8, si.encName)
+ encStructFieldKey(ee, fti.keyType, si.encName)
+ ee.WriteMapElemValue()
+ e.encodeValue(sfn.field(si), nil, true)
+ }
+ } else {
+ for _, si := range tisfi {
+ // ee.EncodeString(cUTF8, si.encName)
+ encStructFieldKey(ee, fti.keyType, si.encName)
+ e.encodeValue(sfn.field(si), nil, true)
+ }
+ }
+ ee.WriteMapEnd()
+ } else {
+ ee.WriteArrayStart(len(tisfi))
+ if elemsep {
+ for _, si := range tisfi {
+ ee.WriteArrayElem()
+ e.encodeValue(sfn.field(si), nil, true)
+ }
+ } else {
+ for _, si := range tisfi {
+ e.encodeValue(sfn.field(si), nil, true)
+ }
+ }
+ ee.WriteArrayEnd()
+ }
+}
+
+func encStructFieldKey(ee encDriver, keyType valueType, s string) {
+ var m must
+
+ // use if-else-if, not switch (which compiles to binary-search)
+ // since keyType is typically valueTypeString, branch prediction is pretty good.
+
+ if keyType == valueTypeString {
+ ee.EncodeString(cUTF8, s)
+ } else if keyType == valueTypeInt {
+ ee.EncodeInt(m.Int(strconv.ParseInt(s, 10, 64)))
+ } else if keyType == valueTypeUint {
+ ee.EncodeUint(m.Uint(strconv.ParseUint(s, 10, 64)))
+ } else if keyType == valueTypeFloat {
+ ee.EncodeFloat64(m.Float(strconv.ParseFloat(s, 64)))
+ } else {
+ ee.EncodeString(cUTF8, s)
+ }
+}
+
+func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
+ fti := f.ti
+ elemsep := e.esep
+ tisfi := fti.sfiSrc
+ toMap := !(fti.toArray || e.h.StructToArray)
+ // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+ if toMap {
+ tisfi = fti.sfiSort
+ }
+ newlen := len(fti.sfiSort)
+ ee := e.e
+
+ // Use sync.Pool to reduce allocating slices unnecessarily.
+ // The cost of sync.Pool is less than the cost of new allocation.
+ //
+ // Each pool below holds arrays of 8, 16, 32, 64 or 128 entries,
+ // allowing the re-use of slices of up to 128 in length.
+ // A performance cost of encoding structs was collecting
+ // which values were empty and should be omitted.
+ // We needed slices of reflect.Value and string to collect them.
+ // This shared pool reduces the amount of unnecessary creation we do.
+ // The cost is that of locking sometimes, but sync.Pool is efficient
+ // enough to reduce thread contention.
+
+ var spool *sync.Pool
+ var poolv interface{}
+ var fkvs []stringRv
+ // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen)
+ if newlen <= 8 {
+ spool, poolv = pool.stringRv8()
+ fkvs = poolv.(*[8]stringRv)[:newlen]
+ } else if newlen <= 16 {
+ spool, poolv = pool.stringRv16()
+ fkvs = poolv.(*[16]stringRv)[:newlen]
+ } else if newlen <= 32 {
+ spool, poolv = pool.stringRv32()
+ fkvs = poolv.(*[32]stringRv)[:newlen]
+ } else if newlen <= 64 {
+ spool, poolv = pool.stringRv64()
+ fkvs = poolv.(*[64]stringRv)[:newlen]
+ } else if newlen <= 128 {
+ spool, poolv = pool.stringRv128()
+ fkvs = poolv.(*[128]stringRv)[:newlen]
+ } else {
+ fkvs = make([]stringRv, newlen)
+ }
+
+ newlen = 0
+ var kv stringRv
+ recur := e.h.RecursiveEmptyCheck
+ sfn := structFieldNode{v: rv, update: false}
+ for _, si := range tisfi {
+ // kv.r = si.field(rv, false)
+ kv.r = sfn.field(si)
+ if toMap {
+ if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
+ continue
+ }
+ kv.v = si.encName
+ } else {
+ // use the zero value.
+ // if a reference or struct, set to nil (so you do not output too much)
+ if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
+ switch kv.r.Kind() {
+ case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
+ kv.r = reflect.Value{} //encode as nil
+ }
+ }
+ }
+ fkvs[newlen] = kv
+ newlen++
+ }
+
+ if toMap {
+ ee.WriteMapStart(newlen)
+ if elemsep {
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ ee.WriteMapElemKey()
+ // ee.EncodeString(cUTF8, kv.v)
+ encStructFieldKey(ee, fti.keyType, kv.v)
+ ee.WriteMapElemValue()
+ e.encodeValue(kv.r, nil, true)
+ }
+ } else {
+ for j := 0; j < newlen; j++ {
+ kv = fkvs[j]
+ // ee.EncodeString(cUTF8, kv.v)
+ encStructFieldKey(ee, fti.keyType, kv.v)
+ e.encodeValue(kv.r, nil, true)
+ }
+ }
+ ee.WriteMapEnd()
+ } else {
+ ee.WriteArrayStart(newlen)
+ if elemsep {
+ for j := 0; j < newlen; j++ {
+ ee.WriteArrayElem()
+ e.encodeValue(fkvs[j].r, nil, true)
+ }
+ } else {
+ for j := 0; j < newlen; j++ {
+ e.encodeValue(fkvs[j].r, nil, true)
+ }
+ }
+ ee.WriteArrayEnd()
+ }
+
+ // do not use defer. Instead, use explicit pool return at end of function.
+ // defer has a cost we are trying to avoid.
+ // If there is a panic and these slices are not returned, it is ok.
+ if spool != nil {
+ spool.Put(poolv)
+ }
+}
+
+func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
+ ee := e.e
+ if rv.IsNil() {
+ ee.EncodeNil()
+ return
+ }
+
+ l := rv.Len()
+ ee.WriteMapStart(l)
+ elemsep := e.esep
+ if l == 0 {
+ ee.WriteMapEnd()
+ return
+ }
+ // var asSymbols bool
+ // determine the underlying key and val encFn's for the map.
+ // This eliminates some work which is done for each loop iteration i.e.
+ // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
+ //
+ // However, if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+ var keyFn, valFn *codecFn
+ ti := f.ti
+ rtkey0 := ti.key
+ rtkey := rtkey0
+ rtval0 := ti.elem
+ rtval := rtval0
+ // rtkeyid := rt2id(rtkey0)
+ for rtval.Kind() == reflect.Ptr {
+ rtval = rtval.Elem()
+ }
+ if rtval.Kind() != reflect.Interface {
+ valFn = e.cfer().get(rtval, true, true)
+ }
+ mks := rv.MapKeys()
+
+ if e.h.Canonical {
+ e.kMapCanonical(rtkey, rv, mks, valFn)
+ ee.WriteMapEnd()
+ return
+ }
+
+ var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid
+ if !keyTypeIsString {
+ for rtkey.Kind() == reflect.Ptr {
+ rtkey = rtkey.Elem()
+ }
+ if rtkey.Kind() != reflect.Interface {
+ // rtkeyid = rt2id(rtkey)
+ keyFn = e.cfer().get(rtkey, true, true)
+ }
+ }
+
+ // for j, lmks := 0, len(mks); j < lmks; j++ {
+ for j := range mks {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ if keyTypeIsString {
+ ee.EncodeString(cUTF8, mks[j].String())
+ } else {
+ e.encodeValue(mks[j], keyFn, true)
+ }
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mks[j]), valFn, true)
+
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) {
+ ee := e.e
+ elemsep := e.esep
+ // we previously did out-of-band if an extension was registered.
+ // This is not necessary, as the natural kind is sufficient for ordering.
+
+ switch rtkey.Kind() {
+ case reflect.Bool:
+ mksv := make([]boolRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bool()
+ }
+ sort.Sort(boolRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeBool(mksv[i].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ case reflect.String:
+ mksv := make([]stringRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.String()
+ }
+ sort.Sort(stringRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeString(cUTF8, mksv[i].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
+ mksv := make([]uintRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Uint()
+ }
+ sort.Sort(uintRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeUint(mksv[i].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ mksv := make([]intRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Int()
+ }
+ sort.Sort(intRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeInt(mksv[i].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ case reflect.Float32:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeFloat32(float32(mksv[i].v))
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ case reflect.Float64:
+ mksv := make([]floatRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(floatRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeFloat64(mksv[i].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ case reflect.Struct:
+ if rv.Type() == timeTyp {
+ mksv := make([]timeRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = rv2i(k).(time.Time)
+ }
+ sort.Sort(timeRvSlice(mksv))
+ for i := range mksv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ ee.EncodeTime(mksv[i].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+ }
+ break
+ }
+ fallthrough
+ default:
+ // out-of-band
+ // encode each key to a []byte first, then sort them, then write out the sorted keys and values
+ var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ mksbv := make([]bytesRv, len(mks))
+ for i, k := range mks {
+ v := &mksbv[i]
+ l := len(mksv)
+ e2.MustEncode(k)
+ v.r = k
+ v.v = mksv[l:]
+ }
+ sort.Sort(bytesRvSlice(mksbv))
+ for j := range mksbv {
+ if elemsep {
+ ee.WriteMapElemKey()
+ }
+ e.asis(mksbv[j].v)
+ if elemsep {
+ ee.WriteMapElemValue()
+ }
+ e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true)
+ }
+ }
+}
+
+// // --------------------------------------------------
+
+type encWriterSwitch struct {
+ wi *ioEncWriter
+ // wb bytesEncWriter
+ wb bytesEncAppender
+ wx bool // if bytes, wx=true
+ esep bool // whether it has elem separators
+ isas bool // whether e.as != nil
+}
+
+// // TODO: Uncomment after mid-stack inlining enabled in go 1.11
+
+// func (z *encWriterSwitch) writeb(s []byte) {
+// if z.wx {
+// z.wb.writeb(s)
+// } else {
+// z.wi.writeb(s)
+// }
+// }
+// func (z *encWriterSwitch) writestr(s string) {
+// if z.wx {
+// z.wb.writestr(s)
+// } else {
+// z.wi.writestr(s)
+// }
+// }
+// func (z *encWriterSwitch) writen1(b1 byte) {
+// if z.wx {
+// z.wb.writen1(b1)
+// } else {
+// z.wi.writen1(b1)
+// }
+// }
+// func (z *encWriterSwitch) writen2(b1, b2 byte) {
+// if z.wx {
+// z.wb.writen2(b1, b2)
+// } else {
+// z.wi.writen2(b1, b2)
+// }
+// }
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+ panicHdl
+ // hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+ e encDriver
+ // NOTE: Encoder shouldn't call its write methods,
+ // as the handler MAY need to do some coordination.
+ w encWriter
+
+ h *BasicHandle
+ bw *bufio.Writer
+ as encDriverAsis
+
+ // ---- cpu cache line boundary?
+
+ // ---- cpu cache line boundary?
+ encWriterSwitch
+ err error
+
+ // ---- cpu cache line boundary?
+ codecFnPooler
+ ci set
+ js bool // here, so that no need to piggy back on *codecFner for this
+ be bool // here, so that no need to piggy back on *codecFner for this
+ _ [6]byte // padding
+
+ // ---- writable fields during execution --- *try* to keep in sep cache line
+
+ // ---- cpu cache line boundary?
+ // b [scratchByteArrayLen]byte
+ // _ [cacheLineSize - scratchByteArrayLen]byte // padding
+ b [cacheLineSize - 0]byte // used for encoding a chan or (non-addressable) array of bytes
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.Reset(w)
+ return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
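+
+// A minimal usage sketch for the two constructors above (illustrative only;
+// JsonHandle is one of the handles defined in this package):
+//
+//	var h JsonHandle
+//	var out []byte
+//	if err := NewEncoderBytes(&out, &h).Encode(map[string]int{"a": 1}); err != nil {
+//		// handle the encode error
+//	}
+//	// out now holds the encoded bytes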
+
+func newEncoder(h Handle) *Encoder {
+ e := &Encoder{h: h.getBasicHandle(), err: errEncoderNotInitialized}
+ e.hh = h
+ e.esep = h.hasElemSeparators()
+ return e
+}
+
+func (e *Encoder) resetCommon() {
+ if e.e == nil || e.hh.recreateEncDriver(e.e) {
+ e.e = e.hh.newEncDriver(e)
+ e.as, e.isas = e.e.(encDriverAsis)
+ // e.cr, _ = e.e.(containerStateRecv)
+ }
+ e.be = e.hh.isBinary()
+ _, e.js = e.hh.(*JsonHandle)
+ e.e.reset()
+ e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ if w == nil {
+ return
+ }
+ if e.wi == nil {
+ e.wi = new(ioEncWriter)
+ }
+ var ok bool
+ e.wx = false
+ e.wi.w = w
+ if e.h.WriterBufferSize > 0 {
+ e.bw = bufio.NewWriterSize(w, e.h.WriterBufferSize)
+ e.wi.bw = e.bw
+ e.wi.sw = e.bw
+ e.wi.fw = e.bw
+ e.wi.ww = e.bw
+ } else {
+ if e.wi.bw, ok = w.(io.ByteWriter); !ok {
+ e.wi.bw = e.wi
+ }
+ if e.wi.sw, ok = w.(ioEncStringWriter); !ok {
+ e.wi.sw = e.wi
+ }
+ e.wi.fw, _ = w.(ioFlusher)
+ e.wi.ww = w
+ }
+ e.w = e.wi
+ e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+ if out == nil {
+ return
+ }
+ var in []byte
+ if out != nil {
+ in = *out
+ }
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ e.wx = true
+ e.wb.reset(in, out)
+ e.w = &e.wb
+ e.resetCommon()
+}
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+// - omitempty: so all fields are omitted if empty
+// - toarray: so struct is encoded as an array
+// - int: so struct key names are encoded as signed integers (instead of strings)
+// - uint: so struct key names are encoded as unsigned integers (instead of strings)
+// - float: so struct key names are encoded as floats (instead of strings)
+// More details on these below.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of string).
+// This is done with the int,uint or float option on the _struct field (see above).
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+// // NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// io.Reader //use key "Reader".
+// MyStruct `codec:"my1"` //use key "my1".
+// MyStruct //inline it
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",toarray"` //encode struct as an array
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys
+// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1)
+// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2)
+// }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If a Selfer, call its CodecEncodeSelf method
+// - If an extension is registered for it, call that extension function
+// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer e.deferred(&err)
+ e.MustEncode(v)
+ return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight to the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+ if e.err != nil {
+ panic(e.err)
+ }
+ e.encode(v)
+ e.e.atEndOfEncode()
+ e.w.atEndOfEncode()
+ e.alwaysAtEnd()
+}
+
+func (e *Encoder) deferred(err1 *error) {
+ e.alwaysAtEnd()
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ panicValToErr(e, x, err1)
+ panicValToErr(e, x, &e.err)
+ }
+ }
+}
+
+// func (e *Encoder) alwaysAtEnd() {
+// e.codecFnPooler.alwaysAtEnd()
+// }
+
+func (e *Encoder) encode(iv interface{}) {
+ if iv == nil || definitelyNil(iv) {
+ e.e.EncodeNil()
+ return
+ }
+ if v, ok := iv.(Selfer); ok {
+ v.CodecEncodeSelf(e)
+ return
+ }
+
+ // a switch with only concrete types can be optimized.
+ // consequently, we deal with nil and interfaces outside.
+
+ switch v := iv.(type) {
+ case Raw:
+ e.rawBytes(v)
+ case reflect.Value:
+ e.encodeValue(v, nil, true)
+
+ case string:
+ e.e.EncodeString(cUTF8, v)
+ case bool:
+ e.e.EncodeBool(v)
+ case int:
+ e.e.EncodeInt(int64(v))
+ case int8:
+ e.e.EncodeInt(int64(v))
+ case int16:
+ e.e.EncodeInt(int64(v))
+ case int32:
+ e.e.EncodeInt(int64(v))
+ case int64:
+ e.e.EncodeInt(v)
+ case uint:
+ e.e.EncodeUint(uint64(v))
+ case uint8:
+ e.e.EncodeUint(uint64(v))
+ case uint16:
+ e.e.EncodeUint(uint64(v))
+ case uint32:
+ e.e.EncodeUint(uint64(v))
+ case uint64:
+ e.e.EncodeUint(v)
+ case uintptr:
+ e.e.EncodeUint(uint64(v))
+ case float32:
+ e.e.EncodeFloat32(v)
+ case float64:
+ e.e.EncodeFloat64(v)
+ case time.Time:
+ e.e.EncodeTime(v)
+ case []uint8:
+ e.e.EncodeStringBytes(cRAW, v)
+
+ case *Raw:
+ e.rawBytes(*v)
+
+ case *string:
+ e.e.EncodeString(cUTF8, *v)
+ case *bool:
+ e.e.EncodeBool(*v)
+ case *int:
+ e.e.EncodeInt(int64(*v))
+ case *int8:
+ e.e.EncodeInt(int64(*v))
+ case *int16:
+ e.e.EncodeInt(int64(*v))
+ case *int32:
+ e.e.EncodeInt(int64(*v))
+ case *int64:
+ e.e.EncodeInt(*v)
+ case *uint:
+ e.e.EncodeUint(uint64(*v))
+ case *uint8:
+ e.e.EncodeUint(uint64(*v))
+ case *uint16:
+ e.e.EncodeUint(uint64(*v))
+ case *uint32:
+ e.e.EncodeUint(uint64(*v))
+ case *uint64:
+ e.e.EncodeUint(*v)
+ case *uintptr:
+ e.e.EncodeUint(uint64(*v))
+ case *float32:
+ e.e.EncodeFloat32(*v)
+ case *float64:
+ e.e.EncodeFloat64(*v)
+ case *time.Time:
+ e.e.EncodeTime(*v)
+
+ case *[]uint8:
+ e.e.EncodeStringBytes(cRAW, *v)
+
+ default:
+ if !fastpathEncodeTypeSwitch(iv, e) {
+ // checkfastpath=true (not false), as underlying slice/map type may be fast-path
+ e.encodeValue(reflect.ValueOf(iv), nil, true)
+ }
+ }
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) {
+ // if a valid fn is passed, it MUST BE for the dereferenced type of rv
+ var sptr uintptr
+ var rvp reflect.Value
+ var rvpValid bool
+TOP:
+ switch rv.Kind() {
+ case reflect.Ptr:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rvpValid = true
+ rvp = rv
+ rv = rv.Elem()
+ if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
+ // TODO: Movable pointers will be an issue here. Future problem.
+ sptr = rv.UnsafeAddr()
+ break TOP
+ }
+ goto TOP
+ case reflect.Interface:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Slice, reflect.Map:
+ if rv.IsNil() {
+ e.e.EncodeNil()
+ return
+ }
+ case reflect.Invalid, reflect.Func:
+ e.e.EncodeNil()
+ return
+ }
+
+ if sptr != 0 && (&e.ci).add(sptr) {
+ e.errorf("circular reference found: # %d", sptr)
+ }
+
+ if fn == nil {
+ rt := rv.Type()
+ // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+ fn = e.cfer().get(rt, checkFastpath, true)
+ }
+ if fn.i.addrE {
+ if rvpValid {
+ fn.fe(e, &fn.i, rvp)
+ } else if rv.CanAddr() {
+ fn.fe(e, &fn.i, rv.Addr())
+ } else {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ fn.fe(e, &fn.i, rv2)
+ }
+ } else {
+ fn.fe(e, &fn.i, rv)
+ }
+ if sptr != 0 {
+ (&e.ci).remove(sptr)
+ }
+}
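+
+// Illustrative sketch (the node type and handle configuration are assumptions
+// for the example). With CheckCircularRef enabled, encodeValue records the
+// address of each struct reached through a pointer and reports a
+// "circular reference found" error instead of recursing forever:
+//
+//	type node struct {
+//		Next *node
+//	}
+//	n := &node{}
+//	n.Next = n
+//	var jh codec.JsonHandle
+//	jh.CheckCircularRef = true
+//	var out bytes.Buffer
+//	err := codec.NewEncoder(&out, &jh).Encode(n) // err: circular reference found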
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+ if fnerr != nil {
+ panic(fnerr)
+ }
+ if bs == nil {
+ e.e.EncodeNil()
+ } else if asis {
+ e.asis(bs)
+ } else {
+ e.e.EncodeStringBytes(c, bs)
+ }
+}
+
+func (e *Encoder) asis(v []byte) {
+ if e.isas {
+ e.as.EncodeAsis(v)
+ } else {
+ e.w.writeb(v)
+ }
+}
+
+func (e *Encoder) rawBytes(vv Raw) {
+ v := []byte(vv)
+ if !e.h.Raw {
+ e.errorf("Raw values cannot be encoded: %v", v)
+ }
+ e.asis(v)
+}
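+
+// Sketch of the Raw escape hatch (hedged example; the handle configuration is
+// an assumption). Raw bytes are written to the stream as-is, so they must
+// already be valid for the target format, and the handle must opt in via its
+// Raw option:
+//
+//	var jh codec.JsonHandle
+//	jh.Raw = true
+//	var out bytes.Buffer
+//	_ = codec.NewEncoder(&out, &jh).Encode(codec.Raw(`{"already":"encoded"}`))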
+
+func (e *Encoder) wrapErrstr(v interface{}, err *error) {
+ *err = fmt.Errorf("%s encode error: %v", e.hh.Name(), v)
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 0000000..87f2562
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,34522 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute the encode.go and decode.go, and create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types,
+// - map of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+//   // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
+//
+
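+// Note (illustrative, not upstream text): because of the "!notfastpath" build
+// constraint above, this generated file can be excluded from a build, e.g.
+//
+//	go build -tags notfastpath ./...
+//
+// which typically trades some encode/decode speed for a smaller binary.
+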
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathEnabled = true
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, 271 // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < 271 && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// to avoid a possible initialization loop error, the fastpath table is built in an init()
+func init() {
+ i := 0
+ fn := func(v interface{},
+ fe func(*Encoder, *codecFnInfo, reflect.Value),
+ fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := rt2id(xrt)
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+
+ fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR)
+ fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR)
+ fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R)
+ fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R)
+ fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR)
+ fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R)
+ fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R)
+ fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R)
+ fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR)
+ fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR)
+ fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R)
+ fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R)
+ fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R)
+ fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R)
+ fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR)
+
+ fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR)
+ fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR)
+ fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR)
+ fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R)
+ fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R)
+ fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R)
+ fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R)
+ fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR)
+ fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR)
+ fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R)
+ fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R)
+ fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R)
+ fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R)
+ fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R)
+ fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R)
+ fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR)
+ fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR)
+ fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR)
+ fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR)
+ fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R)
+ fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R)
+ fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R)
+ fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R)
+ fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR)
+ fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR)
+ fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R)
+ fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R)
+ fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R)
+ fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R)
+ fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R)
+ fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R)
+ fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR)
+ fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR)
+ fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR)
+ fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR)
+ fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R)
+ fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R)
+ fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R)
+ fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R)
+ fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR)
+ fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR)
+ fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R)
+ fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R)
+ fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R)
+ fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R)
+ fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R)
+ fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R)
+ fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR)
+ fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR)
+ fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR)
+ fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR)
+ fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R)
+ fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R)
+ fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R)
+ fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R)
+ fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR)
+ fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR)
+ fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R)
+ fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R)
+ fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R)
+ fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R)
+ fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R)
+ fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R)
+ fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR)
+ fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR)
+ fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR)
+ fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR)
+ fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R)
+ fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R)
+ fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R)
+ fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R)
+ fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR)
+ fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR)
+ fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R)
+ fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R)
+ fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R)
+ fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R)
+ fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R)
+ fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R)
+ fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR)
+ fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR)
+ fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR)
+ fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR)
+ fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R)
+ fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R)
+ fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R)
+ fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R)
+ fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR)
+ fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR)
+ fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R)
+ fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R)
+ fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R)
+ fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R)
+ fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R)
+ fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R)
+ fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR)
+ fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR)
+ fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR)
+ fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR)
+ fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R)
+ fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R)
+ fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R)
+ fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R)
+ fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR)
+ fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR)
+ fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R)
+ fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R)
+ fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R)
+ fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R)
+ fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R)
+ fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R)
+ fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR)
+ fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR)
+ fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR)
+ fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR)
+ fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R)
+ fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R)
+ fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R)
+ fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R)
+ fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR)
+ fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR)
+ fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R)
+ fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R)
+ fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R)
+ fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R)
+ fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R)
+ fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R)
+ fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR)
+ fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR)
+ fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR)
+ fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR)
+ fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R)
+ fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R)
+ fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R)
+ fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R)
+ fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR)
+ fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR)
+ fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R)
+ fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R)
+ fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R)
+ fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R)
+ fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R)
+ fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R)
+ fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR)
+ fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR)
+ fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR)
+ fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR)
+ fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R)
+ fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R)
+ fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R)
+ fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R)
+ fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR)
+ fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR)
+ fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R)
+ fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R)
+ fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R)
+ fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R)
+ fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R)
+ fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R)
+ fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR)
+ fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR)
+ fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR)
+ fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR)
+ fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R)
+ fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R)
+ fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R)
+ fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R)
+ fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR)
+ fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR)
+ fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R)
+ fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R)
+ fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R)
+ fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R)
+ fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R)
+ fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R)
+ fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR)
+ fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR)
+ fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR)
+ fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR)
+ fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R)
+ fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R)
+ fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R)
+ fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R)
+ fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR)
+ fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR)
+ fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R)
+ fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R)
+ fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R)
+ fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R)
+ fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R)
+ fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R)
+ fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR)
+ fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR)
+ fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR)
+ fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR)
+ fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R)
+ fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R)
+ fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R)
+ fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R)
+ fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR)
+ fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR)
+ fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R)
+ fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R)
+ fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R)
+ fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R)
+ fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R)
+ fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R)
+ fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR)
+ fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR)
+ fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR)
+ fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR)
+ fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R)
+ fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R)
+ fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R)
+ fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R)
+ fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR)
+ fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR)
+ fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R)
+ fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R)
+ fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R)
+ fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R)
+ fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R)
+ fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R)
+ fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR)
+ fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR)
+ fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR)
+ fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR)
+ fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R)
+ fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R)
+ fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R)
+ fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R)
+ fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR)
+ fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR)
+ fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R)
+ fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R)
+ fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R)
+ fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R)
+ fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R)
+ fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R)
+ fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR)
+ fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR)
+ fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR)
+ fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR)
+ fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R)
+ fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R)
+ fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R)
+ fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R)
+ fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR)
+ fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR)
+ fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R)
+ fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R)
+ fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R)
+ fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R)
+ fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R)
+ fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R)
+ fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR)
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, e)
+ case *[]interface{}:
+ fastpathTV.EncSliceIntfV(*v, e)
+ case []string:
+ fastpathTV.EncSliceStringV(v, e)
+ case *[]string:
+ fastpathTV.EncSliceStringV(*v, e)
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, e)
+ case *[]float32:
+ fastpathTV.EncSliceFloat32V(*v, e)
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, e)
+ case *[]float64:
+ fastpathTV.EncSliceFloat64V(*v, e)
+ case []uint:
+ fastpathTV.EncSliceUintV(v, e)
+ case *[]uint:
+ fastpathTV.EncSliceUintV(*v, e)
+ case []uint16:
+ fastpathTV.EncSliceUint16V(v, e)
+ case *[]uint16:
+ fastpathTV.EncSliceUint16V(*v, e)
+ case []uint32:
+ fastpathTV.EncSliceUint32V(v, e)
+ case *[]uint32:
+ fastpathTV.EncSliceUint32V(*v, e)
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, e)
+ case *[]uint64:
+ fastpathTV.EncSliceUint64V(*v, e)
+ case []uintptr:
+ fastpathTV.EncSliceUintptrV(v, e)
+ case *[]uintptr:
+ fastpathTV.EncSliceUintptrV(*v, e)
+ case []int:
+ fastpathTV.EncSliceIntV(v, e)
+ case *[]int:
+ fastpathTV.EncSliceIntV(*v, e)
+ case []int8:
+ fastpathTV.EncSliceInt8V(v, e)
+ case *[]int8:
+ fastpathTV.EncSliceInt8V(*v, e)
+ case []int16:
+ fastpathTV.EncSliceInt16V(v, e)
+ case *[]int16:
+ fastpathTV.EncSliceInt16V(*v, e)
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, e)
+ case *[]int32:
+ fastpathTV.EncSliceInt32V(*v, e)
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, e)
+ case *[]int64:
+ fastpathTV.EncSliceInt64V(*v, e)
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, e)
+ case *[]bool:
+ fastpathTV.EncSliceBoolV(*v, e)
+
+ case map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(v, e)
+ case *map[interface{}]interface{}:
+ fastpathTV.EncMapIntfIntfV(*v, e)
+ case map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(v, e)
+ case *map[interface{}]string:
+ fastpathTV.EncMapIntfStringV(*v, e)
+ case map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(v, e)
+ case *map[interface{}]uint:
+ fastpathTV.EncMapIntfUintV(*v, e)
+ case map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(v, e)
+ case *map[interface{}]uint8:
+ fastpathTV.EncMapIntfUint8V(*v, e)
+ case map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(v, e)
+ case *map[interface{}]uint16:
+ fastpathTV.EncMapIntfUint16V(*v, e)
+ case map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(v, e)
+ case *map[interface{}]uint32:
+ fastpathTV.EncMapIntfUint32V(*v, e)
+ case map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(v, e)
+ case *map[interface{}]uint64:
+ fastpathTV.EncMapIntfUint64V(*v, e)
+ case map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(v, e)
+ case *map[interface{}]uintptr:
+ fastpathTV.EncMapIntfUintptrV(*v, e)
+ case map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(v, e)
+ case *map[interface{}]int:
+ fastpathTV.EncMapIntfIntV(*v, e)
+ case map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(v, e)
+ case *map[interface{}]int8:
+ fastpathTV.EncMapIntfInt8V(*v, e)
+ case map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(v, e)
+ case *map[interface{}]int16:
+ fastpathTV.EncMapIntfInt16V(*v, e)
+ case map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(v, e)
+ case *map[interface{}]int32:
+ fastpathTV.EncMapIntfInt32V(*v, e)
+ case map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(v, e)
+ case *map[interface{}]int64:
+ fastpathTV.EncMapIntfInt64V(*v, e)
+ case map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(v, e)
+ case *map[interface{}]float32:
+ fastpathTV.EncMapIntfFloat32V(*v, e)
+ case map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(v, e)
+ case *map[interface{}]float64:
+ fastpathTV.EncMapIntfFloat64V(*v, e)
+ case map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(v, e)
+ case *map[interface{}]bool:
+ fastpathTV.EncMapIntfBoolV(*v, e)
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, e)
+ case *map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(*v, e)
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, e)
+ case *map[string]string:
+ fastpathTV.EncMapStringStringV(*v, e)
+ case map[string]uint:
+ fastpathTV.EncMapStringUintV(v, e)
+ case *map[string]uint:
+ fastpathTV.EncMapStringUintV(*v, e)
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, e)
+ case *map[string]uint8:
+ fastpathTV.EncMapStringUint8V(*v, e)
+ case map[string]uint16:
+ fastpathTV.EncMapStringUint16V(v, e)
+ case *map[string]uint16:
+ fastpathTV.EncMapStringUint16V(*v, e)
+ case map[string]uint32:
+ fastpathTV.EncMapStringUint32V(v, e)
+ case *map[string]uint32:
+ fastpathTV.EncMapStringUint32V(*v, e)
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, e)
+ case *map[string]uint64:
+ fastpathTV.EncMapStringUint64V(*v, e)
+ case map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(v, e)
+ case *map[string]uintptr:
+ fastpathTV.EncMapStringUintptrV(*v, e)
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, e)
+ case *map[string]int:
+ fastpathTV.EncMapStringIntV(*v, e)
+ case map[string]int8:
+ fastpathTV.EncMapStringInt8V(v, e)
+ case *map[string]int8:
+ fastpathTV.EncMapStringInt8V(*v, e)
+ case map[string]int16:
+ fastpathTV.EncMapStringInt16V(v, e)
+ case *map[string]int16:
+ fastpathTV.EncMapStringInt16V(*v, e)
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, e)
+ case *map[string]int32:
+ fastpathTV.EncMapStringInt32V(*v, e)
+ case map[string]int64:
+ fastpathTV.EncMapStringInt64V(v, e)
+ case *map[string]int64:
+ fastpathTV.EncMapStringInt64V(*v, e)
+ case map[string]float32:
+ fastpathTV.EncMapStringFloat32V(v, e)
+ case *map[string]float32:
+ fastpathTV.EncMapStringFloat32V(*v, e)
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, e)
+ case *map[string]float64:
+ fastpathTV.EncMapStringFloat64V(*v, e)
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, e)
+ case *map[string]bool:
+ fastpathTV.EncMapStringBoolV(*v, e)
+ case map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(v, e)
+ case *map[float32]interface{}:
+ fastpathTV.EncMapFloat32IntfV(*v, e)
+ case map[float32]string:
+ fastpathTV.EncMapFloat32StringV(v, e)
+ case *map[float32]string:
+ fastpathTV.EncMapFloat32StringV(*v, e)
+ case map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(v, e)
+ case *map[float32]uint:
+ fastpathTV.EncMapFloat32UintV(*v, e)
+ case map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(v, e)
+ case *map[float32]uint8:
+ fastpathTV.EncMapFloat32Uint8V(*v, e)
+ case map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(v, e)
+ case *map[float32]uint16:
+ fastpathTV.EncMapFloat32Uint16V(*v, e)
+ case map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(v, e)
+ case *map[float32]uint32:
+ fastpathTV.EncMapFloat32Uint32V(*v, e)
+ case map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(v, e)
+ case *map[float32]uint64:
+ fastpathTV.EncMapFloat32Uint64V(*v, e)
+ case map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(v, e)
+ case *map[float32]uintptr:
+ fastpathTV.EncMapFloat32UintptrV(*v, e)
+ case map[float32]int:
+ fastpathTV.EncMapFloat32IntV(v, e)
+ case *map[float32]int:
+ fastpathTV.EncMapFloat32IntV(*v, e)
+ case map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(v, e)
+ case *map[float32]int8:
+ fastpathTV.EncMapFloat32Int8V(*v, e)
+ case map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(v, e)
+ case *map[float32]int16:
+ fastpathTV.EncMapFloat32Int16V(*v, e)
+ case map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(v, e)
+ case *map[float32]int32:
+ fastpathTV.EncMapFloat32Int32V(*v, e)
+ case map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(v, e)
+ case *map[float32]int64:
+ fastpathTV.EncMapFloat32Int64V(*v, e)
+ case map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(v, e)
+ case *map[float32]float32:
+ fastpathTV.EncMapFloat32Float32V(*v, e)
+ case map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(v, e)
+ case *map[float32]float64:
+ fastpathTV.EncMapFloat32Float64V(*v, e)
+ case map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(v, e)
+ case *map[float32]bool:
+ fastpathTV.EncMapFloat32BoolV(*v, e)
+ case map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(v, e)
+ case *map[float64]interface{}:
+ fastpathTV.EncMapFloat64IntfV(*v, e)
+ case map[float64]string:
+ fastpathTV.EncMapFloat64StringV(v, e)
+ case *map[float64]string:
+ fastpathTV.EncMapFloat64StringV(*v, e)
+ case map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(v, e)
+ case *map[float64]uint:
+ fastpathTV.EncMapFloat64UintV(*v, e)
+ case map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(v, e)
+ case *map[float64]uint8:
+ fastpathTV.EncMapFloat64Uint8V(*v, e)
+ case map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(v, e)
+ case *map[float64]uint16:
+ fastpathTV.EncMapFloat64Uint16V(*v, e)
+ case map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(v, e)
+ case *map[float64]uint32:
+ fastpathTV.EncMapFloat64Uint32V(*v, e)
+ case map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(v, e)
+ case *map[float64]uint64:
+ fastpathTV.EncMapFloat64Uint64V(*v, e)
+ case map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(v, e)
+ case *map[float64]uintptr:
+ fastpathTV.EncMapFloat64UintptrV(*v, e)
+ case map[float64]int:
+ fastpathTV.EncMapFloat64IntV(v, e)
+ case *map[float64]int:
+ fastpathTV.EncMapFloat64IntV(*v, e)
+ case map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(v, e)
+ case *map[float64]int8:
+ fastpathTV.EncMapFloat64Int8V(*v, e)
+ case map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(v, e)
+ case *map[float64]int16:
+ fastpathTV.EncMapFloat64Int16V(*v, e)
+ case map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(v, e)
+ case *map[float64]int32:
+ fastpathTV.EncMapFloat64Int32V(*v, e)
+ case map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(v, e)
+ case *map[float64]int64:
+ fastpathTV.EncMapFloat64Int64V(*v, e)
+ case map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(v, e)
+ case *map[float64]float32:
+ fastpathTV.EncMapFloat64Float32V(*v, e)
+ case map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(v, e)
+ case *map[float64]float64:
+ fastpathTV.EncMapFloat64Float64V(*v, e)
+ case map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(v, e)
+ case *map[float64]bool:
+ fastpathTV.EncMapFloat64BoolV(*v, e)
+ case map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(v, e)
+ case *map[uint]interface{}:
+ fastpathTV.EncMapUintIntfV(*v, e)
+ case map[uint]string:
+ fastpathTV.EncMapUintStringV(v, e)
+ case *map[uint]string:
+ fastpathTV.EncMapUintStringV(*v, e)
+ case map[uint]uint:
+ fastpathTV.EncMapUintUintV(v, e)
+ case *map[uint]uint:
+ fastpathTV.EncMapUintUintV(*v, e)
+ case map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(v, e)
+ case *map[uint]uint8:
+ fastpathTV.EncMapUintUint8V(*v, e)
+ case map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(v, e)
+ case *map[uint]uint16:
+ fastpathTV.EncMapUintUint16V(*v, e)
+ case map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(v, e)
+ case *map[uint]uint32:
+ fastpathTV.EncMapUintUint32V(*v, e)
+ case map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(v, e)
+ case *map[uint]uint64:
+ fastpathTV.EncMapUintUint64V(*v, e)
+ case map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(v, e)
+ case *map[uint]uintptr:
+ fastpathTV.EncMapUintUintptrV(*v, e)
+ case map[uint]int:
+ fastpathTV.EncMapUintIntV(v, e)
+ case *map[uint]int:
+ fastpathTV.EncMapUintIntV(*v, e)
+ case map[uint]int8:
+ fastpathTV.EncMapUintInt8V(v, e)
+ case *map[uint]int8:
+ fastpathTV.EncMapUintInt8V(*v, e)
+ case map[uint]int16:
+ fastpathTV.EncMapUintInt16V(v, e)
+ case *map[uint]int16:
+ fastpathTV.EncMapUintInt16V(*v, e)
+ case map[uint]int32:
+ fastpathTV.EncMapUintInt32V(v, e)
+ case *map[uint]int32:
+ fastpathTV.EncMapUintInt32V(*v, e)
+ case map[uint]int64:
+ fastpathTV.EncMapUintInt64V(v, e)
+ case *map[uint]int64:
+ fastpathTV.EncMapUintInt64V(*v, e)
+ case map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(v, e)
+ case *map[uint]float32:
+ fastpathTV.EncMapUintFloat32V(*v, e)
+ case map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(v, e)
+ case *map[uint]float64:
+ fastpathTV.EncMapUintFloat64V(*v, e)
+ case map[uint]bool:
+ fastpathTV.EncMapUintBoolV(v, e)
+ case *map[uint]bool:
+ fastpathTV.EncMapUintBoolV(*v, e)
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, e)
+ case *map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(*v, e)
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, e)
+ case *map[uint8]string:
+ fastpathTV.EncMapUint8StringV(*v, e)
+ case map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(v, e)
+ case *map[uint8]uint:
+ fastpathTV.EncMapUint8UintV(*v, e)
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, e)
+ case *map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(*v, e)
+ case map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(v, e)
+ case *map[uint8]uint16:
+ fastpathTV.EncMapUint8Uint16V(*v, e)
+ case map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(v, e)
+ case *map[uint8]uint32:
+ fastpathTV.EncMapUint8Uint32V(*v, e)
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, e)
+ case *map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(*v, e)
+ case map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(v, e)
+ case *map[uint8]uintptr:
+ fastpathTV.EncMapUint8UintptrV(*v, e)
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, e)
+ case *map[uint8]int:
+ fastpathTV.EncMapUint8IntV(*v, e)
+ case map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(v, e)
+ case *map[uint8]int8:
+ fastpathTV.EncMapUint8Int8V(*v, e)
+ case map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(v, e)
+ case *map[uint8]int16:
+ fastpathTV.EncMapUint8Int16V(*v, e)
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, e)
+ case *map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(*v, e)
+ case map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(v, e)
+ case *map[uint8]int64:
+ fastpathTV.EncMapUint8Int64V(*v, e)
+ case map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(v, e)
+ case *map[uint8]float32:
+ fastpathTV.EncMapUint8Float32V(*v, e)
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, e)
+ case *map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(*v, e)
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, e)
+ case *map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(*v, e)
+ case map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(v, e)
+ case *map[uint16]interface{}:
+ fastpathTV.EncMapUint16IntfV(*v, e)
+ case map[uint16]string:
+ fastpathTV.EncMapUint16StringV(v, e)
+ case *map[uint16]string:
+ fastpathTV.EncMapUint16StringV(*v, e)
+ case map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(v, e)
+ case *map[uint16]uint:
+ fastpathTV.EncMapUint16UintV(*v, e)
+ case map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(v, e)
+ case *map[uint16]uint8:
+ fastpathTV.EncMapUint16Uint8V(*v, e)
+ case map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(v, e)
+ case *map[uint16]uint16:
+ fastpathTV.EncMapUint16Uint16V(*v, e)
+ case map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(v, e)
+ case *map[uint16]uint32:
+ fastpathTV.EncMapUint16Uint32V(*v, e)
+ case map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(v, e)
+ case *map[uint16]uint64:
+ fastpathTV.EncMapUint16Uint64V(*v, e)
+ case map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(v, e)
+ case *map[uint16]uintptr:
+ fastpathTV.EncMapUint16UintptrV(*v, e)
+ case map[uint16]int:
+ fastpathTV.EncMapUint16IntV(v, e)
+ case *map[uint16]int:
+ fastpathTV.EncMapUint16IntV(*v, e)
+ case map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(v, e)
+ case *map[uint16]int8:
+ fastpathTV.EncMapUint16Int8V(*v, e)
+ case map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(v, e)
+ case *map[uint16]int16:
+ fastpathTV.EncMapUint16Int16V(*v, e)
+ case map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(v, e)
+ case *map[uint16]int32:
+ fastpathTV.EncMapUint16Int32V(*v, e)
+ case map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(v, e)
+ case *map[uint16]int64:
+ fastpathTV.EncMapUint16Int64V(*v, e)
+ case map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(v, e)
+ case *map[uint16]float32:
+ fastpathTV.EncMapUint16Float32V(*v, e)
+ case map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(v, e)
+ case *map[uint16]float64:
+ fastpathTV.EncMapUint16Float64V(*v, e)
+ case map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(v, e)
+ case *map[uint16]bool:
+ fastpathTV.EncMapUint16BoolV(*v, e)
+ case map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(v, e)
+ case *map[uint32]interface{}:
+ fastpathTV.EncMapUint32IntfV(*v, e)
+ case map[uint32]string:
+ fastpathTV.EncMapUint32StringV(v, e)
+ case *map[uint32]string:
+ fastpathTV.EncMapUint32StringV(*v, e)
+ case map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(v, e)
+ case *map[uint32]uint:
+ fastpathTV.EncMapUint32UintV(*v, e)
+ case map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(v, e)
+ case *map[uint32]uint8:
+ fastpathTV.EncMapUint32Uint8V(*v, e)
+ case map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(v, e)
+ case *map[uint32]uint16:
+ fastpathTV.EncMapUint32Uint16V(*v, e)
+ case map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(v, e)
+ case *map[uint32]uint32:
+ fastpathTV.EncMapUint32Uint32V(*v, e)
+ case map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(v, e)
+ case *map[uint32]uint64:
+ fastpathTV.EncMapUint32Uint64V(*v, e)
+ case map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(v, e)
+ case *map[uint32]uintptr:
+ fastpathTV.EncMapUint32UintptrV(*v, e)
+ case map[uint32]int:
+ fastpathTV.EncMapUint32IntV(v, e)
+ case *map[uint32]int:
+ fastpathTV.EncMapUint32IntV(*v, e)
+ case map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(v, e)
+ case *map[uint32]int8:
+ fastpathTV.EncMapUint32Int8V(*v, e)
+ case map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(v, e)
+ case *map[uint32]int16:
+ fastpathTV.EncMapUint32Int16V(*v, e)
+ case map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(v, e)
+ case *map[uint32]int32:
+ fastpathTV.EncMapUint32Int32V(*v, e)
+ case map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(v, e)
+ case *map[uint32]int64:
+ fastpathTV.EncMapUint32Int64V(*v, e)
+ case map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(v, e)
+ case *map[uint32]float32:
+ fastpathTV.EncMapUint32Float32V(*v, e)
+ case map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(v, e)
+ case *map[uint32]float64:
+ fastpathTV.EncMapUint32Float64V(*v, e)
+ case map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(v, e)
+ case *map[uint32]bool:
+ fastpathTV.EncMapUint32BoolV(*v, e)
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, e)
+ case *map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(*v, e)
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, e)
+ case *map[uint64]string:
+ fastpathTV.EncMapUint64StringV(*v, e)
+ case map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(v, e)
+ case *map[uint64]uint:
+ fastpathTV.EncMapUint64UintV(*v, e)
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, e)
+ case *map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(*v, e)
+ case map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(v, e)
+ case *map[uint64]uint16:
+ fastpathTV.EncMapUint64Uint16V(*v, e)
+ case map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(v, e)
+ case *map[uint64]uint32:
+ fastpathTV.EncMapUint64Uint32V(*v, e)
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, e)
+ case *map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(*v, e)
+ case map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(v, e)
+ case *map[uint64]uintptr:
+ fastpathTV.EncMapUint64UintptrV(*v, e)
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, e)
+ case *map[uint64]int:
+ fastpathTV.EncMapUint64IntV(*v, e)
+ case map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(v, e)
+ case *map[uint64]int8:
+ fastpathTV.EncMapUint64Int8V(*v, e)
+ case map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(v, e)
+ case *map[uint64]int16:
+ fastpathTV.EncMapUint64Int16V(*v, e)
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, e)
+ case *map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(*v, e)
+ case map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(v, e)
+ case *map[uint64]int64:
+ fastpathTV.EncMapUint64Int64V(*v, e)
+ case map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(v, e)
+ case *map[uint64]float32:
+ fastpathTV.EncMapUint64Float32V(*v, e)
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, e)
+ case *map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(*v, e)
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, e)
+ case *map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(*v, e)
+ case map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(v, e)
+ case *map[uintptr]interface{}:
+ fastpathTV.EncMapUintptrIntfV(*v, e)
+ case map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(v, e)
+ case *map[uintptr]string:
+ fastpathTV.EncMapUintptrStringV(*v, e)
+ case map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(v, e)
+ case *map[uintptr]uint:
+ fastpathTV.EncMapUintptrUintV(*v, e)
+ case map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(v, e)
+ case *map[uintptr]uint8:
+ fastpathTV.EncMapUintptrUint8V(*v, e)
+ case map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(v, e)
+ case *map[uintptr]uint16:
+ fastpathTV.EncMapUintptrUint16V(*v, e)
+ case map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(v, e)
+ case *map[uintptr]uint32:
+ fastpathTV.EncMapUintptrUint32V(*v, e)
+ case map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(v, e)
+ case *map[uintptr]uint64:
+ fastpathTV.EncMapUintptrUint64V(*v, e)
+ case map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(v, e)
+ case *map[uintptr]uintptr:
+ fastpathTV.EncMapUintptrUintptrV(*v, e)
+ case map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(v, e)
+ case *map[uintptr]int:
+ fastpathTV.EncMapUintptrIntV(*v, e)
+ case map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(v, e)
+ case *map[uintptr]int8:
+ fastpathTV.EncMapUintptrInt8V(*v, e)
+ case map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(v, e)
+ case *map[uintptr]int16:
+ fastpathTV.EncMapUintptrInt16V(*v, e)
+ case map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(v, e)
+ case *map[uintptr]int32:
+ fastpathTV.EncMapUintptrInt32V(*v, e)
+ case map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(v, e)
+ case *map[uintptr]int64:
+ fastpathTV.EncMapUintptrInt64V(*v, e)
+ case map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(v, e)
+ case *map[uintptr]float32:
+ fastpathTV.EncMapUintptrFloat32V(*v, e)
+ case map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(v, e)
+ case *map[uintptr]float64:
+ fastpathTV.EncMapUintptrFloat64V(*v, e)
+ case map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(v, e)
+ case *map[uintptr]bool:
+ fastpathTV.EncMapUintptrBoolV(*v, e)
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, e)
+ case *map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(*v, e)
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, e)
+ case *map[int]string:
+ fastpathTV.EncMapIntStringV(*v, e)
+ case map[int]uint:
+ fastpathTV.EncMapIntUintV(v, e)
+ case *map[int]uint:
+ fastpathTV.EncMapIntUintV(*v, e)
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, e)
+ case *map[int]uint8:
+ fastpathTV.EncMapIntUint8V(*v, e)
+ case map[int]uint16:
+ fastpathTV.EncMapIntUint16V(v, e)
+ case *map[int]uint16:
+ fastpathTV.EncMapIntUint16V(*v, e)
+ case map[int]uint32:
+ fastpathTV.EncMapIntUint32V(v, e)
+ case *map[int]uint32:
+ fastpathTV.EncMapIntUint32V(*v, e)
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, e)
+ case *map[int]uint64:
+ fastpathTV.EncMapIntUint64V(*v, e)
+ case map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(v, e)
+ case *map[int]uintptr:
+ fastpathTV.EncMapIntUintptrV(*v, e)
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, e)
+ case *map[int]int:
+ fastpathTV.EncMapIntIntV(*v, e)
+ case map[int]int8:
+ fastpathTV.EncMapIntInt8V(v, e)
+ case *map[int]int8:
+ fastpathTV.EncMapIntInt8V(*v, e)
+ case map[int]int16:
+ fastpathTV.EncMapIntInt16V(v, e)
+ case *map[int]int16:
+ fastpathTV.EncMapIntInt16V(*v, e)
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, e)
+ case *map[int]int32:
+ fastpathTV.EncMapIntInt32V(*v, e)
+ case map[int]int64:
+ fastpathTV.EncMapIntInt64V(v, e)
+ case *map[int]int64:
+ fastpathTV.EncMapIntInt64V(*v, e)
+ case map[int]float32:
+ fastpathTV.EncMapIntFloat32V(v, e)
+ case *map[int]float32:
+ fastpathTV.EncMapIntFloat32V(*v, e)
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, e)
+ case *map[int]float64:
+ fastpathTV.EncMapIntFloat64V(*v, e)
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, e)
+ case *map[int]bool:
+ fastpathTV.EncMapIntBoolV(*v, e)
+ case map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(v, e)
+ case *map[int8]interface{}:
+ fastpathTV.EncMapInt8IntfV(*v, e)
+ case map[int8]string:
+ fastpathTV.EncMapInt8StringV(v, e)
+ case *map[int8]string:
+ fastpathTV.EncMapInt8StringV(*v, e)
+ case map[int8]uint:
+ fastpathTV.EncMapInt8UintV(v, e)
+ case *map[int8]uint:
+ fastpathTV.EncMapInt8UintV(*v, e)
+ case map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(v, e)
+ case *map[int8]uint8:
+ fastpathTV.EncMapInt8Uint8V(*v, e)
+ case map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(v, e)
+ case *map[int8]uint16:
+ fastpathTV.EncMapInt8Uint16V(*v, e)
+ case map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(v, e)
+ case *map[int8]uint32:
+ fastpathTV.EncMapInt8Uint32V(*v, e)
+ case map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(v, e)
+ case *map[int8]uint64:
+ fastpathTV.EncMapInt8Uint64V(*v, e)
+ case map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(v, e)
+ case *map[int8]uintptr:
+ fastpathTV.EncMapInt8UintptrV(*v, e)
+ case map[int8]int:
+ fastpathTV.EncMapInt8IntV(v, e)
+ case *map[int8]int:
+ fastpathTV.EncMapInt8IntV(*v, e)
+ case map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(v, e)
+ case *map[int8]int8:
+ fastpathTV.EncMapInt8Int8V(*v, e)
+ case map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(v, e)
+ case *map[int8]int16:
+ fastpathTV.EncMapInt8Int16V(*v, e)
+ case map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(v, e)
+ case *map[int8]int32:
+ fastpathTV.EncMapInt8Int32V(*v, e)
+ case map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(v, e)
+ case *map[int8]int64:
+ fastpathTV.EncMapInt8Int64V(*v, e)
+ case map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(v, e)
+ case *map[int8]float32:
+ fastpathTV.EncMapInt8Float32V(*v, e)
+ case map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(v, e)
+ case *map[int8]float64:
+ fastpathTV.EncMapInt8Float64V(*v, e)
+ case map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(v, e)
+ case *map[int8]bool:
+ fastpathTV.EncMapInt8BoolV(*v, e)
+ case map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(v, e)
+ case *map[int16]interface{}:
+ fastpathTV.EncMapInt16IntfV(*v, e)
+ case map[int16]string:
+ fastpathTV.EncMapInt16StringV(v, e)
+ case *map[int16]string:
+ fastpathTV.EncMapInt16StringV(*v, e)
+ case map[int16]uint:
+ fastpathTV.EncMapInt16UintV(v, e)
+ case *map[int16]uint:
+ fastpathTV.EncMapInt16UintV(*v, e)
+ case map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(v, e)
+ case *map[int16]uint8:
+ fastpathTV.EncMapInt16Uint8V(*v, e)
+ case map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(v, e)
+ case *map[int16]uint16:
+ fastpathTV.EncMapInt16Uint16V(*v, e)
+ case map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(v, e)
+ case *map[int16]uint32:
+ fastpathTV.EncMapInt16Uint32V(*v, e)
+ case map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(v, e)
+ case *map[int16]uint64:
+ fastpathTV.EncMapInt16Uint64V(*v, e)
+ case map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(v, e)
+ case *map[int16]uintptr:
+ fastpathTV.EncMapInt16UintptrV(*v, e)
+ case map[int16]int:
+ fastpathTV.EncMapInt16IntV(v, e)
+ case *map[int16]int:
+ fastpathTV.EncMapInt16IntV(*v, e)
+ case map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(v, e)
+ case *map[int16]int8:
+ fastpathTV.EncMapInt16Int8V(*v, e)
+ case map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(v, e)
+ case *map[int16]int16:
+ fastpathTV.EncMapInt16Int16V(*v, e)
+ case map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(v, e)
+ case *map[int16]int32:
+ fastpathTV.EncMapInt16Int32V(*v, e)
+ case map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(v, e)
+ case *map[int16]int64:
+ fastpathTV.EncMapInt16Int64V(*v, e)
+ case map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(v, e)
+ case *map[int16]float32:
+ fastpathTV.EncMapInt16Float32V(*v, e)
+ case map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(v, e)
+ case *map[int16]float64:
+ fastpathTV.EncMapInt16Float64V(*v, e)
+ case map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(v, e)
+ case *map[int16]bool:
+ fastpathTV.EncMapInt16BoolV(*v, e)
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, e)
+ case *map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(*v, e)
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, e)
+ case *map[int32]string:
+ fastpathTV.EncMapInt32StringV(*v, e)
+ case map[int32]uint:
+ fastpathTV.EncMapInt32UintV(v, e)
+ case *map[int32]uint:
+ fastpathTV.EncMapInt32UintV(*v, e)
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, e)
+ case *map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(*v, e)
+ case map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(v, e)
+ case *map[int32]uint16:
+ fastpathTV.EncMapInt32Uint16V(*v, e)
+ case map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(v, e)
+ case *map[int32]uint32:
+ fastpathTV.EncMapInt32Uint32V(*v, e)
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, e)
+ case *map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(*v, e)
+ case map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(v, e)
+ case *map[int32]uintptr:
+ fastpathTV.EncMapInt32UintptrV(*v, e)
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, e)
+ case *map[int32]int:
+ fastpathTV.EncMapInt32IntV(*v, e)
+ case map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(v, e)
+ case *map[int32]int8:
+ fastpathTV.EncMapInt32Int8V(*v, e)
+ case map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(v, e)
+ case *map[int32]int16:
+ fastpathTV.EncMapInt32Int16V(*v, e)
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, e)
+ case *map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(*v, e)
+ case map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(v, e)
+ case *map[int32]int64:
+ fastpathTV.EncMapInt32Int64V(*v, e)
+ case map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(v, e)
+ case *map[int32]float32:
+ fastpathTV.EncMapInt32Float32V(*v, e)
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, e)
+ case *map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(*v, e)
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, e)
+ case *map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(*v, e)
+ case map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(v, e)
+ case *map[int64]interface{}:
+ fastpathTV.EncMapInt64IntfV(*v, e)
+ case map[int64]string:
+ fastpathTV.EncMapInt64StringV(v, e)
+ case *map[int64]string:
+ fastpathTV.EncMapInt64StringV(*v, e)
+ case map[int64]uint:
+ fastpathTV.EncMapInt64UintV(v, e)
+ case *map[int64]uint:
+ fastpathTV.EncMapInt64UintV(*v, e)
+ case map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(v, e)
+ case *map[int64]uint8:
+ fastpathTV.EncMapInt64Uint8V(*v, e)
+ case map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(v, e)
+ case *map[int64]uint16:
+ fastpathTV.EncMapInt64Uint16V(*v, e)
+ case map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(v, e)
+ case *map[int64]uint32:
+ fastpathTV.EncMapInt64Uint32V(*v, e)
+ case map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(v, e)
+ case *map[int64]uint64:
+ fastpathTV.EncMapInt64Uint64V(*v, e)
+ case map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(v, e)
+ case *map[int64]uintptr:
+ fastpathTV.EncMapInt64UintptrV(*v, e)
+ case map[int64]int:
+ fastpathTV.EncMapInt64IntV(v, e)
+ case *map[int64]int:
+ fastpathTV.EncMapInt64IntV(*v, e)
+ case map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(v, e)
+ case *map[int64]int8:
+ fastpathTV.EncMapInt64Int8V(*v, e)
+ case map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(v, e)
+ case *map[int64]int16:
+ fastpathTV.EncMapInt64Int16V(*v, e)
+ case map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(v, e)
+ case *map[int64]int32:
+ fastpathTV.EncMapInt64Int32V(*v, e)
+ case map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(v, e)
+ case *map[int64]int64:
+ fastpathTV.EncMapInt64Int64V(*v, e)
+ case map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(v, e)
+ case *map[int64]float32:
+ fastpathTV.EncMapInt64Float32V(*v, e)
+ case map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(v, e)
+ case *map[int64]float64:
+ fastpathTV.EncMapInt64Float64V(*v, e)
+ case map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(v, e)
+ case *map[int64]bool:
+ fastpathTV.EncMapInt64BoolV(*v, e)
+ case map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(v, e)
+ case *map[bool]interface{}:
+ fastpathTV.EncMapBoolIntfV(*v, e)
+ case map[bool]string:
+ fastpathTV.EncMapBoolStringV(v, e)
+ case *map[bool]string:
+ fastpathTV.EncMapBoolStringV(*v, e)
+ case map[bool]uint:
+ fastpathTV.EncMapBoolUintV(v, e)
+ case *map[bool]uint:
+ fastpathTV.EncMapBoolUintV(*v, e)
+ case map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(v, e)
+ case *map[bool]uint8:
+ fastpathTV.EncMapBoolUint8V(*v, e)
+ case map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(v, e)
+ case *map[bool]uint16:
+ fastpathTV.EncMapBoolUint16V(*v, e)
+ case map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(v, e)
+ case *map[bool]uint32:
+ fastpathTV.EncMapBoolUint32V(*v, e)
+ case map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(v, e)
+ case *map[bool]uint64:
+ fastpathTV.EncMapBoolUint64V(*v, e)
+ case map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(v, e)
+ case *map[bool]uintptr:
+ fastpathTV.EncMapBoolUintptrV(*v, e)
+ case map[bool]int:
+ fastpathTV.EncMapBoolIntV(v, e)
+ case *map[bool]int:
+ fastpathTV.EncMapBoolIntV(*v, e)
+ case map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(v, e)
+ case *map[bool]int8:
+ fastpathTV.EncMapBoolInt8V(*v, e)
+ case map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(v, e)
+ case *map[bool]int16:
+ fastpathTV.EncMapBoolInt16V(*v, e)
+ case map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(v, e)
+ case *map[bool]int32:
+ fastpathTV.EncMapBoolInt32V(*v, e)
+ case map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(v, e)
+ case *map[bool]int64:
+ fastpathTV.EncMapBoolInt64V(*v, e)
+ case map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(v, e)
+ case *map[bool]float32:
+ fastpathTV.EncMapBoolFloat32V(*v, e)
+ case map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(v, e)
+ case *map[bool]float64:
+ fastpathTV.EncMapBoolFloat64V(*v, e)
+ case map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(v, e)
+ case *map[bool]bool:
+ fastpathTV.EncMapBoolBoolV(*v, e)
+
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+
+func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e)
+ } else {
+ fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e)
+ }
+}
+func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ e.encode(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ e.encode(v2)
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ e.encode(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ e.encode(v2)
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e)
+ } else {
+ fastpathTV.EncSliceStringV(rv2i(rv).([]string), e)
+ }
+}
+func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e)
+ } else {
+ fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e)
+ }
+}
+func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeFloat32(v2)
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeFloat32(v2)
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e)
+ } else {
+ fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e)
+ }
+}
+func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeFloat64(v2)
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeFloat64(v2)
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e)
+ } else {
+ fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e)
+ }
+}
+func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint8V(rv2i(rv).([]uint8), e)
+ } else {
+ fastpathTV.EncSliceUint8V(rv2i(rv).([]uint8), e)
+ }
+}
+func (_ fastpathT) EncSliceUint8V(v []uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e)
+ } else {
+ fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e)
+ }
+}
+func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e)
+ } else {
+ fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e)
+ }
+}
+func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e)
+ } else {
+ fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e)
+ }
+}
+func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e)
+ } else {
+ fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e)
+ }
+}
+func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ e.encode(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ e.encode(v2)
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ e.encode(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ e.encode(v2)
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e)
+ } else {
+ fastpathTV.EncSliceIntV(rv2i(rv).([]int), e)
+ }
+}
+func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e)
+ } else {
+ fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e)
+ }
+}
+func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e)
+ } else {
+ fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e)
+ }
+}
+func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e)
+ } else {
+ fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e)
+ }
+}
+func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e)
+ } else {
+ fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e)
+ }
+}
+func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e)
+ } else {
+ fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e)
+ }
+}
+func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeBool(v2)
+ }
+ }
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for _, v2 := range v {
+ ee.EncodeBool(v2)
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e)
+}
+func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e)
+}
+func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e)
+}
+func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e)
+}
+func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e)
+}
+func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e)
+}
+func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e)
+}
+func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e)
+}
+func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e)
+}
+func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e)
+}
+func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e)
+}
+func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e)
+}
+func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e)
+}
+func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e)
+}
+func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e)
+}
+func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e)
+}
+func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e)
+}
+func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ e.encode(v[string(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e)
+}
+func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[string(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeString(cUTF8, v[string(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e)
+}
+func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e)
+}
+func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e)
+}
+func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e)
+}
+func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e)
+}
+func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e)
+}
+func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ e.encode(v[string(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ e.encode(v[string(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e)
+}
+func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e)
+}
+func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e)
+}
+func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e)
+}
+func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e)
+}
+func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v[string(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e)
+}
+func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[string(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeFloat32(v[string(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e)
+}
+func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[string(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeFloat64(v[string(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e)
+}
+func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = string(k)
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[string(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeBool(v[string(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeString(cUTF8, k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeString(cUTF8, k2)
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e)
+}
+func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ e.encode(v[float32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e)
+}
+func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[float32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeString(cUTF8, v[float32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e)
+}
+func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e)
+}
+func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e)
+}
+func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e)
+}
+func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e)
+}
+func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeUint(uint64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e)
+}
+func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[float32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ e.encode(v[float32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e)
+}
+func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e)
+}
+func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e)
+}
+func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e)
+}
+func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e)
+}
+func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeInt(int64(v[float32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e)
+}
+func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[float32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeFloat32(v[float32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e)
+}
+func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[float32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeFloat64(v[float32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e)
+}
+func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(float32(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[float32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat32(float32(k2))
+ ee.EncodeBool(v[float32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat32(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat32(k2)
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e)
+}
+func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ e.encode(v[float64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e)
+}
+func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[float64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeString(cUTF8, v[float64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e)
+}
+func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e)
+}
+func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e)
+}
+func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e)
+}
+func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e)
+}
+func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeUint(uint64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e)
+}
+func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[float64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ e.encode(v[float64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e)
+}
+func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e)
+}
+func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e)
+}
+func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e)
+}
+func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e)
+}
+func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeInt(int64(v[float64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e)
+}
+func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[float64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeFloat32(v[float64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e)
+}
+func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[float64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeFloat64(v[float64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e)
+}
+func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]float64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = float64(k)
+ i++
+ }
+ sort.Sort(floatSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(float64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[float64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeFloat64(float64(k2))
+ ee.EncodeBool(v[float64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeFloat64(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeFloat64(k2)
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e)
+}
+func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ e.encode(v[uint(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e)
+}
+func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[uint(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeString(cUTF8, v[uint(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e)
+}
+func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e)
+}
+func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e)
+}
+func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e)
+}
+func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e)
+}
+func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeUint(uint64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e)
+}
+func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ e.encode(v[uint(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e)
+}
+func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e)
+}
+func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e)
+}
+func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e)
+}
+func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e)
+}
+func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeInt(int64(v[uint(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e)
+}
+func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[uint(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeFloat32(v[uint(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e)
+}
+func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[uint(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeFloat64(v[uint(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e)
+}
+func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[uint(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint(k2)))
+ ee.EncodeBool(v[uint(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e)
+}
+func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ e.encode(v[uint8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e)
+}
+func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[uint8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeString(cUTF8, v[uint8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e)
+}
+func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e)
+}
+func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e)
+}
+func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e)
+}
+func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e)
+}
+func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeUint(uint64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e)
+}
+func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ e.encode(v[uint8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e)
+}
+func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e)
+}
+func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e)
+}
+func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e)
+}
+func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e)
+}
+func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeInt(int64(v[uint8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e)
+}
+func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[uint8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeFloat32(v[uint8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e)
+}
+func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[uint8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeFloat64(v[uint8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e)
+}
+func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[uint8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint8(k2)))
+ ee.EncodeBool(v[uint8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e)
+}
+func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ e.encode(v[uint16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e)
+}
+func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[uint16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeString(cUTF8, v[uint16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e)
+}
+func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e)
+}
+func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e)
+}
+func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e)
+}
+func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e)
+}
+func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeUint(uint64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e)
+}
+func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ e.encode(v[uint16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e)
+}
+func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e)
+}
+func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e)
+}
+func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e)
+}
+func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e)
+}
+func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeInt(int64(v[uint16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e)
+}
+func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[uint16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeFloat32(v[uint16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e)
+}
+func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[uint16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeFloat64(v[uint16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e)
+}
+func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[uint16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint16(k2)))
+ ee.EncodeBool(v[uint16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e)
+}
+func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ e.encode(v[uint32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e)
+}
+func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[uint32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeString(cUTF8, v[uint32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e)
+}
+func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e)
+}
+func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e)
+}
+func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e)
+}
+func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e)
+}
+func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeUint(uint64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e)
+}
+func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ e.encode(v[uint32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e)
+}
+func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e)
+}
+func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e)
+}
+func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e)
+}
+func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e)
+}
+func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeInt(int64(v[uint32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e)
+}
+func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[uint32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeFloat32(v[uint32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e)
+}
+func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[uint32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeFloat64(v[uint32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e)
+}
+func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[uint32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint32(k2)))
+ ee.EncodeBool(v[uint32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e)
+}
+func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ e.encode(v[uint64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e)
+}
+func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[uint64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeString(cUTF8, v[uint64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e)
+}
+func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e)
+}
+func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e)
+}
+func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e)
+}
+func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e)
+}
+func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeUint(uint64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e)
+}
+func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[uint64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ e.encode(v[uint64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e)
+}
+func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e)
+}
+func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e)
+}
+func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e)
+}
+func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e)
+}
+func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeInt(int64(v[uint64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e)
+}
+func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[uint64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeFloat32(v[uint64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e)
+}
+func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[uint64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeFloat64(v[uint64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e)
+}
+func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[uint64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeUint(uint64(uint64(k2)))
+ ee.EncodeBool(v[uint64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeUint(uint64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeUint(uint64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e)
+}
+func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ e.encode(v[uintptr(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e)
+}
+func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[uintptr(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeString(cUTF8, v[uintptr(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e)
+}
+func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e)
+}
+func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e)
+}
+func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e)
+}
+func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e)
+}
+func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeUint(uint64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e)
+}
+func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[uintptr(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ e.encode(v[uintptr(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e)
+}
+func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e)
+}
+func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e)
+}
+func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e)
+}
+func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e)
+}
+func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeInt(int64(v[uintptr(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e)
+}
+func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[uintptr(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeFloat32(v[uintptr(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e)
+}
+func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[uintptr(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeFloat64(v[uintptr(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e)
+}
+func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = uint64(k)
+ i++
+ }
+ sort.Sort(uintSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ e.encode(uintptr(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[uintptr(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ e.encode(uintptr(k2))
+ ee.EncodeBool(v[uintptr(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ e.encode(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ e.encode(k2)
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e)
+}
+func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ e.encode(v[int(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e)
+}
+func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[int(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeString(cUTF8, v[int(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e)
+}
+func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e)
+}
+func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e)
+}
+func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e)
+}
+func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e)
+}
+func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeUint(uint64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e)
+}
+func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ e.encode(v[int(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e)
+}
+func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e)
+}
+func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e)
+}
+func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e)
+}
+func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e)
+}
+func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeInt(int64(v[int(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e)
+}
+func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[int(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeFloat32(v[int(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e)
+}
+func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[int(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeFloat64(v[int(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e)
+}
+func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[int(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int(k2)))
+ ee.EncodeBool(v[int(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e)
+}
+func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ e.encode(v[int8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e)
+}
+func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[int8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeString(cUTF8, v[int8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e)
+}
+func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e)
+}
+func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e)
+}
+func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e)
+}
+func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e)
+}
+func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeUint(uint64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e)
+}
+func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ e.encode(v[int8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e)
+}
+func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e)
+}
+func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e)
+}
+func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e)
+}
+func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e)
+}
+func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeInt(int64(v[int8(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e)
+}
+func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[int8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeFloat32(v[int8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e)
+}
+func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[int8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeFloat64(v[int8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e)
+}
+func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int8(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[int8(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int8(k2)))
+ ee.EncodeBool(v[int8(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e)
+}
+func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ e.encode(v[int16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e)
+}
+func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[int16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeString(cUTF8, v[int16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e)
+}
+func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e)
+}
+func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e)
+}
+func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e)
+}
+func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e)
+}
+func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeUint(uint64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e)
+}
+func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ e.encode(v[int16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e)
+}
+func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e)
+}
+func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e)
+}
+func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e)
+}
+func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e)
+}
+func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeInt(int64(v[int16(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e)
+}
+func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[int16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeFloat32(v[int16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e)
+}
+func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[int16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeFloat64(v[int16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e)
+}
+func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int16(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[int16(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int16(k2)))
+ ee.EncodeBool(v[int16(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e)
+}
+func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ e.encode(v[int32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e)
+}
+func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[int32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeString(cUTF8, v[int32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e)
+}
+func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e)
+}
+func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e)
+}
+func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e)
+}
+func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e)
+}
+func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeUint(uint64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e)
+}
+func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ e.encode(v[int32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e)
+}
+func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e)
+}
+func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e)
+}
+func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e)
+}
+func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e)
+}
+func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeInt(int64(v[int32(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e)
+}
+func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[int32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeFloat32(v[int32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e)
+}
+func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[int32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeFloat64(v[int32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e)
+}
+func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int32(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[int32(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int32(k2)))
+ ee.EncodeBool(v[int32(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e)
+}
+func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ e.encode(v[int64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e)
+}
+func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[int64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeString(cUTF8, v[int64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e)
+}
+func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e)
+}
+func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e)
+}
+func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e)
+}
+func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e)
+}
+func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeUint(uint64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e)
+}
+func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ e.encode(v[int64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ e.encode(v[int64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e)
+}
+func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e)
+}
+func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e)
+}
+func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e)
+}
+func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e)
+}
+func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeInt(int64(v[int64(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e)
+}
+func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[int64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeFloat32(v[int64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e)
+}
+func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[int64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeFloat64(v[int64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e)
+}
+func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int64, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = int64(k)
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(int64(k2)))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[int64(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeInt(int64(int64(k2)))
+ ee.EncodeBool(v[int64(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeInt(int64(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeInt(int64(k2))
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e)
+}
+func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ e.encode(v[bool(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e)
+}
+func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v[bool(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeString(cUTF8, v[bool(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeString(cUTF8, v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeString(cUTF8, v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e)
+}
+func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e)
+}
+func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e)
+}
+func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e)
+}
+func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e)
+}
+func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeUint(uint64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeUint(uint64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeUint(uint64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e)
+}
+func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ e.encode(v[bool(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ e.encode(v[bool(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ e.encode(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ e.encode(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e)
+}
+func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e)
+}
+func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e)
+}
+func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e)
+}
+func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e)
+}
+func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeInt(int64(v[bool(k2)]))
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeInt(int64(v2))
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeInt(int64(v2))
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e)
+}
+func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v[bool(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeFloat32(v[bool(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat32(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeFloat32(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e)
+}
+func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v[bool(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeFloat64(v[bool(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeFloat64(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeFloat64(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e)
+}
+func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) {
+ if v == nil {
+ e.e.EncodeNil()
+ return
+ }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]bool, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = bool(k)
+ i++
+ }
+ sort.Sort(boolSlice(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(bool(k2))
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v[bool(k2)])
+ }
+ } else {
+ for _, k2 := range v2 {
+ ee.EncodeBool(bool(k2))
+ ee.EncodeBool(v[bool(k2)])
+ }
+ }
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ ee.EncodeBool(k2)
+ ee.WriteMapElemValue()
+ ee.EncodeBool(v2)
+ }
+ } else {
+ for k2, v2 := range v {
+ ee.EncodeBool(k2)
+ ee.EncodeBool(v2)
+ }
+ }
+ }
+ ee.WriteMapEnd()
+}
+
+// -- decode
+
+// -- -- fast path type switch
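+// Note (summary comment, not part of upstream): fastpathDecodeTypeSwitch
+// dispatches decoding for the supported built-in slice and map types without
+// reflection. Non-pointer values are decoded in place where possible; for
+// pointer values, *v is replaced whenever the fastpath helper reports that
+// the underlying slice or map changed.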
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ var changed bool
+ switch v := iv.(type) {
+
+ case []interface{}:
+ var v2 []interface{}
+ v2, changed = fastpathTV.DecSliceIntfV(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]interface{}:
+ var v2 []interface{}
+ v2, changed = fastpathTV.DecSliceIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []string:
+ var v2 []string
+ v2, changed = fastpathTV.DecSliceStringV(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]string:
+ var v2 []string
+ v2, changed = fastpathTV.DecSliceStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []float32:
+ var v2 []float32
+ v2, changed = fastpathTV.DecSliceFloat32V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]float32:
+ var v2 []float32
+ v2, changed = fastpathTV.DecSliceFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []float64:
+ var v2 []float64
+ v2, changed = fastpathTV.DecSliceFloat64V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]float64:
+ var v2 []float64
+ v2, changed = fastpathTV.DecSliceFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []uint:
+ var v2 []uint
+ v2, changed = fastpathTV.DecSliceUintV(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]uint:
+ var v2 []uint
+ v2, changed = fastpathTV.DecSliceUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []uint16:
+ var v2 []uint16
+ v2, changed = fastpathTV.DecSliceUint16V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]uint16:
+ var v2 []uint16
+ v2, changed = fastpathTV.DecSliceUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []uint32:
+ var v2 []uint32
+ v2, changed = fastpathTV.DecSliceUint32V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]uint32:
+ var v2 []uint32
+ v2, changed = fastpathTV.DecSliceUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []uint64:
+ var v2 []uint64
+ v2, changed = fastpathTV.DecSliceUint64V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]uint64:
+ var v2 []uint64
+ v2, changed = fastpathTV.DecSliceUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []uintptr:
+ var v2 []uintptr
+ v2, changed = fastpathTV.DecSliceUintptrV(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]uintptr:
+ var v2 []uintptr
+ v2, changed = fastpathTV.DecSliceUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []int:
+ var v2 []int
+ v2, changed = fastpathTV.DecSliceIntV(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]int:
+ var v2 []int
+ v2, changed = fastpathTV.DecSliceIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []int8:
+ var v2 []int8
+ v2, changed = fastpathTV.DecSliceInt8V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]int8:
+ var v2 []int8
+ v2, changed = fastpathTV.DecSliceInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []int16:
+ var v2 []int16
+ v2, changed = fastpathTV.DecSliceInt16V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]int16:
+ var v2 []int16
+ v2, changed = fastpathTV.DecSliceInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []int32:
+ var v2 []int32
+ v2, changed = fastpathTV.DecSliceInt32V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]int32:
+ var v2 []int32
+ v2, changed = fastpathTV.DecSliceInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []int64:
+ var v2 []int64
+ v2, changed = fastpathTV.DecSliceInt64V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]int64:
+ var v2 []int64
+ v2, changed = fastpathTV.DecSliceInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case []bool:
+ var v2 []bool
+ v2, changed = fastpathTV.DecSliceBoolV(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]bool:
+ var v2 []bool
+ v2, changed = fastpathTV.DecSliceBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+
+ case map[interface{}]interface{}:
+ fastpathTV.DecMapIntfIntfV(v, false, d)
+ case *map[interface{}]interface{}:
+ var v2 map[interface{}]interface{}
+ v2, changed = fastpathTV.DecMapIntfIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]string:
+ fastpathTV.DecMapIntfStringV(v, false, d)
+ case *map[interface{}]string:
+ var v2 map[interface{}]string
+ v2, changed = fastpathTV.DecMapIntfStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]uint:
+ fastpathTV.DecMapIntfUintV(v, false, d)
+ case *map[interface{}]uint:
+ var v2 map[interface{}]uint
+ v2, changed = fastpathTV.DecMapIntfUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]uint8:
+ fastpathTV.DecMapIntfUint8V(v, false, d)
+ case *map[interface{}]uint8:
+ var v2 map[interface{}]uint8
+ v2, changed = fastpathTV.DecMapIntfUint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]uint16:
+ fastpathTV.DecMapIntfUint16V(v, false, d)
+ case *map[interface{}]uint16:
+ var v2 map[interface{}]uint16
+ v2, changed = fastpathTV.DecMapIntfUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]uint32:
+ fastpathTV.DecMapIntfUint32V(v, false, d)
+ case *map[interface{}]uint32:
+ var v2 map[interface{}]uint32
+ v2, changed = fastpathTV.DecMapIntfUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]uint64:
+ fastpathTV.DecMapIntfUint64V(v, false, d)
+ case *map[interface{}]uint64:
+ var v2 map[interface{}]uint64
+ v2, changed = fastpathTV.DecMapIntfUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]uintptr:
+ fastpathTV.DecMapIntfUintptrV(v, false, d)
+ case *map[interface{}]uintptr:
+ var v2 map[interface{}]uintptr
+ v2, changed = fastpathTV.DecMapIntfUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]int:
+ fastpathTV.DecMapIntfIntV(v, false, d)
+ case *map[interface{}]int:
+ var v2 map[interface{}]int
+ v2, changed = fastpathTV.DecMapIntfIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]int8:
+ fastpathTV.DecMapIntfInt8V(v, false, d)
+ case *map[interface{}]int8:
+ var v2 map[interface{}]int8
+ v2, changed = fastpathTV.DecMapIntfInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]int16:
+ fastpathTV.DecMapIntfInt16V(v, false, d)
+ case *map[interface{}]int16:
+ var v2 map[interface{}]int16
+ v2, changed = fastpathTV.DecMapIntfInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]int32:
+ fastpathTV.DecMapIntfInt32V(v, false, d)
+ case *map[interface{}]int32:
+ var v2 map[interface{}]int32
+ v2, changed = fastpathTV.DecMapIntfInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]int64:
+ fastpathTV.DecMapIntfInt64V(v, false, d)
+ case *map[interface{}]int64:
+ var v2 map[interface{}]int64
+ v2, changed = fastpathTV.DecMapIntfInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]float32:
+ fastpathTV.DecMapIntfFloat32V(v, false, d)
+ case *map[interface{}]float32:
+ var v2 map[interface{}]float32
+ v2, changed = fastpathTV.DecMapIntfFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]float64:
+ fastpathTV.DecMapIntfFloat64V(v, false, d)
+ case *map[interface{}]float64:
+ var v2 map[interface{}]float64
+ v2, changed = fastpathTV.DecMapIntfFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[interface{}]bool:
+ fastpathTV.DecMapIntfBoolV(v, false, d)
+ case *map[interface{}]bool:
+ var v2 map[interface{}]bool
+ v2, changed = fastpathTV.DecMapIntfBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]interface{}:
+ fastpathTV.DecMapStringIntfV(v, false, d)
+ case *map[string]interface{}:
+ var v2 map[string]interface{}
+ v2, changed = fastpathTV.DecMapStringIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]string:
+ fastpathTV.DecMapStringStringV(v, false, d)
+ case *map[string]string:
+ var v2 map[string]string
+ v2, changed = fastpathTV.DecMapStringStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]uint:
+ fastpathTV.DecMapStringUintV(v, false, d)
+ case *map[string]uint:
+ var v2 map[string]uint
+ v2, changed = fastpathTV.DecMapStringUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]uint8:
+ fastpathTV.DecMapStringUint8V(v, false, d)
+ case *map[string]uint8:
+ var v2 map[string]uint8
+ v2, changed = fastpathTV.DecMapStringUint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]uint16:
+ fastpathTV.DecMapStringUint16V(v, false, d)
+ case *map[string]uint16:
+ var v2 map[string]uint16
+ v2, changed = fastpathTV.DecMapStringUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]uint32:
+ fastpathTV.DecMapStringUint32V(v, false, d)
+ case *map[string]uint32:
+ var v2 map[string]uint32
+ v2, changed = fastpathTV.DecMapStringUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]uint64:
+ fastpathTV.DecMapStringUint64V(v, false, d)
+ case *map[string]uint64:
+ var v2 map[string]uint64
+ v2, changed = fastpathTV.DecMapStringUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]uintptr:
+ fastpathTV.DecMapStringUintptrV(v, false, d)
+ case *map[string]uintptr:
+ var v2 map[string]uintptr
+ v2, changed = fastpathTV.DecMapStringUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]int:
+ fastpathTV.DecMapStringIntV(v, false, d)
+ case *map[string]int:
+ var v2 map[string]int
+ v2, changed = fastpathTV.DecMapStringIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]int8:
+ fastpathTV.DecMapStringInt8V(v, false, d)
+ case *map[string]int8:
+ var v2 map[string]int8
+ v2, changed = fastpathTV.DecMapStringInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]int16:
+ fastpathTV.DecMapStringInt16V(v, false, d)
+ case *map[string]int16:
+ var v2 map[string]int16
+ v2, changed = fastpathTV.DecMapStringInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]int32:
+ fastpathTV.DecMapStringInt32V(v, false, d)
+ case *map[string]int32:
+ var v2 map[string]int32
+ v2, changed = fastpathTV.DecMapStringInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]int64:
+ fastpathTV.DecMapStringInt64V(v, false, d)
+ case *map[string]int64:
+ var v2 map[string]int64
+ v2, changed = fastpathTV.DecMapStringInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]float32:
+ fastpathTV.DecMapStringFloat32V(v, false, d)
+ case *map[string]float32:
+ var v2 map[string]float32
+ v2, changed = fastpathTV.DecMapStringFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]float64:
+ fastpathTV.DecMapStringFloat64V(v, false, d)
+ case *map[string]float64:
+ var v2 map[string]float64
+ v2, changed = fastpathTV.DecMapStringFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[string]bool:
+ fastpathTV.DecMapStringBoolV(v, false, d)
+ case *map[string]bool:
+ var v2 map[string]bool
+ v2, changed = fastpathTV.DecMapStringBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]interface{}:
+ fastpathTV.DecMapFloat32IntfV(v, false, d)
+ case *map[float32]interface{}:
+ var v2 map[float32]interface{}
+ v2, changed = fastpathTV.DecMapFloat32IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]string:
+ fastpathTV.DecMapFloat32StringV(v, false, d)
+ case *map[float32]string:
+ var v2 map[float32]string
+ v2, changed = fastpathTV.DecMapFloat32StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]uint:
+ fastpathTV.DecMapFloat32UintV(v, false, d)
+ case *map[float32]uint:
+ var v2 map[float32]uint
+ v2, changed = fastpathTV.DecMapFloat32UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]uint8:
+ fastpathTV.DecMapFloat32Uint8V(v, false, d)
+ case *map[float32]uint8:
+ var v2 map[float32]uint8
+ v2, changed = fastpathTV.DecMapFloat32Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]uint16:
+ fastpathTV.DecMapFloat32Uint16V(v, false, d)
+ case *map[float32]uint16:
+ var v2 map[float32]uint16
+ v2, changed = fastpathTV.DecMapFloat32Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]uint32:
+ fastpathTV.DecMapFloat32Uint32V(v, false, d)
+ case *map[float32]uint32:
+ var v2 map[float32]uint32
+ v2, changed = fastpathTV.DecMapFloat32Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]uint64:
+ fastpathTV.DecMapFloat32Uint64V(v, false, d)
+ case *map[float32]uint64:
+ var v2 map[float32]uint64
+ v2, changed = fastpathTV.DecMapFloat32Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]uintptr:
+ fastpathTV.DecMapFloat32UintptrV(v, false, d)
+ case *map[float32]uintptr:
+ var v2 map[float32]uintptr
+ v2, changed = fastpathTV.DecMapFloat32UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]int:
+ fastpathTV.DecMapFloat32IntV(v, false, d)
+ case *map[float32]int:
+ var v2 map[float32]int
+ v2, changed = fastpathTV.DecMapFloat32IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]int8:
+ fastpathTV.DecMapFloat32Int8V(v, false, d)
+ case *map[float32]int8:
+ var v2 map[float32]int8
+ v2, changed = fastpathTV.DecMapFloat32Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]int16:
+ fastpathTV.DecMapFloat32Int16V(v, false, d)
+ case *map[float32]int16:
+ var v2 map[float32]int16
+ v2, changed = fastpathTV.DecMapFloat32Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]int32:
+ fastpathTV.DecMapFloat32Int32V(v, false, d)
+ case *map[float32]int32:
+ var v2 map[float32]int32
+ v2, changed = fastpathTV.DecMapFloat32Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]int64:
+ fastpathTV.DecMapFloat32Int64V(v, false, d)
+ case *map[float32]int64:
+ var v2 map[float32]int64
+ v2, changed = fastpathTV.DecMapFloat32Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]float32:
+ fastpathTV.DecMapFloat32Float32V(v, false, d)
+ case *map[float32]float32:
+ var v2 map[float32]float32
+ v2, changed = fastpathTV.DecMapFloat32Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]float64:
+ fastpathTV.DecMapFloat32Float64V(v, false, d)
+ case *map[float32]float64:
+ var v2 map[float32]float64
+ v2, changed = fastpathTV.DecMapFloat32Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float32]bool:
+ fastpathTV.DecMapFloat32BoolV(v, false, d)
+ case *map[float32]bool:
+ var v2 map[float32]bool
+ v2, changed = fastpathTV.DecMapFloat32BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]interface{}:
+ fastpathTV.DecMapFloat64IntfV(v, false, d)
+ case *map[float64]interface{}:
+ var v2 map[float64]interface{}
+ v2, changed = fastpathTV.DecMapFloat64IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]string:
+ fastpathTV.DecMapFloat64StringV(v, false, d)
+ case *map[float64]string:
+ var v2 map[float64]string
+ v2, changed = fastpathTV.DecMapFloat64StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]uint:
+ fastpathTV.DecMapFloat64UintV(v, false, d)
+ case *map[float64]uint:
+ var v2 map[float64]uint
+ v2, changed = fastpathTV.DecMapFloat64UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]uint8:
+ fastpathTV.DecMapFloat64Uint8V(v, false, d)
+ case *map[float64]uint8:
+ var v2 map[float64]uint8
+ v2, changed = fastpathTV.DecMapFloat64Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]uint16:
+ fastpathTV.DecMapFloat64Uint16V(v, false, d)
+ case *map[float64]uint16:
+ var v2 map[float64]uint16
+ v2, changed = fastpathTV.DecMapFloat64Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]uint32:
+ fastpathTV.DecMapFloat64Uint32V(v, false, d)
+ case *map[float64]uint32:
+ var v2 map[float64]uint32
+ v2, changed = fastpathTV.DecMapFloat64Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]uint64:
+ fastpathTV.DecMapFloat64Uint64V(v, false, d)
+ case *map[float64]uint64:
+ var v2 map[float64]uint64
+ v2, changed = fastpathTV.DecMapFloat64Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]uintptr:
+ fastpathTV.DecMapFloat64UintptrV(v, false, d)
+ case *map[float64]uintptr:
+ var v2 map[float64]uintptr
+ v2, changed = fastpathTV.DecMapFloat64UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]int:
+ fastpathTV.DecMapFloat64IntV(v, false, d)
+ case *map[float64]int:
+ var v2 map[float64]int
+ v2, changed = fastpathTV.DecMapFloat64IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]int8:
+ fastpathTV.DecMapFloat64Int8V(v, false, d)
+ case *map[float64]int8:
+ var v2 map[float64]int8
+ v2, changed = fastpathTV.DecMapFloat64Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]int16:
+ fastpathTV.DecMapFloat64Int16V(v, false, d)
+ case *map[float64]int16:
+ var v2 map[float64]int16
+ v2, changed = fastpathTV.DecMapFloat64Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]int32:
+ fastpathTV.DecMapFloat64Int32V(v, false, d)
+ case *map[float64]int32:
+ var v2 map[float64]int32
+ v2, changed = fastpathTV.DecMapFloat64Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]int64:
+ fastpathTV.DecMapFloat64Int64V(v, false, d)
+ case *map[float64]int64:
+ var v2 map[float64]int64
+ v2, changed = fastpathTV.DecMapFloat64Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]float32:
+ fastpathTV.DecMapFloat64Float32V(v, false, d)
+ case *map[float64]float32:
+ var v2 map[float64]float32
+ v2, changed = fastpathTV.DecMapFloat64Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]float64:
+ fastpathTV.DecMapFloat64Float64V(v, false, d)
+ case *map[float64]float64:
+ var v2 map[float64]float64
+ v2, changed = fastpathTV.DecMapFloat64Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[float64]bool:
+ fastpathTV.DecMapFloat64BoolV(v, false, d)
+ case *map[float64]bool:
+ var v2 map[float64]bool
+ v2, changed = fastpathTV.DecMapFloat64BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]interface{}:
+ fastpathTV.DecMapUintIntfV(v, false, d)
+ case *map[uint]interface{}:
+ var v2 map[uint]interface{}
+ v2, changed = fastpathTV.DecMapUintIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]string:
+ fastpathTV.DecMapUintStringV(v, false, d)
+ case *map[uint]string:
+ var v2 map[uint]string
+ v2, changed = fastpathTV.DecMapUintStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]uint:
+ fastpathTV.DecMapUintUintV(v, false, d)
+ case *map[uint]uint:
+ var v2 map[uint]uint
+ v2, changed = fastpathTV.DecMapUintUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]uint8:
+ fastpathTV.DecMapUintUint8V(v, false, d)
+ case *map[uint]uint8:
+ var v2 map[uint]uint8
+ v2, changed = fastpathTV.DecMapUintUint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]uint16:
+ fastpathTV.DecMapUintUint16V(v, false, d)
+ case *map[uint]uint16:
+ var v2 map[uint]uint16
+ v2, changed = fastpathTV.DecMapUintUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]uint32:
+ fastpathTV.DecMapUintUint32V(v, false, d)
+ case *map[uint]uint32:
+ var v2 map[uint]uint32
+ v2, changed = fastpathTV.DecMapUintUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]uint64:
+ fastpathTV.DecMapUintUint64V(v, false, d)
+ case *map[uint]uint64:
+ var v2 map[uint]uint64
+ v2, changed = fastpathTV.DecMapUintUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]uintptr:
+ fastpathTV.DecMapUintUintptrV(v, false, d)
+ case *map[uint]uintptr:
+ var v2 map[uint]uintptr
+ v2, changed = fastpathTV.DecMapUintUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]int:
+ fastpathTV.DecMapUintIntV(v, false, d)
+ case *map[uint]int:
+ var v2 map[uint]int
+ v2, changed = fastpathTV.DecMapUintIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]int8:
+ fastpathTV.DecMapUintInt8V(v, false, d)
+ case *map[uint]int8:
+ var v2 map[uint]int8
+ v2, changed = fastpathTV.DecMapUintInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]int16:
+ fastpathTV.DecMapUintInt16V(v, false, d)
+ case *map[uint]int16:
+ var v2 map[uint]int16
+ v2, changed = fastpathTV.DecMapUintInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]int32:
+ fastpathTV.DecMapUintInt32V(v, false, d)
+ case *map[uint]int32:
+ var v2 map[uint]int32
+ v2, changed = fastpathTV.DecMapUintInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]int64:
+ fastpathTV.DecMapUintInt64V(v, false, d)
+ case *map[uint]int64:
+ var v2 map[uint]int64
+ v2, changed = fastpathTV.DecMapUintInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]float32:
+ fastpathTV.DecMapUintFloat32V(v, false, d)
+ case *map[uint]float32:
+ var v2 map[uint]float32
+ v2, changed = fastpathTV.DecMapUintFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]float64:
+ fastpathTV.DecMapUintFloat64V(v, false, d)
+ case *map[uint]float64:
+ var v2 map[uint]float64
+ v2, changed = fastpathTV.DecMapUintFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint]bool:
+ fastpathTV.DecMapUintBoolV(v, false, d)
+ case *map[uint]bool:
+ var v2 map[uint]bool
+ v2, changed = fastpathTV.DecMapUintBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]interface{}:
+ fastpathTV.DecMapUint8IntfV(v, false, d)
+ case *map[uint8]interface{}:
+ var v2 map[uint8]interface{}
+ v2, changed = fastpathTV.DecMapUint8IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]string:
+ fastpathTV.DecMapUint8StringV(v, false, d)
+ case *map[uint8]string:
+ var v2 map[uint8]string
+ v2, changed = fastpathTV.DecMapUint8StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]uint:
+ fastpathTV.DecMapUint8UintV(v, false, d)
+ case *map[uint8]uint:
+ var v2 map[uint8]uint
+ v2, changed = fastpathTV.DecMapUint8UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]uint8:
+ fastpathTV.DecMapUint8Uint8V(v, false, d)
+ case *map[uint8]uint8:
+ var v2 map[uint8]uint8
+ v2, changed = fastpathTV.DecMapUint8Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]uint16:
+ fastpathTV.DecMapUint8Uint16V(v, false, d)
+ case *map[uint8]uint16:
+ var v2 map[uint8]uint16
+ v2, changed = fastpathTV.DecMapUint8Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]uint32:
+ fastpathTV.DecMapUint8Uint32V(v, false, d)
+ case *map[uint8]uint32:
+ var v2 map[uint8]uint32
+ v2, changed = fastpathTV.DecMapUint8Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]uint64:
+ fastpathTV.DecMapUint8Uint64V(v, false, d)
+ case *map[uint8]uint64:
+ var v2 map[uint8]uint64
+ v2, changed = fastpathTV.DecMapUint8Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]uintptr:
+ fastpathTV.DecMapUint8UintptrV(v, false, d)
+ case *map[uint8]uintptr:
+ var v2 map[uint8]uintptr
+ v2, changed = fastpathTV.DecMapUint8UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]int:
+ fastpathTV.DecMapUint8IntV(v, false, d)
+ case *map[uint8]int:
+ var v2 map[uint8]int
+ v2, changed = fastpathTV.DecMapUint8IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]int8:
+ fastpathTV.DecMapUint8Int8V(v, false, d)
+ case *map[uint8]int8:
+ var v2 map[uint8]int8
+ v2, changed = fastpathTV.DecMapUint8Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]int16:
+ fastpathTV.DecMapUint8Int16V(v, false, d)
+ case *map[uint8]int16:
+ var v2 map[uint8]int16
+ v2, changed = fastpathTV.DecMapUint8Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]int32:
+ fastpathTV.DecMapUint8Int32V(v, false, d)
+ case *map[uint8]int32:
+ var v2 map[uint8]int32
+ v2, changed = fastpathTV.DecMapUint8Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]int64:
+ fastpathTV.DecMapUint8Int64V(v, false, d)
+ case *map[uint8]int64:
+ var v2 map[uint8]int64
+ v2, changed = fastpathTV.DecMapUint8Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]float32:
+ fastpathTV.DecMapUint8Float32V(v, false, d)
+ case *map[uint8]float32:
+ var v2 map[uint8]float32
+ v2, changed = fastpathTV.DecMapUint8Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]float64:
+ fastpathTV.DecMapUint8Float64V(v, false, d)
+ case *map[uint8]float64:
+ var v2 map[uint8]float64
+ v2, changed = fastpathTV.DecMapUint8Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint8]bool:
+ fastpathTV.DecMapUint8BoolV(v, false, d)
+ case *map[uint8]bool:
+ var v2 map[uint8]bool
+ v2, changed = fastpathTV.DecMapUint8BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]interface{}:
+ fastpathTV.DecMapUint16IntfV(v, false, d)
+ case *map[uint16]interface{}:
+ var v2 map[uint16]interface{}
+ v2, changed = fastpathTV.DecMapUint16IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]string:
+ fastpathTV.DecMapUint16StringV(v, false, d)
+ case *map[uint16]string:
+ var v2 map[uint16]string
+ v2, changed = fastpathTV.DecMapUint16StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]uint:
+ fastpathTV.DecMapUint16UintV(v, false, d)
+ case *map[uint16]uint:
+ var v2 map[uint16]uint
+ v2, changed = fastpathTV.DecMapUint16UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]uint8:
+ fastpathTV.DecMapUint16Uint8V(v, false, d)
+ case *map[uint16]uint8:
+ var v2 map[uint16]uint8
+ v2, changed = fastpathTV.DecMapUint16Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]uint16:
+ fastpathTV.DecMapUint16Uint16V(v, false, d)
+ case *map[uint16]uint16:
+ var v2 map[uint16]uint16
+ v2, changed = fastpathTV.DecMapUint16Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]uint32:
+ fastpathTV.DecMapUint16Uint32V(v, false, d)
+ case *map[uint16]uint32:
+ var v2 map[uint16]uint32
+ v2, changed = fastpathTV.DecMapUint16Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]uint64:
+ fastpathTV.DecMapUint16Uint64V(v, false, d)
+ case *map[uint16]uint64:
+ var v2 map[uint16]uint64
+ v2, changed = fastpathTV.DecMapUint16Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]uintptr:
+ fastpathTV.DecMapUint16UintptrV(v, false, d)
+ case *map[uint16]uintptr:
+ var v2 map[uint16]uintptr
+ v2, changed = fastpathTV.DecMapUint16UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]int:
+ fastpathTV.DecMapUint16IntV(v, false, d)
+ case *map[uint16]int:
+ var v2 map[uint16]int
+ v2, changed = fastpathTV.DecMapUint16IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]int8:
+ fastpathTV.DecMapUint16Int8V(v, false, d)
+ case *map[uint16]int8:
+ var v2 map[uint16]int8
+ v2, changed = fastpathTV.DecMapUint16Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]int16:
+ fastpathTV.DecMapUint16Int16V(v, false, d)
+ case *map[uint16]int16:
+ var v2 map[uint16]int16
+ v2, changed = fastpathTV.DecMapUint16Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]int32:
+ fastpathTV.DecMapUint16Int32V(v, false, d)
+ case *map[uint16]int32:
+ var v2 map[uint16]int32
+ v2, changed = fastpathTV.DecMapUint16Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]int64:
+ fastpathTV.DecMapUint16Int64V(v, false, d)
+ case *map[uint16]int64:
+ var v2 map[uint16]int64
+ v2, changed = fastpathTV.DecMapUint16Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]float32:
+ fastpathTV.DecMapUint16Float32V(v, false, d)
+ case *map[uint16]float32:
+ var v2 map[uint16]float32
+ v2, changed = fastpathTV.DecMapUint16Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]float64:
+ fastpathTV.DecMapUint16Float64V(v, false, d)
+ case *map[uint16]float64:
+ var v2 map[uint16]float64
+ v2, changed = fastpathTV.DecMapUint16Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint16]bool:
+ fastpathTV.DecMapUint16BoolV(v, false, d)
+ case *map[uint16]bool:
+ var v2 map[uint16]bool
+ v2, changed = fastpathTV.DecMapUint16BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]interface{}:
+ fastpathTV.DecMapUint32IntfV(v, false, d)
+ case *map[uint32]interface{}:
+ var v2 map[uint32]interface{}
+ v2, changed = fastpathTV.DecMapUint32IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]string:
+ fastpathTV.DecMapUint32StringV(v, false, d)
+ case *map[uint32]string:
+ var v2 map[uint32]string
+ v2, changed = fastpathTV.DecMapUint32StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]uint:
+ fastpathTV.DecMapUint32UintV(v, false, d)
+ case *map[uint32]uint:
+ var v2 map[uint32]uint
+ v2, changed = fastpathTV.DecMapUint32UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]uint8:
+ fastpathTV.DecMapUint32Uint8V(v, false, d)
+ case *map[uint32]uint8:
+ var v2 map[uint32]uint8
+ v2, changed = fastpathTV.DecMapUint32Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]uint16:
+ fastpathTV.DecMapUint32Uint16V(v, false, d)
+ case *map[uint32]uint16:
+ var v2 map[uint32]uint16
+ v2, changed = fastpathTV.DecMapUint32Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]uint32:
+ fastpathTV.DecMapUint32Uint32V(v, false, d)
+ case *map[uint32]uint32:
+ var v2 map[uint32]uint32
+ v2, changed = fastpathTV.DecMapUint32Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]uint64:
+ fastpathTV.DecMapUint32Uint64V(v, false, d)
+ case *map[uint32]uint64:
+ var v2 map[uint32]uint64
+ v2, changed = fastpathTV.DecMapUint32Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]uintptr:
+ fastpathTV.DecMapUint32UintptrV(v, false, d)
+ case *map[uint32]uintptr:
+ var v2 map[uint32]uintptr
+ v2, changed = fastpathTV.DecMapUint32UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]int:
+ fastpathTV.DecMapUint32IntV(v, false, d)
+ case *map[uint32]int:
+ var v2 map[uint32]int
+ v2, changed = fastpathTV.DecMapUint32IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]int8:
+ fastpathTV.DecMapUint32Int8V(v, false, d)
+ case *map[uint32]int8:
+ var v2 map[uint32]int8
+ v2, changed = fastpathTV.DecMapUint32Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]int16:
+ fastpathTV.DecMapUint32Int16V(v, false, d)
+ case *map[uint32]int16:
+ var v2 map[uint32]int16
+ v2, changed = fastpathTV.DecMapUint32Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]int32:
+ fastpathTV.DecMapUint32Int32V(v, false, d)
+ case *map[uint32]int32:
+ var v2 map[uint32]int32
+ v2, changed = fastpathTV.DecMapUint32Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]int64:
+ fastpathTV.DecMapUint32Int64V(v, false, d)
+ case *map[uint32]int64:
+ var v2 map[uint32]int64
+ v2, changed = fastpathTV.DecMapUint32Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]float32:
+ fastpathTV.DecMapUint32Float32V(v, false, d)
+ case *map[uint32]float32:
+ var v2 map[uint32]float32
+ v2, changed = fastpathTV.DecMapUint32Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]float64:
+ fastpathTV.DecMapUint32Float64V(v, false, d)
+ case *map[uint32]float64:
+ var v2 map[uint32]float64
+ v2, changed = fastpathTV.DecMapUint32Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint32]bool:
+ fastpathTV.DecMapUint32BoolV(v, false, d)
+ case *map[uint32]bool:
+ var v2 map[uint32]bool
+ v2, changed = fastpathTV.DecMapUint32BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]interface{}:
+ fastpathTV.DecMapUint64IntfV(v, false, d)
+ case *map[uint64]interface{}:
+ var v2 map[uint64]interface{}
+ v2, changed = fastpathTV.DecMapUint64IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]string:
+ fastpathTV.DecMapUint64StringV(v, false, d)
+ case *map[uint64]string:
+ var v2 map[uint64]string
+ v2, changed = fastpathTV.DecMapUint64StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]uint:
+ fastpathTV.DecMapUint64UintV(v, false, d)
+ case *map[uint64]uint:
+ var v2 map[uint64]uint
+ v2, changed = fastpathTV.DecMapUint64UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]uint8:
+ fastpathTV.DecMapUint64Uint8V(v, false, d)
+ case *map[uint64]uint8:
+ var v2 map[uint64]uint8
+ v2, changed = fastpathTV.DecMapUint64Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]uint16:
+ fastpathTV.DecMapUint64Uint16V(v, false, d)
+ case *map[uint64]uint16:
+ var v2 map[uint64]uint16
+ v2, changed = fastpathTV.DecMapUint64Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]uint32:
+ fastpathTV.DecMapUint64Uint32V(v, false, d)
+ case *map[uint64]uint32:
+ var v2 map[uint64]uint32
+ v2, changed = fastpathTV.DecMapUint64Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]uint64:
+ fastpathTV.DecMapUint64Uint64V(v, false, d)
+ case *map[uint64]uint64:
+ var v2 map[uint64]uint64
+ v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]uintptr:
+ fastpathTV.DecMapUint64UintptrV(v, false, d)
+ case *map[uint64]uintptr:
+ var v2 map[uint64]uintptr
+ v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]int:
+ fastpathTV.DecMapUint64IntV(v, false, d)
+ case *map[uint64]int:
+ var v2 map[uint64]int
+ v2, changed = fastpathTV.DecMapUint64IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]int8:
+ fastpathTV.DecMapUint64Int8V(v, false, d)
+ case *map[uint64]int8:
+ var v2 map[uint64]int8
+ v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]int16:
+ fastpathTV.DecMapUint64Int16V(v, false, d)
+ case *map[uint64]int16:
+ var v2 map[uint64]int16
+ v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]int32:
+ fastpathTV.DecMapUint64Int32V(v, false, d)
+ case *map[uint64]int32:
+ var v2 map[uint64]int32
+ v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]int64:
+ fastpathTV.DecMapUint64Int64V(v, false, d)
+ case *map[uint64]int64:
+ var v2 map[uint64]int64
+ v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]float32:
+ fastpathTV.DecMapUint64Float32V(v, false, d)
+ case *map[uint64]float32:
+ var v2 map[uint64]float32
+ v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]float64:
+ fastpathTV.DecMapUint64Float64V(v, false, d)
+ case *map[uint64]float64:
+ var v2 map[uint64]float64
+ v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uint64]bool:
+ fastpathTV.DecMapUint64BoolV(v, false, d)
+ case *map[uint64]bool:
+ var v2 map[uint64]bool
+ v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]interface{}:
+ fastpathTV.DecMapUintptrIntfV(v, false, d)
+ case *map[uintptr]interface{}:
+ var v2 map[uintptr]interface{}
+ v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]string:
+ fastpathTV.DecMapUintptrStringV(v, false, d)
+ case *map[uintptr]string:
+ var v2 map[uintptr]string
+ v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]uint:
+ fastpathTV.DecMapUintptrUintV(v, false, d)
+ case *map[uintptr]uint:
+ var v2 map[uintptr]uint
+ v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]uint8:
+ fastpathTV.DecMapUintptrUint8V(v, false, d)
+ case *map[uintptr]uint8:
+ var v2 map[uintptr]uint8
+ v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]uint16:
+ fastpathTV.DecMapUintptrUint16V(v, false, d)
+ case *map[uintptr]uint16:
+ var v2 map[uintptr]uint16
+ v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]uint32:
+ fastpathTV.DecMapUintptrUint32V(v, false, d)
+ case *map[uintptr]uint32:
+ var v2 map[uintptr]uint32
+ v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]uint64:
+ fastpathTV.DecMapUintptrUint64V(v, false, d)
+ case *map[uintptr]uint64:
+ var v2 map[uintptr]uint64
+ v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]uintptr:
+ fastpathTV.DecMapUintptrUintptrV(v, false, d)
+ case *map[uintptr]uintptr:
+ var v2 map[uintptr]uintptr
+ v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]int:
+ fastpathTV.DecMapUintptrIntV(v, false, d)
+ case *map[uintptr]int:
+ var v2 map[uintptr]int
+ v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]int8:
+ fastpathTV.DecMapUintptrInt8V(v, false, d)
+ case *map[uintptr]int8:
+ var v2 map[uintptr]int8
+ v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]int16:
+ fastpathTV.DecMapUintptrInt16V(v, false, d)
+ case *map[uintptr]int16:
+ var v2 map[uintptr]int16
+ v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]int32:
+ fastpathTV.DecMapUintptrInt32V(v, false, d)
+ case *map[uintptr]int32:
+ var v2 map[uintptr]int32
+ v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]int64:
+ fastpathTV.DecMapUintptrInt64V(v, false, d)
+ case *map[uintptr]int64:
+ var v2 map[uintptr]int64
+ v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]float32:
+ fastpathTV.DecMapUintptrFloat32V(v, false, d)
+ case *map[uintptr]float32:
+ var v2 map[uintptr]float32
+ v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]float64:
+ fastpathTV.DecMapUintptrFloat64V(v, false, d)
+ case *map[uintptr]float64:
+ var v2 map[uintptr]float64
+ v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[uintptr]bool:
+ fastpathTV.DecMapUintptrBoolV(v, false, d)
+ case *map[uintptr]bool:
+ var v2 map[uintptr]bool
+ v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]interface{}:
+ fastpathTV.DecMapIntIntfV(v, false, d)
+ case *map[int]interface{}:
+ var v2 map[int]interface{}
+ v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]string:
+ fastpathTV.DecMapIntStringV(v, false, d)
+ case *map[int]string:
+ var v2 map[int]string
+ v2, changed = fastpathTV.DecMapIntStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]uint:
+ fastpathTV.DecMapIntUintV(v, false, d)
+ case *map[int]uint:
+ var v2 map[int]uint
+ v2, changed = fastpathTV.DecMapIntUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]uint8:
+ fastpathTV.DecMapIntUint8V(v, false, d)
+ case *map[int]uint8:
+ var v2 map[int]uint8
+ v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]uint16:
+ fastpathTV.DecMapIntUint16V(v, false, d)
+ case *map[int]uint16:
+ var v2 map[int]uint16
+ v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]uint32:
+ fastpathTV.DecMapIntUint32V(v, false, d)
+ case *map[int]uint32:
+ var v2 map[int]uint32
+ v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]uint64:
+ fastpathTV.DecMapIntUint64V(v, false, d)
+ case *map[int]uint64:
+ var v2 map[int]uint64
+ v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]uintptr:
+ fastpathTV.DecMapIntUintptrV(v, false, d)
+ case *map[int]uintptr:
+ var v2 map[int]uintptr
+ v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]int:
+ fastpathTV.DecMapIntIntV(v, false, d)
+ case *map[int]int:
+ var v2 map[int]int
+ v2, changed = fastpathTV.DecMapIntIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]int8:
+ fastpathTV.DecMapIntInt8V(v, false, d)
+ case *map[int]int8:
+ var v2 map[int]int8
+ v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]int16:
+ fastpathTV.DecMapIntInt16V(v, false, d)
+ case *map[int]int16:
+ var v2 map[int]int16
+ v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]int32:
+ fastpathTV.DecMapIntInt32V(v, false, d)
+ case *map[int]int32:
+ var v2 map[int]int32
+ v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]int64:
+ fastpathTV.DecMapIntInt64V(v, false, d)
+ case *map[int]int64:
+ var v2 map[int]int64
+ v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]float32:
+ fastpathTV.DecMapIntFloat32V(v, false, d)
+ case *map[int]float32:
+ var v2 map[int]float32
+ v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]float64:
+ fastpathTV.DecMapIntFloat64V(v, false, d)
+ case *map[int]float64:
+ var v2 map[int]float64
+ v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int]bool:
+ fastpathTV.DecMapIntBoolV(v, false, d)
+ case *map[int]bool:
+ var v2 map[int]bool
+ v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]interface{}:
+ fastpathTV.DecMapInt8IntfV(v, false, d)
+ case *map[int8]interface{}:
+ var v2 map[int8]interface{}
+ v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]string:
+ fastpathTV.DecMapInt8StringV(v, false, d)
+ case *map[int8]string:
+ var v2 map[int8]string
+ v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]uint:
+ fastpathTV.DecMapInt8UintV(v, false, d)
+ case *map[int8]uint:
+ var v2 map[int8]uint
+ v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]uint8:
+ fastpathTV.DecMapInt8Uint8V(v, false, d)
+ case *map[int8]uint8:
+ var v2 map[int8]uint8
+ v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]uint16:
+ fastpathTV.DecMapInt8Uint16V(v, false, d)
+ case *map[int8]uint16:
+ var v2 map[int8]uint16
+ v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]uint32:
+ fastpathTV.DecMapInt8Uint32V(v, false, d)
+ case *map[int8]uint32:
+ var v2 map[int8]uint32
+ v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]uint64:
+ fastpathTV.DecMapInt8Uint64V(v, false, d)
+ case *map[int8]uint64:
+ var v2 map[int8]uint64
+ v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]uintptr:
+ fastpathTV.DecMapInt8UintptrV(v, false, d)
+ case *map[int8]uintptr:
+ var v2 map[int8]uintptr
+ v2, changed = fastpathTV.DecMapInt8UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]int:
+ fastpathTV.DecMapInt8IntV(v, false, d)
+ case *map[int8]int:
+ var v2 map[int8]int
+ v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]int8:
+ fastpathTV.DecMapInt8Int8V(v, false, d)
+ case *map[int8]int8:
+ var v2 map[int8]int8
+ v2, changed = fastpathTV.DecMapInt8Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]int16:
+ fastpathTV.DecMapInt8Int16V(v, false, d)
+ case *map[int8]int16:
+ var v2 map[int8]int16
+ v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]int32:
+ fastpathTV.DecMapInt8Int32V(v, false, d)
+ case *map[int8]int32:
+ var v2 map[int8]int32
+ v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]int64:
+ fastpathTV.DecMapInt8Int64V(v, false, d)
+ case *map[int8]int64:
+ var v2 map[int8]int64
+ v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]float32:
+ fastpathTV.DecMapInt8Float32V(v, false, d)
+ case *map[int8]float32:
+ var v2 map[int8]float32
+ v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]float64:
+ fastpathTV.DecMapInt8Float64V(v, false, d)
+ case *map[int8]float64:
+ var v2 map[int8]float64
+ v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int8]bool:
+ fastpathTV.DecMapInt8BoolV(v, false, d)
+ case *map[int8]bool:
+ var v2 map[int8]bool
+ v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]interface{}:
+ fastpathTV.DecMapInt16IntfV(v, false, d)
+ case *map[int16]interface{}:
+ var v2 map[int16]interface{}
+ v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]string:
+ fastpathTV.DecMapInt16StringV(v, false, d)
+ case *map[int16]string:
+ var v2 map[int16]string
+ v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]uint:
+ fastpathTV.DecMapInt16UintV(v, false, d)
+ case *map[int16]uint:
+ var v2 map[int16]uint
+ v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]uint8:
+ fastpathTV.DecMapInt16Uint8V(v, false, d)
+ case *map[int16]uint8:
+ var v2 map[int16]uint8
+ v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]uint16:
+ fastpathTV.DecMapInt16Uint16V(v, false, d)
+ case *map[int16]uint16:
+ var v2 map[int16]uint16
+ v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]uint32:
+ fastpathTV.DecMapInt16Uint32V(v, false, d)
+ case *map[int16]uint32:
+ var v2 map[int16]uint32
+ v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]uint64:
+ fastpathTV.DecMapInt16Uint64V(v, false, d)
+ case *map[int16]uint64:
+ var v2 map[int16]uint64
+ v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]uintptr:
+ fastpathTV.DecMapInt16UintptrV(v, false, d)
+ case *map[int16]uintptr:
+ var v2 map[int16]uintptr
+ v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]int:
+ fastpathTV.DecMapInt16IntV(v, false, d)
+ case *map[int16]int:
+ var v2 map[int16]int
+ v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]int8:
+ fastpathTV.DecMapInt16Int8V(v, false, d)
+ case *map[int16]int8:
+ var v2 map[int16]int8
+ v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]int16:
+ fastpathTV.DecMapInt16Int16V(v, false, d)
+ case *map[int16]int16:
+ var v2 map[int16]int16
+ v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]int32:
+ fastpathTV.DecMapInt16Int32V(v, false, d)
+ case *map[int16]int32:
+ var v2 map[int16]int32
+ v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]int64:
+ fastpathTV.DecMapInt16Int64V(v, false, d)
+ case *map[int16]int64:
+ var v2 map[int16]int64
+ v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]float32:
+ fastpathTV.DecMapInt16Float32V(v, false, d)
+ case *map[int16]float32:
+ var v2 map[int16]float32
+ v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]float64:
+ fastpathTV.DecMapInt16Float64V(v, false, d)
+ case *map[int16]float64:
+ var v2 map[int16]float64
+ v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int16]bool:
+ fastpathTV.DecMapInt16BoolV(v, false, d)
+ case *map[int16]bool:
+ var v2 map[int16]bool
+ v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]interface{}:
+ fastpathTV.DecMapInt32IntfV(v, false, d)
+ case *map[int32]interface{}:
+ var v2 map[int32]interface{}
+ v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]string:
+ fastpathTV.DecMapInt32StringV(v, false, d)
+ case *map[int32]string:
+ var v2 map[int32]string
+ v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]uint:
+ fastpathTV.DecMapInt32UintV(v, false, d)
+ case *map[int32]uint:
+ var v2 map[int32]uint
+ v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]uint8:
+ fastpathTV.DecMapInt32Uint8V(v, false, d)
+ case *map[int32]uint8:
+ var v2 map[int32]uint8
+ v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]uint16:
+ fastpathTV.DecMapInt32Uint16V(v, false, d)
+ case *map[int32]uint16:
+ var v2 map[int32]uint16
+ v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]uint32:
+ fastpathTV.DecMapInt32Uint32V(v, false, d)
+ case *map[int32]uint32:
+ var v2 map[int32]uint32
+ v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]uint64:
+ fastpathTV.DecMapInt32Uint64V(v, false, d)
+ case *map[int32]uint64:
+ var v2 map[int32]uint64
+ v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]uintptr:
+ fastpathTV.DecMapInt32UintptrV(v, false, d)
+ case *map[int32]uintptr:
+ var v2 map[int32]uintptr
+ v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]int:
+ fastpathTV.DecMapInt32IntV(v, false, d)
+ case *map[int32]int:
+ var v2 map[int32]int
+ v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]int8:
+ fastpathTV.DecMapInt32Int8V(v, false, d)
+ case *map[int32]int8:
+ var v2 map[int32]int8
+ v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]int16:
+ fastpathTV.DecMapInt32Int16V(v, false, d)
+ case *map[int32]int16:
+ var v2 map[int32]int16
+ v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]int32:
+ fastpathTV.DecMapInt32Int32V(v, false, d)
+ case *map[int32]int32:
+ var v2 map[int32]int32
+ v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]int64:
+ fastpathTV.DecMapInt32Int64V(v, false, d)
+ case *map[int32]int64:
+ var v2 map[int32]int64
+ v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]float32:
+ fastpathTV.DecMapInt32Float32V(v, false, d)
+ case *map[int32]float32:
+ var v2 map[int32]float32
+ v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]float64:
+ fastpathTV.DecMapInt32Float64V(v, false, d)
+ case *map[int32]float64:
+ var v2 map[int32]float64
+ v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int32]bool:
+ fastpathTV.DecMapInt32BoolV(v, false, d)
+ case *map[int32]bool:
+ var v2 map[int32]bool
+ v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]interface{}:
+ fastpathTV.DecMapInt64IntfV(v, false, d)
+ case *map[int64]interface{}:
+ var v2 map[int64]interface{}
+ v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]string:
+ fastpathTV.DecMapInt64StringV(v, false, d)
+ case *map[int64]string:
+ var v2 map[int64]string
+ v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]uint:
+ fastpathTV.DecMapInt64UintV(v, false, d)
+ case *map[int64]uint:
+ var v2 map[int64]uint
+ v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]uint8:
+ fastpathTV.DecMapInt64Uint8V(v, false, d)
+ case *map[int64]uint8:
+ var v2 map[int64]uint8
+ v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]uint16:
+ fastpathTV.DecMapInt64Uint16V(v, false, d)
+ case *map[int64]uint16:
+ var v2 map[int64]uint16
+ v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]uint32:
+ fastpathTV.DecMapInt64Uint32V(v, false, d)
+ case *map[int64]uint32:
+ var v2 map[int64]uint32
+ v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]uint64:
+ fastpathTV.DecMapInt64Uint64V(v, false, d)
+ case *map[int64]uint64:
+ var v2 map[int64]uint64
+ v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]uintptr:
+ fastpathTV.DecMapInt64UintptrV(v, false, d)
+ case *map[int64]uintptr:
+ var v2 map[int64]uintptr
+ v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]int:
+ fastpathTV.DecMapInt64IntV(v, false, d)
+ case *map[int64]int:
+ var v2 map[int64]int
+ v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]int8:
+ fastpathTV.DecMapInt64Int8V(v, false, d)
+ case *map[int64]int8:
+ var v2 map[int64]int8
+ v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]int16:
+ fastpathTV.DecMapInt64Int16V(v, false, d)
+ case *map[int64]int16:
+ var v2 map[int64]int16
+ v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]int32:
+ fastpathTV.DecMapInt64Int32V(v, false, d)
+ case *map[int64]int32:
+ var v2 map[int64]int32
+ v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]int64:
+ fastpathTV.DecMapInt64Int64V(v, false, d)
+ case *map[int64]int64:
+ var v2 map[int64]int64
+ v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]float32:
+ fastpathTV.DecMapInt64Float32V(v, false, d)
+ case *map[int64]float32:
+ var v2 map[int64]float32
+ v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]float64:
+ fastpathTV.DecMapInt64Float64V(v, false, d)
+ case *map[int64]float64:
+ var v2 map[int64]float64
+ v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[int64]bool:
+ fastpathTV.DecMapInt64BoolV(v, false, d)
+ case *map[int64]bool:
+ var v2 map[int64]bool
+ v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]interface{}:
+ fastpathTV.DecMapBoolIntfV(v, false, d)
+ case *map[bool]interface{}:
+ var v2 map[bool]interface{}
+ v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]string:
+ fastpathTV.DecMapBoolStringV(v, false, d)
+ case *map[bool]string:
+ var v2 map[bool]string
+ v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]uint:
+ fastpathTV.DecMapBoolUintV(v, false, d)
+ case *map[bool]uint:
+ var v2 map[bool]uint
+ v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]uint8:
+ fastpathTV.DecMapBoolUint8V(v, false, d)
+ case *map[bool]uint8:
+ var v2 map[bool]uint8
+ v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]uint16:
+ fastpathTV.DecMapBoolUint16V(v, false, d)
+ case *map[bool]uint16:
+ var v2 map[bool]uint16
+ v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]uint32:
+ fastpathTV.DecMapBoolUint32V(v, false, d)
+ case *map[bool]uint32:
+ var v2 map[bool]uint32
+ v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]uint64:
+ fastpathTV.DecMapBoolUint64V(v, false, d)
+ case *map[bool]uint64:
+ var v2 map[bool]uint64
+ v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]uintptr:
+ fastpathTV.DecMapBoolUintptrV(v, false, d)
+ case *map[bool]uintptr:
+ var v2 map[bool]uintptr
+ v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]int:
+ fastpathTV.DecMapBoolIntV(v, false, d)
+ case *map[bool]int:
+ var v2 map[bool]int
+ v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]int8:
+ fastpathTV.DecMapBoolInt8V(v, false, d)
+ case *map[bool]int8:
+ var v2 map[bool]int8
+ v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]int16:
+ fastpathTV.DecMapBoolInt16V(v, false, d)
+ case *map[bool]int16:
+ var v2 map[bool]int16
+ v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]int32:
+ fastpathTV.DecMapBoolInt32V(v, false, d)
+ case *map[bool]int32:
+ var v2 map[bool]int32
+ v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]int64:
+ fastpathTV.DecMapBoolInt64V(v, false, d)
+ case *map[bool]int64:
+ var v2 map[bool]int64
+ v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]float32:
+ fastpathTV.DecMapBoolFloat32V(v, false, d)
+ case *map[bool]float32:
+ var v2 map[bool]float32
+ v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]float64:
+ fastpathTV.DecMapBoolFloat64V(v, false, d)
+ case *map[bool]float64:
+ var v2 map[bool]float64
+ v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ case map[bool]bool:
+ fastpathTV.DecMapBoolBoolV(v, false, d)
+ case *map[bool]bool:
+ var v2 map[bool]bool
+ v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d)
+ if changed {
+ *v = v2
+ }
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
+ switch v := iv.(type) {
+
+ case *[]interface{}:
+ *v = nil
+ case *[]string:
+ *v = nil
+ case *[]float32:
+ *v = nil
+ case *[]float64:
+ *v = nil
+ case *[]uint:
+ *v = nil
+ case *[]uint8:
+ *v = nil
+ case *[]uint16:
+ *v = nil
+ case *[]uint32:
+ *v = nil
+ case *[]uint64:
+ *v = nil
+ case *[]uintptr:
+ *v = nil
+ case *[]int:
+ *v = nil
+ case *[]int8:
+ *v = nil
+ case *[]int16:
+ *v = nil
+ case *[]int32:
+ *v = nil
+ case *[]int64:
+ *v = nil
+ case *[]bool:
+ *v = nil
+
+ case *map[interface{}]interface{}:
+ *v = nil
+ case *map[interface{}]string:
+ *v = nil
+ case *map[interface{}]uint:
+ *v = nil
+ case *map[interface{}]uint8:
+ *v = nil
+ case *map[interface{}]uint16:
+ *v = nil
+ case *map[interface{}]uint32:
+ *v = nil
+ case *map[interface{}]uint64:
+ *v = nil
+ case *map[interface{}]uintptr:
+ *v = nil
+ case *map[interface{}]int:
+ *v = nil
+ case *map[interface{}]int8:
+ *v = nil
+ case *map[interface{}]int16:
+ *v = nil
+ case *map[interface{}]int32:
+ *v = nil
+ case *map[interface{}]int64:
+ *v = nil
+ case *map[interface{}]float32:
+ *v = nil
+ case *map[interface{}]float64:
+ *v = nil
+ case *map[interface{}]bool:
+ *v = nil
+ case *map[string]interface{}:
+ *v = nil
+ case *map[string]string:
+ *v = nil
+ case *map[string]uint:
+ *v = nil
+ case *map[string]uint8:
+ *v = nil
+ case *map[string]uint16:
+ *v = nil
+ case *map[string]uint32:
+ *v = nil
+ case *map[string]uint64:
+ *v = nil
+ case *map[string]uintptr:
+ *v = nil
+ case *map[string]int:
+ *v = nil
+ case *map[string]int8:
+ *v = nil
+ case *map[string]int16:
+ *v = nil
+ case *map[string]int32:
+ *v = nil
+ case *map[string]int64:
+ *v = nil
+ case *map[string]float32:
+ *v = nil
+ case *map[string]float64:
+ *v = nil
+ case *map[string]bool:
+ *v = nil
+ case *map[float32]interface{}:
+ *v = nil
+ case *map[float32]string:
+ *v = nil
+ case *map[float32]uint:
+ *v = nil
+ case *map[float32]uint8:
+ *v = nil
+ case *map[float32]uint16:
+ *v = nil
+ case *map[float32]uint32:
+ *v = nil
+ case *map[float32]uint64:
+ *v = nil
+ case *map[float32]uintptr:
+ *v = nil
+ case *map[float32]int:
+ *v = nil
+ case *map[float32]int8:
+ *v = nil
+ case *map[float32]int16:
+ *v = nil
+ case *map[float32]int32:
+ *v = nil
+ case *map[float32]int64:
+ *v = nil
+ case *map[float32]float32:
+ *v = nil
+ case *map[float32]float64:
+ *v = nil
+ case *map[float32]bool:
+ *v = nil
+ case *map[float64]interface{}:
+ *v = nil
+ case *map[float64]string:
+ *v = nil
+ case *map[float64]uint:
+ *v = nil
+ case *map[float64]uint8:
+ *v = nil
+ case *map[float64]uint16:
+ *v = nil
+ case *map[float64]uint32:
+ *v = nil
+ case *map[float64]uint64:
+ *v = nil
+ case *map[float64]uintptr:
+ *v = nil
+ case *map[float64]int:
+ *v = nil
+ case *map[float64]int8:
+ *v = nil
+ case *map[float64]int16:
+ *v = nil
+ case *map[float64]int32:
+ *v = nil
+ case *map[float64]int64:
+ *v = nil
+ case *map[float64]float32:
+ *v = nil
+ case *map[float64]float64:
+ *v = nil
+ case *map[float64]bool:
+ *v = nil
+ case *map[uint]interface{}:
+ *v = nil
+ case *map[uint]string:
+ *v = nil
+ case *map[uint]uint:
+ *v = nil
+ case *map[uint]uint8:
+ *v = nil
+ case *map[uint]uint16:
+ *v = nil
+ case *map[uint]uint32:
+ *v = nil
+ case *map[uint]uint64:
+ *v = nil
+ case *map[uint]uintptr:
+ *v = nil
+ case *map[uint]int:
+ *v = nil
+ case *map[uint]int8:
+ *v = nil
+ case *map[uint]int16:
+ *v = nil
+ case *map[uint]int32:
+ *v = nil
+ case *map[uint]int64:
+ *v = nil
+ case *map[uint]float32:
+ *v = nil
+ case *map[uint]float64:
+ *v = nil
+ case *map[uint]bool:
+ *v = nil
+ case *map[uint8]interface{}:
+ *v = nil
+ case *map[uint8]string:
+ *v = nil
+ case *map[uint8]uint:
+ *v = nil
+ case *map[uint8]uint8:
+ *v = nil
+ case *map[uint8]uint16:
+ *v = nil
+ case *map[uint8]uint32:
+ *v = nil
+ case *map[uint8]uint64:
+ *v = nil
+ case *map[uint8]uintptr:
+ *v = nil
+ case *map[uint8]int:
+ *v = nil
+ case *map[uint8]int8:
+ *v = nil
+ case *map[uint8]int16:
+ *v = nil
+ case *map[uint8]int32:
+ *v = nil
+ case *map[uint8]int64:
+ *v = nil
+ case *map[uint8]float32:
+ *v = nil
+ case *map[uint8]float64:
+ *v = nil
+ case *map[uint8]bool:
+ *v = nil
+ case *map[uint16]interface{}:
+ *v = nil
+ case *map[uint16]string:
+ *v = nil
+ case *map[uint16]uint:
+ *v = nil
+ case *map[uint16]uint8:
+ *v = nil
+ case *map[uint16]uint16:
+ *v = nil
+ case *map[uint16]uint32:
+ *v = nil
+ case *map[uint16]uint64:
+ *v = nil
+ case *map[uint16]uintptr:
+ *v = nil
+ case *map[uint16]int:
+ *v = nil
+ case *map[uint16]int8:
+ *v = nil
+ case *map[uint16]int16:
+ *v = nil
+ case *map[uint16]int32:
+ *v = nil
+ case *map[uint16]int64:
+ *v = nil
+ case *map[uint16]float32:
+ *v = nil
+ case *map[uint16]float64:
+ *v = nil
+ case *map[uint16]bool:
+ *v = nil
+ case *map[uint32]interface{}:
+ *v = nil
+ case *map[uint32]string:
+ *v = nil
+ case *map[uint32]uint:
+ *v = nil
+ case *map[uint32]uint8:
+ *v = nil
+ case *map[uint32]uint16:
+ *v = nil
+ case *map[uint32]uint32:
+ *v = nil
+ case *map[uint32]uint64:
+ *v = nil
+ case *map[uint32]uintptr:
+ *v = nil
+ case *map[uint32]int:
+ *v = nil
+ case *map[uint32]int8:
+ *v = nil
+ case *map[uint32]int16:
+ *v = nil
+ case *map[uint32]int32:
+ *v = nil
+ case *map[uint32]int64:
+ *v = nil
+ case *map[uint32]float32:
+ *v = nil
+ case *map[uint32]float64:
+ *v = nil
+ case *map[uint32]bool:
+ *v = nil
+ case *map[uint64]interface{}:
+ *v = nil
+ case *map[uint64]string:
+ *v = nil
+ case *map[uint64]uint:
+ *v = nil
+ case *map[uint64]uint8:
+ *v = nil
+ case *map[uint64]uint16:
+ *v = nil
+ case *map[uint64]uint32:
+ *v = nil
+ case *map[uint64]uint64:
+ *v = nil
+ case *map[uint64]uintptr:
+ *v = nil
+ case *map[uint64]int:
+ *v = nil
+ case *map[uint64]int8:
+ *v = nil
+ case *map[uint64]int16:
+ *v = nil
+ case *map[uint64]int32:
+ *v = nil
+ case *map[uint64]int64:
+ *v = nil
+ case *map[uint64]float32:
+ *v = nil
+ case *map[uint64]float64:
+ *v = nil
+ case *map[uint64]bool:
+ *v = nil
+ case *map[uintptr]interface{}:
+ *v = nil
+ case *map[uintptr]string:
+ *v = nil
+ case *map[uintptr]uint:
+ *v = nil
+ case *map[uintptr]uint8:
+ *v = nil
+ case *map[uintptr]uint16:
+ *v = nil
+ case *map[uintptr]uint32:
+ *v = nil
+ case *map[uintptr]uint64:
+ *v = nil
+ case *map[uintptr]uintptr:
+ *v = nil
+ case *map[uintptr]int:
+ *v = nil
+ case *map[uintptr]int8:
+ *v = nil
+ case *map[uintptr]int16:
+ *v = nil
+ case *map[uintptr]int32:
+ *v = nil
+ case *map[uintptr]int64:
+ *v = nil
+ case *map[uintptr]float32:
+ *v = nil
+ case *map[uintptr]float64:
+ *v = nil
+ case *map[uintptr]bool:
+ *v = nil
+ case *map[int]interface{}:
+ *v = nil
+ case *map[int]string:
+ *v = nil
+ case *map[int]uint:
+ *v = nil
+ case *map[int]uint8:
+ *v = nil
+ case *map[int]uint16:
+ *v = nil
+ case *map[int]uint32:
+ *v = nil
+ case *map[int]uint64:
+ *v = nil
+ case *map[int]uintptr:
+ *v = nil
+ case *map[int]int:
+ *v = nil
+ case *map[int]int8:
+ *v = nil
+ case *map[int]int16:
+ *v = nil
+ case *map[int]int32:
+ *v = nil
+ case *map[int]int64:
+ *v = nil
+ case *map[int]float32:
+ *v = nil
+ case *map[int]float64:
+ *v = nil
+ case *map[int]bool:
+ *v = nil
+ case *map[int8]interface{}:
+ *v = nil
+ case *map[int8]string:
+ *v = nil
+ case *map[int8]uint:
+ *v = nil
+ case *map[int8]uint8:
+ *v = nil
+ case *map[int8]uint16:
+ *v = nil
+ case *map[int8]uint32:
+ *v = nil
+ case *map[int8]uint64:
+ *v = nil
+ case *map[int8]uintptr:
+ *v = nil
+ case *map[int8]int:
+ *v = nil
+ case *map[int8]int8:
+ *v = nil
+ case *map[int8]int16:
+ *v = nil
+ case *map[int8]int32:
+ *v = nil
+ case *map[int8]int64:
+ *v = nil
+ case *map[int8]float32:
+ *v = nil
+ case *map[int8]float64:
+ *v = nil
+ case *map[int8]bool:
+ *v = nil
+ case *map[int16]interface{}:
+ *v = nil
+ case *map[int16]string:
+ *v = nil
+ case *map[int16]uint:
+ *v = nil
+ case *map[int16]uint8:
+ *v = nil
+ case *map[int16]uint16:
+ *v = nil
+ case *map[int16]uint32:
+ *v = nil
+ case *map[int16]uint64:
+ *v = nil
+ case *map[int16]uintptr:
+ *v = nil
+ case *map[int16]int:
+ *v = nil
+ case *map[int16]int8:
+ *v = nil
+ case *map[int16]int16:
+ *v = nil
+ case *map[int16]int32:
+ *v = nil
+ case *map[int16]int64:
+ *v = nil
+ case *map[int16]float32:
+ *v = nil
+ case *map[int16]float64:
+ *v = nil
+ case *map[int16]bool:
+ *v = nil
+ case *map[int32]interface{}:
+ *v = nil
+ case *map[int32]string:
+ *v = nil
+ case *map[int32]uint:
+ *v = nil
+ case *map[int32]uint8:
+ *v = nil
+ case *map[int32]uint16:
+ *v = nil
+ case *map[int32]uint32:
+ *v = nil
+ case *map[int32]uint64:
+ *v = nil
+ case *map[int32]uintptr:
+ *v = nil
+ case *map[int32]int:
+ *v = nil
+ case *map[int32]int8:
+ *v = nil
+ case *map[int32]int16:
+ *v = nil
+ case *map[int32]int32:
+ *v = nil
+ case *map[int32]int64:
+ *v = nil
+ case *map[int32]float32:
+ *v = nil
+ case *map[int32]float64:
+ *v = nil
+ case *map[int32]bool:
+ *v = nil
+ case *map[int64]interface{}:
+ *v = nil
+ case *map[int64]string:
+ *v = nil
+ case *map[int64]uint:
+ *v = nil
+ case *map[int64]uint8:
+ *v = nil
+ case *map[int64]uint16:
+ *v = nil
+ case *map[int64]uint32:
+ *v = nil
+ case *map[int64]uint64:
+ *v = nil
+ case *map[int64]uintptr:
+ *v = nil
+ case *map[int64]int:
+ *v = nil
+ case *map[int64]int8:
+ *v = nil
+ case *map[int64]int16:
+ *v = nil
+ case *map[int64]int32:
+ *v = nil
+ case *map[int64]int64:
+ *v = nil
+ case *map[int64]float32:
+ *v = nil
+ case *map[int64]float64:
+ *v = nil
+ case *map[int64]bool:
+ *v = nil
+ case *map[bool]interface{}:
+ *v = nil
+ case *map[bool]string:
+ *v = nil
+ case *map[bool]uint:
+ *v = nil
+ case *map[bool]uint8:
+ *v = nil
+ case *map[bool]uint16:
+ *v = nil
+ case *map[bool]uint32:
+ *v = nil
+ case *map[bool]uint64:
+ *v = nil
+ case *map[bool]uintptr:
+ *v = nil
+ case *map[bool]int:
+ *v = nil
+ case *map[bool]int8:
+ *v = nil
+ case *map[bool]int16:
+ *v = nil
+ case *map[bool]int32:
+ *v = nil
+ case *map[bool]int64:
+ *v = nil
+ case *map[bool]float32:
+ *v = nil
+ case *map[bool]float64:
+ *v = nil
+ case *map[bool]bool:
+ *v = nil
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+
+func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]interface{})
+ v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]interface{})
+ v2, changed := fastpathTV.DecSliceIntfV(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) {
+ v, changed := f.DecSliceIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []interface{}{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]interface{}, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ } else {
+ xlen = 8
+ }
+ v = make([]interface{}, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, nil)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = nil
+ } else {
+ d.decode(&v[j])
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]interface{}, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]string)
+ v, changed := fastpathTV.DecSliceStringV(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]string)
+ v2, changed := fastpathTV.DecSliceStringV(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) {
+ v, changed := f.DecSliceStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []string{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]string, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+ } else {
+ xlen = 8
+ }
+ v = make([]string, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, "")
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = ""
+ } else {
+ v[j] = dd.DecodeString()
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]string, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]float32)
+ v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]float32)
+ v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) {
+ v, changed := f.DecSliceFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float32, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ } else {
+ xlen = 8
+ }
+ v = make([]float32, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]float32, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]float64)
+ v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]float64)
+ v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) {
+ v, changed := f.DecSliceFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []float64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]float64, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ } else {
+ xlen = 8
+ }
+ v = make([]float64, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = dd.DecodeFloat64()
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]float64, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]uint)
+ v, changed := fastpathTV.DecSliceUintV(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]uint)
+ v2, changed := fastpathTV.DecSliceUintV(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) {
+ v, changed := f.DecSliceUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ } else {
+ xlen = 8
+ }
+ v = make([]uint, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]uint, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]uint8)
+ v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]uint8)
+ v2, changed := fastpathTV.DecSliceUint8V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) {
+ v, changed := f.DecSliceUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint8, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ } else {
+ xlen = 8
+ }
+ v = make([]uint8, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]uint8, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]uint16)
+ v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]uint16)
+ v2, changed := fastpathTV.DecSliceUint16V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) {
+ v, changed := f.DecSliceUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint16, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ } else {
+ xlen = 8
+ }
+ v = make([]uint16, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]uint16, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]uint32)
+ v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]uint32)
+ v2, changed := fastpathTV.DecSliceUint32V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) {
+ v, changed := f.DecSliceUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint32, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ } else {
+ xlen = 8
+ }
+ v = make([]uint32, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]uint32, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]uint64)
+ v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]uint64)
+ v2, changed := fastpathTV.DecSliceUint64V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) {
+ v, changed := f.DecSliceUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uint64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uint64, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ } else {
+ xlen = 8
+ }
+ v = make([]uint64, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = dd.DecodeUint64()
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]uint64, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]uintptr)
+ v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]uintptr)
+ v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) {
+ v, changed := f.DecSliceUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []uintptr{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]uintptr, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ } else {
+ xlen = 8
+ }
+ v = make([]uintptr, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]uintptr, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]int)
+ v, changed := fastpathTV.DecSliceIntV(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]int)
+ v2, changed := fastpathTV.DecSliceIntV(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) {
+ v, changed := f.DecSliceIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ } else {
+ xlen = 8
+ }
+ v = make([]int, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]int, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]int8)
+ v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]int8)
+ v2, changed := fastpathTV.DecSliceInt8V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) {
+ v, changed := f.DecSliceInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int8{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int8, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ } else {
+ xlen = 8
+ }
+ v = make([]int8, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]int8, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]int16)
+ v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]int16)
+ v2, changed := fastpathTV.DecSliceInt16V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) {
+ v, changed := f.DecSliceInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int16{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int16, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+ } else {
+ xlen = 8
+ }
+ v = make([]int16, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]int16, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]int32)
+ v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]int32)
+ v2, changed := fastpathTV.DecSliceInt32V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) {
+ v, changed := f.DecSliceInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int32{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int32, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+ } else {
+ xlen = 8
+ }
+ v = make([]int32, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]int32, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]int64)
+ v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]int64)
+ v2, changed := fastpathTV.DecSliceInt64V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) {
+ v, changed := f.DecSliceInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []int64{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]int64, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+ } else {
+ xlen = 8
+ }
+ v = make([]int64, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, 0)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = 0
+ } else {
+ v[j] = dd.DecodeInt64()
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]int64, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]bool)
+ v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ v := rv2i(rv).([]bool)
+ v2, changed := fastpathTV.DecSliceBoolV(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) {
+ v, changed := f.DecSliceBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
+ dd := d.d
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil {
+ v = []bool{}
+ } else if len(v) != 0 {
+ v = v[:0]
+ }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]bool, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+ } else {
+ xlen = 8
+ }
+ v = make([]bool, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, false)
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = false
+ } else {
+ v[j] = dd.DecodeBool()
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]bool, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]interface{})
+ v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) {
+ v, changed := f.DecMapIntfIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool,
+ d *Decoder) (_ map[interface{}]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk interface{}
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]string)
+ v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) {
+ v, changed := f.DecMapIntfStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool,
+ d *Decoder) (_ map[interface{}]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[interface{}]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]uint)
+ v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) {
+ v, changed := f.DecMapIntfUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool,
+ d *Decoder) (_ map[interface{}]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]uint8)
+ v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) {
+ v, changed := f.DecMapIntfUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool,
+ d *Decoder) (_ map[interface{}]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]uint16)
+ v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) {
+ v, changed := f.DecMapIntfUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool,
+ d *Decoder) (_ map[interface{}]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]uint32)
+ v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) {
+ v, changed := f.DecMapIntfUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool,
+ d *Decoder) (_ map[interface{}]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]uint64)
+ v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) {
+ v, changed := f.DecMapIntfUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool,
+ d *Decoder) (_ map[interface{}]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]uintptr)
+ v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) {
+ v, changed := f.DecMapIntfUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool,
+ d *Decoder) (_ map[interface{}]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]int)
+ v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) {
+ v, changed := f.DecMapIntfIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool,
+ d *Decoder) (_ map[interface{}]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]int8)
+ v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) {
+ v, changed := f.DecMapIntfInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool,
+ d *Decoder) (_ map[interface{}]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]int16)
+ v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) {
+ v, changed := f.DecMapIntfInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool,
+ d *Decoder) (_ map[interface{}]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[interface{}]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]int32)
+ v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) {
+ v, changed := f.DecMapIntfInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool,
+ d *Decoder) (_ map[interface{}]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]int64)
+ v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) {
+ v, changed := f.DecMapIntfInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool,
+ d *Decoder) (_ map[interface{}]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]float32)
+ v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) {
+ v, changed := f.DecMapIntfFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool,
+ d *Decoder) (_ map[interface{}]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[interface{}]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]float64)
+ v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) {
+ v, changed := f.DecMapIntfFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool,
+ d *Decoder) (_ map[interface{}]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[interface{}]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[interface{}]bool)
+ v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) {
+ v, changed := f.DecMapIntfBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool,
+ d *Decoder) (_ map[interface{}]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[interface{}]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk interface{}
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv)
+ }
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
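+// Same template as the interface{}-keyed maps above; the only difference is
+// that string keys are read directly with dd.DecodeString(), so there is no
+// d.decode(&mk) / []byte-to-string interning step per key.
+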
+func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]interface{})
+ v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) {
+ v, changed := f.DecMapStringIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool,
+ d *Decoder) (_ map[string]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk string
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]string)
+ v, changed := fastpathTV.DecMapStringStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d)
+ }
+}
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) {
+ v, changed := f.DecMapStringStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool,
+ d *Decoder) (_ map[string]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+ v = make(map[string]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]uint)
+ v, changed := fastpathTV.DecMapStringUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) {
+ v, changed := f.DecMapStringUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool,
+ d *Decoder) (_ map[string]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]uint8)
+ v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) {
+ v, changed := f.DecMapStringUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool,
+ d *Decoder) (_ map[string]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]uint16)
+ v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) {
+ v, changed := f.DecMapStringUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool,
+ d *Decoder) (_ map[string]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]uint32)
+ v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) {
+ v, changed := f.DecMapStringUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool,
+ d *Decoder) (_ map[string]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]uint64)
+ v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) {
+ v, changed := f.DecMapStringUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool,
+ d *Decoder) (_ map[string]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]uintptr)
+ v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) {
+ v, changed := f.DecMapStringUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool,
+ d *Decoder) (_ map[string]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]int)
+ v, changed := fastpathTV.DecMapStringIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d)
+ }
+}
+func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) {
+ v, changed := f.DecMapStringIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool,
+ d *Decoder) (_ map[string]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]int8)
+ v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) {
+ v, changed := f.DecMapStringInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool,
+ d *Decoder) (_ map[string]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]int16)
+ v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) {
+ v, changed := f.DecMapStringInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool,
+ d *Decoder) (_ map[string]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[string]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]int32)
+ v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) {
+ v, changed := f.DecMapStringInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool,
+ d *Decoder) (_ map[string]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]int64)
+ v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) {
+ v, changed := f.DecMapStringInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool,
+ d *Decoder) (_ map[string]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]float32)
+ v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) {
+ v, changed := f.DecMapStringFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool,
+ d *Decoder) (_ map[string]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[string]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]float64)
+ v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) {
+ v, changed := f.DecMapStringFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool,
+ d *Decoder) (_ map[string]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[string]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[string]bool)
+ v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) {
+ v, changed := f.DecMapStringBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool,
+ d *Decoder) (_ map[string]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[string]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk string
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeString()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
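+// float32-keyed variants: each key is read from the stream as a float64 and
+// narrowed to float32 via chkOvf.Float32V before use; the rest of the
+// template is unchanged.
+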
+func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]interface{})
+ v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) {
+ v, changed := f.DecMapFloat32IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool,
+ d *Decoder) (_ map[float32]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float32
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]string)
+ v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) {
+ v, changed := f.DecMapFloat32StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool,
+ d *Decoder) (_ map[float32]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[float32]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]uint)
+ v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) {
+ v, changed := f.DecMapFloat32UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool,
+ d *Decoder) (_ map[float32]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]uint8)
+ v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool,
+ d *Decoder) (_ map[float32]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]uint16)
+ v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool,
+ d *Decoder) (_ map[float32]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]uint32)
+ v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool,
+ d *Decoder) (_ map[float32]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]uint64)
+ v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) {
+ v, changed := f.DecMapFloat32Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool,
+ d *Decoder) (_ map[float32]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]uintptr)
+ v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) {
+ v, changed := f.DecMapFloat32UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool,
+ d *Decoder) (_ map[float32]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]int)
+ v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) {
+ v, changed := f.DecMapFloat32IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool,
+ d *Decoder) (_ map[float32]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]int8)
+ v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) {
+ v, changed := f.DecMapFloat32Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool,
+ d *Decoder) (_ map[float32]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]int16)
+ v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) {
+ v, changed := f.DecMapFloat32Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool,
+ d *Decoder) (_ map[float32]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[float32]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]int32)
+ v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) {
+ v, changed := f.DecMapFloat32Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool,
+ d *Decoder) (_ map[float32]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]int64)
+ v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) {
+ v, changed := f.DecMapFloat32Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool,
+ d *Decoder) (_ map[float32]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]float32)
+ v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) {
+ v, changed := f.DecMapFloat32Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool,
+ d *Decoder) (_ map[float32]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[float32]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]float64)
+ v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) {
+ v, changed := f.DecMapFloat32Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool,
+ d *Decoder) (_ map[float32]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float32]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float32]bool)
+ v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) {
+ v, changed := f.DecMapFloat32BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool,
+ d *Decoder) (_ map[float32]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[float32]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float32
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]interface{})
+ v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) {
+ v, changed := f.DecMapFloat64IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool,
+ d *Decoder) (_ map[float64]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk float64
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]string)
+ v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) {
+ v, changed := f.DecMapFloat64StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool,
+ d *Decoder) (_ map[float64]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[float64]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]uint)
+ v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) {
+ v, changed := f.DecMapFloat64UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool,
+ d *Decoder) (_ map[float64]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]uint8)
+ v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool,
+ d *Decoder) (_ map[float64]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]uint16)
+ v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool,
+ d *Decoder) (_ map[float64]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]uint32)
+ v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool,
+ d *Decoder) (_ map[float64]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]uint64)
+ v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) {
+ v, changed := f.DecMapFloat64Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool,
+ d *Decoder) (_ map[float64]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]uintptr)
+ v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) {
+ v, changed := f.DecMapFloat64UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool,
+ d *Decoder) (_ map[float64]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]int)
+ v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) {
+ v, changed := f.DecMapFloat64IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool,
+ d *Decoder) (_ map[float64]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]int8)
+ v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) {
+ v, changed := f.DecMapFloat64Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool,
+ d *Decoder) (_ map[float64]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]int16)
+ v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) {
+ v, changed := f.DecMapFloat64Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool,
+ d *Decoder) (_ map[float64]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[float64]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]int32)
+ v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) {
+ v, changed := f.DecMapFloat64Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool,
+ d *Decoder) (_ map[float64]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]int64)
+ v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) {
+ v, changed := f.DecMapFloat64Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool,
+ d *Decoder) (_ map[float64]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]float32)
+ v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) {
+ v, changed := f.DecMapFloat64Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool,
+ d *Decoder) (_ map[float64]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[float64]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]float64)
+ v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) {
+ v, changed := f.DecMapFloat64Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool,
+ d *Decoder) (_ map[float64]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[float64]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[float64]bool)
+ v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) {
+ v, changed := f.DecMapFloat64BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool,
+ d *Decoder) (_ map[float64]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[float64]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk float64
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeFloat64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]interface{})
+ v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) {
+ v, changed := f.DecMapUintIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool,
+ d *Decoder) (_ map[uint]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]string)
+ v, changed := fastpathTV.DecMapUintStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d)
+ }
+}
+func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) {
+ v, changed := f.DecMapUintStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool,
+ d *Decoder) (_ map[uint]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]uint)
+ v, changed := fastpathTV.DecMapUintUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) {
+ v, changed := f.DecMapUintUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool,
+ d *Decoder) (_ map[uint]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]uint8)
+ v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) {
+ v, changed := f.DecMapUintUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool,
+ d *Decoder) (_ map[uint]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]uint16)
+ v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) {
+ v, changed := f.DecMapUintUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool,
+ d *Decoder) (_ map[uint]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]uint32)
+ v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) {
+ v, changed := f.DecMapUintUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool,
+ d *Decoder) (_ map[uint]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]uint64)
+ v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) {
+ v, changed := f.DecMapUintUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool,
+ d *Decoder) (_ map[uint]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]uintptr)
+ v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) {
+ v, changed := f.DecMapUintUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool,
+ d *Decoder) (_ map[uint]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]int)
+ v, changed := fastpathTV.DecMapUintIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d)
+ }
+}
+func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) {
+ v, changed := f.DecMapUintIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool,
+ d *Decoder) (_ map[uint]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]int8)
+ v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) {
+ v, changed := f.DecMapUintInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool,
+ d *Decoder) (_ map[uint]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]int16)
+ v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) {
+ v, changed := f.DecMapUintInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool,
+ d *Decoder) (_ map[uint]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]int32)
+ v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) {
+ v, changed := f.DecMapUintInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool,
+ d *Decoder) (_ map[uint]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]int64)
+ v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) {
+ v, changed := f.DecMapUintInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool,
+ d *Decoder) (_ map[uint]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]float32)
+ v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) {
+ v, changed := f.DecMapUintFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool,
+ d *Decoder) (_ map[uint]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]float64)
+ v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) {
+ v, changed := f.DecMapUintFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool,
+ d *Decoder) (_ map[uint]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint]bool)
+ v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) {
+ v, changed := f.DecMapUintBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool,
+ d *Decoder) (_ map[uint]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]interface{})
+ v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) {
+ v, changed := f.DecMapUint8IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool,
+ d *Decoder) (_ map[uint8]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint8
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]string)
+ v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) {
+ v, changed := f.DecMapUint8StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool,
+ d *Decoder) (_ map[uint8]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[uint8]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]uint)
+ v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) {
+ v, changed := f.DecMapUint8UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool,
+ d *Decoder) (_ map[uint8]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]uint8)
+ v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) {
+ v, changed := f.DecMapUint8Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool,
+ d *Decoder) (_ map[uint8]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]uint16)
+ v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) {
+ v, changed := f.DecMapUint8Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool,
+ d *Decoder) (_ map[uint8]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]uint32)
+ v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) {
+ v, changed := f.DecMapUint8Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool,
+ d *Decoder) (_ map[uint8]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]uint64)
+ v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) {
+ v, changed := f.DecMapUint8Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool,
+ d *Decoder) (_ map[uint8]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]uintptr)
+ v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) {
+ v, changed := f.DecMapUint8UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool,
+ d *Decoder) (_ map[uint8]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]int)
+ v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) {
+ v, changed := f.DecMapUint8IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool,
+ d *Decoder) (_ map[uint8]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]int8)
+ v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) {
+ v, changed := f.DecMapUint8Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool,
+ d *Decoder) (_ map[uint8]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]int16)
+ v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) {
+ v, changed := f.DecMapUint8Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool,
+ d *Decoder) (_ map[uint8]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint8]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]int32)
+ v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) {
+ v, changed := f.DecMapUint8Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool,
+ d *Decoder) (_ map[uint8]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]int64)
+ v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) {
+ v, changed := f.DecMapUint8Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool,
+ d *Decoder) (_ map[uint8]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]float32)
+ v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) {
+ v, changed := f.DecMapUint8Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool,
+ d *Decoder) (_ map[uint8]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint8]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]float64)
+ v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) {
+ v, changed := f.DecMapUint8Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool,
+ d *Decoder) (_ map[uint8]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint8]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint8]bool)
+ v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) {
+ v, changed := f.DecMapUint8BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool,
+ d *Decoder) (_ map[uint8]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[uint8]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint8
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]interface{})
+ v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) {
+ v, changed := f.DecMapUint16IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool,
+ d *Decoder) (_ map[uint16]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint16
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]string)
+ v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) {
+ v, changed := f.DecMapUint16StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool,
+ d *Decoder) (_ map[uint16]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[uint16]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]uint)
+ v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) {
+ v, changed := f.DecMapUint16UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool,
+ d *Decoder) (_ map[uint16]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]uint8)
+ v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) {
+ v, changed := f.DecMapUint16Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool,
+ d *Decoder) (_ map[uint16]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]uint16)
+ v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) {
+ v, changed := f.DecMapUint16Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool,
+ d *Decoder) (_ map[uint16]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]uint32)
+ v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) {
+ v, changed := f.DecMapUint16Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool,
+ d *Decoder) (_ map[uint16]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]uint64)
+ v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) {
+ v, changed := f.DecMapUint16Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool,
+ d *Decoder) (_ map[uint16]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]uintptr)
+ v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) {
+ v, changed := f.DecMapUint16UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool,
+ d *Decoder) (_ map[uint16]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]int)
+ v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) {
+ v, changed := f.DecMapUint16IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool,
+ d *Decoder) (_ map[uint16]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]int8)
+ v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) {
+ v, changed := f.DecMapUint16Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool,
+ d *Decoder) (_ map[uint16]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]int16)
+ v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) {
+ v, changed := f.DecMapUint16Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool,
+ d *Decoder) (_ map[uint16]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[uint16]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]int32)
+ v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) {
+ v, changed := f.DecMapUint16Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool,
+ d *Decoder) (_ map[uint16]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]int64)
+ v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) {
+ v, changed := f.DecMapUint16Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool,
+ d *Decoder) (_ map[uint16]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]float32)
+ v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) {
+ v, changed := f.DecMapUint16Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool,
+ d *Decoder) (_ map[uint16]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint16]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]float64)
+ v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) {
+ v, changed := f.DecMapUint16Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool,
+ d *Decoder) (_ map[uint16]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint16]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint16]bool)
+ v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) {
+ v, changed := f.DecMapUint16BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool,
+ d *Decoder) (_ map[uint16]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[uint16]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint16
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]interface{})
+ v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) {
+ v, changed := f.DecMapUint32IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool,
+ d *Decoder) (_ map[uint32]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint32
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]string)
+ v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) {
+ v, changed := f.DecMapUint32StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool,
+ d *Decoder) (_ map[uint32]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[uint32]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]uint)
+ v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) {
+ v, changed := f.DecMapUint32UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool,
+ d *Decoder) (_ map[uint32]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]uint8)
+ v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) {
+ v, changed := f.DecMapUint32Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool,
+ d *Decoder) (_ map[uint32]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]uint16)
+ v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) {
+ v, changed := f.DecMapUint32Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool,
+ d *Decoder) (_ map[uint32]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]uint32)
+ v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) {
+ v, changed := f.DecMapUint32Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool,
+ d *Decoder) (_ map[uint32]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]uint64)
+ v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) {
+ v, changed := f.DecMapUint32Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool,
+ d *Decoder) (_ map[uint32]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]uintptr)
+ v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) {
+ v, changed := f.DecMapUint32UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool,
+ d *Decoder) (_ map[uint32]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]int)
+ v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) {
+ v, changed := f.DecMapUint32IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool,
+ d *Decoder) (_ map[uint32]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]int8)
+ v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) {
+ v, changed := f.DecMapUint32Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool,
+ d *Decoder) (_ map[uint32]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]int16)
+ v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) {
+ v, changed := f.DecMapUint32Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool,
+ d *Decoder) (_ map[uint32]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[uint32]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]int32)
+ v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) {
+ v, changed := f.DecMapUint32Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool,
+ d *Decoder) (_ map[uint32]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]int64)
+ v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) {
+ v, changed := f.DecMapUint32Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool,
+ d *Decoder) (_ map[uint32]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]float32)
+ v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) {
+ v, changed := f.DecMapUint32Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool,
+ d *Decoder) (_ map[uint32]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[uint32]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]float64)
+ v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) {
+ v, changed := f.DecMapUint32Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool,
+ d *Decoder) (_ map[uint32]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint32]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint32]bool)
+ v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) {
+ v, changed := f.DecMapUint32BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool,
+ d *Decoder) (_ map[uint32]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[uint32]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint32
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]interface{})
+ v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) {
+ v, changed := f.DecMapUint64IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool,
+ d *Decoder) (_ map[uint64]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uint64
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]string)
+ v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) {
+ v, changed := f.DecMapUint64StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool,
+ d *Decoder) (_ map[uint64]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uint64]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]uint)
+ v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) {
+ v, changed := f.DecMapUint64UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool,
+ d *Decoder) (_ map[uint64]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]uint8)
+ v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) {
+ v, changed := f.DecMapUint64Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool,
+ d *Decoder) (_ map[uint64]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]uint16)
+ v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) {
+ v, changed := f.DecMapUint64Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool,
+ d *Decoder) (_ map[uint64]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]uint32)
+ v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) {
+ v, changed := f.DecMapUint64Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool,
+ d *Decoder) (_ map[uint64]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]uint64)
+ v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) {
+ v, changed := f.DecMapUint64Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool,
+ d *Decoder) (_ map[uint64]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]uintptr)
+ v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) {
+ v, changed := f.DecMapUint64UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool,
+ d *Decoder) (_ map[uint64]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]int)
+ v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) {
+ v, changed := f.DecMapUint64IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool,
+ d *Decoder) (_ map[uint64]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]int8)
+ v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) {
+ v, changed := f.DecMapUint64Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool,
+ d *Decoder) (_ map[uint64]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]int16)
+ v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) {
+ v, changed := f.DecMapUint64Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool,
+ d *Decoder) (_ map[uint64]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uint64]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]int32)
+ v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) {
+ v, changed := f.DecMapUint64Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool,
+ d *Decoder) (_ map[uint64]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]int64)
+ v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) {
+ v, changed := f.DecMapUint64Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool,
+ d *Decoder) (_ map[uint64]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]float32)
+ v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) {
+ v, changed := f.DecMapUint64Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool,
+ d *Decoder) (_ map[uint64]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uint64]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]float64)
+ v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) {
+ v, changed := f.DecMapUint64Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool,
+ d *Decoder) (_ map[uint64]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uint64]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uint64]bool)
+ v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) {
+ v, changed := f.DecMapUint64BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool,
+ d *Decoder) (_ map[uint64]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uint64]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uint64
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeUint64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]interface{})
+ v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) {
+ v, changed := f.DecMapUintptrIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool,
+ d *Decoder) (_ map[uintptr]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk uintptr
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]string)
+ v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) {
+ v, changed := f.DecMapUintptrStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool,
+ d *Decoder) (_ map[uintptr]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[uintptr]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]uint)
+ v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) {
+ v, changed := f.DecMapUintptrUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool,
+ d *Decoder) (_ map[uintptr]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]uint8)
+ v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) {
+ v, changed := f.DecMapUintptrUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool,
+ d *Decoder) (_ map[uintptr]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]uint16)
+ v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) {
+ v, changed := f.DecMapUintptrUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool,
+ d *Decoder) (_ map[uintptr]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]uint32)
+ v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) {
+ v, changed := f.DecMapUintptrUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool,
+ d *Decoder) (_ map[uintptr]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]uint64)
+ v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) {
+ v, changed := f.DecMapUintptrUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool,
+ d *Decoder) (_ map[uintptr]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]uintptr)
+ v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) {
+ v, changed := f.DecMapUintptrUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool,
+ d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]int)
+ v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) {
+ v, changed := f.DecMapUintptrIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool,
+ d *Decoder) (_ map[uintptr]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]int8)
+ v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) {
+ v, changed := f.DecMapUintptrInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool,
+ d *Decoder) (_ map[uintptr]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]int16)
+ v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) {
+ v, changed := f.DecMapUintptrInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool,
+ d *Decoder) (_ map[uintptr]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[uintptr]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]int32)
+ v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) {
+ v, changed := f.DecMapUintptrInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool,
+ d *Decoder) (_ map[uintptr]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]int64)
+ v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) {
+ v, changed := f.DecMapUintptrInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool,
+ d *Decoder) (_ map[uintptr]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]float32)
+ v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool,
+ d *Decoder) (_ map[uintptr]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[uintptr]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]float64)
+ v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) {
+ v, changed := f.DecMapUintptrFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool,
+ d *Decoder) (_ map[uintptr]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[uintptr]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[uintptr]bool)
+ v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) {
+ v, changed := f.DecMapUintptrBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool,
+ d *Decoder) (_ map[uintptr]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[uintptr]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk uintptr
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]interface{})
+ v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) {
+ v, changed := f.DecMapIntIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool,
+ d *Decoder) (_ map[int]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]string)
+ v, changed := fastpathTV.DecMapIntStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d)
+ }
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) {
+ v, changed := f.DecMapIntStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool,
+ d *Decoder) (_ map[int]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]uint)
+ v, changed := fastpathTV.DecMapIntUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) {
+ v, changed := f.DecMapIntUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool,
+ d *Decoder) (_ map[int]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]uint8)
+ v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) {
+ v, changed := f.DecMapIntUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool,
+ d *Decoder) (_ map[int]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]uint16)
+ v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) {
+ v, changed := f.DecMapIntUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool,
+ d *Decoder) (_ map[int]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]uint32)
+ v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) {
+ v, changed := f.DecMapIntUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool,
+ d *Decoder) (_ map[int]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]uint64)
+ v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) {
+ v, changed := f.DecMapIntUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool,
+ d *Decoder) (_ map[int]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]uintptr)
+ v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) {
+ v, changed := f.DecMapIntUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool,
+ d *Decoder) (_ map[int]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]int)
+ v, changed := fastpathTV.DecMapIntIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d)
+ }
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) {
+ v, changed := f.DecMapIntIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool,
+ d *Decoder) (_ map[int]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]int8)
+ v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) {
+ v, changed := f.DecMapIntInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool,
+ d *Decoder) (_ map[int]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]int16)
+ v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) {
+ v, changed := f.DecMapIntInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool,
+ d *Decoder) (_ map[int]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]int32)
+ v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) {
+ v, changed := f.DecMapIntInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool,
+ d *Decoder) (_ map[int]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]int64)
+ v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) {
+ v, changed := f.DecMapIntInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool,
+ d *Decoder) (_ map[int]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]float32)
+ v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) {
+ v, changed := f.DecMapIntFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool,
+ d *Decoder) (_ map[int]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]float64)
+ v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) {
+ v, changed := f.DecMapIntFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool,
+ d *Decoder) (_ map[int]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int]bool)
+ v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) {
+ v, changed := f.DecMapIntBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool,
+ d *Decoder) (_ map[int]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]interface{})
+ v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) {
+ v, changed := f.DecMapInt8IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool,
+ d *Decoder) (_ map[int8]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int8
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]string)
+ v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) {
+ v, changed := f.DecMapInt8StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool,
+ d *Decoder) (_ map[int8]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[int8]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]uint)
+ v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) {
+ v, changed := f.DecMapInt8UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool,
+ d *Decoder) (_ map[int8]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]uint8)
+ v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) {
+ v, changed := f.DecMapInt8Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool,
+ d *Decoder) (_ map[int8]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]uint16)
+ v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) {
+ v, changed := f.DecMapInt8Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool,
+ d *Decoder) (_ map[int8]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]uint32)
+ v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) {
+ v, changed := f.DecMapInt8Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool,
+ d *Decoder) (_ map[int8]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]uint64)
+ v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) {
+ v, changed := f.DecMapInt8Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool,
+ d *Decoder) (_ map[int8]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]uintptr)
+ v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) {
+ v, changed := f.DecMapInt8UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool,
+ d *Decoder) (_ map[int8]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]int)
+ v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) {
+ v, changed := f.DecMapInt8IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool,
+ d *Decoder) (_ map[int8]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]int8)
+ v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) {
+ v, changed := f.DecMapInt8Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool,
+ d *Decoder) (_ map[int8]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]int16)
+ v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) {
+ v, changed := f.DecMapInt8Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool,
+ d *Decoder) (_ map[int8]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int8]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]int32)
+ v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) {
+ v, changed := f.DecMapInt8Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool,
+ d *Decoder) (_ map[int8]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]int64)
+ v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) {
+ v, changed := f.DecMapInt8Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool,
+ d *Decoder) (_ map[int8]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]float32)
+ v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) {
+ v, changed := f.DecMapInt8Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool,
+ d *Decoder) (_ map[int8]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int8]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]float64)
+ v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) {
+ v, changed := f.DecMapInt8Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool,
+ d *Decoder) (_ map[int8]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int8]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int8]bool)
+ v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) {
+ v, changed := f.DecMapInt8BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool,
+ d *Decoder) (_ map[int8]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[int8]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int8
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]interface{})
+ v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) {
+ v, changed := f.DecMapInt16IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool,
+ d *Decoder) (_ map[int16]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int16
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]string)
+ v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) {
+ v, changed := f.DecMapInt16StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool,
+ d *Decoder) (_ map[int16]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+ v = make(map[int16]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]uint)
+ v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) {
+ v, changed := f.DecMapInt16UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool,
+ d *Decoder) (_ map[int16]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]uint8)
+ v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) {
+ v, changed := f.DecMapInt16Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool,
+ d *Decoder) (_ map[int16]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]uint16)
+ v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) {
+ v, changed := f.DecMapInt16Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool,
+ d *Decoder) (_ map[int16]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]uint32)
+ v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) {
+ v, changed := f.DecMapInt16Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool,
+ d *Decoder) (_ map[int16]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]uint64)
+ v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) {
+ v, changed := f.DecMapInt16Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool,
+ d *Decoder) (_ map[int16]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]uintptr)
+ v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) {
+ v, changed := f.DecMapInt16UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool,
+ d *Decoder) (_ map[int16]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]int)
+ v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) {
+ v, changed := f.DecMapInt16IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool,
+ d *Decoder) (_ map[int16]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]int8)
+ v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) {
+ v, changed := f.DecMapInt16Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool,
+ d *Decoder) (_ map[int16]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]int16)
+ v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) {
+ v, changed := f.DecMapInt16Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool,
+ d *Decoder) (_ map[int16]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+ v = make(map[int16]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]int32)
+ v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) {
+ v, changed := f.DecMapInt16Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool,
+ d *Decoder) (_ map[int16]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]int64)
+ v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) {
+ v, changed := f.DecMapInt16Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool,
+ d *Decoder) (_ map[int16]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]float32)
+ v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) {
+ v, changed := f.DecMapInt16Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool,
+ d *Decoder) (_ map[int16]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int16]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]float64)
+ v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) {
+ v, changed := f.DecMapInt16Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool,
+ d *Decoder) (_ map[int16]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int16]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int16]bool)
+ v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) {
+ v, changed := f.DecMapInt16BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool,
+ d *Decoder) (_ map[int16]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[int16]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int16
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]interface{})
+ v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) {
+ v, changed := f.DecMapInt32IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool,
+ d *Decoder) (_ map[int32]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int32
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]string)
+ v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) {
+ v, changed := f.DecMapInt32StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool,
+ d *Decoder) (_ map[int32]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+ v = make(map[int32]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]uint)
+ v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) {
+ v, changed := f.DecMapInt32UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool,
+ d *Decoder) (_ map[int32]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]uint8)
+ v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) {
+ v, changed := f.DecMapInt32Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool,
+ d *Decoder) (_ map[int32]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]uint16)
+ v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) {
+ v, changed := f.DecMapInt32Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool,
+ d *Decoder) (_ map[int32]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]uint32)
+ v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) {
+ v, changed := f.DecMapInt32Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool,
+ d *Decoder) (_ map[int32]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]uint64)
+ v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) {
+ v, changed := f.DecMapInt32Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool,
+ d *Decoder) (_ map[int32]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]uintptr)
+ v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) {
+ v, changed := f.DecMapInt32UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool,
+ d *Decoder) (_ map[int32]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]int)
+ v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) {
+ v, changed := f.DecMapInt32IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool,
+ d *Decoder) (_ map[int32]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]int8)
+ v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) {
+ v, changed := f.DecMapInt32Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool,
+ d *Decoder) (_ map[int32]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]int16)
+ v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) {
+ v, changed := f.DecMapInt32Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool,
+ d *Decoder) (_ map[int32]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+ v = make(map[int32]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]int32)
+ v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) {
+ v, changed := f.DecMapInt32Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool,
+ d *Decoder) (_ map[int32]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]int64)
+ v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) {
+ v, changed := f.DecMapInt32Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool,
+ d *Decoder) (_ map[int32]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]float32)
+ v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) {
+ v, changed := f.DecMapInt32Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool,
+ d *Decoder) (_ map[int32]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+ v = make(map[int32]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]float64)
+ v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) {
+ v, changed := f.DecMapInt32Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool,
+ d *Decoder) (_ map[int32]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int32]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int32]bool)
+ v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) {
+ v, changed := f.DecMapInt32BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool,
+ d *Decoder) (_ map[int32]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[int32]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int32
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]interface{})
+ v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) {
+ v, changed := f.DecMapInt64IntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool,
+ d *Decoder) (_ map[int64]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int64
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]string)
+ v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) {
+ v, changed := f.DecMapInt64StringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool,
+ d *Decoder) (_ map[int64]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+ v = make(map[int64]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]uint)
+ v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) {
+ v, changed := f.DecMapInt64UintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool,
+ d *Decoder) (_ map[int64]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]uint8)
+ v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) {
+ v, changed := f.DecMapInt64Uint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool,
+ d *Decoder) (_ map[int64]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]uint16)
+ v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) {
+ v, changed := f.DecMapInt64Uint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool,
+ d *Decoder) (_ map[int64]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]uint32)
+ v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) {
+ v, changed := f.DecMapInt64Uint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool,
+ d *Decoder) (_ map[int64]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]uint64)
+ v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) {
+ v, changed := f.DecMapInt64Uint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool,
+ d *Decoder) (_ map[int64]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]uintptr)
+ v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) {
+ v, changed := f.DecMapInt64UintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool,
+ d *Decoder) (_ map[int64]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]int)
+ v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) {
+ v, changed := f.DecMapInt64IntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool,
+ d *Decoder) (_ map[int64]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]int8)
+ v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) {
+ v, changed := f.DecMapInt64Int8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool,
+ d *Decoder) (_ map[int64]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]int16)
+ v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) {
+ v, changed := f.DecMapInt64Int16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool,
+ d *Decoder) (_ map[int64]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+ v = make(map[int64]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]int32)
+ v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) {
+ v, changed := f.DecMapInt64Int32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool,
+ d *Decoder) (_ map[int64]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]int64)
+ v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) {
+ v, changed := f.DecMapInt64Int64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool,
+ d *Decoder) (_ map[int64]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]float32)
+ v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) {
+ v, changed := f.DecMapInt64Float32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool,
+ d *Decoder) (_ map[int64]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+ v = make(map[int64]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]float64)
+ v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) {
+ v, changed := f.DecMapInt64Float64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool,
+ d *Decoder) (_ map[int64]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+ v = make(map[int64]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[int64]bool)
+ v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) {
+ v, changed := f.DecMapInt64BoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool,
+ d *Decoder) (_ map[int64]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[int64]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk int64
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeInt64()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]interface{})
+ v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) {
+ v, changed := f.DecMapBoolIntfV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool,
+ d *Decoder) (_ map[bool]interface{}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]interface{}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk bool
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = nil
+ }
+ continue
+ }
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]string)
+ v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) {
+ v, changed := f.DecMapBoolStringV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool,
+ d *Decoder) (_ map[bool]string, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+ v = make(map[bool]string, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = ""
+ }
+ continue
+ }
+ mv = dd.DecodeString()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]uint)
+ v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) {
+ v, changed := f.DecMapBoolUintV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool,
+ d *Decoder) (_ map[bool]uint, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv uint
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]uint8)
+ v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) {
+ v, changed := f.DecMapBoolUint8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool,
+ d *Decoder) (_ map[bool]uint8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]uint8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]uint16)
+ v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) {
+ v, changed := f.DecMapBoolUint16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool,
+ d *Decoder) (_ map[bool]uint16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]uint16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv uint16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]uint32)
+ v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) {
+ v, changed := f.DecMapBoolUint32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool,
+ d *Decoder) (_ map[bool]uint32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]uint32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv uint32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]uint64)
+ v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) {
+ v, changed := f.DecMapBoolUint64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool,
+ d *Decoder) (_ map[bool]uint64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uint64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeUint64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]uintptr)
+ v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) {
+ v, changed := f.DecMapBoolUintptrV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool,
+ d *Decoder) (_ map[bool]uintptr, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]uintptr, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv uintptr
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]int)
+ v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) {
+ v, changed := f.DecMapBoolIntV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool,
+ d *Decoder) (_ map[bool]int, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]int8)
+ v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) {
+ v, changed := f.DecMapBoolInt8V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool,
+ d *Decoder) (_ map[bool]int8, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]int8, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv int8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]int16)
+ v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) {
+ v, changed := f.DecMapBoolInt16V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool,
+ d *Decoder) (_ map[bool]int16, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+ v = make(map[bool]int16, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv int16
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]int32)
+ v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) {
+ v, changed := f.DecMapBoolInt32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool,
+ d *Decoder) (_ map[bool]int32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]int32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]int64)
+ v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) {
+ v, changed := f.DecMapBoolInt64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool,
+ d *Decoder) (_ map[bool]int64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]int64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv int64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeInt64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]float32)
+ v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) {
+ v, changed := f.DecMapBoolFloat32V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool,
+ d *Decoder) (_ map[bool]float32, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+ v = make(map[bool]float32, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv float32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]float64)
+ v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) {
+ v, changed := f.DecMapBoolFloat64V(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool,
+ d *Decoder) (_ map[bool]float64, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+ v = make(map[bool]float64, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = 0
+ }
+ continue
+ }
+ mv = dd.DecodeFloat64()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+
+func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[bool]bool)
+ v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+ } else {
+ fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d)
+ }
+}
+func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) {
+ v, changed := f.DecMapBoolBoolV(*vp, true, d)
+ if changed {
+ *vp = v
+ }
+}
+func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool,
+ d *Decoder) (_ map[bool]bool, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators()
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+ v = make(map[bool]bool, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ var mk bool
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep {
+ dd.ReadMapElemKey()
+ }
+ mk = dd.DecodeBool()
+ if esep {
+ dd.ReadMapElemValue()
+ }
+ if dd.TryDecodeAsNil() {
+ if v == nil {
+ } else if d.h.DeleteOnNilMapValue {
+ delete(v, mk)
+ } else {
+ v[mk] = false
+ }
+ continue
+ }
+ mv = dd.DecodeBool()
+ if v != nil {
+ v[mk] = mv
+ }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
new file mode 100644
index 0000000..2023e05
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
@@ -0,0 +1,544 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types,
+// - map of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
+//
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathEnabled = true
+
+type fastpathT struct {}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [{{ .FastpathLen }}]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
+ for i < j {
+ h = i + (j-i)/2
+ if x[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < {{ .FastpathLen }} && x[i].rtid == rtid {
+ return i
+ }
+ return -1
+}
+
+type fastpathAslice []fastpathE
+
+func (x fastpathAslice) Len() int { return len(x) }
+func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
+func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+var fastpathAV fastpathA
+
+// due to possible initialization loop error, make fastpath in an init()
+func init() {
+ i := 0
+ fn := func(v interface{},
+ fe func(*Encoder, *codecFnInfo, reflect.Value),
+ fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
+ xrt := reflect.TypeOf(v)
+ xptr := rt2id(xrt)
+ fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
+ i++
+ return
+ }
+ {{/* do not register []uint8 in fast-path */}}
+ {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
+ fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}}
+
+ {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
+
+ sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
+ case []{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ case *[]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
+*/}}{{end}}{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
+*/}}{{end}}{{end}}{{end}}
+
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+{{/*
+**** removing this block, as they are never called directly ****
+
+
+
+**** removing this block, as they are never called directly ****
+
+
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case []{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ case *[]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+
+
+**** removing this block, as they are never called directly ****
+
+
+
+**** removing this block, as they are never called directly ****
+*/}}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
+ if f.ti.mbs {
+ fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e)
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e)
+ }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
+ if v == nil { e.e.EncodeNil(); return }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteArrayStart(len(v))
+ if esep {
+ for _, v2 := range v {
+ ee.WriteArrayElem()
+ {{ encmd .Elem "v2"}}
+ }
+ } else {
+ for _, v2 := range v {
+ {{ encmd .Elem "v2"}}
+ }
+ } {{/*
+ for _, v2 := range v {
+ if esep { ee.WriteArrayElem() }
+ {{ encmd .Elem "v2"}}
+ } */}}
+ ee.WriteArrayEnd()
+}
+func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ if len(v)%2 == 1 {
+ e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+ return
+ }
+ ee.WriteMapStart(len(v) / 2)
+ if esep {
+ for j, v2 := range v {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ {{ encmd .Elem "v2"}}
+ }
+ } else {
+ for _, v2 := range v {
+ {{ encmd .Elem "v2"}}
+ }
+ } {{/*
+ for j, v2 := range v {
+ if esep {
+ if j%2 == 0 {
+ ee.WriteMapElemKey()
+ } else {
+ ee.WriteMapElemValue()
+ }
+ }
+ {{ encmd .Elem "v2"}}
+ } */}}
+ ee.WriteMapEnd()
+}
+{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
+}
+func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
+ if v == nil { e.e.EncodeNil(); return }
+ ee, esep := e.e, e.hh.hasElemSeparators()
+ ee.WriteMapStart(len(v))
+ if e.h.Canonical {
+ {{if eq .MapKey "interface{}"}}{{/* out of band
+ */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesI, len(v))
+ var i, l int
+ var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
+ for k2, _ := range v {
+ l = len(mksv)
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesISlice(v2))
+ if esep {
+ for j := range v2 {
+ ee.WriteMapElemKey()
+ e.asis(v2[j].v)
+ ee.WriteMapElemValue()
+ e.encode(v[v2[j].i])
+ }
+ } else {
+ for j := range v2 {
+ e.asis(v2[j].v)
+ e.encode(v[v2[j].i])
+ }
+ } {{/*
+ for j := range v2 {
+ if esep { ee.WriteMapElemKey() }
+ e.asis(v2[j].v)
+ if esep { ee.WriteMapElemValue() }
+ e.encode(v[v2[j].i])
+ } */}} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
+ var i int
+ for k, _ := range v {
+ v2[i] = {{ $x }}(k)
+ i++
+ }
+ sort.Sort({{ sorttype .MapKey false}}(v2))
+ if esep {
+ for _, k2 := range v2 {
+ ee.WriteMapElemKey()
+ {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+ ee.WriteMapElemValue()
+ {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+ }
+ } else {
+ for _, k2 := range v2 {
+ {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+ {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+ }
+ } {{/*
+ for _, k2 := range v2 {
+ if esep { ee.WriteMapElemKey() }
+ {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
+ if esep { ee.WriteMapElemValue() }
+ {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
+ } */}} {{end}}
+ } else {
+ if esep {
+ for k2, v2 := range v {
+ ee.WriteMapElemKey()
+ {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
+ ee.WriteMapElemValue()
+ {{ encmd .Elem "v2"}}
+ }
+ } else {
+ for k2, v2 := range v {
+ {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
+ {{ encmd .Elem "v2"}}
+ }
+ } {{/*
+ for k2, v2 := range v {
+ if esep { ee.WriteMapElemKey() }
+ {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
+ if esep { ee.WriteMapElemValue() }
+ {{ encmd .Elem "v2"}}
+ } */}}
+ }
+ ee.WriteMapEnd()
+}
+{{end}}{{end}}{{end}}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ var changed bool
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
+ case []{{ .Elem }}:
+ var v2 []{{ .Elem }}
+ v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ case *[]{{ .Elem }}:
+ var v2 []{{ .Elem }}
+ v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
+ if changed {
+ *v = v2
+ }{{/*
+*/}}{{end}}{{end}}{{end}}{{end}}
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
+// maps only change if nil, and in that case, there's no point copying
+*/}}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ var v2 map[{{ .MapKey }}]{{ .Elem }}
+ v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
+ if changed {
+ *v = v2
+ }{{/*
+*/}}{{end}}{{end}}{{end}}
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+ case *[]{{ .Elem }}:
+ *v = nil {{/*
+*/}}{{end}}{{end}}{{end}}
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ *v = nil {{/*
+*/}}{{end}}{{end}}{{end}}
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
+{{/*
+Slices can change if they
+- did not come from an array
+- are addressable (from a ptr)
+- are settable (e.g. contained in an interface{})
+*/}}
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+ if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*[]{{ .Elem }})
+ v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d)
+ if changed { *vp = v }
+ } else {
+ v := rv2i(rv).([]{{ .Elem }})
+ v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d)
+ if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+ copy(v, v2)
+ }
+ }
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
+ v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
+ if changed { *vp = v }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
+ dd := d.d{{/*
+ // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil()
+ */}}
+ slh, containerLenS := d.decSliceHelperStart()
+ if containerLenS == 0 {
+ if canChange {
+ if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] }
+ changed = true
+ }
+ slh.End()
+ return v, changed
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen && canChange {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+ if xlen <= cap(v) {
+ v = v[:xlen]
+ } else {
+ v = make([]{{ .Elem }}, xlen)
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ j := 0
+ for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+ if j == 0 && len(v) == 0 && canChange {
+ if hasLen {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+ } else {
+ xlen = 8
+ }
+ v = make([]{{ .Elem }}, xlen)
+ changed = true
+ }
+ // if indefinite, etc, then expand the slice if necessary
+ var decodeIntoBlank bool
+ if j >= len(v) {
+ if canChange {
+ v = append(v, {{ zerocmd .Elem }})
+ changed = true
+ } else {
+ d.arrayCannotExpand(len(v), j+1)
+ decodeIntoBlank = true
+ }
+ }
+ slh.ElemContainerState(j)
+ if decodeIntoBlank {
+ d.swallow()
+ } else if dd.TryDecodeAsNil() {
+ v[j] = {{ zerocmd .Elem }}
+ } else {
+ {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
+ }
+ }
+ if canChange {
+ if j < len(v) {
+ v = v[:j]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = make([]{{ .Elem }}, 0)
+ changed = true
+ }
+ }
+ slh.End()
+ return v, changed
+}
+{{end}}{{end}}{{end}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
+{{/*
+Maps can change if they are
+- addressable (from a ptr)
+- settable (e.g. contained in an interface{})
+*/}}
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+ if rv.Kind() == reflect.Ptr {
+ vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
+ v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d);
+ if changed { *vp = v }
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d)
+ }
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
+ v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
+ if changed { *vp = v }
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool,
+ d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
+ dd, esep := d.d, d.hh.hasElemSeparators(){{/*
+ // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil()
+ */}}
+ containerLen := dd.ReadMapStart()
+ if canChange && v == nil {
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
+ v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
+ changed = true
+ }
+ if containerLen == 0 {
+ dd.ReadMapEnd()
+ return v, changed
+ }
+ {{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ {{end}}var mk {{ .MapKey }}
+ var mv {{ .Elem }}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if esep { dd.ReadMapElemKey() }
+ {{ if eq .MapKey "interface{}" }}mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+ }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+ if esep { dd.ReadMapElemValue() }
+ if dd.TryDecodeAsNil() {
+ if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} }
+ continue
+ }
+ {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+ d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+ if v != nil { v[mk] = mv }
+ }
+ dd.ReadMapEnd()
+ return v, changed
+}
+{{end}}{{end}}{{end}}
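[editor note — not part of the vendored patch] The header comment in fast-path.go.tmpl sketches the unaddressable-value case (the m2/p2 example) only in outline. The following Go sketch fills it in end to end; it assumes the library's default options (InterfaceReset and MapValueReset left unset) and an arbitrary handle choice, and all names and values are illustrative only.

	package main

	import (
		"fmt"

		"github.com/ugorji/go/codec"
	)

	func main() {
		var h codec.CborHandle // any Handle would do; fast-path dispatch is handle-independent

		// Encode a one-element slice whose element is a map.
		var buf []byte
		if err := codec.NewEncoderBytes(&buf, &h).Encode([]interface{}{map[string]int{"a": 1}}); err != nil {
			panic(err)
		}

		// Decode into a pre-built structure: m2 is reachable only through p2[0],
		// so it is unaddressable and the fast-path decode must fill it in place.
		m2 := map[string]int{}
		p2 := []interface{}{m2}
		if err := codec.NewDecoderBytes(buf, &h).Decode(&p2); err != nil {
			panic(err)
		}
		fmt.Println(m2) // expected: map[a:1]
	}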
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 0000000..f11b467
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+const fastpathEnabled = false
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, execution of small tools which use codec, etc.
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
+
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func (x fastpathA) index(rtid uintptr) int { return -1 }
+
+func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
+ fn := d.cfer().get(uint8SliceTyp, true, true)
+ d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
+ return v, true
+}
+
+var fastpathAV fastpathA
+var fastpathTV fastpathT
+
+// ----
+type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode
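[editor note — not part of the vendored patch] The comments above explain what the notfastpath tag does but not how it is supplied. With standard Go tooling the tag is passed on the command line, e.g. "go build -tags notfastpath ./..." or "go test -tags notfastpath ./...". With the tag set, this stub (fast-path.not.go) is compiled instead of the large fast-path.generated.go, fastpathEnabled is false, and encoding/decoding falls back to the reflection-based paths: behaviour is unchanged, at the cost of slower dispatch but a noticeably faster build.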
diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
new file mode 100644
index 0000000..59c5983
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
@@ -0,0 +1,78 @@
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{else if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else {
+ {{var "hl"}} := {{var "l"}} > 0
+ var {{var "rl"}} int
+ _ = {{var "rl"}}
+ {{if isSlice }} if {{var "hl"}} {
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ } else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ }
+ } {{end}}
+ var {{var "j"}} int
+ // var {{var "dn"}} bool
+ for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+ {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
+ if {{var "hl"}} {
+ {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ } else {
+ {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
+ }
+ {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
+ {{var "c"}} = true
+ }{{end}}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
+ {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
+ {{ decLineVar $x }}
+ {{var "v"}} <- {{ $x }}
+ // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
+ {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
+ var {{var "db"}} bool
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+ {{var "c"}} = true
+ {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
+ {{end}}
+ }
+ if {{var "db"}} {
+ z.DecSwallow()
+ } else {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{end}}
+ }
+ {{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = make([]{{ .Typ }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
new file mode 100644
index 0000000..8323b54
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
@@ -0,0 +1,42 @@
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} != 0 {
+{{var "hl"}} := {{var "l"}} > 0
+ for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+ r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
+ {{var "mdn"}} = false
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
+ if {{var "mdn"}} {
+ if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
+ } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
diff --git a/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl
new file mode 100644
index 0000000..4249588
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl
@@ -0,0 +1,27 @@
+{{.Label}}:
+switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+ default:
+ break {{.Label}}
+ }
+ }
+case timeout{{.Sfx}} > 0: // consume until timeout
+ tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ case <-tt{{.Sfx}}.C:
+ // close(tt.C)
+ break {{.Label}}
+ }
+ }
+default: // consume until close
+ for b{{.Sfx}} := range {{.Chan}} {
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ }
+}
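[editor note — not part of the vendored patch] This template is only expanded inside codecgen-generated encoders, but the ChanRecvTimeout option it switches on is an ordinary handle field. A hedged consumer-side sketch follows; it assumes the reflection-based encoder drains channels the same way as the generated code above, and the handle choice and values are illustrative:

	package main

	import (
		"fmt"

		"github.com/ugorji/go/codec"
	)

	func main() {
		ch := make(chan int, 3)
		ch <- 1
		ch <- 2
		ch <- 3
		close(ch)

		var h codec.JsonHandle
		h.ChanRecvTimeout = -1 // negative: consume until close, the template's default case

		var out []byte
		if err := codec.NewEncoderBytes(&out, &h).Encode(ch); err != nil {
			panic(err)
		}
		fmt.Println(string(out)) // expected: [1,2,3]
	}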
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
new file mode 100644
index 0000000..917d282
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -0,0 +1,335 @@
+/* // +build ignore */
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = 8
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+ ge = genHelperEncoder{e: e}
+ ee = genHelperEncDriver{encDriver: e.e}
+ return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+ gd = genHelperDecoder{d: d}
+ dd = genHelperDecDriver{decDriver: d.d}
+ return
+}
+
+type genHelperEncDriver struct {
+ encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+ encStructFieldKey(x.encDriver, keyType, s)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+ x.encDriver.EncodeString(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+ decDriver
+ C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+ return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ f = x.DecodeFloat64()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+ f = x.DecodeFloat64()
+ if chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ M must
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ C checkOverflow
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ // f.e.encodeI(iv, false, false)
+ f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, cUTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, cUTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, cRAW)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
+
+// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+// if _, ok := f.e.hh.(*BincHandle); ok {
+// return timeTypId
+// }
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
+ return i2rtid(v)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+ return f.e.h.getExt(rtid)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
+ return &f.d.b
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ rv := reflect.ValueOf(iv)
+ if chkPtr {
+ rv = f.d.ensureDecodeable(rv)
+ }
+ f.d.decodeValue(rv, nil, false)
+ // f.d.decodeValueFallback(rv)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeStringAsBytes()
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return }
+
+// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+// // Note: builtin is no longer supported - so make this a no-op
+// if _, ok := f.d.hh.(*BincHandle); ok {
+// return timeTypId
+// }
+// return 0
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
+ return i2rtid(v)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+ return f.d.h.getExt(rtid)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+ return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: no longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
new file mode 100644
index 0000000..6aeb856
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -0,0 +1,302 @@
+/* // +build ignore */
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = {{ .Version }}
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the exported GenHelper(En|De)coder functions that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
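+// Illustration only (not part of the template; MyType and the 1234 suffix are placeholders):
+// code emitted by codecgen typically acquires these helpers once per Selfer method, e.g.
+//
+//	func (x *MyType) CodecEncodeSelf(e *codec1978.Encoder) {
+//		var h codecSelfer1234
+//		z, r := codec1978.GenHelperEncoder(e)
+//		_, _, _ = h, z, r
+//		// ... per-field encoding follows ...
+//	}
+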
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+ ge = genHelperEncoder{e: e}
+ ee = genHelperEncDriver{encDriver: e.e}
+ return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+ gd = genHelperDecoder{d: d}
+ dd = genHelperDecDriver{decDriver: d.d}
+ return
+}
+
+type genHelperEncDriver struct {
+ encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+ encStructFieldKey(x.encDriver, keyType, s)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+ x.encDriver.EncodeString(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+ decDriver
+ C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+ return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ f = x.DecodeFloat64()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+ f = x.DecodeFloat64()
+ if chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ M must
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ C checkOverflow
+ d *Decoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // println(">>>>>>>>> EncFallback")
+ // f.e.encodeI(iv, false, false)
+ f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshal(bs, fnerr, false, cUTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshal(bs, fnerr, true, cUTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshal(bs, fnerr, false, cRAW)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
+// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+// if _, ok := f.e.hh.(*BincHandle); ok {
+// return timeTypId
+// }
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
+ return i2rtid(v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+ return f.e.h.getExt(rtid)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) HasExtensions() bool {
+ return len(f.e.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+ if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil {
+ f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+ return true
+ }
+ return false
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchBuffer() []byte {
+ return f.d.b[:]
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
+ return &f.d.b
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ // println(">>>>>>>>> DecFallback")
+ rv := reflect.ValueOf(iv)
+ if chkPtr {
+ rv = f.d.ensureDecodeable(rv)
+ }
+ f.d.decodeValue(rv, nil, false)
+ // f.d.decodeValueFallback(rv)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ // bs := f.dd.DecodeStringAsBytes()
+ // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: builtin no longer supported - so we make this method a no-op,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return }
+// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
+// // Note: builtin is no longer supported - so make this a no-op
+// if _, ok := f.d.hh.(*BincHandle); ok {
+// return timeTypId
+// }
+// return 0
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
+ return i2rtid(v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
+ return f.d.h.getExt(rtid)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) HasExtensions() bool {
+ return len(f.d.h.extHandle) != 0
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: No longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
+ if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil {
+ f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
+ return true
+ }
+ return false
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+ return decInferLen(clen, maxlen, unit)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// Deprecated: no longer used,
+// but leave in-place so that old generated files continue to work without regeneration.
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
+
diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go
new file mode 100644
index 0000000..240ba9f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -0,0 +1,164 @@
+// +build codecgen.exec
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = `
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := r.ReadMapStart()
+{{var "bh"}} := z.DecBasicHandle()
+if {{var "v"}} == nil {
+ {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+var {{var "mk"}} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if {{var "bh"}}.MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} != 0 {
+{{var "hl"}} := {{var "l"}} > 0
+ for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+ r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
+ {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
+{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = string({{var "bv"}})
+ }{{ end }}{{if decElemKindPtr}}
+ {{var "ms"}} = true{{end}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
+ {{var "mdn"}} = false
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
+ if {{var "mdn"}} {
+ if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
+ } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
+ {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
+ }
+}
+} // else len==0: TODO: Should we clear map entries?
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
+`
+
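+// Illustrative expansion (an assumption about the custom "var" template function, which is
+// defined in gen.go rather than in this file): {{var "v"}} appears to expand to the temp-var
+// prefix, the name, and a per-expansion suffix, so the opening of genDecMapTmpl above comes
+// out roughly as:
+//
+//	yyv1 := *v
+//	yyl1 := r.ReadMapStart()
+//	yybh1 := z.DecBasicHandle()
+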
+const genDecListTmpl = `
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}{{end}}
+if {{var "l"}} == 0 {
+ {{if isSlice }}if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{else if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+} else {
+ {{var "hl"}} := {{var "l"}} > 0
+ var {{var "rl"}} int
+ _ = {{var "rl"}}
+ {{if isSlice }} if {{var "hl"}} {
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ } else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ }
+ } {{end}}
+ var {{var "j"}} int
+ // var {{var "dn"}} bool
+ for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
+ {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
+ if {{var "hl"}} {
+ {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ } else {
+ {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
+ }
+ {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
+ {{var "c"}} = true
+ }{{end}}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
+ {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
+ {{ decLineVar $x }}
+ {{var "v"}} <- {{ $x }}
+ // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
+ {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
+ var {{var "db"}} bool
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+ {{var "c"}} = true
+ {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
+ {{end}}
+ }
+ if {{var "db"}} {
+ z.DecSwallow()
+ } else {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
+ }
+ {{end}}
+ }
+ {{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = make([]{{ .Typ }}, 0)
+ {{var "c"}} = true
+ } {{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}{{end}}
+`
+
+const genEncChanTmpl = `
+{{.Label}}:
+switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+ default:
+ break {{.Label}}
+ }
+ }
+case timeout{{.Sfx}} > 0: // consume until timeout
+ tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ case <-tt{{.Sfx}}.C:
+ // close(tt.C)
+ break {{.Label}}
+ }
+ }
+default: // consume until close
+ for b{{.Sfx}} := range {{.Chan}} {
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ }
+}
+`
diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 0000000..b4c4031
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,2139 @@
+// +build codecgen.exec
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Raw
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+// - Canonical option. (codecgen IGNORES it currently)
+// This is just because it has not been implemented.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement it completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// codec framework is very feature rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to the runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code.
+//
+// Note:
+// codecgen-generated code depends on the variables defined by fast-path.generated.go.
+// Consequently, you cannot run with tags "codecgen notfastpath".
+
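+// For orientation (a sketch, not emitted verbatim; T stands in for a user struct type and
+// codec1978 for the import alias defined below): the Selfer surface that Gen writes has
+// this shape:
+//
+//	func (x *T) CodecEncodeSelf(e *codec1978.Encoder)                 { ... }
+//	func (x *T) CodecDecodeSelf(d *codec1978.Decoder)                 { ... }
+//	func (x *T) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)   { ... }
+//	func (x *T) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { ... }
+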
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+// - helper methods change (signature change, new ones added, some removed, etc)
+// - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+// v6: removed unsafe from gen, and now uses codecgen.exec tag
+// v7:
+// v8: current - we now maintain compatibility with old generated code.
+const genVersion = 8
+
+const (
+ genCodecPkg = "codec1978"
+ genTempVarPfx = "yy"
+ genTopLevelVarName = "x"
+
+ // ignore canBeNil parameter, and always set to true.
+ // This is because nil can appear anywhere, so we should always check.
+ genAnythingCanBeNil = true
+
+ // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+ // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+ // are not executed a lot.
+ //
+ // From testing, it didn't make much difference in runtime, so keep as true (one function only)
+ genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+ genStructMapStyleConsolidated genStructMapStyle = iota
+ genStructMapStyleLenPrefix
+ genStructMapStyleCheckBreak
+)
+
+var (
+ errGenAllTypesSamePkg = errors.New("All types must be in the same package")
+ errGenExpectArrayOrMap = errors.New("unexpected type. Expecting array/map/slice")
+
+ genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+ genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+type genBuf struct {
+ buf []byte
+}
+
+func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x }
+func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x }
+func (x *genBuf) v() string { return string(x.buf) }
+func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) }
+func (x *genBuf) reset() {
+ if x.buf != nil {
+ x.buf = x.buf[:0]
+ }
+}
+
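+// genBuf is chained to assemble small code fragments; for example, encOmitEmptyLine below
+// builds omitempty conditions with calls like:
+//
+//	var b genBuf
+//	b.s("len(").s(varname2).s(") != 0") // b.v() then yields the emitted condition
+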
+// genRunner holds some state used during a Gen run.
+type genRunner struct {
+ w io.Writer // output
+ c uint64 // counter used for generating varsfx
+ t []reflect.Type // list of types to run selfer on
+
+ tc reflect.Type // currently running selfer on this type
+ te map[uintptr]bool // types for which the encoder has been created
+ td map[uintptr]bool // types for which the decoder has been created
+ cp string // codec import path
+
+ im map[string]reflect.Type // imports to add
+ imn map[string]string // package names of imports to add
+ imc uint64 // counter for import numbers
+
+ is map[reflect.Type]struct{} // types seen during import search
+ bp string // base PkgPath, for which we are generating for
+
+ cpfx string // codec package prefix
+
+ tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+ ts []reflect.Type // types for which enc/dec must be generated
+
+ xs string // top level variable/constant suffix
+ hn string // fn helper type name
+
+ ti *TypeInfos
+ // rr *rand.Rand // random generator for file-specific types
+
+ nx bool // no extensions
+}
+
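+// Hypothetical driver-side usage (a sketch only; the codecgen command supplies real package
+// names, build tags and the types it discovers in the target package):
+//
+//	var out bytes.Buffer
+//	Gen(&out, "", "mypkg", "1234", false, nil, reflect.TypeOf(MyStruct{}))
+//	// out now holds a complete Go source file with Selfer implementations for MyStruct.
+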
+// Gen will write a complete go file containing Selfer implementations for each
+// type passed. All the types must be in the same package.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
+ ti *TypeInfos, typ ...reflect.Type) {
+ // All types passed to this method do not have a codec.Selfer method implemented directly.
+ // codecgen already checks the AST and skips any types that define the codec.Selfer methods.
+ // Consequently, there's no need to check and trim them if they implement codec.Selfer
+
+ if len(typ) == 0 {
+ return
+ }
+ x := genRunner{
+ w: w,
+ t: typ,
+ te: make(map[uintptr]bool),
+ td: make(map[uintptr]bool),
+ im: make(map[string]reflect.Type),
+ imn: make(map[string]string),
+ is: make(map[reflect.Type]struct{}),
+ tm: make(map[reflect.Type]struct{}),
+ ts: []reflect.Type{},
+ bp: genImportPath(typ[0]),
+ xs: uid,
+ ti: ti,
+ nx: noExtensions,
+ }
+ if x.ti == nil {
+ x.ti = defTypeInfos
+ }
+ if x.xs == "" {
+ rr := rand.New(rand.NewSource(time.Now().UnixNano()))
+ x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
+ }
+
+ // gather imports first:
+ x.cp = genImportPath(reflect.TypeOf(x))
+ x.imn[x.cp] = genCodecPkg
+ for _, t := range typ {
+ // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
+ if genImportPath(t) != x.bp {
+ panic(errGenAllTypesSamePkg)
+ }
+ x.genRefPkgs(t)
+ }
+ if buildTags != "" {
+ x.line("// +build " + buildTags)
+ x.line("")
+ }
+ x.line(`
+
+// Code generated by codecgen - DO NOT EDIT.
+
+`)
+ x.line("package " + pkgName)
+ x.line("")
+ x.line("import (")
+ if x.cp != x.bp {
+ x.cpfx = genCodecPkg + "."
+ x.linef("%s \"%s\"", genCodecPkg, x.cp)
+ }
+ // use a sorted set of im keys, so that we can get consistent output
+ imKeys := make([]string, 0, len(x.im))
+ for k := range x.im {
+ imKeys = append(imKeys, k)
+ }
+ sort.Strings(imKeys)
+ for _, k := range imKeys { // for k, _ := range x.im {
+ if k == x.imn[k] {
+ x.linef("\"%s\"", k)
+ } else {
+ x.linef("%s \"%s\"", x.imn[k], k)
+ }
+ }
+ // add required packages
+ for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt"
+ if _, ok := x.im[k]; !ok {
+ x.line("\"" + k + "\"")
+ }
+ }
+ x.line(")")
+ x.line("")
+
+ x.line("const (")
+ x.linef("// ----- content types ----")
+ x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8))
+ x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW))
+ x.linef("// ----- value types used ----")
+ for _, vt := range [...]valueType{
+ valueTypeArray, valueTypeMap, valueTypeString,
+ valueTypeInt, valueTypeUint, valueTypeFloat} {
+ x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt))
+ }
+
+ x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs)
+ x.line(")")
+ x.line("var (")
+ x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)")
+ x.line(")")
+ x.line("")
+
+ x.hn = "codecSelfer" + x.xs
+ x.line("type " + x.hn + " struct{}")
+ x.line("")
+
+ x.varsfxreset()
+ x.line("func init() {")
+ x.linef("if %sGenVersion != %v {", x.cpfx, genVersion)
+ x.line("_, file, _, _ := runtime.Caller(0)")
+ x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx)
+ // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
+ // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx)
+ x.linef("}")
+ x.line("if false { // reference the types, but skip this branch at build/run time")
+ // x.line("_ = strconv.ParseInt")
+ var n int
+ // for k, t := range x.im {
+ for _, k := range imKeys {
+ t := x.im[k]
+ x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
+ n++
+ }
+ if n > 0 {
+ x.out("_")
+ for i := 1; i < n; i++ {
+ x.out(", _")
+ }
+ x.out(" = v0")
+ for i := 1; i < n; i++ {
+ x.outf(", v%v", i)
+ }
+ }
+ x.line("} ") // close if false
+ x.line("}") // close init
+ x.line("")
+
+ // generate rest of type info
+ for _, t := range typ {
+ x.tc = t
+ x.selfer(true)
+ x.selfer(false)
+ }
+
+ for _, t := range x.ts {
+ rtid := rt2id(t)
+ // generate enc functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(true)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.encListFallback("v", t)
+ case reflect.Map:
+ x.encMapFallback("v", t)
+ default:
+ panic(errGenExpectArrayOrMap)
+ }
+ x.line("}")
+ x.line("")
+
+ // generate dec functions for all these slice/map types.
+ x.varsfxreset()
+ x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
+ x.genRequiredMethodVars(false)
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ x.decListFallback("v", rtid, t)
+ case reflect.Map:
+ x.decMapFallback("v", rtid, t)
+ default:
+ panic(errGenExpectArrayOrMap)
+ }
+ x.line("}")
+ x.line("")
+ }
+
+ x.line("")
+}
+
+func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
+ // return varname != genTopLevelVarName && t != x.tc
+ // the only time we checkForSelfer is if we are not at the TOP of the generated code.
+ return varname != genTopLevelVarName
+}
+
+func (x *genRunner) arr2str(t reflect.Type, s string) string {
+ if t.Kind() == reflect.Array {
+ return s
+ }
+ return ""
+}
+
+func (x *genRunner) genRequiredMethodVars(encode bool) {
+ x.line("var h " + x.hn)
+ if encode {
+ x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)")
+ } else {
+ x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)")
+ }
+ x.line("_, _, _ = h, z, r")
+}
+
+func (x *genRunner) genRefPkgs(t reflect.Type) {
+ if _, ok := x.is[t]; ok {
+ return
+ }
+ x.is[t] = struct{}{}
+ tpkg, tname := genImportPath(t), t.Name()
+ if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
+ if _, ok := x.im[tpkg]; !ok {
+ x.im[tpkg] = t
+ if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
+ x.imn[tpkg] = tpkg
+ } else {
+ x.imc++
+ x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
+ x.genRefPkgs(t.Elem())
+ case reflect.Map:
+ x.genRefPkgs(t.Elem())
+ x.genRefPkgs(t.Key())
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
+ x.genRefPkgs(t.Field(i).Type)
+ }
+ }
+ }
+}
+
+func (x *genRunner) varsfx() string {
+ x.c++
+ return strconv.FormatUint(x.c, 10)
+}
+
+func (x *genRunner) varsfxreset() {
+ x.c = 0
+}
+
+func (x *genRunner) out(s string) {
+ _, err := io.WriteString(x.w, s)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) outf(s string, params ...interface{}) {
+ _, err := fmt.Fprintf(x.w, s, params...)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) line(s string) {
+ x.out(s)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
+func (x *genRunner) linef(s string, params ...interface{}) {
+ x.outf(s, params...)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
+func (x *genRunner) genTypeName(t reflect.Type) (n string) {
+ // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
+
+ // if the type has a PkgPath, which doesn't match the current package,
+ // then include it.
+	// We cannot depend on t.String() because it includes the current package,
+	// or on t.PkgPath because it includes the full import path.
+ //
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "*"
+ t = t.Elem()
+ }
+ if tn := t.Name(); tn != "" {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
+ case reflect.Slice:
+ return ptrPfx + "[]" + x.genTypeName(t.Elem())
+ case reflect.Array:
+ return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
+ case reflect.Chan:
+ return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
+ default:
+ if t == intfTyp {
+ return ptrPfx + "interface{}"
+ } else {
+ return ptrPfx + x.genTypeNamePrim(t)
+ }
+ }
+}
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+ if t.Name() == "" {
+ return t.String()
+ } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+ return t.Name()
+ } else {
+ return x.imn[genImportPath(t)] + "." + t.Name()
+ // return t.String() // best way to get the package name inclusive
+ }
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+	// return the Go literal for the zero value of type t.
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+ reflect.Slice, reflect.Map, reflect.Invalid:
+ return "nil"
+ case reflect.Bool:
+ return "false"
+ case reflect.String:
+ return `""`
+ case reflect.Struct, reflect.Array:
+ return x.genTypeName(t) + "{}"
+ default: // all numbers
+ return "0"
+ }
+}
+
+func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
+ return genMethodNameT(t, x.tc)
+}
+
+func (x *genRunner) selfer(encode bool) {
+ t := x.tc
+ t0 := t
+ // always make decode use a pointer receiver,
+ // and structs/arrays always use a ptr receiver (encode|decode)
+ isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp)
+ x.varsfxreset()
+
+ fnSigPfx := "func (" + genTopLevelVarName + " "
+ if isptr {
+ fnSigPfx += "*"
+ }
+ fnSigPfx += x.genTypeName(t)
+ x.out(fnSigPfx)
+
+ if isptr {
+ t = reflect.PtrTo(t)
+ }
+ if encode {
+ x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
+ x.genRequiredMethodVars(true)
+ x.encVar(genTopLevelVarName, t)
+ } else {
+ x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+		// do not use decVar, as there is no need to check TryDecodeAsNil
+		// (nor an elegant way to handle that), and setting it to a
+		// non-nil value doesn't affect the pointer passed.
+ // x.decVar(genTopLevelVarName, t, false)
+ x.dec(genTopLevelVarName, t0, true)
+ }
+ x.line("}")
+ x.line("")
+
+ if encode || t0.Kind() != reflect.Struct {
+ return
+ }
+
+	// write the codecDecodeSelfFromMap method(s)
+ if genUseOneFunctionForDecStructMap {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated)
+ x.line("}")
+ x.line("")
+ } else {
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix)
+ x.line("}")
+ x.line("")
+
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak)
+ x.line("}")
+ x.line("")
+ }
+
+ // write containerArray
+ x.out(fnSigPfx)
+ x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
+ x.genRequiredMethodVars(false)
+ x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0)
+ x.line("}")
+ x.line("")
+
+}
+
+// used for chan, array, slice, map
+func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) {
+ var ptrPfx, addrPfx string
+ if isptr {
+ ptrPfx = "*"
+ } else {
+ addrPfx = "&"
+ }
+ if encode {
+ x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname)
+ } else {
+ x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname)
+ }
+ x.registerXtraT(t)
+}
+
+func (x *genRunner) registerXtraT(t reflect.Type) {
+ // recursively register the types
+ if _, ok := x.tm[t]; ok {
+ return
+ }
+ var tkey reflect.Type
+ switch t.Kind() {
+ case reflect.Chan, reflect.Slice, reflect.Array:
+ case reflect.Map:
+ tkey = t.Key()
+ default:
+ return
+ }
+ x.tm[t] = struct{}{}
+ x.ts = append(x.ts, t)
+ // check if this refers to any xtra types eg. a slice of array: add the array
+ x.registerXtraT(t.Elem())
+ if tkey != nil {
+ x.registerXtraT(tkey)
+ }
+}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+ // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+ var checkNil bool
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+ checkNil = true
+ }
+ if checkNil {
+ x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+ }
+
+ switch t.Kind() {
+ case reflect.Ptr:
+ telem := t.Elem()
+ tek := telem.Kind()
+ if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) {
+ x.enc(varname, genNonPtr(t))
+ break
+ }
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := *" + varname)
+ x.enc(genTempVarPfx+i, genNonPtr(t))
+ case reflect.Struct, reflect.Array:
+ if t == timeTyp {
+ x.enc(varname, t)
+ break
+ }
+ i := x.varsfx()
+ x.line(genTempVarPfx + i + " := &" + varname)
+ x.enc(genTempVarPfx+i, t)
+ default:
+ x.enc(varname, t)
+ }
+
+ if checkNil {
+ x.line("}")
+ }
+
+}
+
+// enc will encode a variable (varname) of type t, where t represents T.
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T
+// (to prevent copying),
+// else varname is of type T
+func (x *genRunner) enc(varname string, t reflect.Type) {
+ rtid := rt2id(t)
+ ti2 := x.ti.get(rtid, t)
+ // We call CodecEncodeSelf if one of the following are honored:
+ // - the type already implements Selfer, call that
+ // - the type has a Selfer implementation just created, use that
+ // - the type is in the list of the ones we will generate for, but it is not currently being generated
+
+ mi := x.varsfx()
+ // tptr := reflect.PtrTo(t)
+ tk := t.Kind()
+ if x.checkForSelfer(t, varname) {
+ if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T
+ // if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
+ if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ } else { // varname is of type T
+ if ti2.cs { // t.Implements(selferTyp) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ } else if ti2.csp { // tptr.Implements(selferTyp) {
+ x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
+ x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
+ return
+ }
+ }
+
+ if _, ok := x.te[rtid]; ok {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecEncodeSelf(e)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.te[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is time.Time, RawExt, Raw
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ if t == timeTyp {
+ x.linef("} else { r.EncodeTime(%s)", varname)
+ return
+ }
+ if t == rawTyp {
+ x.linef("} else { z.EncRaw(%s)", varname)
+ return
+ }
+ if t == rawExtTyp {
+ x.linef("} else { r.EncodeRawExt(%s, e)", varname)
+ return
+ }
+ // only check for extensions if the type is named, and has a packagePath.
+	var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname is of type *T
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+ yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+ x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy)
+ }
+ if arrayOrStruct { // varname is of type *T
+ if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+ }
+ if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+ } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+ }
+ } else { // varname is of type T
+ if ti2.bm { // t.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+ } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname)
+ }
+ if ti2.jm { // t.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+ } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname)
+ } else if ti2.tm { // t.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+ } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname)
+ }
+ }
+ x.line("} else {")
+
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(int64(" + varname + "))")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(uint64(" + varname + "))")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(float32(" + varname + "))")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(float64(" + varname + "))")
+ case reflect.Bool:
+ x.line("r.EncodeBool(bool(" + varname + "))")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + ", string(" + varname + "))")
+ case reflect.Chan:
+ x.xtraSM(varname, t, true, false)
+ // x.encListFallback(varname, rtid, t)
+ case reflect.Array:
+ x.xtraSM(varname, t, true, true)
+ case reflect.Slice:
+ // if nil, call dedicated function
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("r.EncodeStringBytes(codecSelferCcRAW" + x.xs + ", []byte(" + varname + "))")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+ } else {
+ x.xtraSM(varname, t, true, false)
+ // x.encListFallback(varname, rtid, t)
+ }
+ case reflect.Map:
+ // if nil, call dedicated function
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+ } else {
+ x.xtraSM(varname, t, true, false)
+ // x.encMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if !inlist {
+ delete(x.te, rtid)
+ x.line("z.EncFallback(" + varname + ")")
+ break
+ }
+ x.encStruct(varname, rtid, t)
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.EncFallback(" + varname + ")")
+ }
+}
+
+func (x *genRunner) encZero(t reflect.Type) {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(0)")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(0)")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(0)")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(0)")
+ case reflect.Bool:
+ x.line("r.EncodeBool(false)")
+ case reflect.String:
+ x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + `, "")`)
+ default:
+ x.line("r.EncodeNil()")
+ }
+}
+
+func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) {
+ // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
+	// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
+ varname2 := varname + "." + t2.Name
+ switch t2.Type.Kind() {
+ case reflect.Struct:
+ rtid2 := rt2id(t2.Type)
+ ti2 := x.ti.get(rtid2, t2.Type)
+ // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name)
+ if ti2.rtid == timeTypId {
+ buf.s("!(").s(varname2).s(".IsZero())")
+ break
+ }
+ if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) {
+ buf.s("!(").s(varname2).s(".IsZero())")
+ break
+ }
+ if ti2.isFlag(typeInfoFlagComparable) {
+ buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
+ break
+ }
+ // buf.s("(")
+ buf.s("false")
+ for i, n := 0, t2.Type.NumField(); i < n; i++ {
+ f := t2.Type.Field(i)
+ if f.PkgPath != "" { // unexported
+ continue
+ }
+ buf.s(" || ")
+ x.encOmitEmptyLine(f, varname2, buf)
+ }
+ //buf.s(")")
+ case reflect.Bool:
+ buf.s(varname2)
+ case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
+ buf.s("len(").s(varname2).s(") != 0")
+ default:
+ buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
+ }
+}
+
+func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
+ // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. )
+ // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
+
+ // if t === type currently running selfer on, do for all
+ ti := x.ti.get(rtid, t)
+ i := x.varsfx()
+ sepVarname := genTempVarPfx + "sep" + i
+ numfieldsvar := genTempVarPfx + "q" + i
+ ti2arrayvar := genTempVarPfx + "r" + i
+ struct2arrvar := genTempVarPfx + "2arr" + i
+
+ x.line(sepVarname + " := !z.EncBinary()")
+ x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
+ x.linef("_, _ = %s, %s", sepVarname, struct2arrvar)
+ x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray)
+
+ tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
+
+ // var nn int
+ // due to omitEmpty, we need to calculate the
+ // number of non-empty things we write out first.
+ // This is required as we need to pre-determine the size of the container,
+ // to support length-prefixing.
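+	// For example (illustrative only; "yy" is the temp-var prefix and "2" a generated suffix),
+	// the emitted bookkeeping looks roughly like:
+	//
+	//	var yyq2 = [3]bool{ // should field at this index be written?
+	//		x.A != "",     // A (omitempty string)
+	//		len(x.B) != 0, // B (omitempty slice)
+	//		true,          // C (no omitempty)
+	//	}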
+ if ti.anyOmitEmpty {
+ x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi))
+
+ for j, si := range tisfi {
+ _ = j
+ if !si.omitEmpty() {
+ // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName)
+ x.linef("true, // %s", si.fieldName)
+ // nn++
+ continue
+ }
+ var t2 reflect.StructField
+ var omitline genBuf
+ {
+ t2typ := t
+ varname3 := varname
+ // go through the loop, record the t2 field explicitly,
+ // and gather the omit line if embedded in pointers.
+ for ij, ix := range si.is {
+ if uint8(ij) == si.nis {
+ break
+ }
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(int(ix))
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ // do not include actual field in the omit line.
+ // that is done subsequently (right after - below).
+ if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr {
+ omitline.s(varname3).s(" != nil && ")
+ }
+ }
+ }
+ x.encOmitEmptyLine(t2, varname, &omitline)
+ x.linef("%s, // %s", omitline.v(), si.fieldName)
+ }
+ x.line("}")
+ x.linef("_ = %s", numfieldsvar)
+ }
+ // x.linef("var %snn%s int", genTempVarPfx, i)
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.linef("r.WriteArrayStart(%d)", len(tisfi))
+ x.linef("} else {") // if not ti.toArray
+ if ti.anyOmitEmpty {
+ // nn = 0
+ // x.linef("var %snn%s = %v", genTempVarPfx, i, nn)
+ x.linef("var %snn%s int", genTempVarPfx, i)
+ x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
+ x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i)
+ x.linef("%snn%s = %v", genTempVarPfx, i, 0)
+ } else {
+ x.linef("r.WriteMapStart(%d)", len(tisfi))
+ }
+ x.line("}") // close if not StructToArray
+
+ for j, si := range tisfi {
+ i := x.varsfx()
+ isNilVarName := genTempVarPfx + "n" + i
+ var labelUsed bool
+ var t2 reflect.StructField
+ {
+ t2typ := t
+ varname3 := varname
+ for ij, ix := range si.is {
+ if uint8(ij) == si.nis {
+ break
+ }
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(int(ix))
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ if t2typ.Kind() == reflect.Ptr {
+ if !labelUsed {
+ x.line("var " + isNilVarName + " bool")
+ }
+ x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
+ x.line("goto LABEL" + i)
+ x.line("}")
+ labelUsed = true
+ // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
+ }
+ }
+ // t2 = t.FieldByIndex(si.is)
+ }
+ if labelUsed {
+ x.line("LABEL" + i + ":")
+ }
+ // if the type of the field is a Selfer, or one of the ones
+
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
+ if labelUsed {
+ x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName)
+ }
+ x.line("r.WriteArrayElem()")
+ if si.omitEmpty() {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ if si.omitEmpty() {
+ x.linef("} else {")
+ x.encZero(t2.Type)
+ x.linef("}")
+ }
+ if labelUsed {
+ x.line("}")
+ }
+
+ x.linef("} else {") // if not ti.toArray
+
+ if si.omitEmpty() {
+ x.linef("if %s[%v] {", numfieldsvar, j)
+ }
+ x.line("r.WriteMapElemKey()")
+
+ // x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + ", `" + si.encName + "`)")
+ // emulate EncStructFieldKey
+ switch ti.keyType {
+ case valueTypeInt:
+ x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName)
+ case valueTypeUint:
+ x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName)
+ case valueTypeFloat:
+ x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName)
+ default: // string
+ x.linef("r.EncodeString(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName)
+ }
+ // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName)
+ x.line("r.WriteMapElemValue()")
+ if labelUsed {
+ x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ x.line("}")
+ } else {
+ x.encVar(varname+"."+t2.Name, t2.Type)
+ }
+ if si.omitEmpty() {
+ x.line("}")
+ }
+ x.linef("} ") // end if/else ti.toArray
+ }
+ x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
+ x.line("r.WriteArrayEnd()")
+ x.line("} else {")
+ x.line("r.WriteMapEnd()")
+ x.line("}")
+
+}
+
+func (x *genRunner) encListFallback(varname string, t reflect.Type) {
+ elemBytes := t.Elem().Kind() == reflect.Uint8
+ if t.AssignableTo(uint8SliceTyp) {
+ x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, varname)
+ return
+ }
+ if t.Kind() == reflect.Array && elemBytes {
+ x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, ((*[%d]byte)(%s))[:])", x.xs, t.Len(), varname)
+ return
+ }
+ i := x.varsfx()
+ if t.Kind() == reflect.Chan {
+ type ts struct {
+ Label, Chan, Slice, Sfx string
+ }
+ tm, err := template.New("").Parse(genEncChanTmpl)
+ if err != nil {
+ panic(err)
+ }
+ x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+ x.linef("var sch%s []%s", i, x.genTypeName(t.Elem()))
+ err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i})
+ if err != nil {
+ panic(err)
+ }
+ // x.linef("%s = sch%s", varname, i)
+ if elemBytes {
+ x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, "sch"+i)
+ x.line("}")
+ return
+ }
+ varname = "sch" + i
+ }
+
+ x.line("r.WriteArrayStart(len(" + varname + "))")
+ x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
+ x.line("r.WriteArrayElem()")
+
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.line("r.WriteArrayEnd()")
+ if t.Kind() == reflect.Chan {
+ x.line("}")
+ }
+}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+ // TODO: expand this to handle canonical.
+ i := x.varsfx()
+ x.line("r.WriteMapStart(len(" + varname + "))")
+ x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ x.line("r.WriteMapElemKey()")
+ x.encVar(genTempVarPfx+"k"+i, t.Key())
+ x.line("r.WriteMapElemValue()")
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.line("r.WriteMapEnd()")
+}
+
+func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo,
+ newbuf, nilbuf *genBuf) (t2 reflect.StructField) {
+	// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
+ // t2 = t.FieldByIndex(si.is)
+ t2typ := t
+ varname3 := varname
+ t2kind := t2typ.Kind()
+ var nilbufed bool
+ if si != nil {
+ for ij, ix := range si.is {
+ if uint8(ij) == si.nis {
+ break
+ }
+ for t2typ.Kind() == reflect.Ptr {
+ t2typ = t2typ.Elem()
+ }
+ t2 = t2typ.Field(int(ix))
+ t2typ = t2.Type
+ varname3 = varname3 + "." + t2.Name
+ t2kind = t2typ.Kind()
+ if t2kind != reflect.Ptr {
+ continue
+ }
+ if newbuf != nil {
+ newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem()))
+ }
+ if nilbuf != nil {
+ if !nilbufed {
+ nilbuf.s("if true")
+ nilbufed = true
+ }
+ nilbuf.s(" && ").s(varname3).s(" != nil")
+ }
+ }
+ }
+ // if t2typ.Kind() == reflect.Ptr {
+ // varname3 = varname3 + t2.Name
+ // }
+ if nilbuf != nil {
+ if nilbufed {
+ nilbuf.s(" { ")
+ }
+ if nilvar != "" {
+ nilbuf.s(nilvar).s(" = true")
+ } else if tk := t2typ.Kind(); tk == reflect.Ptr {
+ if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 {
+ nilbuf.s(varname3).s(" = nil")
+ } else {
+ nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem()))
+ }
+ } else {
+ nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ))
+ }
+ if nilbufed {
+ nilbuf.s("}")
+ }
+ }
+ return t2
+}
+
+// decVarMain generates the main decode body for a variable called varname, of type t
+func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) {
+ // We only encode as nil if a nillable value.
+ // This removes some of the wasted checks for TryDecodeAsNil.
+ // We need to think about this more, to see what happens if omitempty, etc
+ // cause a nil value to be stored when something is expected.
+ // This could happen when decoding from a struct encoded as an array.
+ // For that, decVar should be called with canNil=true, to force true as its value.
+ var varname2 string
+ if t.Kind() != reflect.Ptr {
+ if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) {
+ x.dec(varname, t, false)
+ }
+ } else {
+ if checkNotNil {
+ x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
+ }
+ // Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
+ // There's a chance of a **T in here which is nil.
+ var ptrPfx string
+ for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
+ ptrPfx += "*"
+ if checkNotNil {
+ x.linef("if %s%s == nil { %s%s = new(%s)}",
+ ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
+ }
+ }
+ // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it.
+
+ if ptrPfx == "" {
+ x.dec(varname, t, true)
+ } else {
+ varname2 = genTempVarPfx + "z" + rand
+ x.line(varname2 + " := " + ptrPfx + varname)
+ x.dec(varname2, t, true)
+ }
+ }
+}
+
+// decVar takes a variable called varname, of type t
+func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) {
+ i := x.varsfx()
+
+ // We only encode as nil if a nillable value.
+ // This removes some of the wasted checks for TryDecodeAsNil.
+ // We need to think about this more, to see what happens if omitempty, etc
+ // cause a nil value to be stored when something is expected.
+ // This could happen when decoding from a struct encoded as an array.
+ // For that, decVar should be called with canNil=true, to force true as its value.
+
+ if !canBeNil {
+ canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
+ }
+
+ if canBeNil {
+ var buf genBuf
+ x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf)
+ x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf)
+ } else {
+ x.line("// cannot be nil")
+ }
+
+ x.decVarMain(varname, i, t, checkNotNil)
+
+ if canBeNil {
+ x.line("} ")
+ }
+}
+
+// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true.
+// t is always a basetype (i.e. not of kind reflect.Ptr).
+func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
+ // assumptions:
+ // - the varname is to a pointer already. No need to take address of it
+ // - t is always a baseType T (not a *T, etc).
+ rtid := rt2id(t)
+ ti2 := x.ti.get(rtid, t)
+ // tptr := reflect.PtrTo(t)
+ if x.checkForSelfer(t, varname) {
+ if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ if _, ok := x.td[rtid]; ok {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.line(varname + ".CodecDecodeSelf(d)")
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.td[rtid] = true
+ rtidAdded = true
+ }
+
+ // check if
+ // - type is time.Time, Raw, RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+
+ mi := x.varsfx()
+ // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
+ // x.linef("_ = %sm%s", genTempVarPfx, mi)
+ x.line("if false {") //start if block
+ defer func() { x.line("}") }() //end if block
+
+ var ptrPfx, addrPfx string
+ if isptr {
+ ptrPfx = "*"
+ } else {
+ addrPfx = "&"
+ }
+ if t == timeTyp {
+ x.linef("} else { %s%v = r.DecodeTime()", ptrPfx, varname)
+ return
+ }
+ if t == rawTyp {
+ x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname)
+ return
+ }
+
+ if t == rawExtTyp {
+ x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname)
+ return
+ }
+
+ // only check for extensions if the type is named, and has a packagePath.
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+ // first check if extensions are configured, before doing the interface conversion
+ // x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+ yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+ x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy)
+ }
+
+ if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+ x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname)
+ }
+ if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+ x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname)
+ } else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+ x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname)
+ }
+
+ x.line("} else {")
+
+ if x.decTryAssignPrimitive(varname, t, isptr) {
+ return
+ }
+
+ switch t.Kind() {
+ case reflect.Array, reflect.Chan:
+ x.xtraSM(varname, t, false, isptr)
+ case reflect.Slice:
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write decode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Decoder.decode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)",
+ ptrPfx, varname, ptrPfx, ptrPfx, varname)
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+ } else {
+ x.xtraSM(varname, t, false, isptr)
+ // x.decListFallback(varname, rtid, false, t)
+ }
+ case reflect.Map:
+ // if a known fastpath map, call dedicated function
+ // else write decode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Decoder.decode(XXX) on it.
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+ } else {
+ x.xtraSM(varname, t, false, isptr)
+ // x.decMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if inlist {
+ // no need to create temp variable if isptr, or x.F or x[F]
+ if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 {
+ x.decStruct(varname, rtid, t)
+ } else {
+ varname2 := genTempVarPfx + "j" + mi
+ x.line(varname2 + " := &" + varname)
+ x.decStruct(varname2, rtid, t)
+ }
+ } else {
+ // delete(x.td, rtid)
+ x.line("z.DecFallback(" + addrPfx + varname + ", false)")
+ }
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.DecFallback(" + addrPfx + varname + ", true)")
+ }
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) {
+ // This should only be used for exact primitives (i.e. un-named types).
+ // Named types may be implementations of Selfer, Unmarshaler, etc.
+ // They should be handled by dec(...)
+
+ var ptr string
+ if isptr {
+ ptr = "*"
+ }
+ switch t.Kind() {
+ case reflect.Int:
+ x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
+ case reflect.Int8:
+ x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t))
+ case reflect.Int16:
+ x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t))
+ case reflect.Int32:
+ x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t))
+ case reflect.Int64:
+ x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t))
+
+ case reflect.Uint:
+ x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
+ case reflect.Uint8:
+ x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t))
+ case reflect.Uint16:
+ x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t))
+ case reflect.Uint32:
+ x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t))
+ case reflect.Uint64:
+ x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t))
+ case reflect.Uintptr:
+ x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
+
+ case reflect.Float32:
+ x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t))
+ case reflect.Float64:
+ x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t))
+
+ case reflect.Bool:
+ x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t))
+ case reflect.String:
+ x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t))
+ default:
+ return false
+ }
+ return true
+}
+
+func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
+ if t.AssignableTo(uint8SliceTyp) {
+ x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)")
+ return
+ }
+ if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
+ x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname)
+ return
+ }
+ type tstruc struct {
+ TempVar string
+ Rand string
+ Varname string
+ CTyp string
+ Typ string
+ Immutable bool
+ Size int
+ }
+ telem := t.Elem()
+ ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
+
+ funcs := make(template.FuncMap)
+
+ funcs["decLineVar"] = func(varname string) string {
+ x.decVar(varname, "", telem, false, true)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+ funcs["zero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["isArray"] = func() bool {
+ return t.Kind() == reflect.Array
+ }
+ funcs["isSlice"] = func() bool {
+ return t.Kind() == reflect.Slice
+ }
+ funcs["isChan"] = func() bool {
+ return t.Kind() == reflect.Chan
+ }
+ tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
+ type tstruc struct {
+ TempVar string
+ Sfx string
+ Rand string
+ Varname string
+ KTyp string
+ Typ string
+ Size int
+ }
+ telem := t.Elem()
+ tkey := t.Key()
+ ts := tstruc{
+ genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
+ x.genTypeName(telem), int(telem.Size() + tkey.Size()),
+ }
+
+ funcs := make(template.FuncMap)
+ funcs["decElemZero"] = func() string {
+ return x.genZeroValueR(telem)
+ }
+ funcs["decElemKindImmutable"] = func() bool {
+ return genIsImmutable(telem)
+ }
+ funcs["decElemKindPtr"] = func() bool {
+ return telem.Kind() == reflect.Ptr
+ }
+ funcs["decElemKindIntf"] = func() bool {
+ return telem.Kind() == reflect.Interface
+ }
+ funcs["decLineVarK"] = func(varname string) string {
+ x.decVar(varname, "", tkey, false, true)
+ return ""
+ }
+ funcs["decLineVar"] = func(varname, decodedNilVarname string) string {
+ x.decVar(varname, decodedNilVarname, telem, false, true)
+ return ""
+ }
+ funcs["var"] = func(s string) string {
+ return ts.TempVar + s + ts.Rand
+ }
+
+ tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
+ if err != nil {
+ panic(err)
+ }
+ if err = tm.Execute(x.w, &ts); err != nil {
+ panic(err)
+ }
+}
+
+func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
+ x.line("switch (" + kName + ") {")
+ var newbuf, nilbuf genBuf
+ for _, si := range tisfi {
+ x.line("case \"" + si.encName + "\":")
+ newbuf.reset()
+ nilbuf.reset()
+ t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
+ x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
+ x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false)
+ x.line("}")
+ }
+ x.line("default:")
+ // pass the slice here, so that the string will not escape, and maybe save allocation
+ x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
+ x.line("} // end switch " + kName)
+}
+
+func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
+ tpfx := genTempVarPfx
+ ti := x.ti.get(rtid, t)
+ i := x.varsfx()
+ kName := tpfx + "s" + i
+
+ switch style {
+ case genStructMapStyleLenPrefix:
+ x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
+ case genStructMapStyleCheckBreak:
+ x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
+ default: // 0, otherwise.
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
+ x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
+ x.line("} else { if r.CheckBreak() { break }; }")
+ }
+ x.line("r.ReadMapElemKey()")
+
+ // emulate decstructfieldkey
+ switch ti.keyType {
+ case valueTypeInt:
+ x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName)
+ case valueTypeUint:
+ x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName)
+ case valueTypeFloat:
+ x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName)
+ default: // string
+ x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName)
+ }
+ // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs)
+
+ x.line("r.ReadMapElemValue()")
+ x.decStructMapSwitch(kName, varname, rtid, t)
+
+ x.line("} // end for " + tpfx + "j" + i)
+ x.line("r.ReadMapEnd()")
+}
+
+func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
+ tpfx := genTempVarPfx
+ i := x.varsfx()
+ ti := x.ti.get(rtid, t)
+ tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing.
+ x.linef("var %sj%s int", tpfx, i)
+ x.linef("var %sb%s bool", tpfx, i) // break
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
+ var newbuf, nilbuf genBuf
+ for _, si := range tisfi {
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString)
+ x.line("r.ReadArrayElem()")
+ newbuf.reset()
+ nilbuf.reset()
+ t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
+ x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
+ x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false)
+ x.line("}")
+ }
+ // read remaining values and throw away.
+ x.line("for {")
+ x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
+ tpfx, i, tpfx, i, tpfx, i,
+ tpfx, i, lenvarname, tpfx, i)
+ x.linef("if %sb%s { break }", tpfx, i)
+ x.line("r.ReadArrayElem()")
+ x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
+ x.line("}")
+ x.line("r.ReadArrayEnd()")
+}
+
+func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
+ // varname MUST be a ptr, or a struct field or a slice element.
+ i := x.varsfx()
+ x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
+ x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.line("r.ReadMapEnd()")
+ if genUseOneFunctionForDecStructMap {
+ x.line("} else { ")
+ x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i)
+ } else {
+ x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
+ x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
+ x.line("} else {")
+ x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
+ }
+ x.line("}")
+
+ // else if container is array
+ x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
+ x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
+ x.linef("if %sl%s == 0 {", genTempVarPfx, i)
+ x.line("r.ReadArrayEnd()")
+ x.line("} else { ")
+ x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i)
+ x.line("}")
+ // else panic
+ x.line("} else { ")
+ x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")")
+ x.line("} ")
+}
+
+// --------
+
+type genV struct {
+ // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice/array
+ MapKey string
+ Elem string
+ Primitive string
+ Size int
+}
+
+func (x *genRunner) newGenV(t reflect.Type) (v genV) {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array:
+ te := t.Elem()
+ v.Elem = x.genTypeName(te)
+ v.Size = int(te.Size())
+ case reflect.Map:
+ te, tk := t.Elem(), t.Key()
+ v.Elem = x.genTypeName(te)
+ v.MapKey = x.genTypeName(tk)
+ v.Size = int(te.Size() + tk.Size())
+ default:
+ panic("unexpected type for newGenV. Requires map or slice type")
+ }
+ return
+}
+
+func (x *genV) MethodNamePfx(prefix string, prim bool) string {
+ var name []byte
+ if prefix != "" {
+ name = append(name, prefix...)
+ }
+ if prim {
+ name = append(name, genTitleCaseName(x.Primitive)...)
+ } else {
+ if x.MapKey == "" {
+ name = append(name, "Slice"...)
+ } else {
+ name = append(name, "Map"...)
+ name = append(name, genTitleCaseName(x.MapKey)...)
+ }
+ name = append(name, genTitleCaseName(x.Elem)...)
+ }
+ return string(name)
+
+}
+
+// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+ s = t.PkgPath()
+ if genCheckVendor {
+ // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7
+ s = genStripVendor(s)
+ }
+ return
+}
+
+// A go identifier is (letter|_)[letter|number|_]*
+func genGoIdentifier(s string, checkFirstChar bool) string {
+ b := make([]byte, 0, len(s))
+ t := make([]byte, 4)
+ var n int
+ for i, r := range s {
+ if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+ b = append(b, '_')
+ }
+ // r must be unicode_letter, unicode_digit or _
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ n = utf8.EncodeRune(t, r)
+ b = append(b, t[:n]...)
+ } else {
+ b = append(b, '_')
+ }
+ }
+ return string(b)
+}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+func genTitleCaseName(s string) string {
+ switch s {
+ case "interface{}", "interface {}":
+ return "Intf"
+ default:
+ return strings.ToUpper(s[0:1]) + s[1:]
+ }
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "Ptrto"
+ t = t.Elem()
+ }
+ tstr := t.String()
+ if tn := t.Name(); tn != "" {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ return ptrPfx + tn
+ } else {
+ if genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Slice:
+ return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+ case reflect.Array:
+ return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Chan:
+ var cx string
+ switch t.ChanDir() {
+ case reflect.SendDir:
+ cx = "ChanSend"
+ case reflect.RecvDir:
+ cx = "ChanRecv"
+ default:
+ cx = "Chan"
+ }
+ return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+ default:
+ if t == intfTyp {
+ return ptrPfx + "Interface"
+ } else {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ if t.Name() != "" {
+ return ptrPfx + t.Name()
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ } else {
+ // best way to get the package name inclusive
+ // return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+ if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ }
+}
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+ len2 := genBase64enc.EncodedLen(len(tstr))
+ bufx := make([]byte, len2)
+ genBase64enc.Encode(bufx, []byte(tstr))
+ for i := len2 - 1; i >= 0; i-- {
+ if bufx[i] == '=' {
+ len2--
+ } else {
+ break
+ }
+ }
+ return string(bufx[:len2])
+}
+
+func genIsImmutable(t reflect.Type) (v bool) {
+ return isImmutableKind(t.Kind())
+}
+
+type genInternal struct {
+ Version int
+ Values []genV
+}
+
+func (x genInternal) FastpathLen() (l int) {
+ for _, v := range x.Values {
+ if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") {
+ l++
+ }
+ }
+ return
+}
+
+func genInternalZeroValue(s string) string {
+ switch s {
+ case "interface{}", "interface {}":
+ return "nil"
+ case "bool":
+ return "false"
+ case "string":
+ return `""`
+ default:
+ return "0"
+ }
+}
+
+var genInternalNonZeroValueIdx [5]uint64
+var genInternalNonZeroValueStrs = [2][5]string{
+ {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"},
+ {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"},
+}
+
+func genInternalNonZeroValue(s string) string {
+ switch s {
+ case "interface{}", "interface {}":
+ genInternalNonZeroValueIdx[0]++
+ return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity
+ case "bool":
+ genInternalNonZeroValueIdx[1]++
+ return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1]
+ case "string":
+ genInternalNonZeroValueIdx[2]++
+ return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2]
+ case "float32", "float64", "float", "double":
+ genInternalNonZeroValueIdx[3]++
+ return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3]
+ default:
+ genInternalNonZeroValueIdx[4]++
+ return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4]
+ }
+}
+
+func genInternalEncCommandAsString(s string, vname string) string {
+ switch s {
+ case "uint", "uint8", "uint16", "uint32", "uint64":
+ return "ee.EncodeUint(uint64(" + vname + "))"
+ case "int", "int8", "int16", "int32", "int64":
+ return "ee.EncodeInt(int64(" + vname + "))"
+ case "string":
+ return "ee.EncodeString(cUTF8, " + vname + ")"
+ case "float32":
+ return "ee.EncodeFloat32(" + vname + ")"
+ case "float64":
+ return "ee.EncodeFloat64(" + vname + ")"
+ case "bool":
+ return "ee.EncodeBool(" + vname + ")"
+ // case "symbol":
+ // return "ee.EncodeSymbol(" + vname + ")"
+ default:
+ return "e.encode(" + vname + ")"
+ }
+}
+
+func genInternalDecCommandAsString(s string) string {
+ switch s {
+ case "uint":
+ return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))"
+ case "uint8":
+ return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))"
+ case "uint16":
+ return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))"
+ case "uint32":
+ return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))"
+ case "uint64":
+ return "dd.DecodeUint64()"
+ case "uintptr":
+ return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))"
+ case "int":
+ return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))"
+ case "int8":
+ return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))"
+ case "int16":
+ return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))"
+ case "int32":
+ return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))"
+ case "int64":
+ return "dd.DecodeInt64()"
+
+ case "string":
+ return "dd.DecodeString()"
+ case "float32":
+ return "float32(chkOvf.Float32V(dd.DecodeFloat64()))"
+ case "float64":
+ return "dd.DecodeFloat64()"
+ case "bool":
+ return "dd.DecodeBool()"
+ default:
+ panic(errors.New("gen internal: unknown type for decode: " + s))
+ }
+}
+
+func genInternalSortType(s string, elem bool) string {
+ for _, v := range [...]string{"int", "uint", "float", "bool", "string"} {
+ if strings.HasPrefix(s, v) {
+ if elem {
+ if v == "int" || v == "uint" || v == "float" {
+ return v + "64"
+ } else {
+ return v
+ }
+ }
+ return v + "Slice"
+ }
+ }
+ panic("sorttype: unexpected type: " + s)
+}
+
+func genStripVendor(s string) string {
+ // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+ // if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+ const vendorStart = "vendor/"
+ const vendorInline = "/vendor/"
+ if i := strings.LastIndex(s, vendorInline); i >= 0 {
+ s = s[i+len(vendorInline):]
+ } else if strings.HasPrefix(s, vendorStart) {
+ s = s[len(vendorStart):]
+ }
+ return s
+}
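+
+// Worked example of the stripping above (hypothetical import paths, for illustration only):
+//
+// genStripVendor("myapp/vendor/github.com/ugorji/go/codec") // -> "github.com/ugorji/go/codec"
+// genStripVendor("vendor/golang.org/x/sys/unix") // -> "golang.org/x/sys/unix"
+// genStripVendor("github.com/ugorji/go/codec") // -> returned unchanged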
+
+// var genInternalMu sync.Mutex
+var genInternalV = genInternal{Version: genVersion}
+var genInternalTmplFuncs template.FuncMap
+var genInternalOnce sync.Once
+
+func genInternalInit() {
+ types := [...]string{
+ "interface{}",
+ "string",
+ "float32",
+ "float64",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "bool",
+ }
+ // keep as slice, so it is in specific iteration order.
+ // Initial order was uint64, string, interface{}, int, int64
+ mapvaltypes := [...]string{
+ "interface{}",
+ "string",
+ "uint",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "uintptr",
+ "int",
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float32",
+ "float64",
+ "bool",
+ }
+ wordSizeBytes := int(intBitsize) / 8
+
+ mapvaltypes2 := map[string]int{
+ "interface{}": 2 * wordSizeBytes,
+ "string": 2 * wordSizeBytes,
+ "uint": 1 * wordSizeBytes,
+ "uint8": 1,
+ "uint16": 2,
+ "uint32": 4,
+ "uint64": 8,
+ "uintptr": 1 * wordSizeBytes,
+ "int": 1 * wordSizeBytes,
+ "int8": 1,
+ "int16": 2,
+ "int32": 4,
+ "int64": 8,
+ "float32": 4,
+ "float64": 8,
+ "bool": 1,
+ }
+ var gt = genInternal{Version: genVersion}
+
+ // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+ for _, s := range types {
+ gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
+ // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
+ // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+ // }
+ gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
+ if _, ok := mapvaltypes2[s]; !ok {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
+ }
+ for _, ms := range mapvaltypes {
+ gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
+ }
+ }
+
+ funcs := make(template.FuncMap)
+ // funcs["haspfx"] = strings.HasPrefix
+ funcs["encmd"] = genInternalEncCommandAsString
+ funcs["decmd"] = genInternalDecCommandAsString
+ funcs["zerocmd"] = genInternalZeroValue
+ funcs["nonzerocmd"] = genInternalNonZeroValue
+ funcs["hasprefix"] = strings.HasPrefix
+ funcs["sorttype"] = genInternalSortType
+
+ genInternalV = gt
+ genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It is run by the program author alone.
+// Unfortunately, it has to be exported so that it can be called from a command line tool.
+// *** DO NOT USE ***
+func genInternalGoFile(r io.Reader, w io.Writer) (err error) {
+ genInternalOnce.Do(genInternalInit)
+
+ gt := genInternalV
+
+ t := template.New("").Funcs(genInternalTmplFuncs)
+
+ tmplstr, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+
+ if t, err = t.Parse(string(tmplstr)); err != nil {
+ return
+ }
+
+ var out bytes.Buffer
+ err = t.Execute(&out, gt)
+ if err != nil {
+ return
+ }
+
+ bout, err := format.Source(out.Bytes())
+ if err != nil {
+ w.Write(out.Bytes()) // write out if error, so we can still see.
+ // w.Write(bout) // write out if error, as much as possible, so we can still see.
+ return
+ }
+ w.Write(bout)
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
new file mode 100644
index 0000000..9ddbe20
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = true
+
+func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
+ return reflect.ArrayOf(count, elem)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
new file mode 100644
index 0000000..c5fcd66
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = false
+
+func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
+ panic("codec: reflect.ArrayOf unsupported in this go version")
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go
new file mode 100644
index 0000000..bc39d6b
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.9
+
+package codec
+
+import "reflect"
+
+func makeMapReflect(t reflect.Type, size int) reflect.Value {
+ if size < 0 {
+ return reflect.MakeMapWithSize(t, 4)
+ }
+ return reflect.MakeMapWithSize(t, size)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go
new file mode 100644
index 0000000..cde4cd3
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.9
+
+package codec
+
+import "reflect"
+
+func makeMapReflect(t reflect.Type, size int) reflect.Value {
+ return reflect.MakeMap(t)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go
new file mode 100644
index 0000000..794133a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.10
+
+package codec
+
+const allowSetUnexportedEmbeddedPtr = false
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go
new file mode 100644
index 0000000..fd92ede
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.10
+
+package codec
+
+const allowSetUnexportedEmbeddedPtr = true
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
new file mode 100644
index 0000000..8debfa6
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.4
+
+package codec
+
+// This codec package will only work for go1.4 and above.
+// This is for the following reasons:
+// - go 1.4 was released in 2014
+// - go runtime is written fully in go
+// - interface only holds pointers
+// - reflect.Value is stabilized as 3 words
+
+func init() {
+ panic("codec: go 1.3 and below are not supported")
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
new file mode 100644
index 0000000..0f1bb01
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5,!go1.6
+
+package codec
+
+import "os"
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
new file mode 100644
index 0000000..2fb4b05
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.6,!go1.7
+
+package codec
+
+import "os"
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
new file mode 100644
index 0000000..c5b8155
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.7
+
+package codec
+
+const genCheckVendor = true
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
new file mode 100644
index 0000000..837cf24
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.5
+
+package codec
+
+var genCheckVendor = false
diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 0000000..bd29895
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,2414 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and gives the best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate through
+// the container. That decision is based on whether the container is text-based (with
+// separators) or binary (without separators). If binary, we do not even call the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to iterate
+// through the containers. That decision is based on whether the stream contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+// - If struct, update fields from stream into fields of struct.
+// If field in stream not found in struct, handle appropriately (based on option).
+// If a struct field has no corresponding value in the stream, leave it AS IS.
+// If nil in stream, set value to nil/zero value.
+// - If map, update map from stream.
+// If the stream value is NIL, set the map to nil.
+// - If slice, try to update up to the length of the array in the stream.
+// If the container len is less than the stream array length,
+// and the container cannot be expanded, it is handled (based on option).
+// This means you can decode a 4-element stream array into a 1-element array.
+//
+// ------------------------------------
+// On encode, the user can specify omitEmpty. This means that the value will be omitted
+// if it is the zero value. A problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+// map
+// - first collect all keys (e.g. in k1)
+// - for each key in stream, mark k1 that the key should not be removed
+// - after updating map, do second pass and call delete for all keys in k1 which are not marked
+// struct:
+// - for each field, track the *typeInfo s1
+// - iterate through all s1, and for each one not marked, set value to zero
+// - this involves checking the possible anonymous fields which are nil ptrs.
+// too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder.
+// - once it has its err field set, it cannot be used again.
+// - panicking will be optional, controlled by a const flag.
+// - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ scratchByteArrayLen = 32
+ // initCollectionCap = 16 // 32 is defensive. 16 is preferred.
+
+ // Support encoding.(Binary|Text)(Unm|M)arshaler.
+ // This constant flag will enable or disable it.
+ supportMarshalInterfaces = true
+
+ // for debugging, set this to false, to catch panic traces.
+ // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+ recoverPanicToErr = true
+
+ // arrayCacheLen is the length of the cache used in encoder or decoder for
+ // allowing zero-alloc initialization.
+ arrayCacheLen = 8
+
+ // size of the cacheline: defaulting to value for archs: amd64, arm64, 386
+ // should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
+ cacheLineSize = 64
+
+ wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
+ wordSize = wordSizeBits / 8
+
+ maxLevelsEmbedding = 15 // use this, so structFieldInfo fits into 8 bytes
+)
+
+var (
+ oneByteArr = [1]byte{0}
+ zeroByteSlice = oneByteArr[:0:0]
+)
+
+var refBitset bitset32
+var pool pooler
+var panicv panicHdl
+
+func init() {
+ pool.init()
+
+ refBitset.set(byte(reflect.Map))
+ refBitset.set(byte(reflect.Ptr))
+ refBitset.set(byte(reflect.Func))
+ refBitset.set(byte(reflect.Chan))
+}
+
+type charEncoding uint8
+
+const (
+ cRAW charEncoding = iota
+ cUTF8
+ cUTF16LE
+ cUTF16BE
+ cUTF32LE
+ cUTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTime
+ valueTypeExt
+
+ // valueTypeInvalid = 0xff
+)
+
+var valueTypeStrings = [...]string{
+ "Unset",
+ "Nil",
+ "Int",
+ "Uint",
+ "Float",
+ "Bool",
+ "String",
+ "Symbol",
+ "Bytes",
+ "Map",
+ "Array",
+ "Timestamp",
+ "Ext",
+}
+
+func (x valueType) String() string {
+ if int(x) < len(valueTypeStrings) {
+ return valueTypeStrings[x]
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+
+type seqType uint8
+
+const (
+ _ seqType = iota
+ seqTypeArray
+ seqTypeSlice
+ seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart calls already cover these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart // slot left open, since Driver method already covers it
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart // slot left open, since Driver methods already cover it
+ containerArrayElem
+ containerArrayEnd
+)
+
+// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+// type sfiIdx struct {
+// name string
+// index int
+// }
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// - even Java's PMD rules set TooManyFields threshold to 15.
+// However, go has embedded fields, which should be regarded as
+// top level, allowing structs to possibly double or triple.
+// In addition, we don't want to keep creating transient arrays,
+// especially for the sfi index tracking, and the evtypes tracking.
+//
+// So - try to keep typeInfoLoadArray within 2K bytes
+const (
+ typeInfoLoadArraySfisLen = 16
+ typeInfoLoadArraySfiidxLen = 8 * 112
+ typeInfoLoadArrayEtypesLen = 12
+ typeInfoLoadArrayBLen = 8 * 4
+)
+
+type typeInfoLoad struct {
+ // fNames []string
+ // encNames []string
+ etypes []uintptr
+ sfis []structFieldInfo
+}
+
+type typeInfoLoadArray struct {
+ // fNames [typeInfoLoadArrayLen]string
+ // encNames [typeInfoLoadArrayLen]string
+ sfis [typeInfoLoadArraySfisLen]structFieldInfo
+ sfiidx [typeInfoLoadArraySfiidxLen]byte
+ etypes [typeInfoLoadArrayEtypesLen]uintptr
+ b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+
+type jsonMarshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+type isZeroer interface {
+ IsZero() bool
+}
+
+// type byteAccepter func(byte) bool
+
+var (
+ bigen = binary.BigEndian
+ structInfoFieldName = "_struct"
+
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
+
+ reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
+
+ stringTyp = reflect.TypeOf("")
+ timeTyp = reflect.TypeOf(time.Time{})
+ rawExtTyp = reflect.TypeOf(RawExt{})
+ rawTyp = reflect.TypeOf(Raw{})
+ uintptrTyp = reflect.TypeOf(uintptr(0))
+ uint8Typ = reflect.TypeOf(uint8(0))
+ uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+ uintTyp = reflect.TypeOf(uint(0))
+ intTyp = reflect.TypeOf(int(0))
+
+ mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+ binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+ textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+ jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+ jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+
+ selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
+ iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+ uint8TypId = rt2id(uint8Typ)
+ uint8SliceTypId = rt2id(uint8SliceTyp)
+ rawExtTypId = rt2id(rawExtTyp)
+ rawTypId = rt2id(rawTyp)
+ intfTypId = rt2id(intfTyp)
+ timeTypId = rt2id(timeTyp)
+ stringTypId = rt2id(stringTyp)
+
+ mapStrIntfTypId = rt2id(mapStrIntfTyp)
+ mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
+ intfSliceTypId = rt2id(intfSliceTyp)
+ // mapBySliceTypId = rt2id(mapBySliceTyp)
+
+ intBitsize = uint8(intTyp.Bits())
+ uintBitsize = uint8(uintTyp.Bits())
+
+ bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ chkOvf checkOverflow
+
+ errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo")
+)
+
+var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
+var immutableKindsSet = [32]bool{
+ // reflect.Invalid: ,
+ reflect.Bool: true,
+ reflect.Int: true,
+ reflect.Int8: true,
+ reflect.Int16: true,
+ reflect.Int32: true,
+ reflect.Int64: true,
+ reflect.Uint: true,
+ reflect.Uint8: true,
+ reflect.Uint16: true,
+ reflect.Uint32: true,
+ reflect.Uint64: true,
+ reflect.Uintptr: true,
+ reflect.Float32: true,
+ reflect.Float64: true,
+ reflect.Complex64: true,
+ reflect.Complex128: true,
+ // reflect.Array
+ // reflect.Chan
+ // reflect.Func: true,
+ // reflect.Interface
+ // reflect.Map
+ // reflect.Ptr
+ // reflect.Slice
+ reflect.String: true,
+ // reflect.Struct
+ // reflect.UnsafePointer
+}
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check whether the next set of bytes
+// represent nil, and if so, we just set the value to nil.
+type Selfer interface {
+ CodecEncodeSelf(*Encoder)
+ CodecDecodeSelf(*Decoder)
+}
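+
+// Illustrative sketch (not part of the upstream API): a struct type can satisfy Selfer
+// by delegating to the Encoder/Decoder with a simpler representation. The point type
+// below is hypothetical; only Encoder.MustEncode and Decoder.MustDecode from this
+// package are assumed.
+//
+// type point struct{ X, Y int }
+//
+// func (p *point) CodecEncodeSelf(e *Encoder) { e.MustEncode([2]int{p.X, p.Y}) }
+//
+// func (p *point) CodecDecodeSelf(d *Decoder) {
+// var a [2]int
+// d.MustDecode(&a)
+// p.X, p.Y = a[0], a[1]
+// }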
+
+// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// Example usage:
+// type T1 []string // or []int or []Point or any other "slice" type
+// func (T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
+// type T2 struct { KeyValues T1 }
+//
+// var kvs = []string{"one", "1", "two", "2", "three", "3"}
+// var v2 = T2{ KeyValues: T1(kvs) }
+// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
+//
+// The support of MapBySlice affords the following:
+// - A slice type which implements MapBySlice will be encoded as a map
+// - A slice can be decoded from a map in the stream
+// - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
+type MapBySlice interface {
+ MapBySlice()
+}
+
+// BasicHandle encapsulates the common options and extension functions.
+//
+// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+type BasicHandle struct {
+ // BasicHandle is always a part of a different type.
+ // It doesn't have to fit into its own cache lines.
+
+ // TypeInfos is used to get the type info for any type.
+ //
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+ TypeInfos *TypeInfos
+
+ // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
+ // If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
+ // These slices are used all the time, so keep as slices (not pointers).
+
+ extHandle
+
+ intf2impls
+
+ RPCOptions
+
+ // ---- cache line
+
+ DecodeOptions
+
+ // ---- cache line
+
+ EncodeOptions
+
+ // noBuiltInTypeChecker
+}
+
+func (x *BasicHandle) getBasicHandle() *BasicHandle {
+ return x
+}
+
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ if x.TypeInfos == nil {
+ return defTypeInfos.get(rtid, rt)
+ }
+ return x.TypeInfos.get(rtid, rt)
+}
+
+// Handle is the interface for a specific encoding format.
+//
+// Typically, a Handle is pre-configured before first time use,
+// and not modified while in use. Such a pre-configured Handle
+// is safe for concurrent access.
+type Handle interface {
+ Name() string
+ getBasicHandle() *BasicHandle
+ recreateEncDriver(encDriver) bool
+ newEncDriver(w *Encoder) encDriver
+ newDecDriver(r *Decoder) decDriver
+ isBinary() bool
+ hasElemSeparators() bool
+ // IsBuiltinType(rtid uintptr) bool
+}
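+
+// Minimal usage sketch (assumes JsonHandle, NewEncoderBytes and NewDecoderBytes,
+// which are defined elsewhere in this package): configure a Handle once, treat it
+// as read-only afterwards, and create short-lived Encoders/Decoders from it.
+//
+// var jh JsonHandle // pre-configured once, then shared across goroutines
+//
+// func roundTrip(in, out interface{}) error {
+// var b []byte
+// if err := NewEncoderBytes(&b, &jh).Encode(in); err != nil {
+// return err
+// }
+// return NewDecoderBytes(b, &jh).Decode(out)
+// }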
+
+// Raw represents raw formatted bytes.
+// We "blindly" store it during encode and retrieve the raw bytes during decode.
+// Note: it is dangerous during encode, so we may gate the behaviour
+// behind an Encode flag which must be explicitly set.
+type Raw []byte
+
+// RawExt represents raw unprocessed extension data.
+// Some codecs will decode extension data as a *RawExt
+// if there is no registered extension for the tag.
+//
+// Only one of Data or Value is nil.
+// If Data is nil, then the content of the RawExt is in the Value.
+type RawExt struct {
+ Tag uint64
+ // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
+ // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
+ Data []byte
+ // Value represents the extension, if Data is nil.
+ // Value is used by codecs (e.g. cbor, json) which leverage the format to do
+ // custom serialization of the types.
+ Value interface{}
+}
+
+// BytesExt handles custom (de)serialization of types to/from []byte.
+// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
+type BytesExt interface {
+ // WriteExt converts a value to a []byte.
+ //
+ // Note: v is a pointer iff the registered extension type is a struct or array kind.
+ WriteExt(v interface{}) []byte
+
+ // ReadExt updates a value from a []byte.
+ //
+ // Note: dst is always a pointer kind to the registered extension type.
+ ReadExt(dst interface{}, src []byte)
+}
+
+// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
+// The Encoder or Decoder will then handle the further (de)serialization of that known type.
+//
+// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
+type InterfaceExt interface {
+ // ConvertExt converts a value into a simpler interface for easy encoding
+ // e.g. convert time.Time to int64.
+ //
+ // Note: v is a pointer iff the registered extension type is a struct or array kind.
+ ConvertExt(v interface{}) interface{}
+
+ // UpdateExt updates a value from a simpler interface for easy decoding
+ // e.g. convert int64 to time.Time.
+ //
+ // Note: dst is always a pointer kind to the registered extension type.
+ UpdateExt(dst interface{}, src interface{})
+}
+
+// Ext handles custom (de)serialization of custom types / extensions.
+type Ext interface {
+ BytesExt
+ InterfaceExt
+}
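+
+// Illustrative sketch (not part of the upstream API): an InterfaceExt for a
+// hypothetical struct type, converting it to a plain string which the codec then
+// serializes natively. The coord/coordExt names are assumptions; the concrete
+// type of src on decode depends on the handle (a string is assumed here).
+//
+// type coord struct{ X, Y int }
+//
+// type coordExt struct{}
+//
+// func (coordExt) ConvertExt(v interface{}) interface{} {
+// c := v.(*coord) // pointer, since coord is a struct kind
+// return fmt.Sprintf("%d,%d", c.X, c.Y)
+// }
+//
+// func (coordExt) UpdateExt(dst, src interface{}) {
+// c := dst.(*coord)
+// fmt.Sscanf(src.(string), "%d,%d", &c.X, &c.Y)
+// }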
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported method.
+type addExtWrapper struct {
+ encFn func(reflect.Value) ([]byte, error)
+ decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+ bs, err := x.encFn(reflect.ValueOf(v))
+ if err != nil {
+ panic(err)
+ }
+ return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+ if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+ panic(err)
+ }
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+ return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+ x.ReadExt(dest, v.([]byte))
+}
+
+type extWrapper struct {
+ BytesExt
+ InterfaceExt
+}
+
+type bytesExtFailer struct{}
+
+func (bytesExtFailer) WriteExt(v interface{}) []byte {
+ panicv.errorstr("BytesExt.WriteExt is not supported")
+ return nil
+}
+func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
+ panicv.errorstr("BytesExt.ReadExt is not supported")
+}
+
+type interfaceExtFailer struct{}
+
+func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
+ panicv.errorstr("InterfaceExt.ConvertExt is not supported")
+ return nil
+}
+func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
+ panicv.errorstr("InterfaceExt.UpdateExt is not supported")
+}
+
+type binaryEncodingType struct{}
+
+func (binaryEncodingType) isBinary() bool { return true }
+
+type textEncodingType struct{}
+
+func (textEncodingType) isBinary() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+
+// type noBuiltInTypeChecker struct{}
+// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false }
+// type noBuiltInTypes struct{ noBuiltInTypeChecker }
+
+type noBuiltInTypes struct{}
+
+func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+// type noStreamingCodec struct{}
+// func (noStreamingCodec) CheckBreak() bool { return false }
+// func (noStreamingCodec) hasElemSeparators() bool { return false }
+
+type noElemSeparators struct{}
+
+func (noElemSeparators) hasElemSeparators() (v bool) { return }
+func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return }
+
+// bigenHelper.
+// Users must already slice x to the exact length needed, because we will not reslice.
+type bigenHelper struct {
+ x []byte // must be correctly sliced to appropriate len. slicing is a cost.
+ w encWriter
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+ bigen.PutUint16(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+ bigen.PutUint32(z.x, v)
+ z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+ bigen.PutUint64(z.x, v)
+ z.w.writeb(z.x)
+}
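+
+// Illustrative usage sketch (w is assumed to be an in-scope encWriter and b a
+// caller-owned scratch buffer): write a length prefix as a big-endian uint32.
+//
+// var b [8]byte
+// bigenHelper{b[:4], w}.writeUint32(uint32(n)) // x must be pre-sliced to exactly 4 bytes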
+
+type extTypeTagFn struct {
+ rtid uintptr
+ rtidptr uintptr
+ rt reflect.Type
+ tag uint64
+ ext Ext
+ _ [1]uint64 // padding
+}
+
+type extHandle []extTypeTagFn
+
+// AddExt registers an encode and decode function for a reflect.Type.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) AddExt(rt reflect.Type, tag byte,
+ encfn func(reflect.Value) ([]byte, error),
+ decfn func(reflect.Value, []byte) error) (err error) {
+ if encfn == nil || decfn == nil {
+ return o.SetExt(rt, uint64(tag), nil)
+ }
+ return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// SetExt will set the extension for a tag and reflect.Type.
+// Note that the type must be a named type, and specifically not a pointer or Interface.
+// An error is returned if that is not honored.
+// To Deregister an ext, call SetExt with nil Ext.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+ // o is a pointer, because we may need to initialize it
+ rk := rt.Kind()
+ for rk == reflect.Ptr {
+ rt = rt.Elem()
+ rk = rt.Kind()
+ }
+
+ if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
+ return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
+ }
+
+ rtid := rt2id(rt)
+ switch rtid {
+ case timeTypId, rawTypId, rawExtTypId:
+ // all natively supported type, so cannot have an extension
+ return // TODO: should we silently ignore, or return an error???
+ }
+ // if o == nil {
+ // return errors.New("codec.Handle.SetExt: extHandle not initialized")
+ // }
+ o2 := *o
+ // if o2 == nil {
+ // return errors.New("codec.Handle.SetExt: extHandle not initialized")
+ // }
+ for i := range o2 {
+ v := &o2[i]
+ if v.rtid == rtid {
+ v.tag, v.ext = tag, ext
+ return
+ }
+ }
+ rtidptr := rt2id(reflect.PtrTo(rt))
+ *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}})
+ return
+}
+
+func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) {
+ for i := range o {
+ v = &o[i]
+ if v.rtid == rtid || v.rtidptr == rtid {
+ return
+ }
+ }
+ return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
+ for i := range o {
+ v = &o[i]
+ if v.tag == tag {
+ return
+ }
+ }
+ return nil
+}
+
+type intf2impl struct {
+ rtid uintptr // for intf
+ impl reflect.Type
+ // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned.
+}
+
+type intf2impls []intf2impl
+
+// Intf2Impl maps an interface to an implementing type.
+// This allows us to support inferring the concrete type
+// and populating it when passed an interface.
+// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
+//
+// Passing a nil impl will clear the mapping.
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
+ if impl != nil && !impl.Implements(intf) {
+ return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
+ }
+ rtid := rt2id(intf)
+ o2 := *o
+ for i := range o2 {
+ v := &o2[i]
+ if v.rtid == rtid {
+ v.impl = impl
+ return
+ }
+ }
+ *o = append(o2, intf2impl{rtid, impl})
+ return
+}
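+
+// Illustrative sketch: map an interface type to a concrete implementation so
+// that decoding into a nil interface value can populate a known type. This
+// assumes the method is promoted onto a handle that embeds intf2impls.
+//
+// var jh JsonHandle
+// rtReader := reflect.TypeOf((*io.Reader)(nil)).Elem()
+// err := jh.Intf2Impl(rtReader, reflect.TypeOf(&bytes.Buffer{})) // decode io.Reader values as *bytes.Buffer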
+
+func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
+ for i := range o {
+ v := &o[i]
+ if v.rtid == rtid {
+ if v.impl == nil {
+ return
+ }
+ if v.impl.Kind() == reflect.Ptr {
+ return reflect.New(v.impl.Elem())
+ }
+ return reflect.New(v.impl).Elem()
+ }
+ }
+ return
+}
+
+type structFieldInfoFlag uint8
+
+const (
+ _ structFieldInfoFlag = 1 << iota
+ structFieldInfoFlagReady
+ structFieldInfoFlagOmitEmpty
+)
+
+func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) {
+ *x = *x | f
+}
+
+func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) {
+ *x = *x &^ f
+}
+
+func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool {
+ return x&f != 0
+}
+
+func (x structFieldInfoFlag) omitEmpty() bool {
+ return x.flagGet(structFieldInfoFlagOmitEmpty)
+}
+
+func (x structFieldInfoFlag) ready() bool {
+ return x.flagGet(structFieldInfoFlagReady)
+}
+
+type structFieldInfo struct {
+ encName string // encode name
+ fieldName string // field name
+
+ is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct
+ nis uint8 // num levels of embedding. if 1, then it's not embedded.
+ structFieldInfoFlag
+}
+
+func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
+ if v, valid := si.field(v, false); valid {
+ v.Set(reflect.Zero(v.Type()))
+ }
+}
+
+// field returns the requested field of the struct.
+// If it cannot be reached (e.g. through a nil embedded pointer with update=false),
+// it returns an invalid reflect.Value and valid=false.
+func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) {
+ // replicate FieldByIndex
+ for i, x := range si.is {
+ if uint8(i) == si.nis {
+ break
+ }
+ if v, valid = baseStructRv(v, update); !valid {
+ return
+ }
+ v = v.Field(int(x))
+ }
+
+ return v, true
+}
+
+// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value {
+// v, _ = si.field(v, update)
+// return v
+// }
+
+func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
+ keytype = valueTypeString // default
+ if stag == "" {
+ return
+ }
+ for i, s := range strings.Split(stag, ",") {
+ if i == 0 {
+ } else {
+ switch s {
+ case "omitempty":
+ omitEmpty = true
+ case "toarray":
+ toArray = true
+ case "int":
+ keytype = valueTypeInt
+ case "uint":
+ keytype = valueTypeUint
+ case "float":
+ keytype = valueTypeFloat
+ // case "bool":
+ // keytype = valueTypeBool
+ case "string":
+ keytype = valueTypeString
+ }
+ }
+ }
+ return
+}
+
+func (si *structFieldInfo) parseTag(stag string) {
+ // if fname == "" {
+ // panic(errNoFieldNameToStructFieldInfo)
+ // }
+
+ if stag == "" {
+ return
+ }
+ for i, s := range strings.Split(stag, ",") {
+ if i == 0 {
+ if s != "" {
+ si.encName = s
+ }
+ } else {
+ switch s {
+ case "omitempty":
+ si.flagSet(structFieldInfoFlagOmitEmpty)
+ // si.omitEmpty = true
+ // case "toarray":
+ // si.toArray = true
+ }
+ }
+ }
+}
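+
+// Illustrative sketch of the tag format parsed above (struct and field names
+// are made up; the struct-level options come from the _struct field's tag):
+//
+// type Point struct {
+// 	_struct bool  `codec:",omitempty,toarray"` // apply omitempty to all fields; encode struct as array
+// 	X       int64 `codec:"x"`                  // first tag value renames the field
+// 	Y       int64 `codec:"y,omitempty"`        // omit this field when empty
+// }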
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int {
+ return len(p)
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool {
+ return p[i].encName < p[j].encName
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+const structFieldNodeNumToCache = 4
+
+type structFieldNodeCache struct {
+ rv [structFieldNodeNumToCache]reflect.Value
+ idx [structFieldNodeNumToCache]uint32
+ num uint8
+}
+
+func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) {
+ for i, k := range &x.idx {
+ if uint8(i) == x.num {
+ return // break
+ }
+ if key == k {
+ return x.rv[i], true
+ }
+ }
+ return
+}
+
+func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) {
+ if x.num < structFieldNodeNumToCache {
+ x.rv[x.num] = fv
+ x.idx[x.num] = key
+ x.num++
+ return
+ }
+}
+
+type structFieldNode struct {
+ v reflect.Value
+ cache2 structFieldNodeCache
+ cache3 structFieldNodeCache
+ update bool
+}
+
+func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
+ // return si.fieldval(x.v, x.update)
+ // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding
+ // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
+ var valid bool
+ switch si.nis {
+ case 1:
+ fv = x.v.Field(int(si.is[0]))
+ case 2:
+ if fv, valid = x.cache2.get(uint32(si.is[0])); valid {
+ fv = fv.Field(int(si.is[1]))
+ return
+ }
+ fv = x.v.Field(int(si.is[0]))
+ if fv, valid = baseStructRv(fv, x.update); !valid {
+ return
+ }
+ x.cache2.tryAdd(fv, uint32(si.is[0]))
+ fv = fv.Field(int(si.is[1]))
+ case 3:
+ var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1])
+ if fv, valid = x.cache3.get(key); valid {
+ fv = fv.Field(int(si.is[2]))
+ return
+ }
+ fv = x.v.Field(int(si.is[0]))
+ if fv, valid = baseStructRv(fv, x.update); !valid {
+ return
+ }
+ fv = fv.Field(int(si.is[1]))
+ if fv, valid = baseStructRv(fv, x.update); !valid {
+ return
+ }
+ x.cache3.tryAdd(fv, key)
+ fv = fv.Field(int(si.is[2]))
+ default:
+ fv, _ = si.field(x.v, x.update)
+ }
+ return
+}
+
+func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ if !update {
+ return
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ return v, true
+}
+
+type typeInfoFlag uint8
+
+const (
+ typeInfoFlagComparable = 1 << iota
+ typeInfoFlagIsZeroer
+ typeInfoFlagIsZeroerPtr
+)
+
+// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+// - If base is a built in type, en/decode base value
+// - If base is registered as an extension, en/decode base value
+// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
+// - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+ rt reflect.Type
+ elem reflect.Type
+ pkgpath string
+
+ rtid uintptr
+ // rv0 reflect.Value // saved zero value, used if immutableKind
+
+ numMeth uint16 // number of methods
+ kind uint8
+ chandir uint8
+
+ anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty"
+ toArray bool // whether this (struct) type should be encoded as an array
+ keyType valueType // if struct, how is the field name stored in a stream? default is string
+ mbs bool // base type (T or *T) is a MapBySlice
+
+ // ---- cpu cache line boundary?
+ sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map.
+ sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array.
+
+ key reflect.Type
+
+ // ---- cpu cache line boundary?
+ // sfis []structFieldInfo // all sfi, in src order, as created.
+ sfiNamesSort []byte // all names, with indexes into the sfiSort
+
+ // format of marshal type fields below: [btj][mu]p? OR csp?
+
+ bm bool // T is a binaryMarshaler
+ bmp bool // *T is a binaryMarshaler
+ bu bool // T is a binaryUnmarshaler
+ bup bool // *T is a binaryUnmarshaler
+ tm bool // T is a textMarshaler
+ tmp bool // *T is a textMarshaler
+ tu bool // T is a textUnmarshaler
+ tup bool // *T is a textUnmarshaler
+
+ jm bool // T is a jsonMarshaler
+ jmp bool // *T is a jsonMarshaler
+ ju bool // T is a jsonUnmarshaler
+ jup bool // *T is a jsonUnmarshaler
+ cs bool // T is a Selfer
+ csp bool // *T is a Selfer
+
+ // other flags, with individual bits representing if set.
+ flags typeInfoFlag
+
+ // _ [2]byte // padding
+ _ [3]uint64 // padding
+}
+
+func (ti *typeInfo) isFlag(f typeInfoFlag) bool {
+ return ti.flags&f != 0
+}
+
+func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
+ var sn []byte
+ if len(name)+2 <= 32 {
+ var buf [32]byte // should not escape
+ sn = buf[:len(name)+2]
+ } else {
+ sn = make([]byte, len(name)+2)
+ }
+ copy(sn[1:], name)
+ sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
+ j := bytes.Index(ti.sfiNamesSort, sn)
+ if j < 0 {
+ return -1
+ }
+ index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
+ return
+}
+
+type rtid2ti struct {
+ rtid uintptr
+ ti *typeInfo
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+ // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
+ infos atomicTypeInfoSlice
+ mu sync.Mutex
+ tags []string
+ _ [2]uint64 // padding
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
+//
+// This allows users to customize the struct tag keys which contain configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+ return &TypeInfos{tags: tags}
+}
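+
+// Illustrative sketch: fall back from the "codec" tag key to "json", by giving
+// the handle a custom TypeInfos (TypeInfos is a field on BasicHandle).
+//
+// var mh MsgpackHandle
+// mh.TypeInfos = NewTypeInfos([]string{"codec", "json"})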
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+ // check for tags: codec, json, in that order.
+ // this allows seamless support for many configured structs.
+ for _, x := range x.tags {
+ s = t.Get(x)
+ if s != "" {
+ return s
+ }
+ }
+ return
+}
+
+func (x *TypeInfos) find(s []rtid2ti, rtid uintptr) (idx int, ti *typeInfo) {
+ // binary search. adapted from sort/search.go.
+ // if sp == nil {
+ // return -1, nil
+ // }
+ // s := *sp
+ h, i, j := 0, 0, len(s)
+ for i < j {
+ h = i + (j-i)/2
+ if s[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < len(s) && s[i].rtid == rtid {
+ return i, s[i].ti
+ }
+ return i, nil
+}
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ sp := x.infos.load()
+ var idx int
+ if sp != nil {
+ idx, pti = x.find(sp, rtid)
+ if pti != nil {
+ return
+ }
+ }
+
+ rk := rt.Kind()
+
+ if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
+ panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
+ }
+
+ // do not hold lock while computing this.
+ // it may lead to duplication, but that's ok.
+ ti := typeInfo{rt: rt, rtid: rtid, kind: uint8(rk), pkgpath: rt.PkgPath()}
+ // ti.rv0 = reflect.Zero(rt)
+
+ // ti.comparable = rt.Comparable()
+ ti.numMeth = uint16(rt.NumMethod())
+
+ ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp)
+ ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp)
+ ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp)
+ ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp)
+ ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp)
+ ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp)
+ ti.cs, ti.csp = implIntf(rt, selferTyp)
+
+ b1, b2 := implIntf(rt, iszeroTyp)
+ if b1 {
+ ti.flags |= typeInfoFlagIsZeroer
+ }
+ if b2 {
+ ti.flags |= typeInfoFlagIsZeroerPtr
+ }
+ if rt.Comparable() {
+ ti.flags |= typeInfoFlagComparable
+ }
+
+ switch rk {
+ case reflect.Struct:
+ var omitEmpty bool
+ if f, ok := rt.FieldByName(structInfoFieldName); ok {
+ ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
+ } else {
+ ti.keyType = valueTypeString
+ }
+ pp, pi := pool.tiLoad()
+ pv := pi.(*typeInfoLoadArray)
+ pv.etypes[0] = ti.rtid
+ // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
+ vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
+ x.rget(rt, rtid, omitEmpty, nil, &vv)
+ // ti.sfis = vv.sfis
+ ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
+ pp.Put(pi)
+ case reflect.Map:
+ ti.elem = rt.Elem()
+ ti.key = rt.Key()
+ case reflect.Slice:
+ ti.mbs, _ = implIntf(rt, mapBySliceTyp)
+ ti.elem = rt.Elem()
+ case reflect.Chan:
+ ti.elem = rt.Elem()
+ ti.chandir = uint8(rt.ChanDir())
+ case reflect.Array, reflect.Ptr:
+ ti.elem = rt.Elem()
+ }
+ // sfi = sfiSrc
+
+ x.mu.Lock()
+ sp = x.infos.load()
+ if sp == nil {
+ pti = &ti
+ vs := []rtid2ti{{rtid, pti}}
+ x.infos.store(vs)
+ } else {
+ idx, pti = x.find(sp, rtid)
+ if pti == nil {
+ pti = &ti
+ vs := make([]rtid2ti, len(sp)+1)
+ copy(vs, sp[:idx])
+ copy(vs[idx+1:], sp[idx:])
+ vs[idx] = rtid2ti{rtid, pti}
+ x.infos.store(vs)
+ }
+ }
+ x.mu.Unlock()
+ return
+}
+
+func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
+ indexstack []uint16, pv *typeInfoLoad) {
+ // Read up fields and store how to access the value.
+ //
+ // It uses go's rules for message selectors,
+ // which say that the field with the shallowest depth is selected.
+ //
+ // Note: we consciously use slices, not a map, to simulate a set.
+ // Typically, types have < 16 fields,
+ // and iteration using equals is faster than maps there
+ flen := rt.NumField()
+ if flen > (1<<maxLevelsEmbedding - 1) {
+ panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
+ (1<<maxLevelsEmbedding - 1), flen)
+ }
+ for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
+ // ... (per-field handling that builds each structFieldInfo si is truncated in this hunk) ...
+ if len(indexstack) > maxLevelsEmbedding-1 {
+ panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
+ maxLevelsEmbedding-1, len(indexstack))
+ }
+ si.nis = uint8(len(indexstack)) + 1
+ copy(si.is[:], indexstack)
+ si.is[len(indexstack)] = j
+
+ if omitEmpty {
+ si.flagSet(structFieldInfoFlagOmitEmpty)
+ }
+ pv.sfis = append(pv.sfis, si)
+ }
+}
+
+func tiSep(name string) uint8 {
+ // (xn[0]%64) // (between 192-255 - outside ascii BMP)
+ // return 0xfe - (name[0] & 63)
+ // return 0xfe - (name[0] & 63) - uint8(len(name))
+ // return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+ // return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+ return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+func tiSep2(name []byte) uint8 {
+ return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
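+
+// Worked example: tiSep("Age") = 0xfe - ('A'&63) - (3&63) = 254 - 1 - 3 = 0xfa,
+// a high byte value that is very unlikely to collide with the field-name
+// characters stored alongside it.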
+
+// resolves the struct field info obtained from a call to rget.
+// Returns a trimmed, unsorted and sorted []*structFieldInfo.
+func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
+ y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
+ sa := pv.sfiidx[:0]
+ sn := pv.b[:]
+ n := len(x)
+
+ var xn string
+ var ui uint16
+ var sep byte
+
+ for i := range x {
+ ui = uint16(i)
+ xn = x[i].encName // fieldName or encName? use encName for now.
+ if len(xn)+2 > cap(pv.b) {
+ sn = make([]byte, len(xn)+2)
+ } else {
+ sn = sn[:len(xn)+2]
+ }
+ // use a custom sep, so that misses are less frequent,
+ // since the sep (first char in search) is as unique as first char in field name.
+ sep = tiSep(xn)
+ sn[0], sn[len(sn)-1] = sep, 0xff
+ copy(sn[1:], xn)
+ j := bytes.Index(sa, sn)
+ if j == -1 {
+ sa = append(sa, sep)
+ sa = append(sa, xn...)
+ sa = append(sa, 0xff, byte(ui>>8), byte(ui))
+ } else {
+ index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8
+ // one of them must be reset to nil,
+ // and the index updated appropriately to the other one
+ if x[i].nis == x[index].nis {
+ } else if x[i].nis < x[index].nis {
+ sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui)
+ if x[index].ready() {
+ x[index].flagClr(structFieldInfoFlagReady)
+ n--
+ }
+ } else {
+ if x[i].ready() {
+ x[i].flagClr(structFieldInfoFlagReady)
+ n--
+ }
+ }
+ }
+
+ }
+ var w []structFieldInfo
+ sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray
+ if sharingArray {
+ w = make([]structFieldInfo, n)
+ }
+
+ // remove all the nils (non-ready)
+ y = make([]*structFieldInfo, n)
+ n = 0
+ var sslen int
+ for i := range x {
+ if !x[i].ready() {
+ continue
+ }
+ if !anyOmitEmpty && x[i].omitEmpty() {
+ anyOmitEmpty = true
+ }
+ if sharingArray {
+ w[n] = x[i]
+ y[n] = &w[n]
+ } else {
+ y[n] = &x[i]
+ }
+ sslen = sslen + len(x[i].encName) + 4
+ n++
+ }
+ if n != len(y) {
+ panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
+ rt, len(y), len(x), n)
+ }
+
+ z = make([]*structFieldInfo, len(y))
+ copy(z, y)
+ sort.Sort(sfiSortedByEncName(z))
+
+ sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen
+ if sharingArray {
+ ss = make([]byte, 0, sslen)
+ } else {
+ ss = sa[:0] // reuse the newly made sa array if necessary
+ }
+ for i := range z {
+ xn = z[i].encName
+ sep = tiSep(xn)
+ ui = uint16(i)
+ ss = append(ss, sep)
+ ss = append(ss, xn...)
+ ss = append(ss, 0xff, byte(ui>>8), byte(ui))
+ }
+ return
+}
+
+func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
+ return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
+}
+
+// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
+// - does it implement IsZero() bool
+// - is it comparable, and can I compare directly using ==
+// - if checkStruct, then walk through the encodable fields
+// and check if they are empty or not.
+func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
+ // v is a struct kind - no need to check again.
+ // We only check isZero on a struct kind, to reduce the amount of times
+ // that we lookup the rtid and typeInfo for each type as we walk the tree.
+
+ vt := v.Type()
+ rtid := rt2id(vt)
+ if tinfos == nil {
+ tinfos = defTypeInfos
+ }
+ ti := tinfos.get(rtid, vt)
+ if ti.rtid == timeTypId {
+ return rv2i(v).(time.Time).IsZero()
+ }
+ if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() {
+ return rv2i(v.Addr()).(isZeroer).IsZero()
+ }
+ if ti.isFlag(typeInfoFlagIsZeroer) {
+ return rv2i(v).(isZeroer).IsZero()
+ }
+ if ti.isFlag(typeInfoFlagComparable) {
+ return rv2i(v) == rv2i(reflect.Zero(vt))
+ }
+ if !checkStruct {
+ return false
+ }
+ // We only care about what we can encode/decode,
+ // so that is what we use to check omitEmpty.
+ for _, si := range ti.sfiSrc {
+ sfv, valid := si.field(v, false)
+ if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
+ return false
+ }
+ }
+ return true
+}
+
+// func roundFloat(x float64) float64 {
+// t := math.Trunc(x)
+// if math.Abs(x-t) >= 0.5 {
+// return t + math.Copysign(1, x)
+// }
+// return t
+// }
+
+func panicToErr(h errstrDecorator, err *error) {
+ // Note: This method MUST be called directly from defer i.e. defer panicToErr ...
+ // else it seems the recover is not fully handled
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ // fmt.Printf("panic'ing with: %v\n", x)
+ // debug.PrintStack()
+ panicValToErr(h, x, err)
+ }
+ }
+}
+
+func panicValToErr(h errstrDecorator, v interface{}, err *error) {
+ switch xerr := v.(type) {
+ case nil:
+ case error:
+ switch xerr {
+ case nil:
+ case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
+ // treat as special (bubble up)
+ *err = xerr
+ default:
+ h.wrapErrstr(xerr.Error(), err)
+ }
+ case string:
+ if xerr != "" {
+ h.wrapErrstr(xerr, err)
+ }
+ case fmt.Stringer:
+ if xerr != nil {
+ h.wrapErrstr(xerr.String(), err)
+ }
+ default:
+ h.wrapErrstr(v, err)
+ }
+}
+
+func isImmutableKind(k reflect.Kind) (v bool) {
+ return immutableKindsSet[k]
+}
+
+// ----
+
+type codecFnInfo struct {
+ ti *typeInfo
+ xfFn Ext
+ xfTag uint64
+ seq seqType
+ addrD bool
+ addrF bool // if addrD, this says whether decode function can take a value or a ptr
+ addrE bool
+ ready bool // ready to use
+}
+
+// codecFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations one times, and pass to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type codecFn struct {
+ i codecFnInfo
+ fe func(*Encoder, *codecFnInfo, reflect.Value)
+ fd func(*Decoder, *codecFnInfo, reflect.Value)
+ _ [1]uint64 // padding
+}
+
+type codecRtidFn struct {
+ rtid uintptr
+ fn *codecFn
+}
+
+type codecFner struct {
+ // hh Handle
+ h *BasicHandle
+ s []codecRtidFn
+ be bool
+ js bool
+ _ [6]byte // padding
+ _ [3]uint64 // padding
+}
+
+func (c *codecFner) reset(hh Handle) {
+ bh := hh.getBasicHandle()
+ // only reset iff extensions changed or *TypeInfos changed
+ var hhSame = true &&
+ c.h == bh && c.h.TypeInfos == bh.TypeInfos &&
+ len(c.h.extHandle) == len(bh.extHandle) &&
+ (len(c.h.extHandle) == 0 || &c.h.extHandle[0] == &bh.extHandle[0])
+ if !hhSame {
+ // c.hh = hh
+ c.h, bh = bh, c.h // swap both
+ _, c.js = hh.(*JsonHandle)
+ c.be = hh.isBinary()
+ for i := range c.s {
+ c.s[i].fn.i.ready = false
+ }
+ }
+}
+
+func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
+ rtid := rt2id(rt)
+
+ for _, x := range c.s {
+ if x.rtid == rtid {
+ // if rtid exists, then there's a *codecFn attached (non-nil)
+ fn = x.fn
+ if fn.i.ready {
+ return
+ }
+ break
+ }
+ }
+ var ti *typeInfo
+ if fn == nil {
+ fn = new(codecFn)
+ if c.s == nil {
+ c.s = make([]codecRtidFn, 0, 8)
+ }
+ c.s = append(c.s, codecRtidFn{rtid, fn})
+ } else {
+ ti = fn.i.ti
+ *fn = codecFn{}
+ fn.i.ti = ti
+ // fn.fe, fn.fd = nil, nil
+ }
+ fi := &(fn.i)
+ fi.ready = true
+ if ti == nil {
+ ti = c.h.getTypeInfo(rtid, rt)
+ fi.ti = ti
+ }
+
+ rk := reflect.Kind(ti.kind)
+
+ if checkCodecSelfer && (ti.cs || ti.csp) {
+ fn.fe = (*Encoder).selferMarshal
+ fn.fd = (*Decoder).selferUnmarshal
+ fi.addrF = true
+ fi.addrD = ti.csp
+ fi.addrE = ti.csp
+ } else if rtid == timeTypId {
+ fn.fe = (*Encoder).kTime
+ fn.fd = (*Decoder).kTime
+ } else if rtid == rawTypId {
+ fn.fe = (*Encoder).raw
+ fn.fd = (*Decoder).raw
+ } else if rtid == rawExtTypId {
+ fn.fe = (*Encoder).rawExt
+ fn.fd = (*Decoder).rawExt
+ fi.addrF = true
+ fi.addrD = true
+ fi.addrE = true
+ } else if xfFn := c.h.getExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+ fn.fe = (*Encoder).ext
+ fn.fd = (*Decoder).ext
+ fi.addrF = true
+ fi.addrD = true
+ if rk == reflect.Struct || rk == reflect.Array {
+ fi.addrE = true
+ }
+ } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
+ fn.fe = (*Encoder).binaryMarshal
+ fn.fd = (*Decoder).binaryUnmarshal
+ fi.addrF = true
+ fi.addrD = ti.bup
+ fi.addrE = ti.bmp
+ } else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
+ // If JSON, we should check jsonMarshal before textMarshal
+ fn.fe = (*Encoder).jsonMarshal
+ fn.fd = (*Decoder).jsonUnmarshal
+ fi.addrF = true
+ fi.addrD = ti.jup
+ fi.addrE = ti.jmp
+ } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
+ fn.fe = (*Encoder).textMarshal
+ fn.fd = (*Decoder).textUnmarshal
+ fi.addrF = true
+ fi.addrD = ti.tup
+ fi.addrE = ti.tmp
+ } else {
+ if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+ if ti.pkgpath == "" { // un-named slice or map
+ if idx := fastpathAV.index(rtid); idx != -1 {
+ fn.fe = fastpathAV[idx].encfn
+ fn.fd = fastpathAV[idx].decfn
+ fi.addrD = true
+ fi.addrF = false
+ }
+ } else {
+ // use mapping for underlying type if there
+ var rtu reflect.Type
+ if rk == reflect.Map {
+ rtu = reflect.MapOf(ti.key, ti.elem)
+ } else {
+ rtu = reflect.SliceOf(ti.elem)
+ }
+ rtuid := rt2id(rtu)
+ if idx := fastpathAV.index(rtuid); idx != -1 {
+ xfnf := fastpathAV[idx].encfn
+ xrt := fastpathAV[idx].rt
+ fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
+ xfnf(e, xf, xrv.Convert(xrt))
+ }
+ fi.addrD = true
+ fi.addrF = false // meaning it can be an address(ptr) or a value
+ xfnf2 := fastpathAV[idx].decfn
+ fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+ if xrv.Kind() == reflect.Ptr {
+ xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt)))
+ } else {
+ xfnf2(d, xf, xrv.Convert(xrt))
+ }
+ }
+ }
+ }
+ }
+ if fn.fe == nil && fn.fd == nil {
+ switch rk {
+ case reflect.Bool:
+ fn.fe = (*Encoder).kBool
+ fn.fd = (*Decoder).kBool
+ case reflect.String:
+ fn.fe = (*Encoder).kString
+ fn.fd = (*Decoder).kString
+ case reflect.Int:
+ fn.fd = (*Decoder).kInt
+ fn.fe = (*Encoder).kInt
+ case reflect.Int8:
+ fn.fe = (*Encoder).kInt8
+ fn.fd = (*Decoder).kInt8
+ case reflect.Int16:
+ fn.fe = (*Encoder).kInt16
+ fn.fd = (*Decoder).kInt16
+ case reflect.Int32:
+ fn.fe = (*Encoder).kInt32
+ fn.fd = (*Decoder).kInt32
+ case reflect.Int64:
+ fn.fe = (*Encoder).kInt64
+ fn.fd = (*Decoder).kInt64
+ case reflect.Uint:
+ fn.fd = (*Decoder).kUint
+ fn.fe = (*Encoder).kUint
+ case reflect.Uint8:
+ fn.fe = (*Encoder).kUint8
+ fn.fd = (*Decoder).kUint8
+ case reflect.Uint16:
+ fn.fe = (*Encoder).kUint16
+ fn.fd = (*Decoder).kUint16
+ case reflect.Uint32:
+ fn.fe = (*Encoder).kUint32
+ fn.fd = (*Decoder).kUint32
+ case reflect.Uint64:
+ fn.fe = (*Encoder).kUint64
+ fn.fd = (*Decoder).kUint64
+ case reflect.Uintptr:
+ fn.fe = (*Encoder).kUintptr
+ fn.fd = (*Decoder).kUintptr
+ case reflect.Float32:
+ fn.fe = (*Encoder).kFloat32
+ fn.fd = (*Decoder).kFloat32
+ case reflect.Float64:
+ fn.fe = (*Encoder).kFloat64
+ fn.fd = (*Decoder).kFloat64
+ case reflect.Invalid:
+ fn.fe = (*Encoder).kInvalid
+ fn.fd = (*Decoder).kErr
+ case reflect.Chan:
+ fi.seq = seqTypeChan
+ fn.fe = (*Encoder).kSlice
+ fn.fd = (*Decoder).kSlice
+ case reflect.Slice:
+ fi.seq = seqTypeSlice
+ fn.fe = (*Encoder).kSlice
+ fn.fd = (*Decoder).kSlice
+ case reflect.Array:
+ fi.seq = seqTypeArray
+ fn.fe = (*Encoder).kSlice
+ fi.addrF = false
+ fi.addrD = false
+ rt2 := reflect.SliceOf(ti.elem)
+ fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+ d.cfer().get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len()))
+ }
+ // fn.fd = (*Decoder).kArray
+ case reflect.Struct:
+ if ti.anyOmitEmpty {
+ fn.fe = (*Encoder).kStruct
+ } else {
+ fn.fe = (*Encoder).kStructNoOmitempty
+ }
+ fn.fd = (*Decoder).kStruct
+ case reflect.Map:
+ fn.fe = (*Encoder).kMap
+ fn.fd = (*Decoder).kMap
+ case reflect.Interface:
+ // encode: reflect.Interface are handled already by preEncodeValue
+ fn.fd = (*Decoder).kInterface
+ fn.fe = (*Encoder).kErr
+ default:
+ // reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+ fn.fe = (*Encoder).kErr
+ fn.fd = (*Decoder).kErr
+ }
+ }
+ }
+ return
+}
+
+type codecFnPooler struct {
+ cf *codecFner
+ cfp *sync.Pool
+ hh Handle
+}
+
+func (d *codecFnPooler) cfer() *codecFner {
+ if d.cf == nil {
+ var v interface{}
+ d.cfp, v = pool.codecFner()
+ d.cf = v.(*codecFner)
+ d.cf.reset(d.hh)
+ }
+ return d.cf
+}
+
+func (d *codecFnPooler) alwaysAtEnd() {
+ if d.cf != nil {
+ d.cfp.Put(d.cf)
+ d.cf, d.cfp = nil, nil
+ }
+}
+
+// ----
+
+// these "checkOverflow" functions must be inlinable, and not call anybody.
+// Overflow means that the value cannot be represented without wrapping/overflow.
+// Overflow=false does not mean that the value can be represented without losing precision
+// (especially for floating point).
+
+type checkOverflow struct{}
+
+// func (checkOverflow) Float16(f float64) (overflow bool) {
+// panicv.errorf("unimplemented")
+// if f < 0 {
+// f = -f
+// }
+// return math.MaxFloat32 < f && f <= math.MaxFloat64
+// }
+
+func (checkOverflow) Float32(v float64) (overflow bool) {
+ if v < 0 {
+ v = -v
+ }
+ return math.MaxFloat32 < v && v <= math.MaxFloat64
+}
+func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
+func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
+ if bitsize == 0 || bitsize >= 64 || v == 0 {
+ return
+ }
+ if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+ overflow = true
+ }
+ return
+}
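+
+// Worked example of the truncation check above: Uint(300, 8) computes
+// (300<<56)>>56 == 44 != 300, so 300 overflows 8 bits; Int(-129, 8) computes
+// ((-129)<<56)>>56 == 127 != -129, so -129 overflows int8.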
+func (checkOverflow) SignedInt(v uint64) (overflow bool) {
+ // e.g. -128 to 127 for int8
+ pos := (v >> 63) == 0
+ ui2 := v & 0x7fffffffffffffff
+ if pos {
+ if ui2 > math.MaxInt64 {
+ overflow = true
+ }
+ } else {
+ if ui2 > math.MaxInt64-1 {
+ overflow = true
+ }
+ }
+ return
+}
+
+func (x checkOverflow) Float32V(v float64) float64 {
+ if x.Float32(v) {
+ panicv.errorf("float32 overflow: %v", v)
+ }
+ return v
+}
+func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
+ if x.Uint(v, bitsize) {
+ panicv.errorf("uint64 overflow: %v", v)
+ }
+ return v
+}
+func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
+ if x.Int(v, bitsize) {
+ panicv.errorf("int64 overflow: %v", v)
+ }
+ return v
+}
+func (x checkOverflow) SignedIntV(v uint64) int64 {
+ if x.SignedInt(v) {
+ panicv.errorf("uint64 to int64 overflow: %v", v)
+ }
+ return int64(v)
+}
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type ioFlusher interface {
+ Flush() error
+}
+
+type ioPeeker interface {
+ Peek(int) ([]byte, error)
+}
+
+type ioBuffered interface {
+ Buffered() int
+}
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+
+// type uintptrSlice []uintptr
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+
+// type bytesSlice [][]byte
+
+func (p intSlice) Len() int { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintSlice) Len() int { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// func (p uintptrSlice) Len() int { return len(p) }
+// func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] }
+// func (p uintptrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+ return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
+}
+func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringSlice) Len() int { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// func (p bytesSlice) Len() int { return len(p) }
+// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
+// func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolSlice) Len() int { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ---------------------
+
+type intRv struct {
+ v int64
+ r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+ v uint64
+ r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+ v float64
+ r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+ v bool
+ r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+ v string
+ r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+ v []byte
+ r reflect.Value
+}
+type bytesRvSlice []bytesRv
+type timeRv struct {
+ v time.Time
+ r reflect.Value
+}
+type timeRvSlice []timeRv
+
+func (p intRvSlice) Len() int { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p uintRvSlice) Len() int { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+ return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringRvSlice) Len() int { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p bytesRvSlice) Len() int { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p boolRvSlice) Len() int { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
+func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p timeRvSlice) Len() int { return len(p) }
+func (p timeRvSlice) Less(i, j int) bool { return p[i].v.Before(p[j].v) }
+func (p timeRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type bytesI struct {
+ v []byte
+ i interface{}
+}
+
+type bytesISlice []bytesI
+
+func (p bytesISlice) Len() int { return len(p) }
+func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
+func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
+type set []uintptr
+
+func (s *set) add(v uintptr) (exists bool) {
+ // e.ci is always nil, or len >= 1
+ x := *s
+ if x == nil {
+ x = make([]uintptr, 1, 8)
+ x[0] = v
+ *s = x
+ return
+ }
+ // typically, length will be 1. make this case fast.
+ if len(x) == 1 {
+ if j := x[0]; j == 0 {
+ x[0] = v
+ } else if j == v {
+ exists = true
+ } else {
+ x = append(x, v)
+ *s = x
+ }
+ return
+ }
+ // check if it exists
+ for _, j := range x {
+ if j == v {
+ exists = true
+ return
+ }
+ }
+ // try to replace a "deleted" slot
+ for i, j := range x {
+ if j == 0 {
+ x[i] = v
+ return
+ }
+ }
+ // if unable to replace deleted slot, just append it.
+ x = append(x, v)
+ *s = x
+ return
+}
+
+func (s *set) remove(v uintptr) (exists bool) {
+ x := *s
+ if len(x) == 0 {
+ return
+ }
+ if len(x) == 1 {
+ if x[0] == v {
+ x[0] = 0
+ }
+ return
+ }
+ for i, j := range x {
+ if j == v {
+ exists = true
+ x[i] = 0 // set it to 0, as way to delete it.
+ // copy(x[i:], x[i+1:])
+ // x = x[:len(x)-1]
+ return
+ }
+ }
+ return
+}
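+
+// Illustrative usage sketch (ptr is a made-up uintptr id, e.g. of a value
+// currently being processed):
+//
+// var seen set
+// if seen.add(ptr) {
+// 	// ptr was already in the set: we are revisiting it
+// }
+// seen.remove(ptr) // zeroes the slot so it can be reused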
+
+// ------
+
+// bitset types are better than [256]bool, because they permit the whole
+// bitset array being on a single cache line and use less memory.
+
+// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
+// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
+
+type bitset256 [32]byte
+
+func (x *bitset256) isset(pos byte) bool {
+ return x[pos>>3]&(1<<(pos&7)) != 0
+}
+func (x *bitset256) issetv(pos byte) byte {
+ return x[pos>>3] & (1 << (pos & 7))
+}
+func (x *bitset256) set(pos byte) {
+ x[pos>>3] |= (1 << (pos & 7))
+}
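+
+// Worked example: for pos = 75, the byte index is 75>>3 = 9 and the bit is
+// 75&7 = 3, so set(75) ORs x[9] with 0b00001000 and isset(75) tests that bit.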
+
+// func (x *bitset256) unset(pos byte) {
+// x[pos>>3] &^= (1 << (pos & 7))
+// }
+
+type bitset128 [16]byte
+
+func (x *bitset128) isset(pos byte) bool {
+ return x[pos>>3]&(1<<(pos&7)) != 0
+}
+func (x *bitset128) set(pos byte) {
+ x[pos>>3] |= (1 << (pos & 7))
+}
+
+// func (x *bitset128) unset(pos byte) {
+// x[pos>>3] &^= (1 << (pos & 7))
+// }
+
+type bitset32 [4]byte
+
+func (x *bitset32) isset(pos byte) bool {
+ return x[pos>>3]&(1<<(pos&7)) != 0
+}
+func (x *bitset32) set(pos byte) {
+ x[pos>>3] |= (1 << (pos & 7))
+}
+
+// func (x *bitset32) unset(pos byte) {
+// x[pos>>3] &^= (1 << (pos & 7))
+// }
+
+// type bit2set256 [64]byte
+
+// func (x *bit2set256) set(pos byte, v1, v2 bool) {
+// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
+// if v1 {
+// x[pos>>2] |= 1 << (pos2 + 1)
+// }
+// if v2 {
+// x[pos>>2] |= 1 << pos2
+// }
+// }
+// func (x *bit2set256) get(pos byte) uint8 {
+// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
+// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
+// }
+
+// ------------
+
+type pooler struct {
+ dn sync.Pool // for decNaked
+ cfn sync.Pool // for codecFner
+ tiload sync.Pool
+ strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool // for stringRV
+}
+
+func (p *pooler) init() {
+ p.strRv8.New = func() interface{} { return new([8]stringRv) }
+ p.strRv16.New = func() interface{} { return new([16]stringRv) }
+ p.strRv32.New = func() interface{} { return new([32]stringRv) }
+ p.strRv64.New = func() interface{} { return new([64]stringRv) }
+ p.strRv128.New = func() interface{} { return new([128]stringRv) }
+ p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
+ p.tiload.New = func() interface{} { return new(typeInfoLoadArray) }
+ p.cfn.New = func() interface{} { return new(codecFner) }
+}
+
+func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) {
+ return &p.strRv8, p.strRv8.Get()
+}
+func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) {
+ return &p.strRv16, p.strRv16.Get()
+}
+func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) {
+ return &p.strRv32, p.strRv32.Get()
+}
+func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) {
+ return &p.strRv64, p.strRv64.Get()
+}
+func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) {
+ return &p.strRv128, p.strRv128.Get()
+}
+func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
+ return &p.dn, p.dn.Get()
+}
+func (p *pooler) codecFner() (sp *sync.Pool, v interface{}) {
+ return &p.cfn, p.cfn.Get()
+}
+func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
+ return &p.tiload, p.tiload.Get()
+}
+
+// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) {
+// sp := &(p.dn)
+// vv := sp.Get()
+// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) }
+// }
+// func (p *pooler) decNakedGet() (v interface{}) {
+// return p.dn.Get()
+// }
+// func (p *pooler) codecFnerGet() (v interface{}) {
+// return p.cfn.Get()
+// }
+// func (p *pooler) tiLoadGet() (v interface{}) {
+// return p.tiload.Get()
+// }
+// func (p *pooler) decNakedPut(v interface{}) {
+// p.dn.Put(v)
+// }
+// func (p *pooler) codecFnerPut(v interface{}) {
+// p.cfn.Put(v)
+// }
+// func (p *pooler) tiLoadPut(v interface{}) {
+// p.tiload.Put(v)
+// }
+
+type panicHdl struct{}
+
+func (panicHdl) errorv(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (panicHdl) errorstr(message string) {
+ if message != "" {
+ panic(message)
+ }
+}
+
+func (panicHdl) errorf(format string, params ...interface{}) {
+ if format != "" {
+ if len(params) == 0 {
+ panic(format)
+ } else {
+ panic(fmt.Sprintf(format, params...))
+ }
+ }
+}
+
+type errstrDecorator interface {
+ wrapErrstr(interface{}, *error)
+}
+
+type errstrDecoratorDef struct{}
+
+func (errstrDecoratorDef) wrapErrstr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) }
+
+type must struct{}
+
+func (must) String(s string, err error) string {
+ if err != nil {
+ panicv.errorv(err)
+ }
+ return s
+}
+func (must) Int(s int64, err error) int64 {
+ if err != nil {
+ panicv.errorv(err)
+ }
+ return s
+}
+func (must) Uint(s uint64, err error) uint64 {
+ if err != nil {
+ panicv.errorv(err)
+ }
+ return s
+}
+func (must) Float(s float64, err error) float64 {
+ if err != nil {
+ panicv.errorv(err)
+ }
+ return s
+}
+
+// xdebugf prints the message in red on the terminal.
+// Use it in place of fmt.Printf (which it calls internally)
+func xdebugf(pattern string, args ...interface{}) {
+ var delim string
+ if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' {
+ delim = "\n"
+ }
+ fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...)
+}
+
+// func isImmutableKind(k reflect.Kind) (v bool) {
+// return false ||
+// k == reflect.Int ||
+// k == reflect.Int8 ||
+// k == reflect.Int16 ||
+// k == reflect.Int32 ||
+// k == reflect.Int64 ||
+// k == reflect.Uint ||
+// k == reflect.Uint8 ||
+// k == reflect.Uint16 ||
+// k == reflect.Uint32 ||
+// k == reflect.Uint64 ||
+// k == reflect.Uintptr ||
+// k == reflect.Float32 ||
+// k == reflect.Float64 ||
+// k == reflect.Bool ||
+// k == reflect.String
+// }
+
+// func timeLocUTCName(tzint int16) string {
+// if tzint == 0 {
+// return "UTC"
+// }
+// var tzname = []byte("UTC+00:00")
+// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+// var tzhr, tzmin int16
+// if tzint < 0 {
+// tzname[3] = '-' // (TODO: verify. this works here)
+// tzhr, tzmin = -tzint/60, (-tzint)%60
+// } else {
+// tzhr, tzmin = tzint/60, tzint%60
+// }
+// tzname[4] = timeDigits[tzhr/10]
+// tzname[5] = timeDigits[tzhr%10]
+// tzname[7] = timeDigits[tzmin/10]
+// tzname[8] = timeDigits[tzmin%10]
+// return string(tzname)
+// //return time.FixedZone(string(tzname), int(tzint)*60)
+// }
diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 0000000..0cbd665
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to different environment is easy (just update functions).
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+ if len(v) < 2 {
+ } else if pos && v[0] == 0 {
+ for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+ }
+ } else if !pos && v[0] == 0xff {
+ for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+ }
+ }
+ return
+}
+
+// validate that this function is correct ...
+// culled from OGRE (Object-Oriented Graphics Rendering Engine)
+// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
+func halfFloatToFloatBits(yy uint16) (d uint32) {
+ y := uint32(yy)
+ s := (y >> 15) & 0x01
+ e := (y >> 10) & 0x1f
+ m := y & 0x03ff
+
+ if e == 0 {
+ if m == 0 { // plus or minus 0
+ return s << 31
+ }
+ // Denormalized number -- renormalize it
+ for (m & 0x00000400) == 0 {
+ m <<= 1
+ e -= 1
+ }
+ e += 1
+ const zz uint32 = 0x0400
+ m &= ^zz
+ } else if e == 31 {
+ if m == 0 { // Inf
+ return (s << 31) | 0x7f800000
+ }
+ return (s << 31) | 0x7f800000 | (m << 13) // NaN
+ }
+ e = e + (127 - 15)
+ m = m << 13
+ return (s << 31) | (e << 23) | m
+}
+
+// growCap will return a new capacity for a slice, given the following:
+// - oldCap: current capacity
+// - unit: in-memory size of an element
+// - num: number of elements to add
+func growCap(oldCap, unit, num int) (newCap int) {
+ // appendslice logic (if cap < 1024, *2, else *1.25):
+ // leads to many copy calls, especially when copying bytes.
+ // bytes.Buffer model (2*cap + n): much better for bytes.
+ // smarter way is to take the byte-size of the appended element(type) into account
+
+ // maintain 3 thresholds:
+ // t1: if cap <= t1, newcap = 2x
+ // t2: if cap <= t2, newcap = 1.75x
+ // t3: if cap <= t3, newcap = 1.5x
+ // else newcap = 1.25x
+ //
+ // t1, t2, t3 >= 1024 always.
+ // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
+ //
+ // With this, appending for bytes increase by:
+ // 100% up to 4K
+ // 75% up to 8K
+ // 50% up to 16K
+ // 25% beyond that
+
+ // unit can be 0 e.g. for struct{}{}; handle that appropriately
+ var t1, t2, t3 int // thresholds
+ if unit <= 1 {
+ t1, t2, t3 = 4*1024, 8*1024, 16*1024
+ } else if unit < 16 {
+ t3 = 16 / unit * 1024
+ t1 = t3 * 1 / 4
+ t2 = t3 * 2 / 4
+ } else {
+ t1, t2, t3 = 1024, 1024, 1024
+ }
+
+ var x int // temporary variable
+
+ // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+ if oldCap <= t1 { // [0,t1]
+ x = 8
+ } else if oldCap > t3 { // (t3,infinity]
+ x = 5
+ } else if oldCap <= t2 { // (t1,t2]
+ x = 7
+ } else { // (t2,t3]
+ x = 6
+ }
+ newCap = x * oldCap / 4
+
+ if num > 0 {
+ newCap += num
+ }
+
+ // ensure newCap is a multiple of 64 (if it is > 64) or 16.
+ if newCap > 64 {
+ if x = newCap % 64; x != 0 {
+ x = newCap / 64
+ newCap = 64 * (x + 1)
+ }
+ } else {
+ if x = newCap % 16; x != 0 {
+ x = newCap / 16
+ newCap = 16 * (x + 1)
+ }
+ }
+ return
+}
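+
+// Worked examples of the thresholds above (unit=1, i.e. a byte slice):
+// growCap(4096, 1, 0) hits the t1 bucket, so x=8 and newCap = 8*4096/4 = 8192.
+// growCap(10000, 1, 3) falls in (t2,t3], so x=6 and newCap = 6*10000/4 + 3 = 15003,
+// which is then rounded up to the next multiple of 64, i.e. 15040.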
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 0000000..fd52690
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,272 @@
+// +build !go1.7 safe appengine
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "reflect"
+ "sync/atomic"
+ "time"
+)
+
+const safeMode = true
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+//
+// Usage: Always maintain a reference to v while result of this call is in use,
+// and call keepAlive4BytesView(v) at point where done with view.
+func stringView(v []byte) string {
+ return string(v)
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+//
+// Usage: Always maintain a reference to v while result of this call is in use,
+// and call keepAlive4BytesView(v) at point where done with view.
+func bytesView(v string) []byte {
+ return []byte(v)
+}
+
+func definitelyNil(v interface{}) bool {
+ // this is a best-effort option.
+ // We just return false, so we don't unnecessarily incur the cost of reflection this early.
+ return false
+}
+
+func rv2i(rv reflect.Value) interface{} {
+ return rv.Interface()
+}
+
+func rt2id(rt reflect.Type) uintptr {
+ return reflect.ValueOf(rt).Pointer()
+}
+
+func rv2rtid(rv reflect.Value) uintptr {
+ return reflect.ValueOf(rv.Type()).Pointer()
+}
+
+func i2rtid(i interface{}) uintptr {
+ return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
+}
+
+// --------------------------
+
+func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
+ switch v.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if deref {
+ if v.IsNil() {
+ return true
+ }
+ return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
+ }
+ return v.IsNil()
+ case reflect.Struct:
+ return isEmptyStruct(v, tinfos, deref, checkStruct)
+ }
+ return false
+}
+
+// --------------------------
+// type ptrToRvMap struct{}
+
+// func (*ptrToRvMap) init() {}
+// func (*ptrToRvMap) get(i interface{}) reflect.Value {
+// return reflect.ValueOf(i).Elem()
+// }
+
+// --------------------------
+type atomicTypeInfoSlice struct { // expected to be 2 words
+ v atomic.Value
+}
+
+func (x *atomicTypeInfoSlice) load() []rtid2ti {
+ i := x.v.Load()
+ if i == nil {
+ return nil
+ }
+ return i.([]rtid2ti)
+}
+
+func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
+ x.v.Store(p)
+}
+
+// --------------------------
+func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+ rv.SetBytes(d.rawBytes())
+}
+
+func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
+ rv.SetString(d.d.DecodeString())
+}
+
+func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
+ rv.SetBool(d.d.DecodeBool())
+}
+
+func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
+ rv.Set(reflect.ValueOf(d.d.DecodeTime()))
+}
+
+func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+ fv := d.d.DecodeFloat64()
+ if chkOvf.Float32(fv) {
+ d.errorf("float32 overflow: %v", fv)
+ }
+ rv.SetFloat(fv)
+}
+
+func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+ rv.SetFloat(d.d.DecodeFloat64())
+}
+
+func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
+ rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+}
+
+func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+ rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8))
+}
+
+func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+ rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16))
+}
+
+func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+ rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32))
+}
+
+func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+ rv.SetInt(d.d.DecodeInt64())
+}
+
+func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
+ rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+ rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+ rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8))
+}
+
+func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+ rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16))
+}
+
+func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+ rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32))
+}
+
+func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+ rv.SetUint(d.d.DecodeUint64())
+}
+
+// ----------------
+
+func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeBool(rv.Bool())
+}
+
+func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeTime(rv2i(rv).(time.Time))
+}
+
+func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeString(cUTF8, rv.String())
+}
+
+func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeFloat64(rv.Float())
+}
+
+func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeFloat32(float32(rv.Float()))
+}
+
+func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeInt(rv.Int())
+}
+
+func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeUint(rv.Uint())
+}
+
+func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+ e.e.EncodeUint(rv.Uint())
+}
+
+// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
+// //
+// // Usage: call this at point where done with the bytes view.
+// func keepAlive4BytesView(v string) {}
+
+// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
+// //
+// // Usage: call this at point where done with the string view.
+// func keepAlive4StringView(v []byte) {}
+
+// func definitelyNil(v interface{}) bool {
+// rv := reflect.ValueOf(v)
+// switch rv.Kind() {
+// case reflect.Invalid:
+// return true
+// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
+// return rv.IsNil()
+// default:
+// return false
+// }
+// }
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 0000000..e3df60a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,639 @@
+// +build !safe
+// +build !appengine
+// +build go1.7
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "reflect"
+ "sync/atomic"
+ "time"
+ "unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+// NOTE: See helper_not_unsafe.go for the usage information.
+
+// var zeroRTv [4]uintptr
+
+const safeMode = false
+const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
+
+type unsafeString struct {
+ Data unsafe.Pointer
+ Len int
+}
+
+type unsafeSlice struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+}
+
+type unsafeIntf struct {
+ typ unsafe.Pointer
+ word unsafe.Pointer
+}
+
+type unsafeReflectValue struct {
+ typ unsafe.Pointer
+ ptr unsafe.Pointer
+ flag uintptr
+}
+
+func stringView(v []byte) string {
+ if len(v) == 0 {
+ return ""
+ }
+ bx := (*unsafeSlice)(unsafe.Pointer(&v))
+ return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len}))
+}
+
+func bytesView(v string) []byte {
+ if len(v) == 0 {
+ return zeroByteSlice
+ }
+ sx := (*unsafeString)(unsafe.Pointer(&v))
+ return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len}))
+}
+
+func definitelyNil(v interface{}) bool {
+ // There is no global way of checking if an interface is nil.
+ // For true references (map, ptr, func, chan), you can just look
+ // at the word of the interface. However, for slices, you have to dereference
+ // the word, and get a pointer to the 3-word interface value.
+ //
+ // However, the following are cheap calls
+ // - TypeOf(interface): cheap 2-line call.
+ // - ValueOf(interface{}): expensive
+ // - type.Kind: cheap call through an interface
+ // - Value.Type(): cheap call
+ // except when it's a method value (e.g. r.Read, which implies that it is a Func)
+
+ return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
+}
+
+func rv2i(rv reflect.Value) interface{} {
+ // TODO: consider a more generally-known optimization for reflect.Value ==> Interface
+ //
+ // Currently, we use this fragile method that taps into implementation details from
+ // the source go stdlib reflect/value.go, and trims the implementation.
+
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
+ var ptr unsafe.Pointer
+ if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(urv.ptr)
+ } else {
+ ptr = urv.ptr
+ }
+ return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
+}
+
+func rt2id(rt reflect.Type) uintptr {
+ return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
+}
+
+func rv2rtid(rv reflect.Value) uintptr {
+ return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
+}
+
+func i2rtid(i interface{}) uintptr {
+ return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
+}
+
+// --------------------------
+
+func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+ if urv.flag == 0 {
+ return true
+ }
+ switch v.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.String:
+ return (*unsafeString)(urv.ptr).Len == 0
+ case reflect.Slice:
+ return (*unsafeSlice)(urv.ptr).Len == 0
+ case reflect.Bool:
+ return !*(*bool)(urv.ptr)
+ case reflect.Int:
+ return *(*int)(urv.ptr) == 0
+ case reflect.Int8:
+ return *(*int8)(urv.ptr) == 0
+ case reflect.Int16:
+ return *(*int16)(urv.ptr) == 0
+ case reflect.Int32:
+ return *(*int32)(urv.ptr) == 0
+ case reflect.Int64:
+ return *(*int64)(urv.ptr) == 0
+ case reflect.Uint:
+ return *(*uint)(urv.ptr) == 0
+ case reflect.Uint8:
+ return *(*uint8)(urv.ptr) == 0
+ case reflect.Uint16:
+ return *(*uint16)(urv.ptr) == 0
+ case reflect.Uint32:
+ return *(*uint32)(urv.ptr) == 0
+ case reflect.Uint64:
+ return *(*uint64)(urv.ptr) == 0
+ case reflect.Uintptr:
+ return *(*uintptr)(urv.ptr) == 0
+ case reflect.Float32:
+ return *(*float32)(urv.ptr) == 0
+ case reflect.Float64:
+ return *(*float64)(urv.ptr) == 0
+ case reflect.Interface:
+ isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
+ if deref {
+ if isnil {
+ return true
+ }
+ return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
+ }
+ return isnil
+ case reflect.Ptr:
+ // isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type)
+ isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
+ if deref {
+ if isnil {
+ return true
+ }
+ return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
+ }
+ return isnil
+ case reflect.Struct:
+ return isEmptyStruct(v, tinfos, deref, checkStruct)
+ case reflect.Map, reflect.Array, reflect.Chan:
+ return v.Len() == 0
+ }
+ return false
+}
+
+// --------------------------
+
+// atomicTypeInfoSlice contains length and pointer to the array for a slice.
+// It is expected to be 2 words.
+//
+// Previously, we atomically loaded and stored the length and array pointer separately,
+// which could lead to some races.
+// We now just atomically store and load the pointer to the value directly.
+
+type atomicTypeInfoSlice struct { // expected to be 2 words
+ l int // length of the data array (must be first in struct, for 64-bit alignment necessary for 386)
+ v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference
+}
+
+func (x *atomicTypeInfoSlice) load() []rtid2ti {
+ xp := unsafe.Pointer(x)
+ x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp))
+ if x2.l == 0 {
+ return nil
+ }
+ return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l}))
+}
+
+func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
+ s := (*unsafeSlice)(unsafe.Pointer(&p))
+ xp := unsafe.Pointer(x)
+ atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data}))
+}
+
+// --------------------------
+func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*[]byte)(urv.ptr) = d.rawBytes()
+}
+
+func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*string)(urv.ptr) = d.d.DecodeString()
+}
+
+func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*bool)(urv.ptr) = d.d.DecodeBool()
+}
+
+func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*time.Time)(urv.ptr) = d.d.DecodeTime()
+}
+
+func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+ fv := d.d.DecodeFloat64()
+ if chkOvf.Float32(fv) {
+ d.errorf("float32 overflow: %v", fv)
+ }
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*float32)(urv.ptr) = float32(fv)
+}
+
+func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*float64)(urv.ptr) = d.d.DecodeFloat64()
+}
+
+func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+}
+
+func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
+}
+
+func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
+}
+
+func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+}
+
+func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*int64)(urv.ptr) = d.d.DecodeInt64()
+}
+
+func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+}
+
+func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+}
+
+func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
+}
+
+func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
+}
+
+func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ *(*uint64)(urv.ptr) = d.d.DecodeUint64()
+}
+
+// ------------
+
+func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeBool(*(*bool)(v.ptr))
+}
+
+func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeTime(*(*time.Time)(v.ptr))
+}
+
+func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeString(cUTF8, *(*string)(v.ptr))
+}
+
+func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeFloat64(*(*float64)(v.ptr))
+}
+
+func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeFloat32(*(*float32)(v.ptr))
+}
+
+func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeInt(int64(*(*int)(v.ptr)))
+}
+
+func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeInt(int64(*(*int8)(v.ptr)))
+}
+
+func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeInt(int64(*(*int16)(v.ptr)))
+}
+
+func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeInt(int64(*(*int32)(v.ptr)))
+}
+
+func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeInt(int64(*(*int64)(v.ptr)))
+}
+
+func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeUint(uint64(*(*uint)(v.ptr)))
+}
+
+func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeUint(uint64(*(*uint8)(v.ptr)))
+}
+
+func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeUint(uint64(*(*uint16)(v.ptr)))
+}
+
+func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeUint(uint64(*(*uint32)(v.ptr)))
+}
+
+func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeUint(uint64(*(*uint64)(v.ptr)))
+}
+
+func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+ v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ e.e.EncodeUint(uint64(*(*uintptr)(v.ptr)))
+}
+
+// ------------
+
+// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// // if urv.flag&unsafeFlagIndir != 0 {
+// // urv.ptr = *(*unsafe.Pointer)(urv.ptr)
+// // }
+// *(*[]byte)(urv.ptr) = d.rawBytes()
+// }
+
+// func rv0t(rt reflect.Type) reflect.Value {
+// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
+// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
+// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
+// return *(*reflect.Value)(unsafe.Pointer(&uv))
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
+// var ptr unsafe.Pointer
+// // kk := reflect.Kind(urv.flag & (1<<5 - 1))
+// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 {
+// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
+// ptr = *(*unsafe.Pointer)(urv.ptr)
+// } else {
+// ptr = urv.ptr
+// }
+// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
+// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// }
+
+// func definitelyNil(v interface{}) bool {
+// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v))
+// if ui.word == nil {
+// return true
+// }
+// var tk = reflect.TypeOf(v).Kind()
+// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil
+// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n",
+// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
+// }
+
+// func keepAlive4BytesView(v string) {
+// runtime.KeepAlive(v)
+// }
+
+// func keepAlive4StringView(v []byte) {
+// runtime.KeepAlive(v)
+// }
+
+// func rt2id(rt reflect.Type) uintptr {
+// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
+// // var i interface{} = rt
+// // // ui := (*unsafeIntf)(unsafe.Pointer(&i))
+// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// // non-reference type: already indir
+// // reference type: depend on flagIndir property ('cos maybe was double-referenced)
+// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 )
+// // rvk := reflect.Kind(urv.flag & (1<<5 - 1))
+// // if (rvk == reflect.Chan ||
+// // rvk == reflect.Func ||
+// // rvk == reflect.Interface ||
+// // rvk == reflect.Map ||
+// // rvk == reflect.Ptr ||
+// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 {
+// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
+// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// // }
+// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 {
+// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
+// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// }
+// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type())
+// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// }
+
+// const (
+// unsafeRvFlagKindMask = 1<<5 - 1
+// unsafeRvKindDirectIface = 1 << 5
+// unsafeRvFlagIndir = 1 << 7
+// unsafeRvFlagAddr = 1 << 8
+// unsafeRvFlagMethod = 1 << 9
+
+// _USE_RV_INTERFACE bool = false
+// _UNSAFE_RV_DEBUG = true
+// )
+
+// type unsafeRtype struct {
+// _ [2]uintptr
+// _ uint32
+// _ uint8
+// _ uint8
+// _ uint8
+// kind uint8
+// _ [2]uintptr
+// _ int32
+// }
+
+// func _rv2i(rv reflect.Value) interface{} {
+// // Note: From use,
+// // - it's never an interface
+// // - the only calls here are for ifaceIndir types.
+// // (though that conditional is wrong)
+// // To know for sure, we need the value of t.kind (which is not exposed).
+// //
+// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct)
+// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string
+// // - Type Direct, Value indirect: ==> map???
+// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map
+// //
+// // TRANSLATES TO:
+// // if typeIndirect { } else if valueIndirect { } else { }
+// //
+// // Since we don't deal with funcs, then "flagMethod" is unset, and can be ignored.
+
+// if _USE_RV_INTERFACE {
+// return rv.Interface()
+// }
+// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+
+// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
+// // println("***** IS flag method or interface: delegating to rv.Interface()")
+// // return rv.Interface()
+// // }
+
+// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
+// // println("***** IS Interface: delegate to rv.Interface")
+// // return rv.Interface()
+// // }
+// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 {
+// // if urv.flag&unsafeRvFlagAddr == 0 {
+// // println("***** IS ifaceIndir typ")
+// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ}
+// // // return *(*interface{})(unsafe.Pointer(&ui))
+// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// // }
+// // } else if urv.flag&unsafeRvFlagIndir != 0 {
+// // println("***** IS flagindir")
+// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// // } else {
+// // println("***** NOT flagindir")
+// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// // }
+// // println("***** default: delegate to rv.Interface")
+
+// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ))
+// if _UNSAFE_RV_DEBUG {
+// fmt.Printf(">>>> start: %v: ", rv.Type())
+// fmt.Printf("%v - %v\n", *urv, *urt)
+// }
+// if urt.kind&unsafeRvKindDirectIface == 0 {
+// if _UNSAFE_RV_DEBUG {
+// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type())
+// }
+// // println("***** IS ifaceIndir typ")
+// // if true || urv.flag&unsafeRvFlagAddr == 0 {
+// // // println(" ***** IS NOT addr")
+// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// // }
+// } else if urv.flag&unsafeRvFlagIndir != 0 {
+// if _UNSAFE_RV_DEBUG {
+// fmt.Printf("**** +flagIndir type: %v\n", rv.Type())
+// }
+// // println("***** IS flagindir")
+// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
+// } else {
+// if _UNSAFE_RV_DEBUG {
+// fmt.Printf("**** -flagIndir type: %v\n", rv.Type())
+// }
+// // println("***** NOT flagindir")
+// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
+// }
+// // println("***** default: delegating to rv.Interface()")
+// // return rv.Interface()
+// }
+
+// var staticM0 = make(map[string]uint64)
+// var staticI0 = (int32)(-5)
+
+// func staticRv2iTest() {
+// i0 := (int32)(-5)
+// m0 := make(map[string]uint16)
+// m0["1"] = 1
+// for _, i := range []interface{}{
+// (int)(7),
+// (uint)(8),
+// (int16)(-9),
+// (uint16)(19),
+// (uintptr)(77),
+// (bool)(true),
+// float32(-32.7),
+// float64(64.9),
+// complex(float32(19), 5),
+// complex(float64(-32), 7),
+// [4]uint64{1, 2, 3, 4},
+// (chan<- int)(nil), // chan,
+// rv2i, // func
+// io.Writer(ioutil.Discard),
+// make(map[string]uint),
+// (map[string]uint)(nil),
+// staticM0,
+// m0,
+// &m0,
+// i0,
+// &i0,
+// &staticI0,
+// &staticM0,
+// []uint32{6, 7, 8},
+// "abc",
+// Raw{},
+// RawExt{},
+// &Raw{},
+// &RawExt{},
+// unsafe.Pointer(&i0),
+// } {
+// i2 := rv2i(reflect.ValueOf(i))
+// eq := reflect.DeepEqual(i, i2)
+// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
+// }
+// // os.Exit(0)
+// }
+
+// func init() {
+// staticRv2iTest()
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
+// return rv.Interface()
+// }
+// // var i interface{}
+// // ui := (*unsafeIntf)(unsafe.Pointer(&i))
+// var ui unsafeIntf
+// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
+// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
+// if urv.flag&unsafeRvFlagAddr != 0 {
+// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
+// return rv.Interface()
+// }
+// println("****** indirect type/kind")
+// ui.word = urv.ptr
+// } else if urv.flag&unsafeRvFlagIndir != 0 {
+// println("****** unsafe rv flag indir")
+// ui.word = *(*unsafe.Pointer)(urv.ptr)
+// } else {
+// println("****** default: assign prt to word directly")
+// ui.word = urv.ptr
+// }
+// // ui.word = urv.ptr
+// ui.typ = urv.typ
+// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
+// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
+// return *(*interface{})(unsafe.Pointer(&ui))
+// // return i
+// }
diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 0000000..bdd1996
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1423 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+// We implement it here.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST not call one another.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+//--------------------------------
+
+var jsonLiterals = [...]byte{
+ '"', 't', 'r', 'u', 'e', '"',
+ '"', 'f', 'a', 'l', 's', 'e', '"',
+ '"', 'n', 'u', 'l', 'l', '"',
+}
+
+const (
+ jsonLitTrueQ = 0
+ jsonLitTrue = 1
+ jsonLitFalseQ = 6
+ jsonLitFalse = 7
+ jsonLitNullQ = 13
+ jsonLitNull = 14
+)
+
+const (
+ jsonU4Chk2 = '0'
+ jsonU4Chk1 = 'a' - 10
+ jsonU4Chk0 = 'A' - 10
+
+ jsonScratchArrayLen = 64
+)
+
+const (
+ // If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+ // - If we see first character of null, false or true,
+ // do not validate subsequent characters.
+ // - e.g. if we see a n, assume null and skip next 3 characters,
+ // and do not validate they are ull.
+ // P.S. Do not expect a significant decoding boost from this.
+ jsonValidateSymbols = true
+
+ jsonSpacesOrTabsLen = 128
+
+ jsonAlwaysReturnInternString = false
+)
+
+var (
+ // jsonTabs and jsonSpaces are used as caches for indents
+ jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte
+
+ jsonCharHtmlSafeSet bitset128
+ jsonCharSafeSet bitset128
+ jsonCharWhitespaceSet bitset256
+ jsonNumSet bitset256
+)
+
+func init() {
+ for i := 0; i < jsonSpacesOrTabsLen; i++ {
+ jsonSpaces[i] = ' '
+ jsonTabs[i] = '\t'
+ }
+
+ // populate the safe values as true: note: ASCII control characters are (0-31)
+ // jsonCharSafeSet: all true except (0-31) " \
+ // jsonCharHtmlSafeSet: all true except (0-31) " \ < > &
+ var i byte
+ for i = 32; i < utf8.RuneSelf; i++ {
+ switch i {
+ case '"', '\\':
+ case '<', '>', '&':
+ jsonCharSafeSet.set(i) // = true
+ default:
+ jsonCharSafeSet.set(i)
+ jsonCharHtmlSafeSet.set(i)
+ }
+ }
+ for i = 0; i <= utf8.RuneSelf; i++ {
+ switch i {
+ case ' ', '\t', '\r', '\n':
+ jsonCharWhitespaceSet.set(i)
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-':
+ jsonNumSet.set(i)
+ }
+ }
+}
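+
+// For illustration, with the sets built above (e is a *jsonEncDriver; see quoteStr further below):
+//
+//	e.EncodeString(cUTF8, "a<b") // writes "a\u003cb" by default
+//	                             // writes "a<b" only when JsonHandle.HTMLCharsAsIs is set
+//
+// '"' and '\\' are in neither set, so they are always escaped.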
+
+// ----------------
+
+type jsonEncDriverTypical struct {
+ w encWriter
+ // w *encWriterSwitch
+ b *[jsonScratchArrayLen]byte
+ tw bool // term white space
+ c containerState
+}
+
+func (e *jsonEncDriverTypical) typical() {}
+
+func (e *jsonEncDriverTypical) reset(ee *jsonEncDriver) {
+ e.w = ee.ew
+ // e.w = &ee.e.encWriterSwitch
+ e.b = &ee.b
+ e.tw = ee.h.TermWhitespace
+ e.c = 0
+}
+
+func (e *jsonEncDriverTypical) WriteArrayStart(length int) {
+ e.w.writen1('[')
+ e.c = containerArrayStart
+}
+
+func (e *jsonEncDriverTypical) WriteArrayElem() {
+ if e.c != containerArrayStart {
+ e.w.writen1(',')
+ }
+ e.c = containerArrayElem
+}
+
+func (e *jsonEncDriverTypical) WriteArrayEnd() {
+ e.w.writen1(']')
+ e.c = containerArrayEnd
+}
+
+func (e *jsonEncDriverTypical) WriteMapStart(length int) {
+ e.w.writen1('{')
+ e.c = containerMapStart
+}
+
+func (e *jsonEncDriverTypical) WriteMapElemKey() {
+ if e.c != containerMapStart {
+ e.w.writen1(',')
+ }
+ e.c = containerMapKey
+}
+
+func (e *jsonEncDriverTypical) WriteMapElemValue() {
+ e.w.writen1(':')
+ e.c = containerMapValue
+}
+
+func (e *jsonEncDriverTypical) WriteMapEnd() {
+ e.w.writen1('}')
+ e.c = containerMapEnd
+}
+
+func (e *jsonEncDriverTypical) EncodeBool(b bool) {
+ if b {
+ e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4])
+ } else {
+ e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5])
+ }
+}
+
+func (e *jsonEncDriverTypical) EncodeFloat64(f float64) {
+ fmt, prec := jsonFloatStrconvFmtPrec(f)
+ e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64))
+}
+
+func (e *jsonEncDriverTypical) EncodeInt(v int64) {
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverTypical) EncodeUint(v uint64) {
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverTypical) EncodeFloat32(f float32) {
+ e.EncodeFloat64(float64(f))
+}
+
+func (e *jsonEncDriverTypical) atEndOfEncode() {
+ if e.tw {
+ e.w.writen1(' ')
+ }
+}
+
+// ----------------
+
+type jsonEncDriverGeneric struct {
+ w encWriter // encWriter // *encWriterSwitch
+ b *[jsonScratchArrayLen]byte
+ c containerState
+ // ds string // indent string
+ di int8 // indent per
+ d bool // indenting?
+ dt bool // indent using tabs
+ dl uint16 // indent level
+ ks bool // map key as string
+ is byte // integer as string
+ tw bool // term white space
+ _ [7]byte // padding
+}
+
+// indent is done as below:
+// - newline and indent are added before each mapKey or arrayElem
+// - newline and indent are added before each ending,
+//   except when there was no entry (so we can have {} or [])
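+
+// For example (a sketch, with Indent set to 2 spaces), a one-entry map encodes as:
+//
+//	{
+//	  "a": 1
+//	}
+//
+// while an empty map still encodes as {} since no element was written.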
+
+func (e *jsonEncDriverGeneric) reset(ee *jsonEncDriver) {
+ e.w = ee.ew
+ e.b = &ee.b
+ e.tw = ee.h.TermWhitespace
+ e.c = 0
+ e.d, e.dt, e.dl, e.di = false, false, 0, 0
+ h := ee.h
+ if h.Indent > 0 {
+ e.d = true
+ e.di = int8(h.Indent)
+ } else if h.Indent < 0 {
+ e.d = true
+ e.dt = true
+ e.di = int8(-h.Indent)
+ }
+ e.ks = h.MapKeyAsString
+ e.is = h.IntegerAsString
+}
+
+func (e *jsonEncDriverGeneric) WriteArrayStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('[')
+ e.c = containerArrayStart
+}
+
+func (e *jsonEncDriverGeneric) WriteArrayElem() {
+ if e.c != containerArrayStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ e.c = containerArrayElem
+}
+
+func (e *jsonEncDriverGeneric) WriteArrayEnd() {
+ if e.d {
+ e.dl--
+ if e.c != containerArrayStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1(']')
+ e.c = containerArrayEnd
+}
+
+func (e *jsonEncDriverGeneric) WriteMapStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.w.writen1('{')
+ e.c = containerMapStart
+}
+
+func (e *jsonEncDriverGeneric) WriteMapElemKey() {
+ if e.c != containerMapStart {
+ e.w.writen1(',')
+ }
+ if e.d {
+ e.writeIndent()
+ }
+ e.c = containerMapKey
+}
+
+func (e *jsonEncDriverGeneric) WriteMapElemValue() {
+ if e.d {
+ e.w.writen2(':', ' ')
+ } else {
+ e.w.writen1(':')
+ }
+ e.c = containerMapValue
+}
+
+func (e *jsonEncDriverGeneric) WriteMapEnd() {
+ if e.d {
+ e.dl--
+ if e.c != containerMapStart {
+ e.writeIndent()
+ }
+ }
+ e.w.writen1('}')
+ e.c = containerMapEnd
+}
+
+func (e *jsonEncDriverGeneric) writeIndent() {
+ e.w.writen1('\n')
+ x := int(e.di) * int(e.dl)
+ if e.dt {
+ for x > jsonSpacesOrTabsLen {
+ e.w.writeb(jsonTabs[:])
+ x -= jsonSpacesOrTabsLen
+ }
+ e.w.writeb(jsonTabs[:x])
+ } else {
+ for x > jsonSpacesOrTabsLen {
+ e.w.writeb(jsonSpaces[:])
+ x -= jsonSpacesOrTabsLen
+ }
+ e.w.writeb(jsonSpaces[:x])
+ }
+}
+
+func (e *jsonEncDriverGeneric) EncodeBool(b bool) {
+ if e.ks && e.c == containerMapKey {
+ if b {
+ e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6])
+ } else {
+ e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7])
+ }
+ } else {
+ if b {
+ e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4])
+ } else {
+ e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5])
+ }
+ }
+}
+
+func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) {
+ // instead of using 'g', specify whether to use 'e' or 'f'
+ fmt, prec := jsonFloatStrconvFmtPrec(f)
+
+ var blen int
+ if e.ks && e.c == containerMapKey {
+ blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64))
+ e.b[0] = '"'
+ e.b[blen-1] = '"'
+ } else {
+ blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64))
+ }
+ e.w.writeb(e.b[:blen])
+}
+
+func (e *jsonEncDriverGeneric) EncodeInt(v int64) {
+ x := e.is
+ if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) {
+ blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10))
+ e.b[0] = '"'
+ e.b[blen-1] = '"'
+ e.w.writeb(e.b[:blen])
+ return
+ }
+ e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverGeneric) EncodeUint(v uint64) {
+ x := e.is
+ if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) {
+ blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10))
+ e.b[0] = '"'
+ e.b[blen-1] = '"'
+ e.w.writeb(e.b[:blen])
+ return
+ }
+ e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) {
+ // e.encodeFloat(float64(f), 32)
+ // We always encode all floats as IEEE 64-bit floating point.
+ // This also ensures that we can decode in full precision even into a float32,
+ // as what is written is always at float64 precision.
+ e.EncodeFloat64(float64(f))
+}
+
+func (e *jsonEncDriverGeneric) atEndOfEncode() {
+ if e.tw {
+ if e.d {
+ e.w.writen1('\n')
+ } else {
+ e.w.writen1(' ')
+ }
+ }
+}
+
+// --------------------
+
+type jsonEncDriver struct {
+ noBuiltInTypes
+ e *Encoder
+ h *JsonHandle
+ ew encWriter // encWriter // *encWriterSwitch
+ se extWrapper
+ // ---- cpu cache line boundary?
+ bs []byte // scratch
+ // ---- cpu cache line boundary?
+ b [jsonScratchArrayLen]byte // scratch buffer (used at encode time)
+}
+
+func (e *jsonEncDriver) EncodeNil() {
+ // We always encode nil as just null (never in quotes)
+ // This allows us to easily decode a nil in the json stream,
+ // i.e. if the initial token is n.
+ e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
+
+ // if e.h.MapKeyAsString && e.c == containerMapKey {
+ // e.ew.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6])
+ // } else {
+ // e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
+ // }
+}
+
+func (e *jsonEncDriver) EncodeTime(t time.Time) {
+ // Do NOT use MarshalJSON, as it allocates internally.
+ // instead, we call AppendFormat directly, using our scratch buffer (e.b)
+ if t.IsZero() {
+ e.EncodeNil()
+ } else {
+ e.b[0] = '"'
+ b := t.AppendFormat(e.b[1:1], time.RFC3339Nano)
+ e.b[len(b)+1] = '"'
+ e.ew.writeb(e.b[:len(b)+2])
+ }
+ // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.ew.writeb(v)
+}
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
+ if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(v)
+ }
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+ // only encodes re.Value (never re.Data)
+ if re.Value == nil {
+ e.EncodeNil()
+ } else {
+ en.encode(re.Value)
+ }
+}
+
+func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
+ e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ // if encoding raw bytes and RawBytesExt is configured, use it to encode
+ if v == nil {
+ e.EncodeNil()
+ return
+ }
+ if c == cRAW {
+ if e.se.InterfaceExt != nil {
+ e.EncodeExt(v, 0, &e.se, e.e)
+ return
+ }
+
+ slen := base64.StdEncoding.EncodedLen(len(v))
+ if cap(e.bs) >= slen+2 {
+ e.bs = e.bs[:slen+2]
+ } else {
+ e.bs = make([]byte, slen+2)
+ }
+ e.bs[0] = '"'
+ base64.StdEncoding.Encode(e.bs[1:], v)
+ e.bs[slen+1] = '"'
+ e.ew.writeb(e.bs)
+ } else {
+ e.quoteStr(stringView(v))
+ }
+}
+
+func (e *jsonEncDriver) EncodeAsis(v []byte) {
+ e.ew.writeb(v)
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+ // adapted from std pkg encoding/json
+ const hex = "0123456789abcdef"
+ w := e.ew
+ htmlasis := e.h.HTMLCharsAsIs
+ w.writen1('"')
+ var start int
+ for i, slen := 0, len(s); i < slen; {
+ // encode all bytes < 0x20 (except \r, \n).
+ // also encode < > & to prevent security holes when served to some browsers.
+ if b := s[i]; b < utf8.RuneSelf {
+ // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) {
+ if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) {
+ i++
+ continue
+ }
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ w.writen2('\\', b)
+ case '\n':
+ w.writen2('\\', 'n')
+ case '\r':
+ w.writen2('\\', 'r')
+ case '\b':
+ w.writen2('\\', 'b')
+ case '\f':
+ w.writen2('\\', 'f')
+ case '\t':
+ w.writen2('\\', 't')
+ default:
+ w.writestr(`\u00`)
+ w.writen2(hex[b>>4], hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+ // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ w.writestr(s[start:i])
+ }
+ w.writestr(`\u202`)
+ w.writen1(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ w.writestr(s[start:])
+ }
+ w.writen1('"')
+}
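+
+// A small worked example of the escaping above, with default settings:
+//
+//	quoteStr("a\"b<c\n") // writes: "a\"b\u003cc\n"
+//
+// i.e. the quote and newline are backslash-escaped, and '<' becomes \u003c
+// (it is written as-is only when HTMLCharsAsIs is set).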
+
+type jsonDecDriver struct {
+ noBuiltInTypes
+ d *Decoder
+ h *JsonHandle
+ r decReader // *decReaderSwitch // decReader
+ se extWrapper
+
+ // ---- writable fields during execution --- *try* to keep in sep cache line
+
+ c containerState
+ // tok is used to store the token read right after skipWhiteSpace.
+ tok uint8
+ fnull bool // found null from appendStringAsBytes
+ bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
+ bstr [8]byte // scratch used for string \UXXX parsing
+ // ---- cpu cache line boundary?
+ b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time
+ b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes
+
+ _ [3]uint64 // padding
+ // n jsonNum
+}
+
+// func jsonIsWS(b byte) bool {
+// // return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+// return jsonCharWhitespaceSet.isset(b)
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+ if d.tok != 0 {
+ d.r.unreadn1()
+ d.tok = 0
+ }
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ const xc uint8 = '{'
+ if d.tok != xc {
+ d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapStart
+ return -1
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ const xc uint8 = '['
+ if d.tok != xc {
+ d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ d.c = containerArrayStart
+ return -1
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ return d.tok == '}' || d.tok == ']'
+}
+
+// For the ReadXXX methods below, we could just delegate to helper functions
+// readContainerState(c containerState, xc uint8, check bool)
+// - ReadArrayElem would become:
+// readContainerState(containerArrayElem, ',', d.c != containerArrayStart)
+//
+// However, until mid-stack inlining arrives in go1.11, which supports inlining of
+// one-liners, we explicitly write all 5 of them out to elide the extra func call.
+//
+// TODO: For Go 1.11, if inlined, consider consolidating these.
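+
+// A sketch of that consolidated helper (commented out; the expanded methods below
+// are what is actually used, and their error text is more specific):
+//
+// func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) {
+//	if d.tok == 0 {
+//		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+//	}
+//	if check {
+//		if d.tok != xc {
+//			d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+//		}
+//		d.tok = 0
+//	}
+//	d.c = c
+// }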
+
+func (d *jsonDecDriver) ReadArrayElem() {
+ const xc uint8 = ','
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ if d.c != containerArrayStart {
+ if d.tok != xc {
+ d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ }
+ d.c = containerArrayElem
+}
+
+func (d *jsonDecDriver) ReadArrayEnd() {
+ const xc uint8 = ']'
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ if d.tok != xc {
+ d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ d.c = containerArrayEnd
+}
+
+func (d *jsonDecDriver) ReadMapElemKey() {
+ const xc uint8 = ','
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ if d.c != containerMapStart {
+ if d.tok != xc {
+ d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ }
+ d.c = containerMapKey
+}
+
+func (d *jsonDecDriver) ReadMapElemValue() {
+ const xc uint8 = ':'
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ if d.tok != xc {
+ d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapValue
+}
+
+func (d *jsonDecDriver) ReadMapEnd() {
+ const xc uint8 = '}'
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ if d.tok != xc {
+ d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok)
+ }
+ d.tok = 0
+ d.c = containerMapEnd
+}
+
+func (d *jsonDecDriver) readLit(length, fromIdx uint8) {
+ bs := d.r.readx(int(length))
+ d.tok = 0
+ if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) {
+ d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs)
+ return
+ }
+}
+
+func (d *jsonDecDriver) TryDecodeAsNil() bool {
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ // we shouldn't try to see if a quoted "null" was here, right?
+ // only the bare literal `null` denotes a nil (i.e. not in quotes)
+ if d.tok == 'n' {
+ d.readLit(3, jsonLitNull+1) // (n)ull
+ return true
+ }
+ return false
+}
+
+func (d *jsonDecDriver) DecodeBool() (v bool) {
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ fquot := d.c == containerMapKey && d.tok == '"'
+ if fquot {
+ d.tok = d.r.readn1()
+ }
+ switch d.tok {
+ case 'f':
+ d.readLit(4, jsonLitFalse+1) // (f)alse
+ // v = false
+ case 't':
+ d.readLit(3, jsonLitTrue+1) // (t)rue
+ v = true
+ default:
+ d.d.errorf("decode bool: got first char %c", d.tok)
+ // v = false // "unreachable"
+ }
+ if fquot {
+ d.r.readn1()
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeTime() (t time.Time) {
+ // read string, and pass the string into json.unmarshal
+ d.appendStringAsBytes()
+ if d.fnull {
+ return
+ }
+ t, err := time.Parse(time.RFC3339, stringView(d.bs))
+ if err != nil {
+ d.d.errorv(err)
+ }
+ return
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+ // check container type by checking the first char
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+
+ // optimize this, so we don't do 4 checks but do one computation.
+ // return jsonContainerSet[d.tok]
+
+ // ContainerType is mostly called for Map and Array,
+ // so this conditional is good enough (max 2 checks typically)
+ if b := d.tok; b == '{' {
+ return valueTypeMap
+ } else if b == '[' {
+ return valueTypeArray
+ } else if b == 'n' {
+ return valueTypeNil
+ } else if b == '"' {
+ return valueTypeString
+ }
+ return valueTypeUnset
+}
+
+func (d *jsonDecDriver) decNumBytes() (bs []byte) {
+ // stores num bytes in d.bs
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ if d.tok == '"' {
+ bs = d.r.readUntil(d.b2[:0], '"')
+ bs = bs[:len(bs)-1]
+ } else {
+ d.r.unreadn1()
+ bs = d.r.readTo(d.bs[:0], &jsonNumSet)
+ }
+ d.tok = 0
+ return bs
+}
+
+func (d *jsonDecDriver) DecodeUint64() (u uint64) {
+ bs := d.decNumBytes()
+ n, neg, badsyntax, overflow := jsonParseInteger(bs)
+ if overflow {
+ d.d.errorf("overflow parsing unsigned integer: %s", bs)
+ } else if neg {
+ d.d.errorf("minus found parsing unsigned integer: %s", bs)
+ } else if badsyntax {
+ // fallback: try to decode as float, and cast
+ n = d.decUint64ViaFloat(stringView(bs))
+ }
+ return n
+}
+
+func (d *jsonDecDriver) DecodeInt64() (i int64) {
+ const cutoff = uint64(1 << uint(64-1))
+ bs := d.decNumBytes()
+ n, neg, badsyntax, overflow := jsonParseInteger(bs)
+ if overflow {
+ d.d.errorf("overflow parsing integer: %s", bs)
+ } else if badsyntax {
+ // d.d.errorf("invalid syntax for integer: %s", bs)
+ // fallback: try to decode as float, and cast
+ if neg {
+ n = d.decUint64ViaFloat(stringView(bs[1:]))
+ } else {
+ n = d.decUint64ViaFloat(stringView(bs))
+ }
+ }
+ if neg {
+ if n > cutoff {
+ d.d.errorf("overflow parsing integer: %s", bs)
+ }
+ i = -(int64(n))
+ } else {
+ if n >= cutoff {
+ d.d.errorf("overflow parsing integer: %s", bs)
+ }
+ i = int64(n)
+ }
+ return
+}
+
+func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) {
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ d.d.errorf("invalid syntax for integer: %s", s)
+ // d.d.errorv(err)
+ }
+ fi, ff := math.Modf(f)
+ if ff > 0 {
+ d.d.errorf("fractional part found parsing integer: %s", s)
+ } else if fi > float64(math.MaxUint64) {
+ d.d.errorf("overflow parsing integer: %s", s)
+ }
+ return uint64(fi)
+}
+
+func (d *jsonDecDriver) DecodeFloat64() (f float64) {
+ bs := d.decNumBytes()
+ f, err := strconv.ParseFloat(stringView(bs), 64)
+ if err != nil {
+ d.d.errorv(err)
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = xtag
+ d.d.decode(&re.Value)
+ } else {
+ var v interface{}
+ d.d.decode(&v)
+ ext.UpdateExt(rv, v)
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+ // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
+ if d.se.InterfaceExt != nil {
+ bsOut = bs
+ d.DecodeExt(&bsOut, 0, &d.se)
+ return
+ }
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ // check if an "array" of uint8's (see ContainerType for how to infer if an array)
+ if d.tok == '[' {
+ bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+ return
+ }
+ d.appendStringAsBytes()
+ // base64 encodes []byte{} as "", and we encode nil []byte as null.
+ // Consequently, we should decode null as a nil []byte, and "" as an empty []byte{}.
+ // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs.
+ // However, it sets a fnull field to true, so we can check if a null was found.
+ if len(d.bs) == 0 {
+ if d.fnull {
+ return nil
+ }
+ return []byte{}
+ }
+ bs0 := d.bs
+ slen := base64.StdEncoding.DecodedLen(len(bs0))
+ if slen <= cap(bs) {
+ bsOut = bs[:slen]
+ } else if zerocopy && slen <= cap(d.b2) {
+ bsOut = d.b2[:slen]
+ } else {
+ bsOut = make([]byte, slen)
+ }
+ slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
+ if err != nil {
+ d.d.errorf("error decoding base64 binary '%s': %v", bs0, err)
+ return nil
+ }
+ if slen != slen2 {
+ bsOut = bsOut[:slen2]
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeString() (s string) {
+ d.appendStringAsBytes()
+ return d.bsToString()
+}
+
+func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
+ d.appendStringAsBytes()
+ return d.bs
+}
+
+func (d *jsonDecDriver) appendStringAsBytes() {
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+
+ d.fnull = false
+ if d.tok != '"' {
+ // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok)
+ // handle non-string scalar: null, true, false or a number
+ switch d.tok {
+ case 'n':
+ d.readLit(3, jsonLitNull+1) // (n)ull
+ d.bs = d.bs[:0]
+ d.fnull = true
+ case 'f':
+ d.readLit(4, jsonLitFalse+1) // (f)alse
+ d.bs = d.bs[:5]
+ copy(d.bs, "false")
+ case 't':
+ d.readLit(3, jsonLitTrue+1) // (t)rue
+ d.bs = d.bs[:4]
+ copy(d.bs, "true")
+ default:
+ // try to parse a valid number
+ bs := d.decNumBytes()
+ if len(bs) <= cap(d.bs) {
+ d.bs = d.bs[:len(bs)]
+ } else {
+ d.bs = make([]byte, len(bs))
+ }
+ copy(d.bs, bs)
+ }
+ return
+ }
+
+ d.tok = 0
+ r := d.r
+ var cs = r.readUntil(d.b2[:0], '"')
+ var cslen = len(cs)
+ var c uint8
+ v := d.bs[:0]
+ // append on each byte seen can be expensive, so we just
+ // keep track of where we last read a contiguous set of
+ // non-special bytes (using cursor variable),
+ // and when we see a special byte
+ // e.g. end-of-slice, " or \,
+ // we will append the full range into the v slice before proceeding
+ for i, cursor := 0, 0; ; {
+ if i == cslen {
+ v = append(v, cs[cursor:]...)
+ cs = r.readUntil(d.b2[:0], '"')
+ cslen = len(cs)
+ i, cursor = 0, 0
+ }
+ c = cs[i]
+ if c == '"' {
+ v = append(v, cs[cursor:i]...)
+ break
+ }
+ if c != '\\' {
+ i++
+ continue
+ }
+ v = append(v, cs[cursor:i]...)
+ i++
+ c = cs[i]
+ switch c {
+ case '"', '\\', '/', '\'':
+ v = append(v, c)
+ case 'b':
+ v = append(v, '\b')
+ case 'f':
+ v = append(v, '\f')
+ case 'n':
+ v = append(v, '\n')
+ case 'r':
+ v = append(v, '\r')
+ case 't':
+ v = append(v, '\t')
+ case 'u':
+ var r rune
+ var rr uint32
+ if len(cs) < i+4 { // may help reduce bounds-checking
+ d.d.errorf("need at least 4 more bytes for unicode sequence")
+ }
+ // c = cs[i+4] // may help reduce bounds-checking
+ for j := 1; j < 5; j++ {
+ // best to use explicit if-else
+ // - not a table, etc which involve memory loads, array lookup with bounds checks, etc
+ c = cs[i+j]
+ if c >= '0' && c <= '9' {
+ rr = rr*16 + uint32(c-jsonU4Chk2)
+ } else if c >= 'a' && c <= 'f' {
+ rr = rr*16 + uint32(c-jsonU4Chk1)
+ } else if c >= 'A' && c <= 'F' {
+ rr = rr*16 + uint32(c-jsonU4Chk0)
+ } else {
+ r = unicode.ReplacementChar
+ i += 4
+ goto encode_rune
+ }
+ }
+ r = rune(rr)
+ i += 4
+ if utf16.IsSurrogate(r) {
+ if len(cs) >= i+6 && cs[i+2] == 'u' && cs[i+1] == '\\' {
+ i += 2
+ // c = cs[i+4] // may help reduce bounds-checking
+ var rr1 uint32
+ for j := 1; j < 5; j++ {
+ c = cs[i+j]
+ if c >= '0' && c <= '9' {
+ rr1 = rr1*16 + uint32(c-jsonU4Chk2)
+ } else if c >= 'a' && c <= 'f' {
+ rr1 = rr1*16 + uint32(c-jsonU4Chk1)
+ } else if c >= 'A' && c <= 'F' {
+ rr1 = rr1*16 + uint32(c-jsonU4Chk0)
+ } else {
+ r = unicode.ReplacementChar
+ i += 4
+ goto encode_rune
+ }
+ }
+ r = utf16.DecodeRune(r, rune(rr1))
+ i += 4
+ } else {
+ r = unicode.ReplacementChar
+ goto encode_rune
+ }
+ }
+ encode_rune:
+ w2 := utf8.EncodeRune(d.bstr[:], r)
+ v = append(v, d.bstr[:w2]...)
+ default:
+ d.d.errorf("unsupported escaped value: %c", c)
+ }
+ i++
+ cursor = i
+ }
+ d.bs = v
+}
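+
+// For illustration, a sketch of what appendStringAsBytes leaves in d.bs for a few inputs:
+//
+//	"he\"llo"  -> he"llo  (escapes resolved; unescaped runs are batched via the cursor)
+//	"\u00e9"   -> é       (\uXXXX sequences are decoded and re-encoded as UTF-8)
+//	null       -> empty d.bs, with d.fnull set to true
+//	true       -> true    (non-string scalars are copied in as their literal text)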
+
+func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
+ const cutoff = uint64(1 << uint(64-1))
+ var n uint64
+ var neg, badsyntax, overflow bool
+
+ if d.h.PreferFloat {
+ goto F
+ }
+ n, neg, badsyntax, overflow = jsonParseInteger(bs)
+ if badsyntax || overflow {
+ goto F
+ }
+ if neg {
+ if n > cutoff {
+ goto F
+ }
+ z.v = valueTypeInt
+ z.i = -(int64(n))
+ } else if d.h.SignedInteger {
+ if n >= cutoff {
+ goto F
+ }
+ z.v = valueTypeInt
+ z.i = int64(n)
+ } else {
+ z.v = valueTypeUint
+ z.u = n
+ }
+ return
+F:
+ z.v = valueTypeFloat
+ z.f, err = strconv.ParseFloat(stringView(bs), 64)
+ return
+}
+
+func (d *jsonDecDriver) bsToString() string {
+ // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+ if jsonAlwaysReturnInternString || d.c == containerMapKey {
+ return d.d.string(d.bs)
+ }
+ return string(d.bs)
+}
+
+func (d *jsonDecDriver) DecodeNaked() {
+ z := d.d.n
+ // var decodeFurther bool
+
+ if d.tok == 0 {
+ d.tok = d.r.skip(&jsonCharWhitespaceSet)
+ }
+ switch d.tok {
+ case 'n':
+ d.readLit(3, jsonLitNull+1) // (n)ull
+ z.v = valueTypeNil
+ case 'f':
+ d.readLit(4, jsonLitFalse+1) // (f)alse
+ z.v = valueTypeBool
+ z.b = false
+ case 't':
+ d.readLit(3, jsonLitTrue+1) // (t)rue
+ z.v = valueTypeBool
+ z.b = true
+ case '{':
+ z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart
+ case '[':
+ z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
+ case '"':
+ // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
+ d.appendStringAsBytes()
+ if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString {
+ switch stringView(d.bs) {
+ case "null":
+ z.v = valueTypeNil
+ case "true":
+ z.v = valueTypeBool
+ z.b = true
+ case "false":
+ z.v = valueTypeBool
+ z.b = false
+ default:
+ // check if a number: float, int or uint
+ if err := d.nakedNum(z, d.bs); err != nil {
+ z.v = valueTypeString
+ z.s = d.bsToString()
+ }
+ }
+ } else {
+ z.v = valueTypeString
+ z.s = d.bsToString()
+ }
+ default: // number
+ bs := d.decNumBytes()
+ if len(bs) == 0 {
+ d.d.errorf("decode number from empty string")
+ return
+ }
+ if err := d.nakedNum(z, bs); err != nil {
+ d.d.errorf("decode number from %s: %v", bs, err)
+ return
+ }
+ }
+ // if decodeFurther {
+ // d.s.sc.retryRead()
+ // }
+ return
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc.
+// - decode integers from float formatted numbers e.g. 1.27e+8
+// - decode any json value (numbers, bool, etc) from quoted strings
+// - configurable way to encode/decode []byte .
+// by default, encodes and decodes []byte using base64 Std Encoding
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+//
+// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
+// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
+type JsonHandle struct {
+ textEncodingType
+ BasicHandle
+
+ // Indent indicates how a value is encoded.
+ // - If positive, indent by that number of spaces.
+ // - If negative, indent by that number of tabs.
+ Indent int8
+
+ // IntegerAsString controls how integers (signed and unsigned) are encoded.
+ //
+ // Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
+ // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+ // This can be mitigated by configuring how to encode integers.
+ //
+ // IntegerAsString interprets the following values:
+ // - if 'L', then encode integers > 2^53 as a json string.
+ // - if 'A', then encode all integers as a json string
+ // containing the exact integer representation as a decimal.
+ // - else encode all integers as a json number (default)
+ IntegerAsString byte
+
+ // HTMLCharsAsIs controls how to encode some special characters to html: < > &
+ //
+ // By default, we encode them as \uXXX
+ // to prevent security holes when served from some browsers.
+ HTMLCharsAsIs bool
+
+ // PreferFloat says that we will default to decoding a number as a float.
+ // If not set, we will examine the characters of the number and decode as an
+ // integer type if it doesn't have any of the characters [.eE].
+ PreferFloat bool
+
+ // TermWhitespace says that we add a whitespace character
+ // at the end of an encoding.
+ //
+ // The whitespace is important, especially if using numbers in a context
+ // where multiple items are written to a stream.
+ TermWhitespace bool
+
+ // MapKeyAsString says to encode all map keys as strings.
+ //
+ // Use this to enforce strict json output.
+ // The only caveat is that nil value is ALWAYS written as null (never as "null")
+ MapKeyAsString bool
+
+ // _ [2]byte // padding
+
+ // Note: below, we store rarely-used items; e.g. RawBytesExt is cached in the (en|de)cDriver.
+
+ // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
+ // If not configured, raw bytes are encoded to/from base64 text.
+ RawBytesExt InterfaceExt
+
+ _ [2]uint64 // padding
+}
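+
+// A minimal usage sketch for JsonHandle (commented out; buf and v are illustrative
+// names, not part of this package):
+//
+// var jh JsonHandle
+// jh.Indent = 2            // pretty-print with 2 spaces per level
+// jh.IntegerAsString = 'L' // quote integers whose magnitude exceeds 2^53
+//
+// var buf []byte
+// err := NewEncoderBytes(&buf, &jh).Encode(map[string]int{"a": 1})
+// var v map[string]int
+// err = NewDecoderBytes(buf, &jh).Decode(&v)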
+
+// Name returns the name of the handle: json
+func (h *JsonHandle) Name() string { return "json" }
+func (h *JsonHandle) hasElemSeparators() bool { return true }
+func (h *JsonHandle) typical() bool {
+ return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
+}
+
+type jsonTypical interface {
+ typical()
+}
+
+func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) {
+ _, v = ed.(jsonTypical)
+ return v != h.typical()
+}
+
+// SetInterfaceExt sets an extension
+func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
+}
+
+type jsonEncDriverTypicalImpl struct {
+ jsonEncDriver
+ jsonEncDriverTypical
+ _ [1]uint64 // padding
+}
+
+func (x *jsonEncDriverTypicalImpl) reset() {
+ x.jsonEncDriver.reset()
+ x.jsonEncDriverTypical.reset(&x.jsonEncDriver)
+}
+
+type jsonEncDriverGenericImpl struct {
+ jsonEncDriver
+ jsonEncDriverGeneric
+}
+
+func (x *jsonEncDriverGenericImpl) reset() {
+ x.jsonEncDriver.reset()
+ x.jsonEncDriverGeneric.reset(&x.jsonEncDriver)
+}
+
+func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) {
+ var hd *jsonEncDriver
+ if h.typical() {
+ var v jsonEncDriverTypicalImpl
+ ee = &v
+ hd = &v.jsonEncDriver
+ } else {
+ var v jsonEncDriverGenericImpl
+ ee = &v
+ hd = &v.jsonEncDriver
+ }
+ hd.e, hd.h, hd.bs = e, h, hd.b[:0]
+ hd.se.BytesExt = bytesExtFailer{}
+ ee.reset()
+ return
+}
+
+func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
+ // d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
+ hd := jsonDecDriver{d: d, h: h}
+ hd.se.BytesExt = bytesExtFailer{}
+ hd.bs = hd.b[:0]
+ hd.reset()
+ return &hd
+}
+
+func (e *jsonEncDriver) reset() {
+ e.ew = e.e.w // e.e.w // &e.e.encWriterSwitch
+ e.se.InterfaceExt = e.h.RawBytesExt
+ if e.bs != nil {
+ e.bs = e.bs[:0]
+ }
+}
+
+func (d *jsonDecDriver) reset() {
+ d.r = d.d.r // &d.d.decReaderSwitch // d.d.r
+ d.se.InterfaceExt = d.h.RawBytesExt
+ if d.bs != nil {
+ d.bs = d.bs[:0]
+ }
+ d.c, d.tok = 0, 0
+ // d.n.reset()
+}
+
+func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) {
+ prec = -1
+ var abs = math.Abs(f)
+ if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+ fmt = 'e'
+ } else {
+ fmt = 'f'
+ // set prec to 1 iff mod is 0.
+ // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes.
+ // this ensures that every float has an e or .0 in it.
+ if abs <= 1 {
+ if abs == 0 || abs == 1 {
+ prec = 1
+ }
+ } else if _, mod := math.Modf(abs); mod == 0 {
+ prec = 1
+ }
+ }
+ return
+}
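+
+// For illustration, the fmt/prec choices above produce output such as:
+//
+//	0     -> 0.0    ('f', prec 1)
+//	3     -> 3.0    ('f', prec 1: integral values keep a trailing .0)
+//	2.5   -> 2.5    ('f', prec -1)
+//	1e21  -> 1e+21  ('e', prec -1)
+//	1e-7  -> 1e-07  ('e', prec -1)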
+
+// custom-fitted version of strconv.Parse(Ui|I)nt.
+// Also ensures we don't have to search for .eE to determine whether it is a float.
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) {
+ const maxUint64 = (1<<64 - 1)
+ const cutoff = maxUint64/10 + 1
+
+ if len(s) == 0 {
+ badSyntax = true
+ return
+ }
+ switch s[0] {
+ case '+':
+ s = s[1:]
+ case '-':
+ s = s[1:]
+ neg = true
+ }
+ for _, c := range s {
+ if c < '0' || c > '9' {
+ badSyntax = true
+ return
+ }
+ // unsigned integers wrap silently on multiplication, so check the cutoff before multiplying
+ // e.g. (maxUint64-5)*10 wraps around, and the overflow cannot be detected afterwards
+ if n >= cutoff {
+ overflow = true
+ return
+ }
+ n *= 10
+ n1 := n + uint64(c-'0')
+ if n1 < n || n1 > maxUint64 {
+ overflow = true
+ return
+ }
+ n = n1
+ }
+ return
+}
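+
+// For illustration, representative results from jsonParseInteger:
+//
+//	jsonParseInteger([]byte("123"))                  -> n=123
+//	jsonParseInteger([]byte("-123"))                 -> n=123, neg=true
+//	jsonParseInteger([]byte("1.5e3"))                -> badSyntax=true (callers fall back to float parsing)
+//	jsonParseInteger([]byte("18446744073709551616")) -> overflow=true (2^64 does not fit in a uint64)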
+
+var _ decDriver = (*jsonDecDriver)(nil)
+var _ encDriver = (*jsonEncDriverGenericImpl)(nil)
+var _ encDriver = (*jsonEncDriverTypicalImpl)(nil)
+var _ jsonTypical = (*jsonEncDriverTypical)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl
new file mode 100644
index 0000000..90d758c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl
@@ -0,0 +1,154 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from mammoth-test.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import "testing"
+import "fmt"
+import "reflect"
+
+// TestMammoth has all the different paths optimized in fast-path
+// It has all the primitives, slices and maps.
+//
+// For each of those types, it has a pointer and a non-pointer field.
+
+func init() { _ = fmt.Printf } // so we can include fmt as needed
+
+type TestMammoth struct {
+
+{{range .Values }}{{if .Primitive }}{{/*
+*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
+{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
+{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+}
+
+{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
+func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
+{{end}}{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+func doTestMammothSlices(t *testing.T, h Handle) {
+{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}}
+ var v{{$i}}va [8]{{ .Elem }}
+ for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/*
+ // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
+ // - encode value to some []byte
+ // - decode into a length-wise-equal []byte
+ // - check if equal to initial slice
+ // - encode ptr to the value
+ // - check if encode bytes are same
+ // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
+ // - decode into non-addressable slice of equal length, then larger len
+ // - for each decode, compare elem-by-elem to the original slice
+ // -
+ // - rinse and repeat for a MapBySlice version
+ // -
+ */}}
+ var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
+ v{{$i}}v1 = v
+ bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
+ testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
+ testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
+ // ...
+ bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
+ v{{$i}}v2 = nil
+ testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
+ v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+ v{{$i}}v2 = v{{$i}}va[:1:1]
+ testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1")
+ v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+ v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
+ testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len")
+ v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+ v{{$i}}v2 = v{{$i}}va[:]
+ testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap")
+ if len(v{{$i}}v1) > 1 {
+ v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+ testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr")
+ v{{$i}}va = [8]{{ .Elem }}{} // clear the array
+ testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
+ }
+ // ...
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
+ v{{$i}}v2 = nil
+ if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
+ v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
+ v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
+ bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
+ testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
+ testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
+ bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
+ v{{$i}}v2 = nil
+ v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
+ testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
+ testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
+ }
+{{end}}{{end}}{{end}}
+}
+
+func doTestMammothMaps(t *testing.T, h Handle) {
+{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}}
+ for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
+ // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
+ var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
+ v{{$i}}v1 = v
+ bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+ testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+ testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr")
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+ testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
+ bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
+ v{{$i}}v2 = nil
+ testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
+ testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
+ // ...
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
+ v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
+ v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
+ bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
+ testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
+ testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len")
+ }
+{{end}}{{end}}{{end}}
+
+}
+
+func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
+ doTestMammothSlices(t, h)
+ doTestMammothMaps(t, h)
+}
diff --git a/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl
new file mode 100644
index 0000000..7cdf8f5
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl
@@ -0,0 +1,94 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go.
+//
+// Add:
+// - test file for creating a mammoth generated file as _mammoth_generated.go
+// - generate a second mammoth file in a different file: mammoth2_generated_test.go
+// - mammoth-test.go.tmpl will do this
+// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
+// - as part of TestMammoth, run it also
+// - this will cover all the codecgen, gen-helper, etc in one full run
+// - check in mammoth* files into github also
+// - then
+//
+// Now, add some types:
+// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of them
+// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
+// - this wrapper object is what we encode/decode (so that the codecgen methods are called)
+
+
+// import "encoding/binary"
+import "fmt"
+
+type TestMammoth2 struct {
+
+{{range .Values }}{{if .Primitive }}{{/*
+*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
+{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
+{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
+*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
+{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
+{{end}}{{end}}{{end}}
+
+}
+
+// -----------
+
+type testMammoth2Binary uint64
+func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
+	data = make([]byte, 8)
+	bigen.PutUint64(data, uint64(x))
+	return
+}
+func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
+	*x = testMammoth2Binary(bigen.Uint64(data))
+	return
+}
+
+type testMammoth2Text uint64
+func (x testMammoth2Text) MarshalText() (data []byte, err error) {
+	data = []byte(fmt.Sprintf("%b", uint64(x)))
+	return
+}
+func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
+	_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
+	return
+}
+
+type testMammoth2Json uint64
+func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
+	data = []byte(fmt.Sprintf("%v", uint64(x)))
+	return
+}
+func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
+	_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
+	return
+}
+
+type testMammoth2Basic [4]uint64
+
+type TestMammoth2Wrapper struct {
+ V TestMammoth2
+ T testMammoth2Text
+ B testMammoth2Binary
+ J testMammoth2Json
+ C testMammoth2Basic
+ M map[testMammoth2Basic]TestMammoth2
+ L []TestMammoth2
+ A [4]int64
+}
diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go
new file mode 100644
index 0000000..3271579
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -0,0 +1,1092 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+MSGPACK
+
+The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
+We need to maintain compatibility with it and with how it encodes integer values
+without regard to their type.
+
+For compatibility with behaviour of msgpack-c reference implementation:
+ - Go intX (>0) and uintX
+ IS ENCODED AS
+ msgpack +ve fixnum, unsigned
+ - Go intX (<0)
+ IS ENCODED AS
+ msgpack -ve fixnum, signed
+*/
+
+package codec
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "net/rpc"
+ "reflect"
+ "time"
+)
+
+const (
+ mpPosFixNumMin byte = 0x00
+ mpPosFixNumMax = 0x7f
+ mpFixMapMin = 0x80
+ mpFixMapMax = 0x8f
+ mpFixArrayMin = 0x90
+ mpFixArrayMax = 0x9f
+ mpFixStrMin = 0xa0
+ mpFixStrMax = 0xbf
+ mpNil = 0xc0
+ _ = 0xc1
+ mpFalse = 0xc2
+ mpTrue = 0xc3
+ mpFloat = 0xca
+ mpDouble = 0xcb
+ mpUint8 = 0xcc
+ mpUint16 = 0xcd
+ mpUint32 = 0xce
+ mpUint64 = 0xcf
+ mpInt8 = 0xd0
+ mpInt16 = 0xd1
+ mpInt32 = 0xd2
+ mpInt64 = 0xd3
+
+ // extensions below
+ mpBin8 = 0xc4
+ mpBin16 = 0xc5
+ mpBin32 = 0xc6
+ mpExt8 = 0xc7
+ mpExt16 = 0xc8
+ mpExt32 = 0xc9
+ mpFixExt1 = 0xd4
+ mpFixExt2 = 0xd5
+ mpFixExt4 = 0xd6
+ mpFixExt8 = 0xd7
+ mpFixExt16 = 0xd8
+
+ mpStr8 = 0xd9 // new
+ mpStr16 = 0xda
+ mpStr32 = 0xdb
+
+ mpArray16 = 0xdc
+ mpArray32 = 0xdd
+
+ mpMap16 = 0xde
+ mpMap32 = 0xdf
+
+ mpNegFixNumMin = 0xe0
+ mpNegFixNumMax = 0xff
+)
+
+var mpTimeExtTag int8 = -1
+var mpTimeExtTagU = uint8(mpTimeExtTag)
+
+// var mpdesc = map[byte]string{
+// mpPosFixNumMin: "PosFixNumMin",
+// mpPosFixNumMax: "PosFixNumMax",
+// mpFixMapMin: "FixMapMin",
+// mpFixMapMax: "FixMapMax",
+// mpFixArrayMin: "FixArrayMin",
+// mpFixArrayMax: "FixArrayMax",
+// mpFixStrMin: "FixStrMin",
+// mpFixStrMax: "FixStrMax",
+// mpNil: "Nil",
+// mpFalse: "False",
+// mpTrue: "True",
+// mpFloat: "Float",
+// mpDouble: "Double",
+// mpUint8: "Uint8",
+// mpUint16: "Uint16",
+// mpUint32: "Uint32",
+// mpUint64: "Uint64",
+// mpInt8: "Int8",
+// mpInt16: "Int16",
+// mpInt32: "Int32",
+// mpInt64: "Int64",
+// mpBin8: "Bin8",
+// mpBin16: "Bin16",
+// mpBin32: "Bin32",
+// mpExt8: "Ext8",
+// mpExt16: "Ext16",
+// mpExt32: "Ext32",
+// mpFixExt1: "FixExt1",
+// mpFixExt2: "FixExt2",
+// mpFixExt4: "FixExt4",
+// mpFixExt8: "FixExt8",
+// mpFixExt16: "FixExt16",
+// mpStr8: "Str8",
+// mpStr16: "Str16",
+// mpStr32: "Str32",
+// mpArray16: "Array16",
+// mpArray32: "Array32",
+// mpMap16: "Map16",
+// mpMap32: "Map32",
+// mpNegFixNumMin: "NegFixNumMin",
+// mpNegFixNumMax: "NegFixNumMax",
+// }
+
+func mpdesc(bd byte) string {
+ switch bd {
+ case mpNil:
+ return "nil"
+ case mpFalse:
+ return "false"
+ case mpTrue:
+ return "true"
+ case mpFloat, mpDouble:
+ return "float"
+ case mpUint8, mpUint16, mpUint32, mpUint64:
+ return "uint"
+ case mpInt8, mpInt16, mpInt32, mpInt64:
+ return "int"
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ return "int"
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ return "int"
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ return "string|bytes"
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ return "bytes"
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ return "array"
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ return "map"
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ return "ext"
+ default:
+ return "unknown"
+ }
+ }
+}
+
+// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
+// that the backend RPC service takes multiple arguments, which have been arranged
+// in sequence in the slice.
+//
+// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
+// array of 1 element).
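+//
+// A usage sketch (the service name and argument values are illustrative):
+//	args := codec.MsgpackSpecRpcMultiArgs{"some-id", 25, true}
+//	err := client.Call("Service.Method", args, &reply)
+// The codec then writes the three arguments in sequence, instead of
+// wrapping them in a single-element array.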
+type MsgpackSpecRpcMultiArgs []interface{}
+
+// A MsgpackContainer type specifies the different types of msgpackContainers.
+type msgpackContainerType struct {
+ fixCutoff int
+ bFixMin, b8, b16, b32 byte
+ hasFixMin, has8, has8Always bool
+}
+
+var (
+ msgpackContainerStr = msgpackContainerType{
+ 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false,
+ }
+ msgpackContainerBin = msgpackContainerType{
+ 0, 0, mpBin8, mpBin16, mpBin32, false, true, true,
+ }
+ msgpackContainerList = msgpackContainerType{
+ 16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false,
+ }
+ msgpackContainerMap = msgpackContainerType{
+ 16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false,
+ }
+)
+
+//---------------------------------------------
+
+type msgpackEncDriver struct {
+ noBuiltInTypes
+ encDriverNoopContainerWriter
+ // encNoSeparator
+ e *Encoder
+ w encWriter
+ h *MsgpackHandle
+ x [8]byte
+ _ [3]uint64 // padding
+}
+
+func (e *msgpackEncDriver) EncodeNil() {
+ e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncDriver) EncodeInt(i int64) {
+ // if i >= 0 {
+ // e.EncodeUint(uint64(i))
+ // } else if false &&
+ if i > math.MaxInt8 {
+ if i <= math.MaxInt16 {
+ e.w.writen1(mpInt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i <= math.MaxInt32 {
+ e.w.writen1(mpInt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpInt64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+ } else if i >= -32 {
+ if e.h.NoFixedNum {
+ e.w.writen2(mpInt8, byte(i))
+ } else {
+ e.w.writen1(byte(i))
+ }
+ } else if i >= math.MinInt8 {
+ e.w.writen2(mpInt8, byte(i))
+ } else if i >= math.MinInt16 {
+ e.w.writen1(mpInt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i >= math.MinInt32 {
+ e.w.writen1(mpInt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpInt64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeUint(i uint64) {
+ if i <= math.MaxInt8 {
+ if e.h.NoFixedNum {
+ e.w.writen2(mpUint8, byte(i))
+ } else {
+ e.w.writen1(byte(i))
+ }
+ } else if i <= math.MaxUint8 {
+ e.w.writen2(mpUint8, byte(i))
+ } else if i <= math.MaxUint16 {
+ e.w.writen1(mpUint16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
+ } else if i <= math.MaxUint32 {
+ e.w.writen1(mpUint32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
+ } else {
+ e.w.writen1(mpUint64)
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeBool(b bool) {
+ if b {
+ e.w.writen1(mpTrue)
+ } else {
+ e.w.writen1(mpFalse)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeFloat32(f float32) {
+ e.w.writen1(mpFloat)
+ bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeFloat64(f float64) {
+ e.w.writen1(mpDouble)
+ bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncDriver) EncodeTime(t time.Time) {
+ if t.IsZero() {
+ e.EncodeNil()
+ return
+ }
+ t = t.UTC()
+ sec, nsec := t.Unix(), uint64(t.Nanosecond())
+ var data64 uint64
+ var l = 4
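+	// Per the msgpack timestamp extension: seconds that fit in 34 bits are packed
+	// with nanoseconds in the upper 30 bits (8-byte form), or written as a plain
+	// 4-byte unsigned seconds value when the nanoseconds are zero; anything else
+	// uses the 12-byte form (4-byte nanoseconds followed by 8-byte signed seconds).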
+ if sec >= 0 && sec>>34 == 0 {
+ data64 = (nsec << 34) | uint64(sec)
+ if data64&0xffffffff00000000 != 0 {
+ l = 8
+ }
+ } else {
+ l = 12
+ }
+ if e.h.WriteExt {
+ e.encodeExtPreamble(mpTimeExtTagU, l)
+ } else {
+ e.writeContainerLen(msgpackContainerStr, l)
+ }
+ switch l {
+ case 4:
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64))
+ case 8:
+ bigenHelper{e.x[:8], e.w}.writeUint64(data64)
+ case 12:
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec))
+ bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec))
+ }
+}
+
+func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(v)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ if e.h.WriteExt {
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+ } else {
+ e.EncodeStringBytes(cRAW, bs)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+ if l == 1 {
+ e.w.writen2(mpFixExt1, xtag)
+ } else if l == 2 {
+ e.w.writen2(mpFixExt2, xtag)
+ } else if l == 4 {
+ e.w.writen2(mpFixExt4, xtag)
+ } else if l == 8 {
+ e.w.writen2(mpFixExt8, xtag)
+ } else if l == 16 {
+ e.w.writen2(mpFixExt16, xtag)
+ } else if l < 256 {
+ e.w.writen2(mpExt8, byte(l))
+ e.w.writen1(xtag)
+ } else if l < 65536 {
+ e.w.writen1(mpExt16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ e.w.writen1(xtag)
+ } else {
+ e.w.writen1(mpExt32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ e.w.writen1(xtag)
+ }
+}
+
+func (e *msgpackEncDriver) WriteArrayStart(length int) {
+ e.writeContainerLen(msgpackContainerList, length)
+}
+
+func (e *msgpackEncDriver) WriteMapStart(length int) {
+ e.writeContainerLen(msgpackContainerMap, length)
+}
+
+func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
+ slen := len(s)
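+	// With WriteExt (new spec), cRAW content is written as a bin container;
+	// otherwise it is written as a str container.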
+ if c == cRAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, slen)
+ } else {
+ e.writeContainerLen(msgpackContainerStr, slen)
+ }
+ if slen > 0 {
+ e.w.writestr(s)
+ }
+}
+
+func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ slen := len(bs)
+ if c == cRAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, slen)
+ } else {
+ e.writeContainerLen(msgpackContainerStr, slen)
+ }
+ if slen > 0 {
+ e.w.writeb(bs)
+ }
+}
+
+func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
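+	// Prefer the fix* form for small lengths when the container has one.
+	// The 8-bit length form is used only if the container supports it:
+	// always for bin (bin8), and only when WriteExt (new spec) is set for str (str8).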
+ if ct.hasFixMin && l < ct.fixCutoff {
+ e.w.writen1(ct.bFixMin | byte(l))
+ } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
+ e.w.writen2(ct.b8, uint8(l))
+ } else if l < 65536 {
+ e.w.writen1(ct.b16)
+ bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
+ } else {
+ e.w.writen1(ct.b32)
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
+ }
+}
+
+//---------------------------------------------
+
+type msgpackDecDriver struct {
+ d *Decoder
+ r decReader // *Decoder decReader decReaderT
+ h *MsgpackHandle
+ // b [scratchByteArrayLen]byte
+ bd byte
+ bdRead bool
+ br bool // bytes reader
+ noBuiltInTypes
+ // noStreamingCodec
+ // decNoSeparator
+ decDriverNoopContainerReader
+ _ [3]uint64 // padding
+}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ bd := d.bd
+ n := d.d.n
+ var decodeFurther bool
+
+ switch bd {
+ case mpNil:
+ n.v = valueTypeNil
+ d.bdRead = false
+ case mpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case mpTrue:
+ n.v = valueTypeBool
+ n.b = true
+
+ case mpFloat:
+ n.v = valueTypeFloat
+ n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ case mpDouble:
+ n.v = valueTypeFloat
+ n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+
+ case mpUint8:
+ n.v = valueTypeUint
+ n.u = uint64(d.r.readn1())
+ case mpUint16:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint64(d.r.readx(8)))
+
+ case mpInt8:
+ n.v = valueTypeInt
+ n.i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ n.v = valueTypeInt
+ n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ n.v = valueTypeInt
+ n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ n.v = valueTypeInt
+ n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
+
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ // positive fixnum (always signed)
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ // negative fixnum
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ if d.h.RawToString {
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ } else {
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false)
+ }
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false)
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ n.v = valueTypeExt
+ clen := d.readExtLen()
+ n.u = uint64(d.r.readn1())
+ if n.u == uint64(mpTimeExtTagU) {
+ n.v = valueTypeTime
+ n.t = d.decodeTime(clen)
+ } else {
+ n.l = d.r.readx(clen)
+ }
+ default:
+			d.d.errorf("cannot infer value: %s: 0x%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd))
+ }
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+ return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeInt64() (i int64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ i = int64(uint64(d.r.readn1()))
+ case mpUint16:
+ i = int64(uint64(bigen.Uint16(d.r.readx(2))))
+ case mpUint32:
+ i = int64(uint64(bigen.Uint32(d.r.readx(4))))
+ case mpUint64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ case mpInt8:
+ i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ i = int64(int16(bigen.Uint16(d.r.readx(2))))
+ case mpInt32:
+ i = int64(int32(bigen.Uint32(d.r.readx(4))))
+ case mpInt64:
+ i = int64(bigen.Uint64(d.r.readx(8)))
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ i = int64(int8(d.bd))
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ i = int64(int8(d.bd))
+ default:
+ d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// uint can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case mpUint8:
+ ui = uint64(d.r.readn1())
+ case mpUint16:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case mpUint32:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case mpUint64:
+ ui = bigen.Uint64(d.r.readx(8))
+ case mpInt8:
+ if i := int64(int8(d.r.readn1())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt16:
+ if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt32:
+ if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ case mpInt64:
+ if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
+ ui = uint64(i)
+ } else {
+ d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
+ return
+ }
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ ui = uint64(d.bd)
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd))
+ return
+ default:
+ d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// float can either be decoded from msgpack type: float, double or intX
+func (d *msgpackDecDriver) DecodeFloat64() (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFloat {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == mpDouble {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ f = float64(d.DecodeInt64())
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpFalse || d.bd == 0 {
+ // b = false
+ } else if d.bd == mpTrue || d.bd == 1 {
+ b = true
+ } else {
+ d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ // check if an "array" of uint8's (see ContainerType for how to infer if an array)
+ bd := d.bd
+ // DecodeBytes could be from: bin str fixstr fixarray array ...
+ var clen int
+ vt := d.ContainerType()
+ switch vt {
+ case valueTypeBytes:
+ // valueTypeBytes may be a mpBin or an mpStr container
+ if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
+ clen = d.readContainerLen(msgpackContainerBin)
+ } else {
+ clen = d.readContainerLen(msgpackContainerStr)
+ }
+ case valueTypeString:
+ clen = d.readContainerLen(msgpackContainerStr)
+ case valueTypeArray:
+ if zerocopy && len(bs) == 0 {
+ bs = d.d.b[:]
+ }
+ bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+ return
+ default:
+ d.d.errorf("invalid container type: expecting bin|str|array, got: 0x%x", uint8(vt))
+ return
+ }
+
+ // these are (bin|str)(8|16|32)
+ d.bdRead = false
+ // bytes may be nil, so handle it. if nil, clen=-1.
+ if clen < 0 {
+ return nil
+ }
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
+}
+
+func (d *msgpackDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.d.b[:], true))
+}
+
+func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) {
+ return d.DecodeBytes(d.d.b[:], true)
+}
+
+func (d *msgpackDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *msgpackDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *msgpackDecDriver) ContainerType() (vt valueType) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ bd := d.bd
+ if bd == mpNil {
+ return valueTypeNil
+ } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
+ (!d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
+ return valueTypeBytes
+ } else if d.h.RawToString &&
+ (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
+ return valueTypeString
+ } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
+ return valueTypeArray
+ } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
+ return valueTypeMap
+ }
+ // else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // }
+ return valueTypeUnset
+}
+
+func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpNil {
+ d.bdRead = false
+ return true
+ }
+ return
+}
+
+func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
+ bd := d.bd
+ if bd == mpNil {
+ clen = -1 // to represent nil
+ } else if bd == ct.b8 {
+ clen = int(d.r.readn1())
+ } else if bd == ct.b16 {
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ } else if bd == ct.b32 {
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ } else if (ct.bFixMin & bd) == ct.bFixMin {
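+		// fixstr/fixarray/fixmap: the container length is packed into the low bits of bd.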
+ clen = int(ct.bFixMin ^ bd)
+ } else {
+ d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) ReadMapStart() int {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecDriver) ReadArrayStart() int {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ return d.readContainerLen(msgpackContainerList)
+}
+
+func (d *msgpackDecDriver) readExtLen() (clen int) {
+ switch d.bd {
+ case mpNil:
+ clen = -1 // to represent nil
+ case mpFixExt1:
+ clen = 1
+ case mpFixExt2:
+ clen = 2
+ case mpFixExt4:
+ clen = 4
+ case mpFixExt8:
+ clen = 8
+ case mpFixExt16:
+ clen = 16
+ case mpExt8:
+ clen = int(d.r.readn1())
+ case mpExt16:
+ clen = int(bigen.Uint16(d.r.readx(2)))
+ case mpExt32:
+ clen = int(bigen.Uint32(d.r.readx(4)))
+ default:
+ d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
+ return
+ }
+ return
+}
+
+func (d *msgpackDecDriver) DecodeTime() (t time.Time) {
+ // decode time from string bytes or ext
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == mpNil {
+ d.bdRead = false
+ return
+ }
+ var clen int
+ switch d.ContainerType() {
+ case valueTypeBytes, valueTypeString:
+ clen = d.readContainerLen(msgpackContainerStr)
+ default:
+ // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1
+ d.bdRead = false
+ b2 := d.r.readn1()
+ if d.bd == mpFixExt4 && b2 == mpTimeExtTagU {
+ clen = 4
+ } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU {
+ clen = 8
+ } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU {
+ clen = 12
+ } else {
+ d.d.errorf("invalid bytes for decoding time as extension: got 0x%x, 0x%x", d.bd, b2)
+ return
+ }
+ }
+ return d.decodeTime(clen)
+}
+
+func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
+ // bs = d.r.readx(clen)
+ d.bdRead = false
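+	// clen selects the msgpack timestamp form: 4-byte seconds, 8-byte packed
+	// (nanoseconds in the upper 30 bits, seconds in the lower 34 bits),
+	// or 12-byte (4-byte nanoseconds followed by 8-byte signed seconds).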
+ switch clen {
+ case 4:
+ t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC()
+ case 8:
+ tv := bigen.Uint64(d.r.readx(8))
+ t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC()
+ case 12:
+ nsec := bigen.Uint32(d.r.readx(4))
+ sec := bigen.Uint64(d.r.readx(8))
+ t = time.Unix(int64(sec), int64(nsec)).UTC()
+ default:
+ d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen)
+ return
+ }
+ return
+}
+
+func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ xbd := d.bd
+ if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
+ xbs = d.DecodeBytes(nil, true)
+ } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
+ (xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
+ xbs = d.DecodeStringAsBytes()
+ } else {
+ clen := d.readExtLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(clen)
+ }
+ d.bdRead = false
+ return
+}
+
+//--------------------------------------------------
+
+// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+ BasicHandle
+
+	// RawToString controls whether raw bytes are decoded into a string or a []byte
+	// when decoding into a nil interface{}.
+ RawToString bool
+
+	// NoFixedNum says to output all integers as at least 2 bytes (descriptor + value), never as a 1-byte fixnum.
+ NoFixedNum bool
+
+	// WriteExt controls whether configured extensions are encoded with their extension tags.
+	// It also controls whether other elements of the new spec are encoded (i.e. Str8).
+	//
+	// With WriteExt=false, configured extensions are serialized as raw bytes
+	// and Str8 is not encoded.
+	//
+	// A stream can still be decoded into a typed value, as long as an appropriate value
+	// is supplied, but the type cannot be inferred from the stream. If no appropriate
+	// type is provided (e.g. decoding into a nil interface{}), you get back
+	// a []byte or string, depending on the setting of RawToString.
+ WriteExt bool
+
+ binaryEncodingType
+ noElemSeparators
+
+ // _ [1]uint64 // padding
+}
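+
+// A minimal usage sketch (the flag values and v are illustrative):
+//	var mh codec.MsgpackHandle
+//	mh.WriteExt = true    // emit bin/str8 and registered extensions with tags
+//	mh.RawToString = true // decode raw bytes into string when decoding into interface{}
+//	var buf []byte
+//	err := codec.NewEncoderBytes(&buf, &mh).Encode(v)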
+
+// Name returns the name of the handle: msgpack
+func (h *MsgpackHandle) Name() string { return "msgpack" }
+
+// SetBytesExt sets an extension
+func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
+ return &msgpackEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
+ return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *msgpackEncDriver) reset() {
+ e.w = e.e.w
+}
+
+func (d *msgpackDecDriver) reset() {
+ d.r, d.br = d.d.r, d.d.bytes
+ d.bd, d.bdRead = 0, false
+}
+
+//--------------------------------------------------
+
+type msgpackSpecRpcCodec struct {
+ rpcCodec
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // WriteRequest can write to both a Go service, and other services that do
+ // not abide by the 1 argument rule of a Go service.
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
+ var bodyArr []interface{}
+ if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
+ bodyArr = ([]interface{})(m)
+ } else {
+ bodyArr = []interface{}{body}
+ }
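+	// msgpack-rpc request message: [type=0, msgid, method, params]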
+ r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
+ return c.write(r2, nil, false)
+}
+
+func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ var moe interface{}
+ if r.Error != "" {
+ moe = r.Error
+ }
+ if moe != nil && body != nil {
+ body = nil
+ }
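+	// msgpack-rpc response message: [type=1, msgid, error, result]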
+ r2 := []interface{}{1, uint32(r.Seq), moe, body}
+ return c.write(r2, nil, false)
+}
+
+func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
+ if body == nil { // read and discard
+ return c.read(nil)
+ }
+ bodyArr := []interface{}{body}
+ return c.read(&bodyArr)
+}
+
+func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+ if c.isClosed() {
+ return io.EOF
+ }
+
+ // We read the response header by hand
+ // so that the body can be decoded on its own from the stream at a later time.
+
+	const fia byte = 0x94 // fixarray descriptor for a four-item array (0x90 | 4)
+ // Not sure why the panic of EOF is swallowed above.
+ // if bs1 := c.dec.r.readn1(); bs1 != fia {
+ // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
+ // return
+ // }
+ var ba [1]byte
+ var n int
+ for {
+ n, err = c.r.Read(ba[:])
+ if err != nil {
+ return
+ }
+ if n == 1 {
+ break
+ }
+ }
+
+ var b = ba[0]
+ if b != fia {
+ err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b))
+ } else {
+ err = c.read(&b)
+ if err == nil {
+ if b != expectTypeByte {
+ err = fmt.Errorf("%s - expecting %v but got %x/%s",
+ msgBadDesc, expectTypeByte, b, mpdesc(b))
+ } else {
+ err = c.read(msgid)
+ if err == nil {
+ err = c.read(methodOrError)
+ }
+ }
+ }
+ }
+ return
+}
+
+//--------------------------------------------------
+
+// msgpackSpecRpc is the implementation of Rpc that uses the custom communication protocol
+// defined in the msgpack-rpc spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+type msgpackSpecRpc struct{}
+
+// MsgpackSpecRpc implements Rpc using the communication protocol defined in
+// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
+//
+// See GoRpc documentation, for information on buffering for better performance.
+var MsgpackSpecRpc msgpackSpecRpc
+
+func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ decDriver = (*msgpackDecDriver)(nil)
+var _ encDriver = (*msgpackEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go
new file mode 100644
index 0000000..9fb3c01
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/rpc.go
@@ -0,0 +1,232 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/rpc"
+ "sync"
+)
+
+// Rpc provides an rpc Server or Client Codec for rpc communication.
+type Rpc interface {
+ ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+ ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RPCOptions holds options specific to rpc functionality
+type RPCOptions struct {
+ // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
+ //
+ // Set RPCNoBuffer=true to turn buffering off.
+ // Buffering can still be done if buffered connections are passed in, or
+ // buffering is configured on the handle.
+ RPCNoBuffer bool
+}
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+ c io.Closer
+ r io.Reader
+ w io.Writer
+ f ioFlusher
+
+ dec *Decoder
+ enc *Encoder
+ // bw *bufio.Writer
+ // br *bufio.Reader
+ mu sync.Mutex
+ h Handle
+
+ cls bool
+ clsmu sync.RWMutex
+ clsErr error
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+ // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h)
+ return newRPCCodec2(conn, conn, conn, h)
+}
+
+func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
+ // defensive: ensure that jsonH has TermWhitespace turned on.
+ if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
+ panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true"))
+ }
+ // always ensure that we use a flusher, and always flush what was written to the connection.
+ // we lose nothing by using a buffered writer internally.
+ f, ok := w.(ioFlusher)
+ bh := h.getBasicHandle()
+ if !bh.RPCNoBuffer {
+ if bh.WriterBufferSize <= 0 {
+ if !ok {
+ bw := bufio.NewWriter(w)
+ f, w = bw, bw
+ }
+ }
+ if bh.ReaderBufferSize <= 0 {
+ if _, ok = w.(ioPeeker); !ok {
+ if _, ok = w.(ioBuffered); !ok {
+ br := bufio.NewReader(r)
+ r = br
+ }
+ }
+ }
+ }
+ return rpcCodec{
+ c: c,
+ w: w,
+ r: r,
+ f: f,
+ h: h,
+ enc: NewEncoder(w, h),
+ dec: NewDecoder(r, h),
+ }
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) {
+ if c.isClosed() {
+ return c.clsErr
+ }
+ err = c.enc.Encode(obj1)
+ if err == nil {
+ if writeObj2 {
+ err = c.enc.Encode(obj2)
+ }
+ // if err == nil && c.f != nil {
+ // err = c.f.Flush()
+ // }
+ }
+ if c.f != nil {
+ if err == nil {
+ err = c.f.Flush()
+ } else {
+ _ = c.f.Flush() // swallow flush error, so we maintain prior error on write
+ }
+ }
+ return
+}
+
+func (c *rpcCodec) swallow(err *error) {
+ defer panicToErr(c.dec, err)
+ c.dec.swallow()
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+ if c.isClosed() {
+ return c.clsErr
+ }
+	// If nil is passed in, we should read and discard
+ if obj == nil {
+ // var obj2 interface{}
+ // return c.dec.Decode(&obj2)
+ c.swallow(&err)
+ return
+ }
+ return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) isClosed() (b bool) {
+ if c.c != nil {
+ c.clsmu.RLock()
+ b = c.cls
+ c.clsmu.RUnlock()
+ }
+ return
+}
+
+func (c *rpcCodec) Close() error {
+ if c.c == nil || c.isClosed() {
+ return c.clsErr
+ }
+ c.clsmu.Lock()
+ c.cls = true
+ c.clsErr = c.c.Close()
+ c.clsmu.Unlock()
+ return c.clsErr
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+ rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // Must protect for concurrent access as per API
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+//
+// Note: the network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered.
+//
+// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle.
+// This ensures we use an adequate buffer during reading and writing.
+// If not configured, we will internally initialize and use a buffer during reads and writes.
+// This can be turned off via the RPCNoBuffer option on the Handle.
+// var handle codec.JsonHandle
+// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer
+//
+// Example 1: one way of configuring buffering explicitly:
+// var handle codec.JsonHandle // codec handle
+// handle.ReaderBufferSize = 1024
+// handle.WriterBufferSize = 1024
+// var conn io.ReadWriteCloser // connection got from a socket
+// var serverCodec = GoRpc.ServerCodec(conn, handle)
+// var clientCodec = GoRpc.ClientCodec(conn, handle)
+//
+// Example 2: you can also explicitly create a buffered connection yourself,
+// and not worry about configuring the buffer sizes in the Handle.
+// var handle codec.Handle // codec handle
+// var conn io.ReadWriteCloser // connection got from a socket
+// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser
+// io.Closer
+// *bufio.Reader
+// *bufio.Writer
+// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)}
+// var serverCodec = GoRpc.ServerCodec(bufconn, handle)
+// var clientCodec = GoRpc.ClientCodec(bufconn, handle)
+//
+var GoRpc goRpc
+
+func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go
new file mode 100644
index 0000000..f1e181e
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/simple.go
@@ -0,0 +1,652 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+const (
+ _ uint8 = iota
+ simpleVdNil = 1
+ simpleVdFalse = 2
+ simpleVdTrue = 3
+ simpleVdFloat32 = 4
+ simpleVdFloat64 = 5
+
+ // each lasts for 4 (ie n, n+1, n+2, n+3)
+ simpleVdPosInt = 8
+ simpleVdNegInt = 12
+
+ simpleVdTime = 24
+
+	// containers: each type reserves 8 descriptor values (n, n+1, ... n+7), of which n..n+4 are used
+ simpleVdString = 216
+ simpleVdByteArray = 224
+ simpleVdArray = 232
+ simpleVdMap = 240
+ simpleVdExt = 248
+)
+
+type simpleEncDriver struct {
+ noBuiltInTypes
+ // encNoSeparator
+ e *Encoder
+ h *SimpleHandle
+ w encWriter
+ b [8]byte
+ // c containerState
+ encDriverTrackContainerWriter
+ // encDriverNoopContainerWriter
+ _ [2]uint64 // padding
+}
+
+func (e *simpleEncDriver) EncodeNil() {
+ e.w.writen1(simpleVdNil)
+}
+
+func (e *simpleEncDriver) EncodeBool(b bool) {
+ if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b {
+ e.EncodeNil()
+ return
+ }
+ if b {
+ e.w.writen1(simpleVdTrue)
+ } else {
+ e.w.writen1(simpleVdFalse)
+ }
+}
+
+func (e *simpleEncDriver) EncodeFloat32(f float32) {
+ if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
+ e.EncodeNil()
+ return
+ }
+ e.w.writen1(simpleVdFloat32)
+ bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
+}
+
+func (e *simpleEncDriver) EncodeFloat64(f float64) {
+ if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
+ e.EncodeNil()
+ return
+ }
+ e.w.writen1(simpleVdFloat64)
+ bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
+}
+
+func (e *simpleEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-v), simpleVdNegInt)
+ } else {
+ e.encUint(uint64(v), simpleVdPosInt)
+ }
+}
+
+func (e *simpleEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, simpleVdPosInt)
+}
+
+func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+ if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 {
+ e.EncodeNil()
+ return
+ }
+ if v <= math.MaxUint8 {
+ e.w.writen2(bd, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.w.writen1(bd + 1)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:8], e.w}.writeUint64(v)
+ }
+}
+
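+// encLen writes the descriptor byte plus a 0/1/2/4/8-byte big-endian length,
+// bumping the descriptor by 1..4 to signal which width was used.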
+func (e *simpleEncDriver) encLen(bd byte, length int) {
+ if length == 0 {
+ e.w.writen1(bd)
+ } else if length <= math.MaxUint8 {
+ e.w.writen1(bd + 1)
+ e.w.writen1(uint8(length))
+ } else if length <= math.MaxUint16 {
+ e.w.writen1(bd + 2)
+ bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
+ } else if int64(length) <= math.MaxUint32 {
+ e.w.writen1(bd + 3)
+ bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
+ } else {
+ e.w.writen1(bd + 4)
+ bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
+ }
+}
+
+func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
+ bs := ext.WriteExt(rv)
+ if bs == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.w.writeb(bs)
+}
+
+func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.w.writeb(re.Data)
+}
+
+func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(simpleVdExt, length)
+ e.w.writen1(xtag)
+}
+
+func (e *simpleEncDriver) WriteArrayStart(length int) {
+ e.c = containerArrayStart
+ e.encLen(simpleVdArray, length)
+}
+
+func (e *simpleEncDriver) WriteMapStart(length int) {
+ e.c = containerMapStart
+ e.encLen(simpleVdMap, length)
+}
+
+func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
+ if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" {
+ e.EncodeNil()
+ return
+ }
+ e.encLen(simpleVdString, len(v))
+ e.w.writestr(v)
+}
+
+// func (e *simpleEncDriver) EncodeSymbol(v string) {
+// e.EncodeString(cUTF8, v)
+// }
+
+func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+ // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
+ if v == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encLen(simpleVdByteArray, len(v))
+ e.w.writeb(v)
+}
+
+func (e *simpleEncDriver) EncodeTime(t time.Time) {
+ // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
+ if t.IsZero() {
+ e.EncodeNil()
+ return
+ }
+ v, err := t.MarshalBinary()
+ if err != nil {
+ e.e.errorv(err)
+ return
+ }
+ // time.Time marshalbinary takes about 14 bytes.
+ e.w.writen2(simpleVdTime, uint8(len(v)))
+ e.w.writeb(v)
+}
+
+//------------------------------------
+
+type simpleDecDriver struct {
+ d *Decoder
+ h *SimpleHandle
+ r decReader
+ bdRead bool
+ bd byte
+ br bool // a bytes reader?
+ c containerState
+ // b [scratchByteArrayLen]byte
+ noBuiltInTypes
+ // noStreamingCodec
+ decDriverNoopContainerReader
+ _ [3]uint64 // padding
+}
+
+func (d *simpleDecDriver) readNextBd() {
+ d.bd = d.r.readn1()
+ d.bdRead = true
+}
+
+func (d *simpleDecDriver) uncacheRead() {
+ if d.bdRead {
+ d.r.unreadn1()
+ d.bdRead = false
+ }
+}
+
+func (d *simpleDecDriver) ContainerType() (vt valueType) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdNil:
+ return valueTypeNil
+ case simpleVdByteArray, simpleVdByteArray + 1,
+ simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ return valueTypeBytes
+ case simpleVdString, simpleVdString + 1,
+ simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ return valueTypeString
+ case simpleVdArray, simpleVdArray + 1,
+ simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+ return valueTypeArray
+ case simpleVdMap, simpleVdMap + 1,
+ simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ return valueTypeMap
+ // case simpleVdTime:
+ // return valueTypeTime
+ }
+ // else {
+ // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+ // }
+ return valueTypeUnset
+}
+
+func (d *simpleDecDriver) TryDecodeAsNil() bool {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdPosInt:
+ ui = uint64(d.r.readn1())
+ case simpleVdPosInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ case simpleVdPosInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ case simpleVdPosInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ case simpleVdNegInt:
+ ui = uint64(d.r.readn1())
+ neg = true
+ case simpleVdNegInt + 1:
+ ui = uint64(bigen.Uint16(d.r.readx(2)))
+ neg = true
+ case simpleVdNegInt + 2:
+ ui = uint64(bigen.Uint32(d.r.readx(4)))
+ neg = true
+ case simpleVdNegInt + 3:
+ ui = uint64(bigen.Uint64(d.r.readx(8)))
+ neg = true
+ default:
+ d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
+ return
+ }
+ // don't do this check, because callers may only want the unsigned value.
+ // if ui > math.MaxInt64 {
+ // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
+ // return
+ // }
+ return
+}
+
+func (d *simpleDecDriver) DecodeInt64() (i int64) {
+ ui, neg := d.decCheckInteger()
+ i = chkOvf.SignedIntV(ui)
+ if neg {
+ i = -i
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
+ ui, neg := d.decCheckInteger()
+ if neg {
+ d.d.errorf("assigning negative signed value to unsigned type")
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeFloat64() (f float64) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdFloat32 {
+ f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+ } else if d.bd == simpleVdFloat64 {
+ f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+ } else {
+ if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+ f = float64(d.DecodeInt64())
+ } else {
+ d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd)
+ return
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) DecodeBool() (b bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdTrue {
+ b = true
+ } else if d.bd == simpleVdFalse {
+ } else {
+ d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) ReadMapStart() (length int) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ d.bdRead = false
+ d.c = containerMapStart
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayStart() (length int) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ d.bdRead = false
+ d.c = containerArrayStart
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayElem() {
+ d.c = containerArrayElem
+}
+
+func (d *simpleDecDriver) ReadArrayEnd() {
+ d.c = containerArrayEnd
+}
+
+func (d *simpleDecDriver) ReadMapElemKey() {
+ d.c = containerMapKey
+}
+
+func (d *simpleDecDriver) ReadMapElemValue() {
+ d.c = containerMapValue
+}
+
+func (d *simpleDecDriver) ReadMapEnd() {
+ d.c = containerMapEnd
+}
+
+func (d *simpleDecDriver) decLen() int {
+ switch d.bd % 8 {
+ case 0:
+ return 0
+ case 1:
+ return int(d.r.readn1())
+ case 2:
+ return int(bigen.Uint16(d.r.readx(2)))
+ case 3:
+ ui := uint64(bigen.Uint32(d.r.readx(4)))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ case 4:
+ ui := bigen.Uint64(d.r.readx(8))
+ if chkOvf.Uint(ui, intBitsize) {
+ d.d.errorf("overflow integer: %v", ui)
+ return 0
+ }
+ return int(ui)
+ }
+ d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
+ return -1
+}
+
+func (d *simpleDecDriver) DecodeString() (s string) {
+ return string(d.DecodeBytes(d.d.b[:], true))
+}
+
+func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
+ return d.DecodeBytes(d.d.b[:], true)
+}
+
+func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return
+ }
+ // check if an "array" of uint8's (see ContainerType for how to infer if an array)
+ if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
+ if len(bs) == 0 && zerocopy {
+ bs = d.d.b[:]
+ }
+ bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+ return
+ }
+
+ clen := d.decLen()
+ d.bdRead = false
+ if zerocopy {
+ if d.br {
+ return d.r.readx(clen)
+ } else if len(bs) == 0 {
+ bs = d.d.b[:]
+ }
+ }
+ return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
+}
+
+func (d *simpleDecDriver) DecodeTime() (t time.Time) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return
+ }
+ if d.bd != simpleVdTime {
+ d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
+ return
+ }
+ d.bdRead = false
+ clen := int(d.r.readn1())
+ b := d.r.readx(clen)
+ if err := (&t).UnmarshalBinary(b); err != nil {
+ d.d.errorv(err)
+ }
+ return
+}
+
+func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
+ if xtag > 0xff {
+ d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+ return
+ }
+ realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
+ realxtag = uint64(realxtag1)
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
+ } else {
+ ext.ReadExt(rv, xbs)
+ }
+ return
+}
+
+func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ switch d.bd {
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ return
+ }
+ xbs = d.r.readx(l)
+ case simpleVdByteArray, simpleVdByteArray + 1,
+ simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ xbs = d.DecodeBytes(nil, true)
+ default:
+ d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
+ return
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := d.d.n
+ var decodeFurther bool
+
+ switch d.bd {
+ case simpleVdNil:
+ n.v = valueTypeNil
+ case simpleVdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case simpleVdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt64()
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint64()
+ }
+ case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt64()
+ case simpleVdFloat32:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat64()
+ case simpleVdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat64()
+ case simpleVdTime:
+ n.v = valueTypeTime
+ n.t = d.DecodeTime()
+ case simpleVdString, simpleVdString + 1,
+ simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ n.v = valueTypeString
+ n.s = d.DecodeString()
+ case simpleVdByteArray, simpleVdByteArray + 1,
+ simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ n.v = valueTypeBytes
+ n.l = d.DecodeBytes(nil, false)
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ n.v = valueTypeExt
+ l := d.decLen()
+ n.u = uint64(d.r.readn1())
+ n.l = d.r.readx(l)
+ case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
+ simpleVdArray + 3, simpleVdArray + 4:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ n.v = valueTypeMap
+ decodeFurther = true
+ default:
+ d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Length of containers (strings, bytes, array, map, extensions)
+// are encoded in 0, 1, 2, 4 or 8 bytes.
+// Zero-length containers have no length encoded.
+// For others, the number of bytes is given by pow(2, bd%3)
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+// - time.Time are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
+type SimpleHandle struct {
+ BasicHandle
+ binaryEncodingType
+ noElemSeparators
+ // EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
+ EncZeroValuesAsNil bool
+
+ // _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: simple
+func (h *SimpleHandle) Name() string { return "simple" }
+
+// SetBytesExt sets an extension
+func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+ return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX
+
+func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
+ return &simpleEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
+ return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *simpleEncDriver) reset() {
+ e.c = 0
+ e.w = e.e.w
+}
+
+func (d *simpleDecDriver) reset() {
+ d.c = 0
+ d.r, d.br = d.d.r, d.d.bytes
+ d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
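A minimal sketch of how the SimpleHandle added above is exercised through the package's
exported Encoder/Decoder API (the round-tripped value and error handling are illustrative):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        var h codec.SimpleHandle // the "simple" format Handle defined in simple.go

        // Encode a value into a byte slice using the simple format.
        var buf []byte
        if err := codec.NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1}); err != nil {
            panic(err)
        }

        // Decode it back into a Go value.
        var out map[string]int
        if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Println(out) // map[a:1]
    }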
diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
new file mode 100644
index 0000000..9028586
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
@@ -0,0 +1,639 @@
+[
+ {
+ "cbor": "AA==",
+ "hex": "00",
+ "roundtrip": true,
+ "decoded": 0
+ },
+ {
+ "cbor": "AQ==",
+ "hex": "01",
+ "roundtrip": true,
+ "decoded": 1
+ },
+ {
+ "cbor": "Cg==",
+ "hex": "0a",
+ "roundtrip": true,
+ "decoded": 10
+ },
+ {
+ "cbor": "Fw==",
+ "hex": "17",
+ "roundtrip": true,
+ "decoded": 23
+ },
+ {
+ "cbor": "GBg=",
+ "hex": "1818",
+ "roundtrip": true,
+ "decoded": 24
+ },
+ {
+ "cbor": "GBk=",
+ "hex": "1819",
+ "roundtrip": true,
+ "decoded": 25
+ },
+ {
+ "cbor": "GGQ=",
+ "hex": "1864",
+ "roundtrip": true,
+ "decoded": 100
+ },
+ {
+ "cbor": "GQPo",
+ "hex": "1903e8",
+ "roundtrip": true,
+ "decoded": 1000
+ },
+ {
+ "cbor": "GgAPQkA=",
+ "hex": "1a000f4240",
+ "roundtrip": true,
+ "decoded": 1000000
+ },
+ {
+ "cbor": "GwAAAOjUpRAA",
+ "hex": "1b000000e8d4a51000",
+ "roundtrip": true,
+ "decoded": 1000000000000
+ },
+ {
+ "cbor": "G///////////",
+ "hex": "1bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": 18446744073709551615
+ },
+ {
+ "cbor": "wkkBAAAAAAAAAAA=",
+ "hex": "c249010000000000000000",
+ "roundtrip": true,
+ "decoded": 18446744073709551616
+ },
+ {
+ "cbor": "O///////////",
+ "hex": "3bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": -18446744073709551616,
+ "skip": true
+ },
+ {
+ "cbor": "w0kBAAAAAAAAAAA=",
+ "hex": "c349010000000000000000",
+ "roundtrip": true,
+ "decoded": -18446744073709551617
+ },
+ {
+ "cbor": "IA==",
+ "hex": "20",
+ "roundtrip": true,
+ "decoded": -1
+ },
+ {
+ "cbor": "KQ==",
+ "hex": "29",
+ "roundtrip": true,
+ "decoded": -10
+ },
+ {
+ "cbor": "OGM=",
+ "hex": "3863",
+ "roundtrip": true,
+ "decoded": -100
+ },
+ {
+ "cbor": "OQPn",
+ "hex": "3903e7",
+ "roundtrip": true,
+ "decoded": -1000
+ },
+ {
+ "cbor": "+QAA",
+ "hex": "f90000",
+ "roundtrip": true,
+ "decoded": 0.0
+ },
+ {
+ "cbor": "+YAA",
+ "hex": "f98000",
+ "roundtrip": true,
+ "decoded": -0.0
+ },
+ {
+ "cbor": "+TwA",
+ "hex": "f93c00",
+ "roundtrip": true,
+ "decoded": 1.0
+ },
+ {
+ "cbor": "+z/xmZmZmZma",
+ "hex": "fb3ff199999999999a",
+ "roundtrip": true,
+ "decoded": 1.1
+ },
+ {
+ "cbor": "+T4A",
+ "hex": "f93e00",
+ "roundtrip": true,
+ "decoded": 1.5
+ },
+ {
+ "cbor": "+Xv/",
+ "hex": "f97bff",
+ "roundtrip": true,
+ "decoded": 65504.0
+ },
+ {
+ "cbor": "+kfDUAA=",
+ "hex": "fa47c35000",
+ "roundtrip": true,
+ "decoded": 100000.0
+ },
+ {
+ "cbor": "+n9///8=",
+ "hex": "fa7f7fffff",
+ "roundtrip": true,
+ "decoded": 3.4028234663852886e+38
+ },
+ {
+ "cbor": "+3435DyIAHWc",
+ "hex": "fb7e37e43c8800759c",
+ "roundtrip": true,
+ "decoded": 1.0e+300
+ },
+ {
+ "cbor": "+QAB",
+ "hex": "f90001",
+ "roundtrip": true,
+ "decoded": 5.960464477539063e-08
+ },
+ {
+ "cbor": "+QQA",
+ "hex": "f90400",
+ "roundtrip": true,
+ "decoded": 6.103515625e-05
+ },
+ {
+ "cbor": "+cQA",
+ "hex": "f9c400",
+ "roundtrip": true,
+ "decoded": -4.0
+ },
+ {
+ "cbor": "+8AQZmZmZmZm",
+ "hex": "fbc010666666666666",
+ "roundtrip": true,
+ "decoded": -4.1
+ },
+ {
+ "cbor": "+XwA",
+ "hex": "f97c00",
+ "roundtrip": true,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+X4A",
+ "hex": "f97e00",
+ "roundtrip": true,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+fwA",
+ "hex": "f9fc00",
+ "roundtrip": true,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+n+AAAA=",
+ "hex": "fa7f800000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+n/AAAA=",
+ "hex": "fa7fc00000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+v+AAAA=",
+ "hex": "faff800000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+3/wAAAAAAAA",
+ "hex": "fb7ff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+3/4AAAAAAAA",
+ "hex": "fb7ff8000000000000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+//wAAAAAAAA",
+ "hex": "fbfff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "9A==",
+ "hex": "f4",
+ "roundtrip": true,
+ "decoded": false
+ },
+ {
+ "cbor": "9Q==",
+ "hex": "f5",
+ "roundtrip": true,
+ "decoded": true
+ },
+ {
+ "cbor": "9g==",
+ "hex": "f6",
+ "roundtrip": true,
+ "decoded": null
+ },
+ {
+ "cbor": "9w==",
+ "hex": "f7",
+ "roundtrip": true,
+ "diagnostic": "undefined"
+ },
+ {
+ "cbor": "8A==",
+ "hex": "f0",
+ "roundtrip": true,
+ "diagnostic": "simple(16)"
+ },
+ {
+ "cbor": "+Bg=",
+ "hex": "f818",
+ "roundtrip": true,
+ "diagnostic": "simple(24)"
+ },
+ {
+ "cbor": "+P8=",
+ "hex": "f8ff",
+ "roundtrip": true,
+ "diagnostic": "simple(255)"
+ },
+ {
+ "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
+ "hex": "c074323031332d30332d32315432303a30343a30305a",
+ "roundtrip": true,
+ "diagnostic": "0(\"2013-03-21T20:04:00Z\")"
+ },
+ {
+ "cbor": "wRpRS2ew",
+ "hex": "c11a514b67b0",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240)"
+ },
+ {
+ "cbor": "wftB1FLZ7CAAAA==",
+ "hex": "c1fb41d452d9ec200000",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240.5)"
+ },
+ {
+ "cbor": "10QBAgME",
+ "hex": "d74401020304",
+ "roundtrip": true,
+ "diagnostic": "23(h'01020304')"
+ },
+ {
+ "cbor": "2BhFZElFVEY=",
+ "hex": "d818456449455446",
+ "roundtrip": true,
+ "diagnostic": "24(h'6449455446')"
+ },
+ {
+ "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
+ "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
+ "roundtrip": true,
+ "diagnostic": "32(\"http://www.example.com\")"
+ },
+ {
+ "cbor": "QA==",
+ "hex": "40",
+ "roundtrip": true,
+ "diagnostic": "h''"
+ },
+ {
+ "cbor": "RAECAwQ=",
+ "hex": "4401020304",
+ "roundtrip": true,
+ "diagnostic": "h'01020304'"
+ },
+ {
+ "cbor": "YA==",
+ "hex": "60",
+ "roundtrip": true,
+ "decoded": ""
+ },
+ {
+ "cbor": "YWE=",
+ "hex": "6161",
+ "roundtrip": true,
+ "decoded": "a"
+ },
+ {
+ "cbor": "ZElFVEY=",
+ "hex": "6449455446",
+ "roundtrip": true,
+ "decoded": "IETF"
+ },
+ {
+ "cbor": "YiJc",
+ "hex": "62225c",
+ "roundtrip": true,
+ "decoded": "\"\\"
+ },
+ {
+ "cbor": "YsO8",
+ "hex": "62c3bc",
+ "roundtrip": true,
+ "decoded": "ü"
+ },
+ {
+ "cbor": "Y+awtA==",
+ "hex": "63e6b0b4",
+ "roundtrip": true,
+ "decoded": "水"
+ },
+ {
+ "cbor": "ZPCQhZE=",
+ "hex": "64f0908591",
+ "roundtrip": true,
+ "decoded": "𐅑"
+ },
+ {
+ "cbor": "gA==",
+ "hex": "80",
+ "roundtrip": true,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "gwECAw==",
+ "hex": "83010203",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3
+ ]
+ },
+ {
+ "cbor": "gwGCAgOCBAU=",
+ "hex": "8301820203820405",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
+ "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "oA==",
+ "hex": "a0",
+ "roundtrip": true,
+ "decoded": {
+ }
+ },
+ {
+ "cbor": "ogECAwQ=",
+ "hex": "a201020304",
+ "roundtrip": true,
+ "skip": true,
+ "diagnostic": "{1: 2, 3: 4}"
+ },
+ {
+ "cbor": "omFhAWFiggID",
+ "hex": "a26161016162820203",
+ "roundtrip": true,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhoWFiYWM=",
+ "hex": "826161a161626163",
+ "roundtrip": true,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
+ "hex": "a56161614161626142616361436164614461656145",
+ "roundtrip": true,
+ "decoded": {
+ "a": "A",
+ "b": "B",
+ "c": "C",
+ "d": "D",
+ "e": "E"
+ }
+ },
+ {
+ "cbor": "X0IBAkMDBAX/",
+ "hex": "5f42010243030405ff",
+ "roundtrip": false,
+ "skip": true,
+ "diagnostic": "(_ h'0102', h'030405')"
+ },
+ {
+ "cbor": "f2VzdHJlYWRtaW5n/w==",
+ "hex": "7f657374726561646d696e67ff",
+ "roundtrip": false,
+ "decoded": "streaming"
+ },
+ {
+ "cbor": "n/8=",
+ "hex": "9fff",
+ "roundtrip": false,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "nwGCAgOfBAX//w==",
+ "hex": "9f018202039f0405ffff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwGCAgOCBAX/",
+ "hex": "9f01820203820405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGCAgOfBAX/",
+ "hex": "83018202039f0405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGfAgP/ggQF",
+ "hex": "83019f0203ff820405",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
+ "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "v2FhAWFinwID//8=",
+ "hex": "bf61610161629f0203ffff",
+ "roundtrip": false,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhv2FiYWP/",
+ "hex": "826161bf61626163ff",
+ "roundtrip": false,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "v2NGdW71Y0FtdCH/",
+ "hex": "bf6346756ef563416d7421ff",
+ "roundtrip": false,
+ "decoded": {
+ "Fun": true,
+ "Amt": -2
+ }
+ }
+]
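Each golden above pairs a base64 CBOR payload ("cbor") with its hex form and, where
applicable, the expected decoded value or a diagnostic string. A rough sketch of how such a
file could be consumed with this package's CborHandle (the struct and the skip logic are
assumptions for illustration, not the package's actual test harness):

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"
        "os"

        "github.com/ugorji/go/codec"
    )

    // golden mirrors one entry of test-cbor-goldens.json.
    type golden struct {
        Cbor      string      `json:"cbor"` // base64-encoded CBOR bytes
        Hex       string      `json:"hex"`
        Roundtrip bool        `json:"roundtrip"`
        Skip      bool        `json:"skip"`
        Decoded   interface{} `json:"decoded"`
        Diag      string      `json:"diagnostic"`
    }

    func main() {
        raw, err := os.ReadFile("test-cbor-goldens.json")
        if err != nil {
            panic(err)
        }
        var cases []golden
        if err := json.Unmarshal(raw, &cases); err != nil {
            panic(err)
        }
        var h codec.CborHandle
        for i, c := range cases {
            if c.Skip || c.Diag != "" {
                continue // diagnostics (NaN, Infinity, simple(n), tags) need dedicated checks
            }
            in, err := base64.StdEncoding.DecodeString(c.Cbor)
            if err != nil {
                panic(err)
            }
            var got interface{}
            if err := codec.NewDecoderBytes(in, &h).Decode(&got); err != nil {
                panic(err)
            }
            // Comparing got against c.Decoded needs care with integer/float widths.
            fmt.Printf("case %d (0x%s): %v\n", i, c.Hex, got)
        }
    }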
diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py
new file mode 100755
index 0000000..800376f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+# This will create golden files in a directory passed to it.
+# A Test calls this internally to create the golden files,
+# so it can process them (so we don't have to check the files in).
+
+# Ensure msgpack-python and cbor are installed first, using:
+# sudo apt-get install python-dev
+# sudo apt-get install python-pip
+# pip install --user msgpack-python msgpack-rpc-python cbor
+
+# Ensure all "string" keys are utf strings (else encoded as bytes)
+
+import cbor, msgpack, msgpackrpc, sys, os, threading
+
+def get_test_data_list():
+ # get list with all primitive types, and a combo type
+ l0 = [
+ -8,
+ -1616,
+ -32323232,
+ -6464646464646464,
+ 192,
+ 1616,
+ 32323232,
+ 6464646464646464,
+ 192,
+ -3232.0,
+ -6464646464.0,
+ 3232.0,
+ 6464.0,
+ 6464646464.0,
+ False,
+ True,
+ u"null",
+ None,
+ u"some&day>some 0
+ if stopTimeSec > 0:
+ def myStopRpcServer():
+ server.stop()
+ t = threading.Timer(stopTimeSec, myStopRpcServer)
+ t.start()
+ server.start()
+
+def doRpcClientToPythonSvc(port):
+ address = msgpackrpc.Address('127.0.0.1', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print client.call("Echo123", "A1", "B2", "C3")
+ print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
+
+def doRpcClientToGoSvc(port):
+ # print ">>>> port: ", port, " <<<<<"
+ address = msgpackrpc.Address('127.0.0.1', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
+ print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
+
+def doMain(args):
+ if len(args) == 2 and args[0] == "testdata":
+ build_test_data(args[1])
+ elif len(args) == 3 and args[0] == "rpc-server":
+ doRpcServer(int(args[1]), int(args[2]))
+ elif len(args) == 2 and args[0] == "rpc-client-python-service":
+ doRpcClientToPythonSvc(int(args[1]))
+ elif len(args) == 2 and args[0] == "rpc-client-go-service":
+ doRpcClientToGoSvc(int(args[1]))
+ else:
+ print("Usage: test.py " +
+ "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
+
+if __name__ == "__main__":
+ doMain(sys.argv[1:])
+
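The rpc-server mode above can be exercised from Go using this package's msgpack-rpc spec
codec; a sketch under the assumption that the Python server is already listening (the port
and reply handling are illustrative):

    package main

    import (
        "fmt"
        "net"
        "net/rpc"

        "github.com/ugorji/go/codec"
    )

    func main() {
        // Dial the Python echo server started via: test.py rpc-server <port> <seconds>
        conn, err := net.Dial("tcp", "127.0.0.1:9999") // port is illustrative
        if err != nil {
            panic(err)
        }
        var mh codec.MsgpackHandle
        mh.WriteExt = true

        // MsgpackSpecRpc speaks the msgpack-rpc spec that msgpackrpc (Python) implements.
        client := rpc.NewClientWithCodec(codec.MsgpackSpecRpc.ClientCodec(conn, &mh))
        defer client.Close()

        var reply string
        if err := client.Call("Echo123", []string{"A1", "B2", "C3"}, &reply); err != nil {
            panic(err)
        }
        fmt.Println(reply) // e.g. "1:A1 2:B2 3:C3"
    }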
diff --git a/vendor/github.com/ugorji/go/codec/xml.go b/vendor/github.com/ugorji/go/codec/xml.go
new file mode 100644
index 0000000..19fc36c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/xml.go
@@ -0,0 +1,508 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build ignore
+
+package codec
+
+import "reflect"
+
+/*
+
+A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder.
+
+We are attempting this due to perceived issues with encoding/xml:
+ - Complicated. It tried to do too much, and is not as simple to use as json.
+ - Due to over-engineering, reflection is over-used AND performance suffers:
+ java is 6X faster: http://fabsk.eu/blog/category/informatique/dev/golang/
+ even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
+
+codec framework will offer the following benefits
+ - VASTLY improved performance (when using reflection-mode or codecgen)
+ - simplicity and consistency: with the rest of the supported formats
+ - all other benefits of codec framework (streaming, codegeneration, etc)
+
+codec is not a drop-in replacement for encoding/xml.
+It is a replacement, based on the simplicity and performance of codec.
+Look at it like JAXB for Go.
+
+Challenges:
+ - Need to output XML preamble, with all namespaces at the right location in the output.
+ - Each "end" block is dynamic, so we need to maintain a context-aware stack
+ - How to decide when to use an attribute VS an element
+ - How to handle chardata, attr, comment EXPLICITLY.
+ - Should it output fragments?
+ e.g. encoding a bool should just output true OR false, which is not well-formed XML.
+
+Extend the struct tag. See representative example:
+ type X struct {
+ ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"`
+ // format: [namespace-uri ][namespace-prefix ]local-name, ...
+ }
+
+Based on this, we encode
+ - fields as elements, BUT
+ encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string)
+ - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
+
+To handle namespaces:
+ - XMLHandle is denoted as being namespace-aware.
+ Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
+ - *Encoder and *Decoder know whether the Handle "prefers" namespaces.
+ - add *Encoder.getEncName(*structFieldInfo).
+ No one calls *structFieldInfo.indexForEncName directly anymore
+ - OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware
+ indexForEncName takes a parameter of the form namespace:local-name OR local-name
+ - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
+ by being a method on *Decoder, or maybe a method on the Handle itself.
+ No one accesses .encName anymore
+ - let encode.go and decode.go use these (for consistency)
+ - only problem exists for gen.go, where we create a big switch on encName.
+ Now, we also have to add a switch on strings.endsWith(kName, encNsName)
+ - gen.go will need to have many more methods, and then double-on the 2 switch loops like:
+ switch k {
+ case "abc" : x.abc()
+ case "def" : x.def()
+ default {
+ switch {
+ case !nsAware: panic(...)
+ case strings.endsWith(":abc"): x.abc()
+ case strings.endsWith(":def"): x.def()
+ default: panic(...)
+ }
+ }
+ }
+
+The structure below accommodates this:
+
+ type typeInfo struct {
+ sfi []*structFieldInfo // sorted by encName
+ sfins // sorted by namespace
+ sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately.
+ sfip // unsorted
+ }
+ type structFieldInfo struct {
+ encName
+ nsEncName
+ ns string
+ attr bool
+ cdata bool
+ }
+
+indexForEncName is now an internal helper function that takes a sorted array
+(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
+
+There will be a separate parser from the builder.
+The parser will have a method: next() xmlToken method. It has lookahead support,
+so you can pop multiple tokens, make a determination, and push them back in the order popped.
+This will be needed to determine whether we are "nakedly" decoding a container or not.
+The stack will be implemented using a slice and push/pop happens at the [0] element.
+
+xmlToken has fields:
+ - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
+ - value string
+ - ns string
+
+SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
+
+The following are skipped when parsing:
+ - External Entities (from external file)
+ - Notation Declaration e.g. <!NOTATION ... >
+ - Entity Declarations & References
+ - XML Declaration (assume UTF-8)
+ - XML Directive i.e. <! ... >
+ - Other Declarations: Notation, etc.
+ - Comment
+ - Processing Instruction
+ - schema / DTD for validation:
+ We are not a VALIDATING parser. Validation is done elsewhere.
+ However, some parts of the DTD internal subset are used (SEE BELOW).
+ For Attribute List Declarations e.g. <!ATTLIST ... >
+
+ We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
+
+The following XML features are supported
+ - Namespace
+ - Element
+ - Attribute
+ - cdata
+ - Unicode escape
+
+The following DTD (when as an internal sub-set) features are supported:
+ - Internal Entities e.g. <!ENTITY ... >
+ AND entities for the set: [<>&"']
+ - Parameter entities e.g. <!ENTITY % ... >
+
+
+At decode time, a structure containing the following is kept
+ - namespace mapping
+ - default attribute values
+ - all internal entities (<>&"' and others written in the document)
+
+When decode starts, it parses XML namespace declarations and creates a map in the
+xmlDecDriver. While parsing, that map continuously gets updated.
+The only problem happens when a namespace declaration happens on the node that it defines.
+e.g. <ns:node xmlns:ns="some-uri">
+To handle this, each Element must be fully parsed at a time,
+even if it amounts to multiple tokens which are returned one at a time on request.
+
+xmlns is a special attribute name.
+ - It is used to define namespaces, including the default
+ - It is never returned as an AttrKey or AttrVal.
+ *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
+
+Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
+This accommodates map[int]string for example.
+
+It should be possible to create a schema from the types,
+or vice versa (generate types from schema with appropriate tags).
+This is however out-of-scope from this parsing project.
+
+We should write all namespace information at the first point that it is referenced in the tree,
+and use the mapping for all child nodes and attributes. This means that state is maintained
+at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
+
+When decoding, it is important to keep track of entity references and default attribute values.
+It seems these can only be stored in the DTD components. We should honor them when decoding.
+
+Configuration for XMLHandle will look like this:
+
+ XMLHandle
+ DefaultNS string
+ // Encoding:
+ NS map[string]string // ns URI to key, used for encoding
+ // Decoding: in case ENTITY declared in external schema or dtd, store info needed here
+ Entities map[string]string // map of entity rep to character
+
+
+During encode, if a namespace mapping is not defined for a namespace found on a struct,
+then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
+with any other namespace mapping).
+
+Note that different fields in a struct can have different namespaces.
+However, all fields will default to the namespace on the _struct field (if defined).
+
+An XML document is a name, a map of attributes and a list of children.
+Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
+We have to "DecodeNaked" into something that resembles XML data.
+
+To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types:
+ type Name struct { // Preferred. Less allocations due to conversions.
+ Local string
+ Space string
+ }
+ type Element struct {
+ Name Name
+ Attrs map[Name]string
+ Children []interface{} // each child is either *Element or string
+ }
+Only two "supporting" types are exposed for XML: Name and Element.
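+
+For illustration (assuming the Name/Element types above), decoding a fragment like
+<x a="1">hello<y/></x> into a nil interface{} would be expected to yield:
+
+    Element{
+        Name:  Name{Local: "x"},
+        Attrs: map[Name]string{{Local: "a"}: "1"},
+        Children: []interface{}{
+            "hello",
+            &Element{Name: Name{Local: "y"}},
+        },
+    }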
+
+// ------------------
+
+We considered 'type Name string' where Name is like "Space Local" (space-separated).
+We decided against it, because each creation of a name would lead to
+double allocation (first convert []byte to string, then concatenate them into a string).
+The benefit is that it is faster to read Attrs from a map. But given that Element is a value
+object, we want to eschew methods and have public exposed variables.
+
+We also considered the following, where xml types were not value objects, and we used
+intelligent accessor methods to extract information and for performance.
+*** WE DECIDED AGAINST THIS. ***
+ type Attr struct {
+ Name Name
+ Value string
+ }
+ // Element is a ValueObject: There are no accessor methods.
+ // Make element self-contained.
+ type Element struct {
+ Name Name
+ attrsMap map[string]string // where key is "Space Local"
+ attrs []Attr
+ childrenT []string
+ childrenE []Element
+ childrenI []int // each child is a index into T or E.
+ }
+ func (x *Element) child(i) interface{} // returns string or *Element
+
+// ------------------
+
+Per XML spec and our default handling, white space is always treated as
+insignificant between elements, except in a text node. The xml:space='preserve'
+attribute is ignored.
+
+**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
+**So treat them as just "directives" that should be interpreted to mean something**.
+
+On encoding, we support indenting aka prettifying markup in the same way we support it for json.
+
+A document or element can only be encoded/decoded from/to a struct. In this mode:
+ - struct name maps to element name (or tag-info from _struct field)
+ - fields are mapped to child elements or attributes
+
+A map is either encoded as attributes on current element, or as a set of child elements.
+Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
+
+A list is encoded as a set of child elements.
+
+Primitives (number, bool, string) are encoded as an element, attribute or text
+depending on the context.
+
+Extensions must encode themselves as a text string.
+
+Encoding is tough, specifically when encoding mappings, because we need to encode
+as either attribute or element. To do this, we need to default to encoding as attributes,
+and then let Encoder inform the Handle when to start encoding as nodes.
+i.e. Encoder does something like:
+
+ h.EncodeMapStart()
+ h.Encode(), h.Encode(), ...
+ h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
+ h.Encode(), h.Encode(), ...
+ h.EncodeEnd()
+
+Only XMLHandle understands this, and will set itself to start encoding as elements.
+
+This support extends to maps. For example, if a struct field is a map, and it has
+the struct tag signifying it should be attr, then all its fields are encoded as attributes.
+e.g.
+
+ type X struct {
+ M map[string]int `codec:"m,attr"` // encode keys as attributes (attribute name = map key)
+ }
+
+Question:
+ - if encoding a map, what if map keys have spaces in them???
+ Then they cannot be attributes or child elements. Error.
+
+Options to consider adding later:
+ - For attribute values, normalize by trimming beginning and ending white space,
+ and converting every white space sequence to a single space.
+ - ATTLIST restrictions are enforced.
+ e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
+ - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
+ Some elements e.g. br, hr, etc need not close and should be auto-closed
+ ... (see http://www.w3.org/TR/html4/loose.dtd)
+ An expansive set of entities are pre-defined.
+ - Have easy way to create a HTML parser:
+ add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
+ and add HTML Entities to the list.
+ - Support validating element/attribute XMLName before writing it.
+ Keep this behind a flag, which is set to false by default (for performance).
+ type XMLHandle struct {
+ CheckName bool
+ }
+
+Misc:
+
+ROADMAP (1 week):
+ - build encoder (1 day)
+ - build decoder (based off xmlParser) (1 day)
+ - implement xmlParser (2 days).
+ Look at encoding/xml for inspiration.
+ - integrate and TEST (1 days)
+ - write article and post it (1 day)
+
+// ---------- MORE NOTES FROM 2017-11-30 ------------
+
+when parsing
+- parse the attributes first
+- then parse the nodes
+
+basically:
+- if encoding a field: we use the field name for the wrapper
+- if encoding a non-field, then just use the element type name
+
+    map[string]string ==> <map><key>abc</key><val>val</val>...</map> or
+                          <map key="abc">val</map>... OR
+                          <key1>val1</key1><key2>val2</key2>...              <- PREFERRED
+    []string  ==> <string>v1</string><string>v2</string>...
+    string v1 ==> <string>v1</string>
+    bool true ==> <bool>true</bool>
+    float 1.0 ==> <float>1.0</float>
+    ...
+
+    F1 map[string]string ==> <F1><key>abc</key><val>val</val>...</F1> OR
+                             <F1 key="abc">val</F1>... OR
+                             <F1><abc>val</abc>...</F1>                      <- PREFERRED
+    F2 []string ==> <F2><string>v1</string><string>v2</string>...</F2>
+    F3 bool ==> <F3>true</F3>
+    ...
+
+- a scalar is encoded as:
+    (value) of type T  ==> <T><value/></T>
+    (value) of field F ==> <F><value/></F>
+- A kv-pair is encoded as:
+    (key,value) ==> <map key="value"> OR