diff --git a/README.md b/README.md index b7be213a..99ae59fd 100644 --- a/README.md +++ b/README.md @@ -36,11 +36,33 @@ Settings: * SKIPVERIFY: false or 0, true or 1 // will skip hostname/certificate check at all * INCLUDE_QUEUES: ".*", // regex, matching queue names are exported * SKIP_QUEUES: "^$", // regex, matching queue names are not exported (useful for short-lived rpc queues). First performed INCLUDE, after SKIP +* RABBIT_CAPABILITIES: "", // comma-separated list of extended scraping capabilities supported by the target RabbitMQ server Example OUTPUT_FORMAT=JSON PUBLISH_PORT=9099 ./rabbitmq_exporter +#### Extended RabbitMQ capabilities + +Newer version of RabbitMQ can provide some features that reduce +overhead imposed by scraping the data needed by this exporter. The +following capabilities are currently supported in +`RABBIT_CAPABILITIES` env var: + +* `no_sort`: By default RabbitMQ management plugin sorts results using + the default sort order of vhost/name. This sorting overhead can be + avoided by passing empty sort argument (`?sort=`) to RabbitMQ + starting from version 3.6.8. This option can be safely enabled on + earlier 3.6.X versions, but it'll not give any performance + improvements. And it's incompatible with 3.4.X and 3.5.X. +* `bert`: Since 3.6.9 (see + https://github.com/rabbitmq/rabbitmq-management/pull/367) RabbitMQ + supports BERT encoding as a JSON alternative. Given that BERT + encoding is implemented in C inside the Erlang VM, it's way more + effective than pure-Erlang JSON encoding. So this greatly reduces + monitoring overhead when we have a lot of objects in RabbitMQ. + + ### Metrics All metrics (except golang/prometheus metrics) are prefixed with "rabbitmq_". 
diff --git a/bertmap.go b/bertmap.go new file mode 100644 index 00000000..b44a2be2 --- /dev/null +++ b/bertmap.go @@ -0,0 +1,311 @@ +package main + +import ( + "fmt" + log "github.com/Sirupsen/logrus" + bert "github.com/landonia/gobert" + "math/big" +) + +// rabbitBERTReply (along with its RabbitReply interface +// implementation) allow parsing of BERT-encoded RabbitMQ replies in a +// way that's fully compatible with JSON parser from jsonmap.go +type rabbitBERTReply struct { + body []byte +} + +func MakeBERTReply(body []byte) RabbitReply { + return &rabbitBERTReply{body} +} + +func (rep *rabbitBERTReply) MakeStatsInfo() []StatsInfo { + rawObjects, err := bert.Decode(rep.body) + if err != nil { + log.WithField("error", err).Error("Error while decoding bert") + return make([]StatsInfo, 0) + } + + objects, ok := rawObjects.([]bert.Term) + if !ok { + log.WithField("got", rawObjects).Error("Statistics reply should contain a slice of objects") + return make([]StatsInfo, 0) + } + + statistics := make([]StatsInfo, 0, len(objects)) + + for _, v := range objects { + obj, ok := parseSingleStatsObject(v) + if !ok { + log.WithField("got", v).Error("Ignoring unparseable stats object") + continue + } + statistics = append(statistics, *obj) + } + + return statistics +} + +func (rep *rabbitBERTReply) MakeMap() MetricMap { + flMap := make(MetricMap) + term, err := bert.Decode(rep.body) + + if err != nil { + log.WithField("error", err).Error("Error while decoding bert") + return flMap + } + + parseProplist(&flMap, "", term) + return flMap +} + +// iterateBertKV helps to traverse any map-like structures returned by +// RabbitMQ with a user-provided function. 
We need it because +// different versions of RabbitMQ can encode a map in a bunch of +// different ways: +// - proplist +// - proplist additionally wrapped in a {struct, ...} tuple +// - map type available in modern erlang versions +// +// Non-nil error return means that an object can't be interpreted as a map in any way +// +// Provided function can return 'false' value to stop traversal earlier +func iterateBertKV(obj interface{}, elemFunc func(string, interface{}) bool) error { + switch obj := obj.(type) { + case []bert.Term: + pairs, ok := assertBertProplistPairs(obj) + if !ok { + return BertError("Doesn't look like a proplist", obj) + } + for _, v := range pairs { + key, value, ok := assertBertKeyedTuple(v) + if ok { + needToContinue := elemFunc(key, value) + if !needToContinue { + return nil + } + } + } + return nil + case bert.Map: + for keyRaw, value := range obj { + key, ok := parseBertStringy(keyRaw) + if ok { + needToContinue := elemFunc(key, value) + if !needToContinue { + return nil + } + } + } + return nil + default: + return BertError("Can't iterate over non-KV object", obj) + } +} + +// parseSingleStatsObject extracts information about a named RabbitMQ +// object: both its vhost/name information and then the usual +// MetricMap. 
+func parseSingleStatsObject(obj interface{}) (*StatsInfo, bool) { + var ok bool + var result StatsInfo + var objectOk = true + result.metrics = make(MetricMap) + + err := iterateBertKV(obj, func(key string, value interface{}) bool { + switch { + case key == "name": + result.name, ok = parseBertStringy(value) + if !ok { + log.WithField("got", value).Error("Non-string 'name' field") + objectOk = false + return false + } + case key == "vhost": + result.vhost, ok = parseBertStringy(value) + if !ok { + log.WithField("got", value).Error("Non-string 'vhost' field") + objectOk = false + return false + } + case key == "policy": + result.policy, _ = parseBertStringy(value) + default: + if key == "durable" { + // We want to have 'durable' in 2 + // places: inside MetricMap (and + // converted to float) and as a string + // field in StatsInfo + tmp, ok := parseBertStringy(value) + if ok { + result.durable = tmp + } + } + if floatValue, ok := parseFloaty(value); ok { + result.metrics[key] = floatValue + return true + } + + // Nested structures don't need special + // processing, so we fallback to generic + // parser. + if err := parseProplist(&result.metrics, key, value); err == nil { + return true + } + } + return true + }) + if err == nil && objectOk { + return &result, true + } else { + return nil, false + } +} + +// parseProplist descends into an erlang data structure and stores +// everything remotely resembling a float in a toMap. +func parseProplist(toMap *MetricMap, basename string, maybeProplist interface{}) error { + prefix := "" + if basename != "" { + prefix = basename + "." 
+ } + return iterateBertKV(maybeProplist, func(key string, value interface{}) bool { + if floatValue, ok := parseFloaty(value); ok { + (*toMap)[prefix+key] = floatValue + return true + } + + parseProplist(toMap, prefix+key, value) // This can fail, but we don't care + return true + }) +} + +// assertBertSlice checks whether the provided value is something +// that's represented as a slice by BERT parser (list or tuple). +func assertBertSlice(maybeSlice interface{}) ([]bert.Term, bool) { + switch it := maybeSlice.(type) { + case []bert.Term: + return it, true + default: + return nil, false + } +} + +// assertBertKeyedTuple checks whether the provided value looks like +// an element of proplist - 2-element tuple where the first element is +// an atom. +func assertBertKeyedTuple(maybeTuple interface{}) (string, bert.Term, bool) { + tuple, ok := assertBertSlice(maybeTuple) + if !ok { + return "", nil, false + } + if len(tuple) != 2 { + return "", nil, false + } + key, ok := assertBertAtom(tuple[0]) + if !ok { + return "", nil, false + } + return key, tuple[1].(bert.Term), true +} + +func assertBertAtom(val interface{}) (string, bool) { + if atom, ok := val.(bert.Atom); ok { + return string(atom), true + } + return "", false +} + +// assertBertProplistPairs checks whether the provided value points to +// a proplist. Additional level of {struct, ...} wrapping can be +// removed in process. 
+func assertBertProplistPairs(maybeTaggedProplist interface{}) ([]bert.Term, bool) { + terms, ok := assertBertSlice(maybeTaggedProplist) + if !ok { + return nil, false + } + + if len(terms) == 0 { + return terms, true + } + + // Strip {struct, ...} tagging that is used to help RabbitMQ + // JSON encoder + key, value, ok := assertBertKeyedTuple(terms) + if ok && key == "struct" { + return assertBertProplistPairs(value) + } + + // Minimal safety check - at least the first element should be + // a proplist pair + _, _, ok = assertBertKeyedTuple(terms[0]) + if ok { + return terms, true + } + return nil, false +} + +// parseFloaty tries to interpret the provided BERT value as a +// float. Floats themselves, integers and booleans are handled. +func parseFloaty(num interface{}) (float64, bool) { + switch num := num.(type) { + case int: + return float64(num), true + case int8: + return float64(num), true + case int16: + return float64(num), true + case int32: + return float64(num), true + case int64: + return float64(num), true + case uint: + return float64(num), true + case uint8: + return float64(num), true + case uint16: + return float64(num), true + case uint32: + return float64(num), true + case uint64: + return float64(num), true + case float32: + return float64(num), true + case float64: + return num, true + case bert.Atom: + if num == bert.TrueAtom { + return 1, true + } else if num == bert.FalseAtom { + return 0, true + } + case big.Int: + bigFloat := new(big.Float).SetInt(&num) + result, _ := bigFloat.Float64() + return result, true + } + return 0, false +} + +// parseBertStringy tries to extract an Erlang value that can be +// represented as a Go string (binary or atom). 
+func parseBertStringy(val interface{}) (string, bool) { + if stringer, ok := val.(fmt.Stringer); ok { + return stringer.String(), true + } else if atom, ok := val.(bert.Atom); ok { + return string(atom), true + } + return "", false +} + +type bertDecodeError struct { + message string + object interface{} +} + +func (err *bertDecodeError) Error() string { + return fmt.Sprintf("%s while decoding: %s", err.message, err.object) +} + +func BertError(message string, object interface{}) error { + return &bertDecodeError{message, object} +} diff --git a/config.go b/config.go index be0e8f4f..c2ba5ecc 100644 --- a/config.go +++ b/config.go @@ -19,6 +19,7 @@ var ( InsecureSkipVerify: false, SkipQueues: "^$", IncludeQueues: ".*", + RabbitCapabilities: make(rabbitCapabilitySet), } ) @@ -32,6 +33,20 @@ type rabbitExporterConfig struct { InsecureSkipVerify bool SkipQueues string IncludeQueues string + RabbitCapabilities rabbitCapabilitySet +} + +type rabbitCapability string +type rabbitCapabilitySet map[rabbitCapability]bool + +const ( + rabbitCapNoSort rabbitCapability = "no_sort" + rabbitCapBert rabbitCapability = "bert" +) + +var allRabbitCapabilities = rabbitCapabilitySet{ + rabbitCapNoSort: true, + rabbitCapBert: true, } func initConfig() { @@ -40,7 +55,6 @@ func initConfig() { if valid, _ := regexp.MatchString("https?://[a-zA-Z.0-9]+", strings.ToLower(url)); valid { config.RabbitURL = url } - } if user := os.Getenv("RABBIT_USER"); user != "" { @@ -75,4 +89,26 @@ func initConfig() { if IncludeQueues := os.Getenv("INCLUDE_QUEUES"); IncludeQueues != "" { config.IncludeQueues = IncludeQueues } + + if rawCapabilities := os.Getenv("RABBIT_CAPABILITIES"); rawCapabilities != "" { + config.RabbitCapabilities = parseCapabilities(rawCapabilities) + } +} + +func parseCapabilities(raw string) rabbitCapabilitySet { + result := make(rabbitCapabilitySet) + candidates := strings.Split(raw, ",") + for _, maybeCapStr := range candidates { + maybeCap := 
rabbitCapability(strings.TrimSpace(maybeCapStr)) + enabled, present := allRabbitCapabilities[maybeCap] + if enabled && present { + result[maybeCap] = true + } + } + return result +} + +func isCapEnabled(config rabbitExporterConfig, cap rabbitCapability) bool { + exists, enabled := config.RabbitCapabilities[cap] + return exists && enabled } diff --git a/config_test.go b/config_test.go index 27738014..cc99b011 100644 --- a/config_test.go +++ b/config_test.go @@ -2,6 +2,7 @@ package main import ( "os" + "reflect" "testing" ) @@ -84,3 +85,23 @@ func TestConfig_Http_URL(t *testing.T) { t.Errorf("Invalid URL. It should start with http(s)://. expected=%v,got=%v", url, config.RabbitURL) } } + +func TestConfig_Capabilities(t *testing.T) { + defer os.Unsetenv("RABBIT_CAPABILITIES") + + os.Unsetenv("RABBIT_CAPABILITIES") + initConfig() + if !reflect.DeepEqual(config.RabbitCapabilities, make(rabbitCapabilitySet)) { + t.Error("Capability set should be empty by default") + } + + var needToSupport = []rabbitCapability{"no_sort", "bert"} + for _, cap := range needToSupport { + os.Setenv("RABBIT_CAPABILITIES", "junk_cap, another_with_spaces_around , "+string(cap)+", done") + initConfig() + expected := rabbitCapabilitySet{cap: true} + if !reflect.DeepEqual(config.RabbitCapabilities, expected) { + t.Errorf("Capability '%s' wasn't properly detected from env", cap) + } + } +} diff --git a/decoder.go b/decoder.go new file mode 100644 index 00000000..968b2e7c --- /dev/null +++ b/decoder.go @@ -0,0 +1,40 @@ +package main + +//MetricMap maps name to float64 metric +type MetricMap map[string]float64 + +//StatsInfo describes one statistic (queue or exchange): its name, vhost it belongs to, and all associated metrics. +type StatsInfo struct { + name string + vhost string + policy string + durable string + metrics MetricMap +} + +// RabbitReply is an interface responsible for extracting usable +// information from RabbitMQ HTTP API replies, independent of the +// actual transfer format used. 
+type RabbitReply interface { + // MakeMap makes a flat map from string to float values from a + // RabbitMQ reply. Processing happens recursively and nesting + // is represented by '.'-separated keys. Entries are added + // only for values that can be reasonably converted to float + // (numbers and booleans). Failure to parse should result in + // an empty result map. + MakeMap() MetricMap + + // MakeStatsInfo parses a list of details about some named + // RabbitMQ objects (i.e. list of queues, exchanges, etc.). + // Failure to parse should result in an empty result list. + MakeStatsInfo() []StatsInfo +} + +// MakeReply instantiates the appropriate reply parser for a given +// reply and the current configuration. +func MakeReply(config rabbitExporterConfig, body []byte) (RabbitReply, error) { + if isCapEnabled(config, rabbitCapBert) { + return MakeBERTReply(body), nil + } + return MakeJSONReply(body), nil +} diff --git a/decoder_test.go b/decoder_test.go new file mode 100644 index 00000000..f3173041 --- /dev/null +++ b/decoder_test.go @@ -0,0 +1,72 @@ +package main + +import ( + "github.com/kylelemons/godebug/pretty" + "io/ioutil" + "testing" +) + +func TestStatsEquivalence(t *testing.T) { + endpoints := []string{"queues", "exchanges", "nodes"} + versions := []string{"3.6.8", "3.7.0"} + for _, version := range versions { + for _, endpoint := range endpoints { + base := endpoint + "-" + version + assertBertStatsEquivalence(t, base) + } + } +} + +func TestMetricMapEquivalence(t *testing.T) { + endpoints := []string{"overview"} + versions := []string{"3.6.8", "3.7.0"} + for _, version := range versions { + for _, endpoint := range endpoints { + base := endpoint + "-" + version + assertBertMetricMapEquivalence(t, base) + } + } +} + +func tryReadFiles(t *testing.T, base, firstExt, secondExt string) ([]byte, []byte) { + firstFile := "testdata/" + base + "." 
+ firstExt + first, err := ioutil.ReadFile(firstFile) + if err != nil { + t.Fatalf("Error reading %s", firstFile) + } + + secondFile := "testdata/" + base + "." + secondExt + second, err := ioutil.ReadFile(secondFile) + if err != nil { + t.Fatalf("Error reading %s", secondFile) + } + return first, second +} + +func assertBertStatsEquivalence(t *testing.T, baseFileName string) { + json, bert := tryReadFiles(t, baseFileName, "json", "bert") + + jsonReply := MakeJSONReply(json) + bertReply := MakeBERTReply(bert) + + bertParsed := bertReply.MakeStatsInfo() + jsonParsed := jsonReply.MakeStatsInfo() + + if diff := pretty.Compare(jsonParsed, bertParsed); diff != "" { + t.Errorf("JSON/BERT mismatch for %s:\n%s", baseFileName, diff) + } +} + +func assertBertMetricMapEquivalence(t *testing.T, baseFileName string) { + json, bert := tryReadFiles(t, baseFileName, "json", "bert") + + jsonReply := MakeJSONReply(json) + bertReply := MakeBERTReply(bert) + + bertParsed := bertReply.MakeMap() + jsonParsed := jsonReply.MakeMap() + + if diff := pretty.Compare(jsonParsed, bertParsed); diff != "" { + t.Errorf("JSON/BERT mismatch for %s:\n%s", baseFileName, diff) + } +} diff --git a/jsonmap.go b/jsonmap.go index fee38af4..7e8e80f5 100644 --- a/jsonmap.go +++ b/jsonmap.go @@ -1,35 +1,33 @@ package main import ( + "bytes" "encoding/json" "strconv" log "github.com/Sirupsen/logrus" ) -//MetricMap maps name to float64 metric -type MetricMap map[string]float64 +type rabbitJSONReply struct { + decoder *json.Decoder +} -//StatsInfo describes one statistic (queue or exchange): its name, vhost it belongs to, and all associated metrics. -type StatsInfo struct { - name string - vhost string - policy string - durable string - metrics MetricMap +func MakeJSONReply(body []byte) RabbitReply { + decoder := json.NewDecoder(bytes.NewBuffer(body)) + return &rabbitJSONReply{decoder} } //MakeStatsInfo creates a slice of StatsInfo from json input. Only keys with float values are mapped into `metrics`. 
-func MakeStatsInfo(d *json.Decoder) []StatsInfo { +func (rep *rabbitJSONReply) MakeStatsInfo() []StatsInfo { var statistics []StatsInfo var jsonArr []map[string]interface{} - if d == nil { + if rep.decoder == nil { log.Error("JSON decoder not iniatilized") return make([]StatsInfo, 0) } - if err := d.Decode(&jsonArr); err != nil { + if err := rep.decoder.Decode(&jsonArr); err != nil { log.WithField("error", err).Error("Error while decoding json") return make([]StatsInfo, 0) } @@ -60,16 +58,16 @@ func MakeStatsInfo(d *json.Decoder) []StatsInfo { } //MakeMap creates a map from json input. Only keys with float values are mapped. -func MakeMap(d *json.Decoder) MetricMap { +func (rep *rabbitJSONReply) MakeMap() MetricMap { flMap := make(MetricMap) var output map[string]interface{} - if d == nil { + if rep.decoder == nil { log.Error("JSON decoder not iniatilized") return flMap } - if err := d.Decode(&output); err != nil { + if err := rep.decoder.Decode(&output); err != nil { log.WithField("error", err).Error("Error while decoding json") return flMap } diff --git a/jsonmap_test.go b/jsonmap_test.go index 2a062baf..159ecac4 100644 --- a/jsonmap_test.go +++ b/jsonmap_test.go @@ -1,27 +1,18 @@ package main import ( - "encoding/json" - "strings" "testing" ) func TestWithInvalidJSON(t *testing.T) { - invalidJSONDecoder := json.NewDecoder(strings.NewReader("I'm no json")) + invalidJSONReply := MakeJSONReply([]byte("I'm no json")) - if mm := MakeMap(invalidJSONDecoder); mm == nil { + if mm := invalidJSONReply.MakeMap(); mm == nil { t.Errorf("Json is invalid. Empty map should be returned. Value: %v", mm) } - if qi := MakeStatsInfo(invalidJSONDecoder); qi == nil { + if qi := invalidJSONReply.MakeStatsInfo(); qi == nil { t.Errorf("Json is invalid. Empty map should be returned. Value: %v", qi) } - - if mm := MakeMap(nil); mm == nil { - t.Errorf("Empty map should be returned. Value: %v", mm) - } - if qi := MakeStatsInfo(nil); qi == nil { - t.Errorf("Empty map should be returned.. 
Value: %v", qi) - } } func checkMap(flMap map[string]float64, t *testing.T, addValue float64) { @@ -43,18 +34,16 @@ func checkMap(flMap map[string]float64, t *testing.T, addValue float64) { } func TestMakeMap(t *testing.T) { - jsonObject := strings.NewReader(`{"FloatKey":4, "st":"string","nes":{"ted":5}}`) - decoder := json.NewDecoder(jsonObject) + reply := MakeJSONReply([]byte(`{"FloatKey":4, "st":"string","nes":{"ted":5}}`)) - flMap := MakeMap(decoder) + flMap := reply.MakeMap() checkMap(flMap, t, 0) } func TestMakeStatsInfo(t *testing.T) { - jsonArray := strings.NewReader(`[{"name":"q1", "FloatKey":14,"nes":{"ted":15}},{"name":"q2", "vhost":"foo", "FloatKey":24,"nes":{"ted":25}}]`) - decoder := json.NewDecoder(jsonArray) + reply := MakeJSONReply([]byte(`[{"name":"q1", "FloatKey":14,"nes":{"ted":15}},{"name":"q2", "vhost":"foo", "FloatKey":24,"nes":{"ted":25}}]`)) - qinfo := MakeStatsInfo(decoder) + qinfo := reply.MakeStatsInfo() if qinfo[0].name != "q1" { t.Errorf("unexpected qinfo name: %v", qinfo[0].name) } diff --git a/main.go b/main.go index fdc1b6f4..a3158b02 100644 --- a/main.go +++ b/main.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "net/http" "os" "strings" @@ -40,14 +41,15 @@ func main() { }) log.WithFields(log.Fields{ - "PUBLISH_PORT": config.PublishPort, - "RABBIT_URL": config.RabbitURL, - "RABBIT_USER": config.RabbitUsername, - "OUTPUT_FORMAT": config.OutputFormat, - "VERSION": Version, - "REVISION": Revision, - "BRANCH": Branch, - "BUILD_DATE": BuildDate, + "PUBLISH_PORT": config.PublishPort, + "RABBIT_URL": config.RabbitURL, + "RABBIT_USER": config.RabbitUsername, + "OUTPUT_FORMAT": config.OutputFormat, + "RABBIT_CAPABILITIES": formatCapabilities(config.RabbitCapabilities), + "VERSION": Version, + "REVISION": Revision, + "BRANCH": Branch, + "BUILD_DATE": BuildDate, // "RABBIT_PASSWORD": config.RABBIT_PASSWORD, }).Info("Starting RabbitMQ exporter") @@ -62,3 +64,16 @@ func getLogLevel() log.Level { } return level } + +func 
formatCapabilities(caps rabbitCapabilitySet) string { + var buffer bytes.Buffer + first := true + for k, _ := range caps { + if !first { + buffer.WriteString(",") + } + first = false + buffer.WriteString(string(k)) + } + return buffer.String() +} diff --git a/rabbitClient.go b/rabbitClient.go index 071945e3..5edc760a 100644 --- a/rabbitClient.go +++ b/rabbitClient.go @@ -1,10 +1,8 @@ package main import ( - "bytes" "crypto/tls" "crypto/x509" - "encoding/json" "errors" "io/ioutil" "net/http" @@ -40,9 +38,16 @@ func initClient() { } -func loadMetrics(config rabbitExporterConfig, endpoint string) (*json.Decoder, error) { - req, err := http.NewRequest("GET", config.RabbitURL+"/api/"+endpoint, nil) +func loadMetrics(config rabbitExporterConfig, endpoint string) (RabbitReply, error) { + var args string + enabled, exists := config.RabbitCapabilities[rabbitCapNoSort] + if enabled && exists { + args = "?sort=" + } + + req, err := http.NewRequest("GET", config.RabbitURL+"/api/"+endpoint+args, nil) req.SetBasicAuth(config.RabbitUsername, config.RabbitPassword) + req.Header.Add("Accept", acceptContentType(config)) resp, err := client.Do(req) @@ -62,18 +67,18 @@ func loadMetrics(config rabbitExporterConfig, endpoint string) (*json.Decoder, e } log.WithFields(log.Fields{"body": string(body), "endpoint": endpoint}).Debug("Metrics loaded") - return json.NewDecoder(bytes.NewBuffer(body)), nil + return MakeReply(config, body) } func getStatsInfo(config rabbitExporterConfig, apiEndpoint string) ([]StatsInfo, error) { var q []StatsInfo - d, err := loadMetrics(config, apiEndpoint) + reply, err := loadMetrics(config, apiEndpoint) if err != nil { return q, err } - q = MakeStatsInfo(d) + q = reply.MakeStatsInfo() return q, nil } @@ -81,10 +86,17 @@ func getStatsInfo(config rabbitExporterConfig, apiEndpoint string) ([]StatsInfo, func getMetricMap(config rabbitExporterConfig, apiEndpoint string) (MetricMap, error) { var overview MetricMap - d, err := loadMetrics(config, apiEndpoint) + 
reply, err := loadMetrics(config, apiEndpoint) if err != nil { return overview, err } - return MakeMap(d), nil + return reply.MakeMap(), nil +} + +func acceptContentType(config rabbitExporterConfig) string { + if isCapEnabled(config, rabbitCapBert) { + return "application/bert" + } + return "application/json" } diff --git a/rabbitClient_test.go b/rabbitClient_test.go index 3b6d570a..26c82631 100644 --- a/rabbitClient_test.go +++ b/rabbitClient_test.go @@ -129,3 +129,35 @@ func TestExchanges(t *testing.T) { } expect(t, len(exchanges), 0) } + +func TestNoSort(t *testing.T) { + assertNoSortRespected(t, false) + assertNoSortRespected(t, true) +} + +func assertNoSortRespected(t *testing.T, enabled bool) { + var args string + if enabled { + args = "?sort=" + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + if r.RequestURI == "/api/overview"+args { + fmt.Fprintln(w, `{"nonFloat":"bob@example.com","float1":1.23456789101112,"number":2}`) + } else { + t.Errorf("Invalid request with enabled=%s. URI=%v", enabled, r.RequestURI) + fmt.Fprintf(w, "Invalid request. 
URI=%v", r.RequestURI) + } + + })) + defer server.Close() + + config := &rabbitExporterConfig{ + RabbitURL: server.URL, + RabbitCapabilities: rabbitCapabilitySet{rabbitCapNoSort: enabled}, + } + + getMetricMap(*config, "overview") +} diff --git a/testdata/exchanges-3.6.8.bert b/testdata/exchanges-3.6.8.bert new file mode 100644 index 00000000..e7d95b44 Binary files /dev/null and b/testdata/exchanges-3.6.8.bert differ diff --git a/testdata/exchanges-3.6.8.json b/testdata/exchanges-3.6.8.json new file mode 100644 index 00000000..e5933b4d --- /dev/null +++ b/testdata/exchanges-3.6.8.json @@ -0,0 +1 @@ +[{"name":"","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{},"message_stats":{"publish_out":1,"publish_out_details":{"rate":0.0},"publish_in":1,"publish_in_details":{"rate":0.0}}},{"name":"amq.direct","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"amq.fanout","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"amq.headers","vhost":"/","type":"headers","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"amq.match","vhost":"/","type":"headers","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"amq.rabbitmq.log","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":true,"arguments":{}},{"name":"amq.rabbitmq.trace","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":true,"arguments":{}},{"name":"amq.topic","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}}] \ No newline at end of file diff --git a/testdata/exchanges-3.7.0.bert b/testdata/exchanges-3.7.0.bert new file mode 100644 index 00000000..25b453ba Binary files /dev/null and b/testdata/exchanges-3.7.0.bert differ diff --git a/testdata/exchanges-3.7.0.json b/testdata/exchanges-3.7.0.json new file mode 100644 index 00000000..0384cfc0 
--- /dev/null +++ b/testdata/exchanges-3.7.0.json @@ -0,0 +1 @@ +[{"name":"","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.direct","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.fanout","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.headers","vhost":"/","type":"headers","durable":true,"auto_delete":false,"internal":false,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.match","vhost":"/","type":"headers","durable":true,"auto_delete":false,"internal":false,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.rabbitmq.log","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":true,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.rabbitmq.trace","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":true,"arguments":{},"user_who_performed_action":"rmq-internal"},{"name":"amq.topic","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{},"user_who_performed_action":"rmq-internal"}] \ No newline at end of file diff --git a/testdata/nodes-3.6.8.bert b/testdata/nodes-3.6.8.bert new file mode 100644 index 00000000..93b4abe8 Binary files /dev/null and b/testdata/nodes-3.6.8.bert differ diff --git a/testdata/nodes-3.6.8.json b/testdata/nodes-3.6.8.json new file mode 100644 index 00000000..96363ded --- /dev/null +++ b/testdata/nodes-3.6.8.json @@ -0,0 +1 @@ 
+[{"partitions":[],"os_pid":"14935","fd_total":1024,"sockets_total":829,"mem_limit":6679527424,"mem_alarm":false,"disk_free_limit":50000000,"disk_free_alarm":false,"proc_total":262144,"rates_mode":"basic","uptime":76891943,"run_queue":0,"processors":4,"exchange_types":[{"name":"topic","description":"AMQP topic exchange, as per the AMQP specification","enabled":true},{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true}],"auth_mechanisms":[{"name":"AMQPLAIN","description":"QPid AMQPLAIN mechanism","enabled":true},{"name":"PLAIN","description":"SASL PLAIN authentication mechanism","enabled":true},{"name":"RABBIT-CR-DEMO","description":"RabbitMQ Demo challenge-response authentication mechanism","enabled":false}],"applications":[{"name":"amqp_client","description":"RabbitMQ AMQP Client","version":"3.6.8+1.g1dcb221"},{"name":"asn1","description":"The Erlang ASN1 compiler version 4.0.2","version":"4.0.2"},{"name":"compiler","description":"ERTS CXC 138 10","version":"6.0.3"},{"name":"cowboy","description":"Small, fast, modular HTTP server.","version":"1.0.4"},{"name":"cowlib","description":"Support library for manipulating Web protocols.","version":"1.0.2"},{"name":"crypto","description":"CRYPTO","version":"3.6.3"},{"name":"inets","description":"INETS CXC 138 49","version":"6.2.4"},{"name":"kernel","description":"ERTS CXC 138 10","version":"4.2"},{"name":"mnesia","description":"MNESIA CXC 138 12","version":"4.13.4"},{"name":"os_mon","description":"CPO CXC 138 46","version":"2.4"},{"name":"public_key","description":"Public key infrastructure","version":"1.1.1"},{"name":"rabbit","description":"RabbitMQ","version":"3.6.8+1.g1dcb221"},{"name":"rabbit_common","description":"Modules shared by rabbitmq-server and 
rabbitmq-erlang-client","version":"3.6.8+1.g1dcb221"},{"name":"rabbitmq_management","description":"RabbitMQ Management Console","version":"3.6.8+1.g1dcb221"},{"name":"rabbitmq_management_agent","description":"RabbitMQ Management Agent","version":"3.6.8+1.g1dcb221"},{"name":"rabbitmq_web_dispatch","description":"RabbitMQ Web Dispatcher","version":"3.6.8+1.g1dcb221"},{"name":"ranch","description":"Socket acceptor pool for TCP protocols.","version":"1.3.0"},{"name":"sasl","description":"SASL CXC 138 11","version":"2.7"},{"name":"ssl","description":"Erlang/OTP SSL application","version":"7.3.3.1"},{"name":"stdlib","description":"ERTS CXC 138 10","version":"2.8"},{"name":"syntax_tools","description":"Syntax tools","version":"1.7"},{"name":"xmerl","description":"XML parser","version":"1.3.10"}],"contexts":[{"description":"RabbitMQ Management","path":"/","port":"15672"}],"log_file":"./el","sasl_log_file":"./sel","db_dir":"/home/binarin/mirantis-workspace/mgmt-no-sort/Mnesia.rabbit-dev@localhost","config_files":["/home/binarin/mirantis-workspace/mgmt-no-sort/vm.config"],"net_ticktime":60,"enabled_plugins":["rabbitmq_management"],"name":"rabbit-dev@localhost","type":"disc","running":true,"mem_used":52122128,"mem_used_details":{"rate":-21851.2},"fd_used":30,"fd_used_details":{"rate":0.4},"sockets_used":0,"sockets_used_details":{"rate":0.0},"proc_used":352,"proc_used_details":{"rate":0.0},"disk_free":51221843968,"disk_free_details":{"rate":0.0},"gc_num":582090,"gc_num_details":{"rate":6.4},"gc_bytes_reclaimed":14610041680,"gc_bytes_reclaimed_details":{"rate":145201.6},"context_switches":2476886,"context_switches_details":{"rate":34.0},"io_read_count":1,"io_read_count_details":{"rate":0.0},"io_read_bytes":1,"io_read_bytes_details":{"rate":0.0},"io_read_avg_time":0.249,"io_read_avg_time_details":{"rate":0.0},"io_write_count":0,"io_write_count_details":{"rate":0.0},"io_write_bytes":0,"io_write_bytes_details":{"rate":0.0},"io_write_avg_time":0.0,"io_write_avg_time_details":{"rate"
:0.0},"io_sync_count":0,"io_sync_count_details":{"rate":0.0},"io_sync_avg_time":0.0,"io_sync_avg_time_details":{"rate":0.0},"io_seek_count":0,"io_seek_count_details":{"rate":0.0},"io_seek_avg_time":0.0,"io_seek_avg_time_details":{"rate":0.0},"io_reopen_count":0,"io_reopen_count_details":{"rate":0.0},"mnesia_ram_tx_count":14,"mnesia_ram_tx_count_details":{"rate":0.0},"mnesia_disk_tx_count":0,"mnesia_disk_tx_count_details":{"rate":0.0},"msg_store_read_count":0,"msg_store_read_count_details":{"rate":0.0},"msg_store_write_count":0,"msg_store_write_count_details":{"rate":0.0},"queue_index_journal_write_count":0,"queue_index_journal_write_count_details":{"rate":0.0},"queue_index_write_count":0,"queue_index_write_count_details":{"rate":0.0},"queue_index_read_count":0,"queue_index_read_count_details":{"rate":0.0},"io_file_handle_open_attempt_count":10,"io_file_handle_open_attempt_count_details":{"rate":0.0},"io_file_handle_open_attempt_avg_time":0.037200000000000004,"io_file_handle_open_attempt_avg_time_details":{"rate":0.0},"cluster_links":[],"metrics_gc_queue_length":{"connection_closed":0,"channel_closed":0,"consumer_deleted":0,"exchange_deleted":0,"queue_deleted":0,"vhost_deleted":0,"node_node_deleted":0,"channel_consumer_deleted":0}}] \ No newline at end of file diff --git a/testdata/nodes-3.7.0.bert b/testdata/nodes-3.7.0.bert new file mode 100644 index 00000000..bb717dee Binary files /dev/null and b/testdata/nodes-3.7.0.bert differ diff --git a/testdata/nodes-3.7.0.json b/testdata/nodes-3.7.0.json new file mode 100644 index 00000000..7942763c --- /dev/null +++ b/testdata/nodes-3.7.0.json @@ -0,0 +1 @@ +[{"partitions":[],"os_pid":"21044","fd_total":1024,"sockets_total":829,"mem_limit":6679527424,"mem_alarm":false,"disk_free_limit":50000000,"disk_free_alarm":false,"proc_total":1048576,"rates_mode":"basic","uptime":48243,"run_queue":0,"processors":4,"exchange_types":[{"name":"topic","description":"AMQP topic exchange, as per the AMQP 
specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true}],"auth_mechanisms":[{"name":"PLAIN","description":"SASL PLAIN authentication mechanism","enabled":true},{"name":"AMQPLAIN","description":"QPid AMQPLAIN mechanism","enabled":true},{"name":"RABBIT-CR-DEMO","description":"RabbitMQ Demo challenge-response authentication mechanism","enabled":false}],"applications":[{"name":"amqp_client","description":"RabbitMQ AMQP Client","version":"3.7.0.milestone14+2.g98b59c2"},{"name":"asn1","description":"The Erlang ASN1 compiler version 4.0.2","version":"4.0.2"},{"name":"compiler","description":"ERTS CXC 138 10","version":"6.0.3"},{"name":"cowboy","description":"Small, fast, modular HTTP server.","version":"1.1.0"},{"name":"cowlib","description":"Support library for manipulating Web protocols.","version":"1.0.2"},{"name":"crypto","description":"CRYPTO","version":"3.6.3"},{"name":"goldrush","description":"Erlang event stream processor","version":"0.1.9"},{"name":"inets","description":"INETS CXC 138 49","version":"6.2.4"},{"name":"jsx","description":"a streaming, evented json parsing toolkit","version":"2.8.2"},{"name":"kernel","description":"ERTS CXC 138 10","version":"4.2"},{"name":"lager","description":"Erlang logging framework","version":"3.2.2"},{"name":"mnesia","description":"MNESIA CXC 138 12","version":"4.13.4"},{"name":"os_mon","description":"CPO CXC 138 46","version":"2.4"},{"name":"public_key","description":"Public key infrastructure","version":"1.1.1"},{"name":"rabbit","description":"RabbitMQ","version":"3.7.0.milestone14+12.gc8efc41"},{"name":"rabbit_common","description":"Modules shared by rabbitmq-server and 
rabbitmq-erlang-client","version":"3.7.0.milestone14+5.g3a808e7"},{"name":"rabbitmq_management","description":"RabbitMQ Management Console","version":"3.7.0.milestone14+13.g4b39832"},{"name":"rabbitmq_management_agent","description":"RabbitMQ Management Agent","version":"3.7.0.milestone13+11.gc2bc2cc"},{"name":"rabbitmq_web_dispatch","description":"RabbitMQ Web Dispatcher","version":"3.7.0.milestone12+1.g7fbb6eb"},{"name":"ranch","description":"Socket acceptor pool for TCP protocols.","version":"1.3.2"},{"name":"ranch_proxy_protocol","description":"Ranch Proxy Protocol Transport","version":"git"},{"name":"sasl","description":"SASL CXC 138 11","version":"2.7"},{"name":"ssl","description":"Erlang/OTP SSL application","version":"7.3.3.1"},{"name":"stdlib","description":"ERTS CXC 138 10","version":"2.8"},{"name":"syntax_tools","description":"Syntax tools","version":"1.7"},{"name":"xmerl","description":"XML parser","version":"1.3.10"}],"contexts":[{"description":"RabbitMQ Management","path":"/","port":"15672"}],"log_files":["/tmp/rabbitmq-test-instances/rabbit/log/rabbit.log","/tmp/rabbitmq-test-instances/rabbit/log/rabbit_upgrade.log"],"db_dir":"/tmp/rabbitmq-test-instances/rabbit/mnesia/rabbit","config_files":["/tmp/rabbitmq-test-instances/test.config"],"net_ticktime":60,"enabled_plugins":["amqp_client","cowboy","cowlib","rabbitmq_management","rabbitmq_management_agent","rabbitmq_web_dispatch"],"name":"rabbit@demandred","type":"disc","running":true,"mem_used":63606512,"mem_used_details":{"rate":22435.2},"fd_used":36,"fd_used_details":{"rate":0.0},"sockets_used":0,"sockets_used_details":{"rate":0.0},"proc_used":378,"proc_used_details":{"rate":0.0},"disk_free":39426990080,"disk_free_details":{"rate":0.0},"gc_num":4998,"gc_num_details":{"rate":9.8},"gc_bytes_reclaimed":308488464,"gc_bytes_reclaimed_details":{"rate":158065.6},"context_switches":68557,"context_switches_details":{"rate":25.2},"io_read_count":1,"io_read_count_details":{"rate":0.0},"io_read_bytes":1,"io_read_b
ytes_details":{"rate":0.0},"io_read_avg_time":0.028,"io_read_avg_time_details":{"rate":0.0},"io_write_count":0,"io_write_count_details":{"rate":0.0},"io_write_bytes":0,"io_write_bytes_details":{"rate":0.0},"io_write_avg_time":0.0,"io_write_avg_time_details":{"rate":0.0},"io_sync_count":0,"io_sync_count_details":{"rate":0.0},"io_sync_avg_time":0.0,"io_sync_avg_time_details":{"rate":0.0},"io_seek_count":0,"io_seek_count_details":{"rate":0.0},"io_seek_avg_time":0.0,"io_seek_avg_time_details":{"rate":0.0},"io_reopen_count":0,"io_reopen_count_details":{"rate":0.0},"mnesia_ram_tx_count":6,"mnesia_ram_tx_count_details":{"rate":0.0},"mnesia_disk_tx_count":13,"mnesia_disk_tx_count_details":{"rate":0.0},"msg_store_read_count":0,"msg_store_read_count_details":{"rate":0.0},"msg_store_write_count":0,"msg_store_write_count_details":{"rate":0.0},"queue_index_journal_write_count":0,"queue_index_journal_write_count_details":{"rate":0.0},"queue_index_write_count":0,"queue_index_write_count_details":{"rate":0.0},"queue_index_read_count":0,"queue_index_read_count_details":{"rate":0.0},"io_file_handle_open_attempt_count":10,"io_file_handle_open_attempt_count_details":{"rate":0.0},"io_file_handle_open_attempt_avg_time":0.0285,"io_file_handle_open_attempt_avg_time_details":{"rate":0.0},"cluster_links":[],"metrics_gc_queue_length":{"connection_closed":0,"channel_closed":0,"consumer_deleted":0,"exchange_deleted":0,"queue_deleted":0,"vhost_deleted":0,"node_node_deleted":0,"channel_consumer_deleted":0}}] \ No newline at end of file diff --git a/testdata/overview-3.6.8.bert b/testdata/overview-3.6.8.bert new file mode 100644 index 00000000..29004019 Binary files /dev/null and b/testdata/overview-3.6.8.bert differ diff --git a/testdata/overview-3.6.8.json b/testdata/overview-3.6.8.json new file mode 100644 index 00000000..4cb29b05 --- /dev/null +++ b/testdata/overview-3.6.8.json @@ -0,0 +1 @@ 
+{"management_version":"3.6.8+1.g1dcb221","rates_mode":"basic","exchange_types":[{"name":"topic","description":"AMQP topic exchange, as per the AMQP specification","enabled":true},{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true}],"rabbitmq_version":"3.6.8+1.g1dcb221","cluster_name":"rabbit-dev@demandred","erlang_version":"18.3.4.4","erlang_full_version":"Erlang/OTP 18 [erts-7.3.1.2] [source] [64-bit] [smp:4:4] [async-threads:10] [hipe] [kernel-poll:false]","message_stats":{"publish":1,"publish_details":{"rate":0.0},"confirm":1,"confirm_details":{"rate":0.0},"return_unroutable":0,"return_unroutable_details":{"rate":0.0},"disk_reads":0,"disk_reads_details":{"rate":0.0},"disk_writes":0,"disk_writes_details":{"rate":0.0}},"queue_totals":{"messages_ready":1,"messages_ready_details":{"rate":0.0},"messages_unacknowledged":0,"messages_unacknowledged_details":{"rate":0.0},"messages":1,"messages_details":{"rate":0.0}},"object_totals":{"consumers":0,"queues":3,"exchanges":8,"connections":1,"channels":1},"statistics_db_event_queue":0,"node":"rabbit-dev@localhost","listeners":[{"node":"rabbit-dev@localhost","protocol":"amqp","ip_address":"::","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit-dev@localhost","protocol":"clustering","ip_address":"::","port":42475,"socket_opts":[]},{"node":"rabbit-dev@localhost","protocol":"http","ip_address":"::","port":15672,"socket_opts":{"port":15672}}],"contexts":[{"node":"rabbit-dev@localhost","description":"RabbitMQ Management","path":"/","port":"15672"}]} \ No newline at end of file diff --git a/testdata/overview-3.7.0.bert b/testdata/overview-3.7.0.bert new file mode 100644 index 00000000..fc71392a Binary files /dev/null and 
b/testdata/overview-3.7.0.bert differ diff --git a/testdata/overview-3.7.0.json b/testdata/overview-3.7.0.json new file mode 100644 index 00000000..3da25626 --- /dev/null +++ b/testdata/overview-3.7.0.json @@ -0,0 +1 @@ +{"management_version":"3.7.0.milestone14+13.g4b39832","rates_mode":"basic","exchange_types":[{"name":"topic","description":"AMQP topic exchange, as per the AMQP specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true}],"rabbitmq_version":"3.7.0.milestone14+12.gc8efc41","cluster_name":"rabbit@demandred","erlang_version":"18.3.4.4","erlang_full_version":"Erlang/OTP 18 [erts-7.3.1.2] [source] [64-bit] [smp:4:4] [async-threads:64] [hipe] [kernel-poll:true]","message_stats":{"disk_reads":0,"disk_reads_details":{"rate":0.0},"disk_writes":0,"disk_writes_details":{"rate":0.0}},"queue_totals":{"messages":0,"messages_details":{"rate":0.0},"messages_ready":0,"messages_ready_details":{"rate":0.0},"messages_unacknowledged":0,"messages_unacknowledged_details":{"rate":0.0}},"object_totals":{"channels":1,"connections":1,"consumers":0,"exchanges":8,"queues":3},"statistics_db_event_queue":0,"node":"rabbit@demandred","listeners":[{"node":"rabbit@demandred","protocol":"amqp","ip_address":"::","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@demandred","protocol":"clustering","ip_address":"::","port":25672,"socket_opts":[]},{"node":"rabbit@demandred","protocol":"http","ip_address":"::","port":15672,"socket_opts":{"port":15672}}],"contexts":[{"node":"rabbit@demandred","description":"RabbitMQ Management","path":"/","port":"15672"}]} \ No newline at end of file diff --git a/testdata/queues-3.6.8.bert b/testdata/queues-3.6.8.bert new 
file mode 100644 index 00000000..740d35f3 Binary files /dev/null and b/testdata/queues-3.6.8.bert differ diff --git a/testdata/queues-3.6.8.json b/testdata/queues-3.6.8.json new file mode 100644 index 00000000..a2ab8bb5 --- /dev/null +++ b/testdata/queues-3.6.8.json @@ -0,0 +1 @@ +[{"messages_details":{"rate":0.0},"messages":1,"messages_unacknowledged_details":{"rate":0.0},"messages_unacknowledged":0,"messages_ready_details":{"rate":0.0},"messages_ready":1,"reductions_details":{"rate":0.0},"reductions":20166,"message_stats":{"publish_details":{"rate":0.0},"publish":1},"node":"rabbit-dev@localhost","arguments":{},"exclusive":false,"auto_delete":false,"durable":false,"vhost":"/","name":"amq.gen---WBD642a0yMDq25x3DpvQ","message_bytes_paged_out":0,"messages_paged_out":0,"backing_queue_status":{"mode":"default","q1":0,"q2":0,"delta":["delta","undefined",0,0,"undefined"],"q3":0,"q4":1,"len":1,"target_ram_count":"infinity","next_seq_id":1,"avg_ingress_rate":0.00021650966423979837,"avg_egress_rate":0.0,"avg_ack_ingress_rate":0.0,"avg_ack_egress_rate":0.0},"head_message_timestamp":null,"message_bytes_persistent":0,"message_bytes_ram":13,"message_bytes_unacknowledged":0,"message_bytes_ready":13,"message_bytes":13,"messages_persistent":0,"messages_unacknowledged_ram":0,"messages_ready_ram":1,"messages_ram":1,"garbage_collection":{"minor_gcs":54,"fullsweep_after":65535,"min_heap_size":233,"min_bin_vheap_size":46422},"state":"running","recoverable_slaves":null,"memory":34712,"consumer_utilisation":null,"consumers":0,"exclusive_consumer_tag":null,"policy":null},{"messages_details":{"rate":0.0},"messages":0,"messages_unacknowledged_details":{"rate":0.0},"messages_unacknowledged":0,"messages_ready_details":{"rate":0.0},"messages_ready":0,"reductions_details":{"rate":0.0},"reductions":3958,"node":"rabbit-dev@localhost","arguments":{},"exclusive":false,"auto_delete":false,"durable":false,"vhost":"/","name":"amq.gen-AZWct5711RRwY4HXRtU-8w","message_bytes_paged_out":0,"messages_paged_o
ut":0,"backing_queue_status":{"mode":"default","q1":0,"q2":0,"delta":["delta","undefined",0,0,"undefined"],"q3":0,"q4":0,"len":0,"target_ram_count":"infinity","next_seq_id":0,"avg_ingress_rate":0.0,"avg_egress_rate":0.0,"avg_ack_ingress_rate":0.0,"avg_ack_egress_rate":0.0},"head_message_timestamp":null,"message_bytes_persistent":0,"message_bytes_ram":0,"message_bytes_unacknowledged":0,"message_bytes_ready":0,"message_bytes":0,"messages_persistent":0,"messages_unacknowledged_ram":0,"messages_ready_ram":0,"messages_ram":0,"garbage_collection":{"minor_gcs":4,"fullsweep_after":65535,"min_heap_size":233,"min_bin_vheap_size":46422},"state":"running","recoverable_slaves":null,"consumers":0,"exclusive_consumer_tag":null,"policy":null,"consumer_utilisation":null,"idle_since":"2017-04-03 12:49:45","memory":42536},{"messages_details":{"rate":0.0},"messages":0,"messages_unacknowledged_details":{"rate":0.0},"messages_unacknowledged":0,"messages_ready_details":{"rate":0.0},"messages_ready":0,"reductions_details":{"rate":0.0},"reductions":3491,"node":"rabbit-dev@localhost","arguments":{},"exclusive":false,"auto_delete":false,"durable":false,"vhost":"/","name":"amq.gen-vPJMD2iLr8liaiqKRpSt_g","message_bytes_paged_out":0,"messages_paged_out":0,"backing_queue_status":{"mode":"default","q1":0,"q2":0,"delta":["delta","undefined",0,0,"undefined"],"q3":0,"q4":0,"len":0,"target_ram_count":"infinity","next_seq_id":0,"avg_ingress_rate":0.0,"avg_egress_rate":0.0,"avg_ack_ingress_rate":0.0,"avg_ack_egress_rate":0.0},"head_message_timestamp":null,"message_bytes_persistent":0,"message_bytes_ram":0,"message_bytes_unacknowledged":0,"message_bytes_ready":0,"message_bytes":0,"messages_persistent":0,"messages_unacknowledged_ram":0,"messages_ready_ram":0,"messages_ram":0,"garbage_collection":{"minor_gcs":3,"fullsweep_after":65535,"min_heap_size":233,"min_bin_vheap_size":46422},"state":"running","recoverable_slaves":null,"consumers":0,"exclusive_consumer_tag":null,"policy":null,"consumer_utilisation":
null,"idle_since":"2017-04-03 12:49:39","memory":42536}] \ No newline at end of file diff --git a/testdata/queues-3.7.0.bert b/testdata/queues-3.7.0.bert new file mode 100644 index 00000000..be53e363 Binary files /dev/null and b/testdata/queues-3.7.0.bert differ diff --git a/testdata/queues-3.7.0.json b/testdata/queues-3.7.0.json new file mode 100644 index 00000000..2f1ee7d1 --- /dev/null +++ b/testdata/queues-3.7.0.json @@ -0,0 +1 @@ +[{"messages_details":{"rate":0.0},"messages":0,"messages_unacknowledged_details":{"rate":0.0},"messages_unacknowledged":0,"messages_ready_details":{"rate":0.0},"messages_ready":0,"reductions_details":{"rate":0.0},"reductions":4737,"node":"rabbit@demandred","arguments":{},"exclusive":false,"auto_delete":false,"durable":false,"vhost":"/","name":"amq.gen-1Nb3tSVlcjin_u_k_8Ur_A","message_bytes_paged_out":0,"messages_paged_out":0,"backing_queue_status":{"avg_ack_egress_rate":0.0,"avg_ack_ingress_rate":0.0,"avg_egress_rate":0.0,"avg_ingress_rate":0.0,"delta":["delta","undefined",0,0,"undefined"],"len":0,"mode":"default","next_seq_id":0,"q1":0,"q2":0,"q3":0,"q4":0,"target_ram_count":"infinity"},"head_message_timestamp":null,"message_bytes_persistent":0,"message_bytes_ram":0,"message_bytes_unacknowledged":0,"message_bytes_ready":0,"message_bytes":0,"messages_persistent":0,"messages_unacknowledged_ram":0,"messages_ready_ram":0,"messages_ram":0,"garbage_collection":{"minor_gcs":10,"fullsweep_after":65535,"min_heap_size":233,"min_bin_vheap_size":46422},"state":"running","recoverable_slaves":null,"consumers":0,"exclusive_consumer_tag":null,"effective_policy_definition":[],"operator_policy":null,"policy":null,"consumer_utilisation":null,"idle_since":"2017-04-05 
14:32:42","memory":34632},{"messages_details":{"rate":0.0},"messages":0,"messages_unacknowledged_details":{"rate":0.0},"messages_unacknowledged":0,"messages_ready_details":{"rate":0.0},"messages_ready":0,"reductions_details":{"rate":94.0},"reductions":5208,"node":"rabbit@demandred","arguments":{},"exclusive":false,"auto_delete":false,"durable":false,"vhost":"/","name":"amq.gen-_uN9uFRYxbcWkNXY8GsCmQ","message_bytes_paged_out":0,"messages_paged_out":0,"backing_queue_status":{"avg_ack_egress_rate":0.0,"avg_ack_ingress_rate":0.0,"avg_egress_rate":0.0,"avg_ingress_rate":0.0,"delta":["delta","undefined",0,0,"undefined"],"len":0,"mode":"default","next_seq_id":0,"q1":0,"q2":0,"q3":0,"q4":0,"target_ram_count":"infinity"},"head_message_timestamp":null,"message_bytes_persistent":0,"message_bytes_ram":0,"message_bytes_unacknowledged":0,"message_bytes_ready":0,"message_bytes":0,"messages_persistent":0,"messages_unacknowledged_ram":0,"messages_ready_ram":0,"messages_ram":0,"garbage_collection":{"minor_gcs":11,"fullsweep_after":65535,"min_heap_size":233,"min_bin_vheap_size":46422},"state":"running","recoverable_slaves":null,"consumers":0,"exclusive_consumer_tag":null,"effective_policy_definition":[],"operator_policy":null,"policy":null,"consumer_utilisation":null,"idle_since":"2017-04-05 
14:32:47","memory":34632},{"messages_details":{"rate":0.0},"messages":0,"messages_unacknowledged_details":{"rate":0.0},"messages_unacknowledged":0,"messages_ready_details":{"rate":0.0},"messages_ready":0,"reductions_details":{"rate":94.0},"reductions":5221,"node":"rabbit@demandred","arguments":{},"exclusive":false,"auto_delete":false,"durable":false,"vhost":"/","name":"amq.gen-qgYEo5W8l7zlozrznGMVdA","message_bytes_paged_out":0,"messages_paged_out":0,"backing_queue_status":{"avg_ack_egress_rate":0.0,"avg_ack_ingress_rate":0.0,"avg_egress_rate":0.0,"avg_ingress_rate":0.0,"delta":["delta","undefined",0,0,"undefined"],"len":0,"mode":"default","next_seq_id":0,"q1":0,"q2":0,"q3":0,"q4":0,"target_ram_count":"infinity"},"head_message_timestamp":null,"message_bytes_persistent":0,"message_bytes_ram":0,"message_bytes_unacknowledged":0,"message_bytes_ready":0,"message_bytes":0,"messages_persistent":0,"messages_unacknowledged_ram":0,"messages_ready_ram":0,"messages_ram":0,"garbage_collection":{"minor_gcs":11,"fullsweep_after":65535,"min_heap_size":233,"min_bin_vheap_size":46422},"state":"running","recoverable_slaves":null,"consumers":0,"exclusive_consumer_tag":null,"effective_policy_definition":[],"operator_policy":null,"policy":null,"consumer_utilisation":null,"idle_since":"2017-04-05 14:32:47","memory":34632}] \ No newline at end of file diff --git a/vendor/github.com/landonia/gobert/LICENSE b/vendor/github.com/landonia/gobert/LICENSE new file mode 100644 index 00000000..77c251fd --- /dev/null +++ b/vendor/github.com/landonia/gobert/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2009 Joshua Peek +Copyright (c) 2013 Seth W. 
Klein + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/landonia/gobert/README.md b/vendor/github.com/landonia/gobert/README.md new file mode 100644 index 00000000..55f6b76c --- /dev/null +++ b/vendor/github.com/landonia/gobert/README.md @@ -0,0 +1,40 @@ +About +===== + +The gobert project is a Go library implementing the [BERT serialization and +RPC protocol](http://bert-rpc.org/). + +Status +====== + +I, Seth Klein, don't use this library. I updated it to Go 1 for someone +in #go-nuts and the original maintainer [declared it dead](#history) so I +figured I'd do the basics and handle bug reports and pull requests. As of +February, 2013, the last post to the [BERT mailing +list](http://groups.google.com/group/bert-rpc) was nearly two years ago so +unless this gets serious interest, I'm not going to sink oodles of effort into +figuring out what full BERT compliance is or whether this library implements +it. (The original author implies that it doesn't.) 
Feel free to report bugs +and make pull requests, though. + +Reporting Bugs +============== + +Bugs may be reported at https://github.com/sethwklein/gobert/issues or via +email or Twitter to the maintainer contact below. + +Maintainer Contact +================== + +Seth W. Klein @sethwklein + +History +======= + +The gobert project was started by Joshua Peek in 2009. In 2013 he [confirmed +that he was no longer maintaining +it](https://twitter.com/joshpeek/status/299891081649209344), and Seth W. +Klein assumed maintenance. As the last update on this was in 2013, Landon Wainwright +has added in support for more tag specs.. I aim to create pull requests to merge this back +into the original source when I am happy it is all working. + diff --git a/vendor/github.com/landonia/gobert/decode.go b/vendor/github.com/landonia/gobert/decode.go new file mode 100644 index 00000000..c0e479cb --- /dev/null +++ b/vendor/github.com/landonia/gobert/decode.go @@ -0,0 +1,878 @@ +package bert + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "math/big" + "reflect" + "sort" + "strconv" +) + +var ( + ErrBadMagic error = errors.New("bad magic") + ErrUnknownType error = errors.New("unknown type") + ErrMissingAtom error = errors.New("missing Atom") + ErrEOF error = errors.New("Unexpected EOF") + + // The atom distribution cache + cache = DistributionHeader{} +) + +func readLength(r io.Reader, length int64) ([]byte, error) { + bits, err := ioutil.ReadAll(io.LimitReader(r, length)) + if err != nil { + return nil, err + } + if int64(len(bits)) != length { + return nil, ErrEOF + } + return bits, nil +} + +func read1(r io.Reader) (int, error) { + bits, err := readLength(r, 1) + if err != nil { + return 0, err + } + + ui8 := uint8(bits[0]) + return int(ui8), nil +} + +func read2(r io.Reader) (int, error) { + bits, err := readLength(r, 2) + if err != nil { + return 0, err + } + + ui16 := binary.BigEndian.Uint16(bits) + return int(ui16), 
nil +} + +func read4(r io.Reader) (int, error) { + bits, err := readLength(r, 4) + if err != nil { + return 0, err + } + + ui32 := binary.BigEndian.Uint32(bits) + return int(ui32), nil +} + +func readCompressed(r io.Reader) (Term, error) { + _, err := read4(r) + if err != nil { + return nil, err + } + + // Attempt to decode the bytes + reader, err := zlib.NewReader(r) + if err != nil { + return nil, err + } + defer reader.Close() + + // Start reading from the new reader + return readTag(reader) +} + +func readDistributionHeader(r io.Reader) (Term, error) { + + // Attempt to parse the header into the cache + if err := cache.Update(r); err != nil { + return nil, err + } + + // Cache has now been updated so parse the next flag + return readTag(r) +} + +func readSmallInt(r io.Reader) (int, error) { + return read1(r) +} + +func readInt(r io.Reader) (int, error) { + + // An integer is a Signed 32bit value + // Depending on whether we are on a 32 or 64 bit system the default + // int size will change appropriately. Therefore the sign of + // a number will be lost when compiling on a 64 bit system but + // will work on a 32 bit. 
The way around this is to cast to a + // int32 and then cast back to an int which will keep the sign of the number + val, err := read4(r) + if err != nil { + return val, err + } + + return int(int32(val)), nil +} + +func readSmallBignum(r io.Reader) (big.Int, error) { + numLen, err := read1(r) + if err != nil { + return *big.NewInt(0), err + } + return readBigNum(r, numLen) +} + +func readLargeBignum(r io.Reader) (big.Int, error) { + numLen, err := read4(r) + if err != nil { + return *big.NewInt(0), err + } + return readBigNum(r, numLen) +} + +func readBigNum(r io.Reader, numLen int) (big.Int, error) { + sign, err := read1(r) + if err != nil { + return *big.NewInt(0), err + } + + bits, err := readLength(r, int64(numLen)) + if err != nil { + return *big.NewInt(0), err + } + + // The bytes are stored with the LSB byte stored first + // Reverse the array to get BigEndian + var bigEndBits []byte + for i := len(bits) - 1; i >= 0; i-- { + bigEndBits = append(bigEndBits, bits[i]) + } + + // Parse the big int + bigNum := &big.Int{} + bigNum.SetBytes(bigEndBits) + if sign == 1 { + + // Then the number is negative + bigNum = bigNum.Neg(bigNum) + } + return *bigNum, nil +} + +func readFloat(r io.Reader) (float32, error) { + bits, err := readLength(r, 31) + if err != nil { + return 0, err + } + + // ParseFloat doesn't like trailing 0s + var i int + for i = 0; i < len(bits); i++ { + if bits[i] == 0 { + break + } + } + + f, err := strconv.ParseFloat(string(bits[0:i]), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func readNewFloat(r io.Reader) (float64, error) { + bits, err := readLength(r, 8) + if err != nil { + return 0, err + } + + ui64 := binary.BigEndian.Uint64(bits) + return math.Float64frombits(ui64), nil +} + +func readAtomRef(r io.Reader) (Atom, error) { + atomCacheRefIndex, err := read1(r) + if err != nil { + return Atom(""), err + } + atom, err := cache.GetAtom(uint8(atomCacheRefIndex)) + if err != nil { + return Atom(""), err + } + return 
*atom, nil +} + +func readAtom(r io.Reader) (Atom, error) { + str, err := readString(r) + return Atom(str), err +} + +func readSmallAtom(r io.Reader) (Atom, error) { + str, err := readSmallString(r) + return Atom(str), err +} + +func readSmallTuple(r io.Reader) (Term, error) { + size, err := read1(r) + if err != nil { + return nil, err + } + + tuple := make([]Term, size) + + for i := 0; i < size; i++ { + term, err := readTag(r) + if err != nil { + return nil, err + } + switch a := term.(type) { + case Atom: + if a == BertAtom { + return readComplex(r) + } + } + tuple[i] = term + } + + return tuple, nil +} + +func readLargeTuple(r io.Reader) (Term, error) { + size, err := read4(r) + if err != nil { + return nil, err + } + + tuple := make([]Term, size) + + for i := uint32(0); i < uint32(size); i++ { + term, err := readTag(r) + if err != nil { + return nil, err + } + switch a := term.(type) { + case Atom: + if a == BertAtom { + return readComplex(r) + } + } + tuple[i] = term + } + + return tuple, nil +} + +func readNil(r io.Reader) ([]Term, error) { + list := make([]Term, 0) + return list, nil +} + +func readString(r io.Reader) (string, error) { + size, err := read2(r) + if err != nil { + return "", err + } + + str, err := readLength(r, int64(size)) + if err != nil { + return "", err + } + + return string(str), nil +} + +func readSmallString(r io.Reader) (string, error) { + size, err := read1(r) + if err != nil { + return "", err + } + + str, err := readLength(r, int64(size)) + if err != nil { + return "", err + } + + return string(str), nil +} + +func readList(r io.Reader) ([]Term, error) { + size, err := read4(r) + if err != nil { + return nil, err + } + + list := make([]Term, size) + + for i := 0; i < size; i++ { + term, err := readTag(r) + if err != nil { + return nil, err + } + list[i] = term + } + + read1(r) + + return list, nil +} + +// use a specific type for the bin type so that +type bintag []uint8 + +// String will attempt to print the value as a string 
+func (b bintag) String() string { + return fmt.Sprintf("%s", string(b)) +} + +func readBin(r io.Reader) (bintag, error) { + size, err := read4(r) + if err != nil { + return bintag{}, err + } + + bytes, err := readLength(r, int64(size)) + if err != nil { + return bintag{}, err + } + + return bintag(bytes), nil +} + +// maptag is a specific type that allows us to override the print statement to always ensure +// that the keys are printed in order +type Map map[Term]Term + +func (m Map) String() string { + + // Cast back to the map type + var keys []string + realKeys := map[string]Term{} + for k := range m { + + // Turn the key into a string representation in order to quickly sort it + key := fmt.Sprintf("%v", k) + keys = append(keys, key) + realKeys[key] = k + } + sort.Strings(keys) + + // To perform the opertion you want + r := "{" + for _, k := range keys { + + // get the real key for this stringified version + rk := realKeys[k] + r += fmt.Sprintf("%v:%v,", rk, m[rk]) + } + r += "}" + return r +} + +func readMap(r io.Reader) (Map, error) { + pairs, err := read4(r) + if err != nil { + return nil, err + } + + m := make(map[Term]Term) + + for i := 0; i < pairs; i++ { + key, err := readTag(r) + if err != nil { + return nil, err + } + value, err := readTag(r) + if err != nil { + return nil, err + } + m[key] = value + } + + return Map(m), nil +} + +func readComplex(r io.Reader) (Term, error) { + term, err := readTag(r) + + if err != nil { + return term, err + } + + switch kind := term.(type) { + case Atom: + switch kind { + case NilAtom: + return nil, nil + case TrueAtom: + return true, nil + case FalseAtom: + return false, nil + } + } + + return term, nil +} + +func readReference(r io.Reader) (Reference, error) { + reference := Reference{} + + term, err := readTag(r) + if err != nil { + return reference, err + } + + switch a := term.(type) { + case Atom: + reference.Node = a + default: + return reference, ErrMissingAtom + } + + id, err := read4(r) + if err != nil { + 
return reference, err + } + reference.ID = uint32(id) + + creation, err := read1(r) + if err != nil { + return reference, err + } + reference.Creation = uint8(creation) + + return reference, nil +} + +func readNewReference(r io.Reader) (NewReference, error) { + reference := NewReference{} + + len, err := read2(r) + if err != nil { + return reference, err + } + + term, err := readTag(r) + if err != nil { + return reference, err + } + + switch a := term.(type) { + case Atom: + reference.Node = a + default: + return reference, ErrMissingAtom + } + + creation, err := read1(r) + if err != nil { + return reference, err + } + reference.Creation = uint8(creation) + + // Extract the IDS + ids := make([]uint32, len) + for i := 0; i < len; i++ { + id, err := read4(r) + if err != nil { + return reference, err + } + ids[i] = uint32(id) + } + reference.ID = ids + + return reference, nil +} + +func readPort(r io.Reader) (Port, error) { + port := Port{} + + term, err := readTag(r) + if err != nil { + return port, err + } + + switch a := term.(type) { + case Atom: + port.Node = a + default: + return port, ErrMissingAtom + } + + id, err := read4(r) + if err != nil { + return port, err + } + port.ID = uint32(id) + + creation, err := read1(r) + if err != nil { + return port, err + } + port.Creation = uint8(creation) + + return port, nil +} + +func readPid(r io.Reader) (Pid, error) { + pid := Pid{} + + term, err := readTag(r) + if err != nil { + return pid, err + } + + switch a := term.(type) { + case Atom: + pid.Node = a + default: + return pid, ErrMissingAtom + } + + id, err := read4(r) + if err != nil { + return pid, err + } + pid.ID = uint32(id) + + serial, err := read4(r) + if err != nil { + return pid, err + } + pid.Serial = uint32(serial) + + creation, err := read1(r) + if err != nil { + return pid, err + } + pid.Creation = uint8(creation) + + return pid, nil +} + +func readFunc(r io.Reader) (Func, error) { + function := Func{} + + numfree, err := read4(r) + if err != nil { + 
return function, err + } + + term, err := readTag(r) + if err != nil { + return function, err + } + + switch pid := term.(type) { + case Pid: + function.Pid = pid + default: + return function, ErrUnknownType + } + + term, err = readTag(r) + if err != nil { + return function, err + } + + switch module := term.(type) { + case Atom: + function.Module = module + default: + return function, ErrUnknownType + } + + term, err = readTag(r) + if err != nil { + return function, err + } + + switch v := reflect.ValueOf(term); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + function.Index = uint32(v.Int()) + default: + return function, ErrUnknownType + } + + term, err = readTag(r) + if err != nil { + return function, err + } + + switch v := reflect.ValueOf(term); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + function.Uniq = uint32(v.Int()) + default: + return function, ErrUnknownType + } + + // Extract the free vars + freeVars := make([]Term, numfree) + for i := 0; i < numfree; i++ { + term, err := readTag(r) + if err != nil { + return function, err + } + freeVars[i] = term + } + function.FreeVars = freeVars + + return function, nil +} + +func readNewFunc(r io.Reader) (NewFunc, error) { + function := NewFunc{} + + // Get size of the func including the 4 bytes itself + size, err := read4(r) + if err != nil { + return function, err + } + + // Only allow the next size-4 bytes to be read + lr := io.LimitReader(r, int64(size-4)) + + arity, err := read1(lr) + if err != nil { + return function, err + } + function.Arity = uint8(arity) + + uniq, err := readLength(r, 16) + if err != nil { + return function, err + } + function.Uniq = uniq + + index, err := read4(lr) + if err != nil { + return function, err + } + function.Index = uint32(index) + + numfree, err := read4(lr) + if err != nil { + return function, err + } + + term, err := readTag(lr) + if err != nil { + return function, err + } + + switch 
module := term.(type) { + case Atom: + function.Module = module + default: + return function, ErrUnknownType + } + + term, err = readTag(lr) + if err != nil { + return function, err + } + + switch v := reflect.ValueOf(term); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + function.OldIndex = uint32(v.Int()) + default: + return function, ErrUnknownType + } + + term, err = readTag(lr) + if err != nil { + return function, err + } + + switch v := reflect.ValueOf(term); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + function.OldUnique = uint32(v.Int()) + default: + return function, ErrUnknownType + } + + term, err = readTag(lr) + if err != nil { + return function, err + } + + switch pid := term.(type) { + case Pid: + function.Pid = pid + default: + return function, ErrUnknownType + } + + // Extract the free vars + freeVars := make([]Term, numfree) + for i := 0; i < numfree; i++ { + term, err := readTag(lr) + if err != nil { + return function, err + } + freeVars[i] = term + } + function.FreeVars = freeVars + + return function, nil +} + +func readExport(r io.Reader) (Export, error) { + export := Export{} + + term, err := readTag(r) + if err != nil { + return export, err + } + + switch module := term.(type) { + case Atom: + export.Module = module + default: + return export, ErrMissingAtom + } + + term, err = readTag(r) + if err != nil { + return export, err + } + + switch function := term.(type) { + case Atom: + export.Function = function + default: + return export, ErrMissingAtom + } + + term, err = readTag(r) + if err != nil { + return export, err + } + + switch v := reflect.ValueOf(term); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + export.Arity = uint8(v.Int()) + default: + return export, ErrUnknownType + } + + return export, nil +} + +func readTag(r io.Reader) (Term, error) { + tag, err := read1(r) + if err != nil { + return nil, err 
+ } + + switch tag { + case CompressedTag: + return readCompressed(r) + case DistributionHeaderTag: + return readDistributionHeader(r) + case SmallIntTag: + return readSmallInt(r) + case IntTag: + return readInt(r) + case SmallBignumTag: + return readSmallBignum(r) + case LargeBignumTag: + return readLargeBignum(r) + case FloatTag: + return readFloat(r) + case NewFloatTag: + return readNewFloat(r) + case AtomCacheRefTag: + return readAtomRef(r) + case AtomTag, AtomUtf8Tag: + return readAtom(r) + case SmallAtomTag, SmallAtomUtf8Tag: + return readSmallAtom(r) + case SmallTupleTag: + return readSmallTuple(r) + case LargeTupleTag: + return readLargeTuple(r) + case NilTag: + return readNil(r) + case StringTag: + return readString(r) + case ListTag: + return readList(r) + case BinTag: + return readBin(r) + case MapTag: + return readMap(r) + case ReferenceTag: + return readReference(r) + case NewReferenceTag: + return readNewReference(r) + case PortTag: + return readPort(r) + case PidTag: + return readPid(r) + case FunTag: + return readFunc(r) + case NewFunTag: + return readNewFunc(r) + case ExportTag: + return readExport(r) + } + + return nil, ErrUnknownType +} + +// DecodeFrom decodes a Term from r and returns it or an error. +func DecodeFrom(r io.Reader) (Term, error) { + version, err := read1(r) + if err != nil { + return nil, err + } + + // check protocol version + if version != VersionTag { + return nil, ErrBadMagic + } + + return readTag(r) +} + +// Decode decodes a Term from data and returns it or an error. +func Decode(data []byte) (Term, error) { return DecodeFrom(bytes.NewBuffer(data)) } + +// UnmarshalFrom decodes a value from r, stores it in val, and returns any +// error encountered. 
+func UnmarshalFrom(r io.Reader, val interface{}) (err error) { + result, _ := DecodeFrom(r) + + value := reflect.ValueOf(val).Elem() + + switch v := value; v.Kind() { + case reflect.Struct: + slice := reflect.ValueOf(result) + for i := 0; i < slice.Len(); i++ { + e := slice.Index(i).Elem() + v.Field(i).Set(e) + } + } + + return nil +} + +// Unmarshal decodes a value from data, stores it in val, and returns any error +// encountered. +func Unmarshal(data []byte, val interface{}) (err error) { + return UnmarshalFrom(bytes.NewBuffer(data), val) +} + +// UnmarshalRequest decodes a BURP from r and returns it as a Request. +func UnmarshalRequest(r io.Reader) (Request, error) { + var req Request + + size, err := read4(r) + if err != nil { + return req, err + } + + err = UnmarshalFrom(io.LimitReader(r, int64(size)), &req) + + return req, err +} diff --git a/vendor/github.com/landonia/gobert/doc.go b/vendor/github.com/landonia/gobert/doc.go new file mode 100644 index 00000000..bfc651b7 --- /dev/null +++ b/vendor/github.com/landonia/gobert/doc.go @@ -0,0 +1,5 @@ +// Package bert implements the BERT serialization and RPC protocol. +// See http://bert-rpc.org/ +package bert + +// BUG(josh): Full BERT specification compliance is still in progress. 
diff --git a/vendor/github.com/landonia/gobert/encode.go b/vendor/github.com/landonia/gobert/encode.go new file mode 100644 index 00000000..3a2b39ba --- /dev/null +++ b/vendor/github.com/landonia/gobert/encode.go @@ -0,0 +1,413 @@ +package bert + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + "fmt" + "io" + "math" + "math/big" + "reflect" + "unicode/utf8" +) + +func write1(w io.Writer, ui8 uint8) { w.Write([]byte{ui8}) } + +func write2(w io.Writer, ui16 uint16) { + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, ui16) + w.Write(b) +} + +func write4(w io.Writer, ui32 uint32) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, ui32) + w.Write(b) +} + +func writeSmallInt(w io.Writer, n uint8) { + write1(w, SmallIntTag) + write1(w, n) +} + +func writeInt(w io.Writer, n uint32) { + write1(w, IntTag) + write4(w, n) +} + +func writeFloat(w io.Writer, f float32) { + write1(w, FloatTag) + + s := fmt.Sprintf("%.20e", float32(f)) + w.Write([]byte(s)) + + pad := make([]byte, 31-len(s)) + w.Write(pad) +} + +func writeNewFloat(w io.Writer, f float64) { + write1(w, NewFloatTag) + + ui64 := math.Float64bits(f) + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, ui64) + w.Write(b) +} + +func writeAtom(w io.Writer, a string) { + write1(w, AtomTag) + write2(w, uint16(len(a))) + w.Write([]byte(a)) +} + +func writeAtomUtf8(w io.Writer, a string) { + write1(w, AtomUtf8Tag) + write2(w, uint16(len(a))) + w.Write([]byte(a)) +} + +func writeSmallTuple(w io.Writer, t reflect.Value, minorVersion int) { + write1(w, SmallTupleTag) + size := t.Len() + write1(w, uint8(size)) + + for i := 0; i < size; i++ { + writeTag(w, t.Index(i), minorVersion) + } +} + +func writeNil(w io.Writer) { write1(w, NilTag) } + +func writeString(w io.Writer, s string) { + write1(w, StringTag) + write2(w, uint16(len(s))) + w.Write([]byte(s)) +} + +func writeList(w io.Writer, l reflect.Value, minorVersion int) { + write1(w, ListTag) + size := l.Len() + write4(w, uint32(size)) + + for i := 0; i < 
size; i++ { + writeTag(w, l.Index(i), minorVersion) + } + + writeNil(w) +} + +func writeMap(w io.Writer, l reflect.Value, minorVersion int) { + write1(w, MapTag) + keys := l.MapKeys() + len := uint32(len(keys)) + write4(w, len) + + for i := uint32(0); i < len; i++ { + writeTag(w, keys[i], minorVersion) + writeTag(w, l.MapIndex(keys[i]), minorVersion) + } +} + +func writeNode(w io.Writer, node Atom) { + + // If the number of runes equal the number of bytes then UTF-8 is not required + if utf8.RuneCount([]byte(node)) == len(node) { + writeAtom(w, string(node)) + } else { + writeAtomUtf8(w, string(node)) + } +} + +func writeReference(w io.Writer, l Reference) { + write1(w, ReferenceTag) + writeNode(w, l.Node) + write4(w, l.ID) + write1(w, l.Creation) +} + +func writeNewReference(w io.Writer, l NewReference) { + write1(w, NewReferenceTag) + write2(w, uint16(len(l.ID))) + writeNode(w, l.Node) + write1(w, l.Creation) + for i := 0; i < len(l.ID); i++ { + write4(w, l.ID[i]) + } +} + +func writeLargeBigNum(w io.Writer, l big.Int) { + write1(w, LargeBignumTag) + write4(w, uint32(len(l.Bytes()))) + writeBigNum(w, l) +} + +func writeSmallBigNum(w io.Writer, l big.Int) { + write1(w, SmallBignumTag) + write1(w, uint8(len(l.Bytes()))) + writeBigNum(w, l) +} + +func writeBigNum(w io.Writer, l big.Int) { + if l.Sign() < 0 { + write1(w, 1) + } else { + write1(w, 0) + } + + // The big.Int uses BigEndian for the bytes and the format is + // expecting LittleEndian. Reverse the bytes. 
+ bEndBits := l.Bytes() + var lEndBits []byte + for i := len(bEndBits) - 1; i >= 0; i-- { + lEndBits = append(lEndBits, bEndBits[i]) + } + w.Write(lEndBits) +} + +func writePort(w io.Writer, l Port) { + write1(w, PortTag) + writeNode(w, l.Node) + write4(w, l.ID) + write1(w, l.Creation) +} + +func writePid(w io.Writer, l Pid) { + write1(w, PidTag) + writeNode(w, l.Node) + write4(w, l.ID) + write4(w, l.Serial) + write1(w, l.Creation) +} + +func writeFunc(w io.Writer, l Func, minorVersion int) { + write1(w, FunTag) + write4(w, uint32(len(l.FreeVars))) + writePid(w, l.Pid) + writeNode(w, l.Module) + writeInt(w, l.Index) + writeInt(w, l.Uniq) + for i := 0; i < len(l.FreeVars); i++ { + writeTag(w, reflect.ValueOf(l.FreeVars[i]), minorVersion) + } +} + +func writeNewFunc(w io.Writer, l NewFunc, minorVersion int) { + write1(w, NewFunTag) + + // Create a new buffer to write the bytes to so the + // total size can be written + buf := bytes.NewBuffer([]byte{}) + write1(buf, l.Arity) + buf.Write(l.Uniq) + write4(buf, l.Index) + write4(buf, uint32(len(l.FreeVars))) + writeNode(buf, l.Module) + writeInt(buf, l.OldIndex) + writeInt(buf, l.OldUnique) + writePid(buf, l.Pid) + for i := 0; i < len(l.FreeVars); i++ { + writeTag(buf, reflect.ValueOf(l.FreeVars[i]), minorVersion) + } + + write4(w, uint32(buf.Len()+4)) + w.Write(buf.Bytes()) +} + +func writeExport(w io.Writer, l Export) { + write1(w, ExportTag) + writeNode(w, l.Module) + writeNode(w, l.Function) + writeSmallInt(w, l.Arity) +} + +func writeTag(w io.Writer, val reflect.Value, minorVersion int) (err error) { + switch v := val; v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := v.Int() + if n >= 0 && n < 256 { + writeSmallInt(w, uint8(n)) + } else { + writeInt(w, uint32(n)) + } + case reflect.Float32, reflect.Float64: + if minorVersion == MinorVersion0 { + writeFloat(w, float32(v.Float())) + } else { + writeNewFloat(w, v.Float()) + } + case reflect.String: + if v.Type().Name() == 
"Atom" { + + // If the number of runes equal the number of bytes then UTF-8 is not required + if utf8.RuneCount([]byte(v.String())) == len(v.String()) { + writeAtom(w, v.String()) + } else { + writeAtomUtf8(w, v.String()) + } + } else { + writeString(w, v.String()) + } + case reflect.Slice: + writeSmallTuple(w, v, minorVersion) + case reflect.Array: + writeList(w, v, minorVersion) + case reflect.Interface, reflect.Ptr: + writeTag(w, v.Elem(), minorVersion) + case reflect.Map: + writeMap(w, v, minorVersion) + case reflect.Struct: + vali := v.Interface() + switch rVal := vali.(type) { + case DistributionHeader: + err = ErrUnknownType + case Reference: + writeReference(w, rVal) + case NewReference: + writeNewReference(w, rVal) + case big.Int: + if len(rVal.Bytes()) >= 1<<8 { + writeLargeBigNum(w, rVal) + } else { + writeSmallBigNum(w, rVal) + } + case Port: + writePort(w, rVal) + case Pid: + writePid(w, rVal) + case Func: + writeFunc(w, rVal, minorVersion) + case NewFunc: + writeNewFunc(w, rVal, minorVersion) + case Export: + writeExport(w, rVal) + } + default: + if !reflect.Indirect(val).IsValid() { + writeNil(w) + } else { + err = ErrUnknownType + } + } + + return +} + +// EncodeTo encodes val and writes it to w, returning any error. +func EncodeTo(w io.Writer, val interface{}) (err error) { + return EncodeToUsingMinorVersion(w, val, MinorVersion1) +} + +// Encode encodes val and returns it or an error. +func Encode(val interface{}) ([]byte, error) { + return EncodeUsingMinorVersion(val, MinorVersion1) +} + +// Marshal is an alias for EncodeTo. +func Marshal(w io.Writer, val interface{}) error { + return MarshalUsingMinorVersion(w, val, MinorVersion1) +} + +// MarshalResponse encodes val into a BURP Response struct and writes it to w, +// returning any error. 
+func MarshalResponse(w io.Writer, val interface{}) (err error) { + return MarshalResponseUsingMinorVersion(w, val, MinorVersion1) +} + +// EncodeToAndCompress encodes val and writes it to w, returning any error. +// If compress is true the body will be compressed +func EncodeToAndCompress(w io.Writer, val interface{}, compress bool) (err error) { + return EncodeToAndCompressUsingMinorVersion(w, val, compress, MinorVersion1) +} + +// EncodeTo encodes val and writes it to w, returning any error. +func EncodeToUsingMinorVersion(w io.Writer, val interface{}, minorVersion int) (err error) { + return EncodeToAndCompressUsingMinorVersion(w, val, false, minorVersion) +} + +// Encode encodes val and returns it or an error. +func EncodeUsingMinorVersion(val interface{}, minorVersion int) ([]byte, error) { + return EncodeAndCompressUsingMinorVersion(val, false, minorVersion) +} + +// Marshal is an alias for EncodeTo. +func MarshalUsingMinorVersion(w io.Writer, val interface{}, minorVersion int) error { + return EncodeToUsingMinorVersion(w, val, minorVersion) +} + +// MarshalResponse encodes val into a BURP Response struct and writes it to w, +// returning any error. +func MarshalResponseUsingMinorVersion(w io.Writer, val interface{}, minorVersion int) (err error) { + return MarshalResponseAndCompressUsingMinorVersion(w, val, false, minorVersion) +} + +// EncodeToAndCompress encodes val and writes it to w, returning any error. 
+// If compress is true the body will be compressed +func EncodeToAndCompressUsingMinorVersion(w io.Writer, val interface{}, compress bool, minorVersion int) (err error) { + write1(w, VersionTag) + if compress { + + // Write the bytes to a buffer (the original length is required) + buf := bytes.NewBuffer([]byte{}) + err = writeTag(buf, reflect.ValueOf(val), minorVersion) + if err == nil { + write1(w, CompressedTag) + write4(w, uint32(buf.Len())) + zw := zlib.NewWriter(w) + _, err = zw.Write(buf.Bytes()) + zw.Close() + } + } else { + // Write directly to the writer + err = writeTag(w, reflect.ValueOf(val), minorVersion) + } + return +} + +// EncodeAndCompress encodes val and returns it or an error. +// If compress is true the body will be compressed +func EncodeAndCompress(val interface{}, compress bool) ([]byte, error) { + return EncodeAndCompressUsingMinorVersion(val, compress, MinorVersion1) +} + +// MarshalAndCompress is an alias for EncodeTo. +// If compress is true the body will be compressed +func MarshalAndCompress(w io.Writer, val interface{}, compress bool) error { + return MarshalAndCompressUsingMinorVersion(w, val, compress, MinorVersion1) +} + +// MarshalResponseAndCompress encodes val into a BURP Response struct and writes it to w, +// returning any error. +// If compress is true the body will be compressed +func MarshalResponseAndCompress(w io.Writer, val interface{}, compress bool) (err error) { + return MarshalResponseAndCompressUsingMinorVersion(w, val, compress, MinorVersion1) +} + +// EncodeAndCompressUsingMinorVersion encodes val and returns it or an error. 
+// If compress is true the body will be compressed +// It will use the minor version for the encoding +func EncodeAndCompressUsingMinorVersion(val interface{}, compress bool, minorVersion int) ([]byte, error) { + buf := bytes.NewBuffer([]byte{}) + err := EncodeToAndCompressUsingMinorVersion(buf, val, compress, minorVersion) + return buf.Bytes(), err +} + +// MarshalAndCompress is an alias for EncodeTo. +// If compress is true the body will be compressed +// It will use the minor version for the encoding +func MarshalAndCompressUsingMinorVersion(w io.Writer, val interface{}, compress bool, minorVersion int) error { + return EncodeToAndCompressUsingMinorVersion(w, val, compress, minorVersion) +} + +// MarshalResponseAndCompress encodes val into a BURP Response struct and writes it to w, +// returning any error. +// If compress is true the body will be compressed +// It will use the minor version for the encoding +func MarshalResponseAndCompressUsingMinorVersion(w io.Writer, val interface{}, compress bool, minorVersion int) (err error) { + resp, err := EncodeAndCompressUsingMinorVersion(val, compress, minorVersion) + + write4(w, uint32(len(resp))) + w.Write(resp) + + return +} diff --git a/vendor/github.com/landonia/gobert/type.go b/vendor/github.com/landonia/gobert/type.go new file mode 100644 index 00000000..3b0875c0 --- /dev/null +++ b/vendor/github.com/landonia/gobert/type.go @@ -0,0 +1,349 @@ +package bert + +import ( + "io" + "io/ioutil" +) + +const ( + VersionTag = 131 + DistributionHeaderTag = 68 + CompressedTag = 80 + SmallIntTag = 97 + IntTag = 98 + SmallBignumTag = 110 + LargeBignumTag = 111 + FloatTag = 99 + NewFloatTag = 70 + AtomCacheRefTag = 82 + AtomTag = 100 + SmallAtomTag = 115 + AtomUtf8Tag = 118 + SmallAtomUtf8Tag = 119 + SmallTupleTag = 104 + LargeTupleTag = 105 + NilTag = 106 + StringTag = 107 + ListTag = 108 + BinTag = 109 + MapTag = 116 + PidTag = 103 + PortTag = 102 + FunTag = 117 + ReferenceTag = 101 + NewReferenceTag = 114 + NewFunTag = 112 
+ ExportTag = 113 +) + +type Atom string + +const ( + BertAtom = Atom("bert") + NilAtom = Atom("nil") + TrueAtom = Atom("true") + FalseAtom = Atom("false") +) + +const ( + MinorVersion0 = 0 + MinorVersion1 = 1 +) + +type Term interface{} + +type Request struct { + Kind Atom + Module Atom + Function Atom + Arguments []Term +} + +const ( + // NewCacheEntry is used to determine if an entry is a new cache entry + NewCacheEntry byte = 8 + + // SegmentIndex can be used to extract the segment index + SegmentIndex byte = 7 + + // LongAtoms is used to determine if 2 byte atoms are used + LongAtoms byte = 1 +) + +// As of erts version 5.7.2 the old atom cache protocol was dropped and a new one was introduced. +// This atom cache protocol introduced the distribution header. Nodes with erts versions earlier than +// 5.7.2 can still communicate with new nodes, but no distribution header and no atom cache will be used. +// +// The distribution header currently only contains an atom cache reference section, but could in the future +// contain more information. The distribution header precedes one or more Erlang terms on the external format. +// For more information see the documentation of the protocol between connected nodes in the distribution +// protocol documentation. +// +// ATOM_CACHE_REF entries with corresponding AtomCacheReferenceIndex in terms encoded on the external format +// following a distribution header refers to the atom cache references made in the distribution header. +// The range is 0 <= AtomCacheReferenceIndex < 255, i.e., at most 255 different atom cache references +// from the following terms can be made. 
+type DistributionHeader struct {
+
+	// bucket holds all the available atoms that have been set
+	// bucket[AtomCacheReferenceIndex][SegmentIndex][InternalSegmentIndex]
+	bucket [255][8][256]*Atom
+
+	// cache holds the current lookup into the bucket for the specific atom cache reference
+	// cache[AtomCacheReferenceIndex]
+	cache [255]*cacheIndex
+
+	// Flags for an even AtomCacheReferenceIndex are located in the least significant half byte and flags for an
+	// odd AtomCacheReferenceIndex are located in the most significant half byte.
+	//
+	// The flag field of an atom cache reference has the following format:
+	// 1 bit              3 bits
+	// NewCacheEntryFlag  SegmentIndex
+	//
+	// The most significant bit is the NewCacheEntryFlag. If set, the corresponding cache reference is new.
+	// The three least significant bits are the SegmentIndex of the corresponding atom cache entry.
+	// An atom cache consists of 8 segments each of size 256, i.e., an atom cache can contain 2048 entries.
+	flags []byte
+}
+
+// cacheIndex holds the current cache position for an atom
+type cacheIndex struct {
+
+	// SegmentIndex of the current atom cache
+	segmentIndex uint8
+
+	// InternalIndex of the current atom cache
+	internalIndex uint8
+}
+
+// GetAtom will return the atom that exists for the atomCacheReferenceIndex.
+// Pointer receiver: a value receiver would copy the ~520K-entry bucket
+// array on every call.
+func (dh *DistributionHeader) GetAtom(atomCacheReferenceIndex uint8) (*Atom, error) {
+
+	// Get the atom cache index position
+	atomCache := dh.cache[atomCacheReferenceIndex]
+
+	// Look up the segment and internal index from the latest flags
+	if atomCache != nil {
+		atom := dh.bucket[atomCacheReferenceIndex][atomCache.segmentIndex][atomCache.internalIndex]
+		if atom == nil {
+			return nil, ErrMissingAtom
+		}
+		return atom, nil
+	}
+	return nil, ErrMissingAtom
+}
+
+// Update reads an atom cache header from r and updates the cache.
+// BUG FIX: this must use a pointer receiver — bucket and cache are arrays,
+// so with a value receiver every assignment below landed in a copy and was
+// silently lost, leaving the cache permanently empty.
+func (dh *DistributionHeader) Update(r io.Reader) error {
+	noAtomRefs, err := read1(r)
+	if err != nil {
+		return err
+	}
+
+	// If NumberOfAtomCacheRefs is 0, Flags and AtomCacheRefs are omitted
+	if noAtomRefs != 0 {
+
+		// Flags consists of NumberOfAtomCacheRefs/2+1 bytes
+		flags, err := ioutil.ReadAll(io.LimitReader(r, int64((noAtomRefs/2)+1)))
+		if err != nil {
+			return err
+		}
+		// Keep the most recent flags on the header for later inspection.
+		dh.flags = flags
+
+		// Are these long atoms? Check the last half byte least significant
+		// bit of the flags we just read.
+		// BUG FIX: previously tested dh.flags, which was never assigned,
+		// so the first Update call panicked indexing a nil slice.
+		atomLen := 1
+		if flags[len(flags)-1]&LongAtoms == LongAtoms {
+			atomLen = 2
+		}
+
+		// Even refs use the low half byte of flags[i/2]; odd refs the high one.
+		for i, even := 0, true; i < noAtomRefs; i, even = i+1, !even {
+
+			// Get the cache item
+			cacheItem := dh.cache[i]
+			if cacheItem == nil {
+				cacheItem = &cacheIndex{}
+				dh.cache[i] = cacheItem
+			}
+
+			// We need the cache entry and segment index
+			newCacheEntry := (flags[i/2] & NewCacheEntry) == NewCacheEntry
+			cacheItem.segmentIndex = flags[i/2] & SegmentIndex
+			if !even {
+				newCacheEntry = (flags[i/2] >> 4 & NewCacheEntry) == NewCacheEntry
+				cacheItem.segmentIndex = flags[i/2] >> 4 & SegmentIndex
+			}
+
+			// We have the information to extract this atom
+			internalSegmentIndex, err := read1(r)
+			if err != nil {
+				return err
+			}
+			cacheItem.internalIndex = uint8(internalSegmentIndex)
+
+			// Extract the atom info for this is a new entry
+			if newCacheEntry {
+
+				// The length of the atom (can be 1 or 2 bytes)
+				alen := 0
+				if atomLen == 2 {
+					alen, err = read2(r)
+				} else {
+					alen, err = read1(r)
+				}
+				if err != nil {
+					return err
+				}
+
+				// Get the atom
+				atomBytes, err := ioutil.ReadAll(io.LimitReader(r, int64(alen)))
+				if err != nil {
+					return err
+				}
+
+				// Store the atom in the bucket using the index position
+				atom := Atom(atomBytes)
+				dh.bucket[i][cacheItem.segmentIndex][cacheItem.internalIndex] = &atom
+			}
+		}
+	}
+	return nil
+}
+
+// Reference wraps the REFERENCE_EXT tag type (101)
+//
+// Encode a reference object (an object generated with make_ref/0).
+//
+// The Node term is an encoded atom, i.e. ATOM_EXT, SMALL_ATOM_EXT or ATOM_CACHE_REF.
+// +// The ID field contains a big-endian unsigned integer, but should be regarded as uninterpreted data +// since this field is node specific. Creation is a byte containing a node serial number that makes it +// possible to separate old (crashed) nodes from a new one. +// +// In ID, only 18 bits are significant; the rest should be 0. In Creation, only 2 bits are significant; +// the rest should be 0. See NEW_REFERENCE_EXT. +type Reference struct { + Node Atom + ID uint32 + Creation uint8 +} + +// NewReference wraps the NEW_REFERENCE_EXT tag type (114) +// +// Node and Creation are as in REFERENCE_EXT. +// +// ID contains a sequence of big-endian unsigned integers (4 bytes each, so N' is a multiple of 4), +// but should be regarded as uninterpreted data. +// +// N' = 4 * Len. +// +// In the first word (four bytes) of ID, only 18 bits are significant, the rest should be 0. +// +// In Creation, only 2 bits are significant, the rest should be 0. +// +// NEW_REFERENCE_EXT was introduced with distribution version 4. In version 4, N' should be at most 12 +type NewReference struct { + Node Atom + Creation uint8 + ID []uint32 +} + +// Port wraps the PORT_EXT tag type (102) +// +// Encode a port object (obtained form open_port/2). The ID is a node specific identifier for a local port. +// Port operations are not allowed across node boundaries. The Creation works just like in REFERENCE_EXT. +type Port struct { + Node Atom + ID uint32 + Creation uint8 +} + +// Pid wraps the PID_EXT tag type (103) +// +// Encode a process identifier object (obtained from spawn/3 or friends). The ID and Creation fields +// works just like in REFERENCE_EXT, while the Serial field is used to improve safety. In ID, +// only 15 bits are significant; the rest should be 0. +type Pid struct { + Node Atom + ID uint32 + Serial uint32 + Creation uint8 +} + +// Func wraps the FUN_EXT tag type (117) +// +// Pid +// is a process identifier as in PID_EXT. 
It represents the process in which the fun was created. +// +// Module +// is an encoded as an atom, using ATOM_EXT, SMALL_ATOM_EXT or ATOM_CACHE_REF. This is the module that the fun is implemented in. +// +// Index +// is an integer encoded using SMALL_INTEGER_EXT or INTEGER_EXT. It is typically a small index into the module's fun table. +// +// Uniq +// is an integer encoded using SMALL_INTEGER_EXT or INTEGER_EXT. Uniq is the hash value of the parse for the fun. +// +// Free vars +// is NumFree number of terms, each one encoded according to its type. +type Func struct { + Pid Pid + Module Atom + Index uint32 + Uniq uint32 + FreeVars []Term +} + +// NewFunc wraps the NEW_FUN_EXT tag type (112) +// This is the new encoding of internal funs: fun F/A and fun(Arg1,..) -> ... end. +// +// Size +// is the total number of bytes, including the Size field. +// +// Arity +// is the arity of the function implementing the fun. +// +// Uniq +// is the 16 bytes MD5 of the significant parts of the Beam file. +// +// Index +// is an index number. Each fun within a module has an unique index. Index is stored in big-endian byte order. +// +// NumFree +// is the number of free variables. +// +// Module +// is an encoded as an atom, using ATOM_EXT, SMALL_ATOM_EXT or ATOM_CACHE_REF. This is the module that the fun is implemented in. +// +// OldIndex +// is an integer encoded using SMALL_INTEGER_EXT or INTEGER_EXT. It is typically a small index into the module's fun table. +// +// OldUniq +// is an integer encoded using SMALL_INTEGER_EXT or INTEGER_EXT. Uniq is the hash value of the parse tree for the fun. +// +// Pid +// is a process identifier as in PID_EXT. It represents the process in which the fun was created. +// +// Free vars +// is NumFree number of terms, each one encoded according to its type. 
+type NewFunc struct { + Arity uint8 + Uniq []byte + Index uint32 + Module Atom + OldIndex uint32 + OldUnique uint32 + Pid Pid + FreeVars []Term +} + +// Export wraps the EXPORT_EXT tag type (113) +// This term is the encoding for external funs: fun M:F/A. +// +// Module and Function are atoms (encoded using ATOM_EXT, SMALL_ATOM_EXT or ATOM_CACHE_REF). +// +// Arity is an integer encoded using SMALL_INTEGER_EXT. +type Export struct { + Module Atom + Function Atom + Arity uint8 +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 870c0749..c672371a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -26,6 +26,12 @@ "revision": "cfe83fdee93950807ff3d6bb5620cf9b407558f7", "revisionTime": "2016-11-19T08:28:11Z" }, + { + "checksumSHA1": "aFRmOoohKqT5mpLuoh/0KkAky2Q=", + "path": "github.com/landonia/gobert", + "revision": "421b9bb95e9ed5876d367a4c3b5c790d2af3e7ca", + "revisionTime": "2017-04-11T12:47:30Z" + }, { "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", "path": "github.com/matttproud/golang_protobuf_extensions/pbutil",