From 5342df0e7348148dd65879a73876a23bb11bd27b Mon Sep 17 00:00:00 2001
From: Xenia Nisskhen
Date: Tue, 20 May 2025 20:39:11 +0500
Subject: [PATCH 1/2] feat(linter): Add wsl linter

Enable the wsl whitespace linter in .golangci.yml and fix every
violation it reports across the code base.
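Most of the diff is mechanical whitespace: wsl wants "cuddled"
statement groups separated by blank lines. An illustrative
before/after, taken from this patch's first hunk in
autocomplete/autocomplete.go:

    // Before: wsl reports the return as cuddled with the assignments.
    fromDate := date.FromTimeToDaysFormat(tm.AddDate(0, 0, -autocompleteDays))
    untilDate := date.UntilTimeToDaysFormat(tm)
    return fromDate, untilDate

    // After: a blank line separates the assignment block from the return.
    fromDate := date.FromTimeToDaysFormat(tm.AddDate(0, 0, -autocompleteDays))
    untilDate := date.UntilTimeToDaysFormat(tm)

    return fromDate, untilDate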
---
 .golangci.yml | 2 +-
 autocomplete/autocomplete.go | 66 ++++++++++++++++++
 autocomplete/autocomplete_test.go | 8 +++
 cache/cache.go | 4 ++
 capabilities/handler.go | 8 +++
 cmd/e2e-test/carbon-clickhouse.go | 9 +++
 cmd/e2e-test/checks.go | 54 +++++++++++++++
 cmd/e2e-test/clickhouse.go | 19 ++++++
 cmd/e2e-test/e2etesting.go | 90 +++++++++++++++++++++++++
 cmd/e2e-test/graphite-clickhouse.go | 12 ++++
 cmd/e2e-test/main.go | 21 ++++++
 cmd/e2e-test/rproxy.go | 4 ++
 cmd/graphite-clickhouse-client/main.go | 33 +++++++++
 config/config.go | 63 +++++++++++++++++
 config/config_test.go | 18 +++++
 config/json.go | 1 +
 find/find.go | 3 +
 find/handler.go | 27 ++++++++
 find/handler_json_test.go | 5 ++
 finder/base.go | 5 ++
 finder/date.go | 1 +
 finder/date_reverse_test.go | 2 +
 finder/finder.go | 4 ++
 finder/index.go | 12 ++++
 finder/index_test.go | 1 +
 finder/plain_from_tagged.go | 6 ++
 finder/prefix.go | 3 +
 finder/prefix_test.go | 2 +
 finder/reverse.go | 2 +
 finder/split.go | 10 +++
 finder/split_test.go | 1 +
 finder/tag.go | 14 ++++
 finder/tag_test.go | 3 +
 finder/tagged.go | 58 ++++++++++++++++
 finder/tagged_test.go | 19 ++++++
 finder/unescape.go | 4 ++
 graphite-clickhouse.go | 40 +++++++++++
 healthcheck/healthcheck.go | 12 ++++
 helper/RowBinary/encode.go | 14 ++++
 helper/clickhouse/clickhouse.go | 39 +++++++++++
 helper/clickhouse/external-data.go | 6 ++
 helper/clickhouse/external-data_test.go | 11 +++
 helper/client/datetime.go | 1 +
 helper/client/find.go | 15 +++++
 helper/client/render.go | 27 ++++++++
 helper/client/tags.go | 37 ++++++++++
 helper/client/types.go | 1 +
 helper/date/date.go | 2 +
 helper/date/date_test.go | 1 +
 helper/datetime/datetime.go | 12 +++-
 helper/datetime/datetime_test.go | 4 ++
 helper/headers/headers.go | 3 +
 helper/pickle/pickle.go | 2 +
 helper/point/func.go | 15 +++++
 helper/point/func_test.go | 10 +++
 helper/point/points.go | 12 ++++
 helper/rollup/aggr.go | 12 ++++
 helper/rollup/compact.go | 3 +
 helper/rollup/remote.go | 8 +++
 helper/rollup/rollup.go | 4 ++
 helper/rollup/rules.go | 33 ++++++++-
 helper/rollup/rules_test.go | 13 ++++
 helper/rollup/xml.go | 3 +
 helper/tests/clickhouse/server.go | 1 +
 helper/tests/compare/compare.go | 4 ++
 helper/tests/compare/expand/expand.go | 2 +
 helper/utils/utils_test.go | 1 -
 index/handler.go | 2 +
 index/index.go | 8 +++
 index/index_test.go | 11 +++
 limiter/alimiter.go | 20 ++++++
 limiter/alimiter_test.go | 9 +++
 limiter/limiter.go | 4 ++
 limiter/wlimiter.go | 14 ++++
 load_avg/load_avg.go | 4 ++
 load_avg/load_avg_test.go | 2 +
 metrics/metrics.go | 68 +++++++++++++++++++
 metrics/metrics_test.go | 20 ++++++
 metrics/query_metrics.go | 9 +++
 pkg/alias/map.go | 12 ++++
 pkg/alias/map_tagged_test.go | 9 +++
 pkg/alias/map_test.go | 12 ++++
 pkg/dry/math.go | 7 ++
 pkg/dry/strings.go | 2 +
 pkg/reverse/reverse.go | 4 ++
 pkg/scope/http_request.go | 2 +
 pkg/scope/key.go | 3 +
 pkg/scope/logger.go | 7 ++
 pkg/where/match.go | 23 +++++++
 pkg/where/match_test.go | 1 +
 pkg/where/where.go | 21 ++++++
 pkg/where/where_test.go | 2 +
 prometheus/labels.go | 2 +
 prometheus/logger.go | 6 ++
 prometheus/matcher.go | 7 ++
 prometheus/querier_select.go | 10 +++
 prometheus/querier_select_test.go | 3 +
 prometheus/series_set.go | 5 ++
 render/data/carbonlink.go | 4 ++
 render/data/carbonlink_test.go | 4 ++
 render/data/ch_response.go | 25 +++++++
 render/data/common_step.go | 2 +
 render/data/common_step_test.go | 5 ++
 render/data/data.go | 18 +++
 render/data/data_parse_test.go | 23 +++++++
 render/data/multi_target.go | 27 ++++++++
 render/data/query.go | 36 ++++++++++
 render/data/query_test.go | 49 ++++++++++++++
 render/data/targets.go | 2 +
 render/data/targets_test.go | 2 +
 render/handler.go | 64 ++++++++++++++++++
 render/handler_test.go | 1 +
 render/reply/formatter.go | 4 ++
 render/reply/formatter_test.go | 19 ++++++
 render/reply/json.go | 21 ++++++
 render/reply/pickle.go | 11 +++
 render/reply/protobuf.go | 13 ++++
 render/reply/v2_pb.go | 6 ++
 render/reply/v3_pb.go | 6 ++
 sd/nginx/nginx.go | 47 +++++++++++
 sd/register.go | 6 ++
 sd/utils/utils.go | 19 ++++++
 tagger/metric.go | 1 +
 tagger/rule.go | 6 ++
 tagger/rule_test.go | 2 +
 tagger/set.go | 1 +
 tagger/tagger.go | 65 ++++++++++++++++++
 tagger/tagger_test.go | 14 ++++
 128 files changed, 1750 insertions(+), 4 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 05e9f78f5..a1d8c0948 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -49,7 +49,7 @@ linters:
     - usestdlibvars
 #    - wastedassign
     - whitespace
-#    - wsl
+    - wsl
   settings:
     gocyclo:
       min-complexity: 15

diff --git a/autocomplete/autocomplete.go b/autocomplete/autocomplete.go index 16e931e71..4e50c54b6 100644 --- a/autocomplete/autocomplete.go +++ b/autocomplete/autocomplete.go @@ -53,6 +53,7 @@ func NewValues(config *config.Config) *Handler { func dateString(autocompleteDays int, tm time.Time) (string, string) { fromDate := date.FromTimeToDaysFormat(tm.AddDate(0, 0, -autocompleteDays)) untilDate := date.UntilTimeToDaysFormat(tm) + return fromDate, untilDate } @@ -73,6 +74,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *Handler) requestExpr(r *http.Request) (*where.Where, *where.Where, map[string]bool, error) { f := r.Form["expr"] expr := make([]string, 0, len(f)) + for i := 0; i < len(f); i++ { if f[i] != "" { expr = append(expr, f[i]) } } @@ -108,7 +110,9 @@ func (h *Handler) requestExpr(r *http.Request) (*where.Where, *where.Where, map[ func taggedKey(typ string, truncateSec int32, fromDate, untilDate string, tag string, exprs []string, tagPrefix string, limit int) (string, string) { ts := utils.TimestampTruncate(timeNow().Unix(), time.Duration(truncateSec)*time.Second) + var sb stringutils.Builder + sb.Grow(128) sb.WriteString(typ) sb.WriteString(fromDate) @@ -117,30 +121,37 @@ func taggedKey(typ string, truncateSec int32, fromDate, untilDate string, tag st sb.WriteString(";limit=") sb.WriteInt(int64(limit), 10) tagStart := sb.Len() + if tagPrefix != "" { sb.WriteString(";tagPrefix=") sb.WriteString(tagPrefix) } + if tag != "" { sb.WriteString(";tag=") sb.WriteString(tag) } + for _, expr := range exprs { sb.WriteString(";expr='") sb.WriteString(strings.Replace(expr, " = ", "=", 1)) sb.WriteByte('\'') } + exprEnd := sb.Len() sb.WriteString(";ts=") sb.WriteString(strconv.FormatInt(ts, 10)) s := sb.String() + return s, s[tagStart:exprEnd] } func taggedValuesKey(typ string, truncateSec int32, fromDate, untilDate string, tag string, exprs []string, valuePrefix string, limit int) (string, string) { ts := utils.TimestampTruncate(timeNow().Unix(), time.Duration(truncateSec)*time.Second) + var sb stringutils.Builder + sb.Grow(128) sb.WriteString(typ) sb.WriteString(fromDate) @@ -149,24 +160,29 @@ func taggedValuesKey(typ string, truncateSec int32, fromDate, untilDate string, sb.WriteString(";limit=") sb.WriteInt(int64(limit), 10) tagStart := sb.Len() + if valuePrefix != "" { sb.WriteString(";valuePrefix=")
sb.WriteString(valuePrefix) } + if tag != "" { sb.WriteString(";tag=") sb.WriteString(tag) } + for _, expr := range exprs { sb.WriteString(";expr='") sb.WriteString(strings.Replace(expr, " = ", "=", 1)) sb.WriteByte('\'') } + exprEnd := sb.Len() sb.WriteString(";ts=") sb.WriteString(strconv.FormatInt(ts, 10)) s := sb.String() + return s, s[tagStart:exprEnd] } @@ -206,19 +222,23 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { defer func() { if rec := recover(); rec != nil { status = http.StatusInternalServerError + logger.Error("panic during eval:", zap.String("requestID", scope.String(r.Context(), "requestID")), zap.Any("reason", rec), zap.Stack("stack"), ) + answer := fmt.Sprintf("%v\nStack trace: %v", rec, zap.Stack("").String) http.Error(w, answer, status) } + d := time.Since(start) dMS := d.Milliseconds() logs.AccessLog(accessLogger, h.config, r, status, d, queueDuration, findCache, queueFail) limiter.SendDuration(queueDuration.Milliseconds()) metrics.SendFindMetrics(metrics.TagsRequestMetric, status, dMS, 0, h.config.Metrics.ExtendedStat, metricsCount) + if !findCache && chReadRows != 0 && chReadBytes != 0 { errored := status != http.StatusOK && status != http.StatusNotFound metrics.SendQueryRead(metrics.AutocompleteQMetric, 0, 0, dMS, metricsCount, readBytes, chReadRows, chReadBytes, errored) @@ -237,10 +257,12 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { if err == finder.ErrCostlySeriesByTag { status = http.StatusForbidden http.Error(w, err.Error(), status) + return } else if err != nil { status = http.StatusBadRequest http.Error(w, err.Error(), status) + return } } @@ -248,18 +270,22 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { fromDate, untilDate := dateString(h.config.ClickHouse.TaggedAutocompleDays, start) var key string + exprs := r.Form["expr"] // params := taggedTagsQuery(exprs, tagPrefix, limit) useCache := h.config.Common.FindCache != nil && h.config.Common.FindCacheConfig.FindTimeoutSec > 0 && !parser.TruthyBool(r.FormValue("noCache")) if useCache { key, _ = taggedKey("tags;", h.config.Common.FindCacheConfig.FindTimeoutSec, fromDate, untilDate, "", exprs, tagPrefix, limit) + body, err = h.config.Common.FindCache.Get(key) if err == nil { if metrics.FinderCacheMetrics != nil { metrics.FinderCacheMetrics.CacheHits.Add(1) } + findCache = true + w.Header().Set("X-Cached-Find", strconv.Itoa(int(h.config.Common.FindCacheConfig.FindTimeoutSec))) } } @@ -268,6 +294,7 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusBadRequest http.Error(w, err.Error(), status) + return } @@ -276,11 +303,13 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { if len(usedTags) == 0 { valueSQL = "splitByChar('=', Tag1)[1] AS value" + if tagPrefix != "" { wr.And(where.HasPrefix("Tag1", tagPrefix)) } } else { valueSQL = "splitByChar('=', arrayJoin(Tags))[1] AS value" + if tagPrefix != "" { wr.And(where.HasPrefix("arrayJoin(Tags)", tagPrefix)) } @@ -303,24 +332,31 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { ctx context.Context cancel context.CancelFunc ) + if limiter.Enabled() { ctx, cancel = context.WithTimeout(context.Background(), h.config.ClickHouse.IndexTimeout) defer cancel() err = limiter.Enter(ctx, "tags") queueDuration = time.Since(start) + if err != nil { status = http.StatusServiceUnavailable queueFail = true + logger.Error(err.Error()) http.Error(w, err.Error(), status) + return } + queueDuration = 
time.Since(start) entered = true + defer func() { if entered { limiter.Leave(ctx, "tags") + entered = false } }() @@ -341,6 +377,7 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { if entered { // release early as possible limiter.Leave(ctx, "tags") + entered = false } @@ -348,12 +385,14 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { status, _ = clickhouse.HandleError(w, err) return } + readBytes = int64(len(body)) if useCache { if metrics.FinderCacheMetrics != nil { metrics.FinderCacheMetrics.CacheMisses.Add(1) } + h.config.Common.FindCache.Set(key, body, h.config.Common.FindCacheConfig.FindTimeoutSec) } } @@ -362,6 +401,7 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { tags := make([]string, 0, uint64(len(rows))+1) // +1 - reserve for "name" tag hasName := false + for i := 0; i < len(rows); i++ { if rows[i] == "" { continue @@ -387,9 +427,11 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { } sort.Strings(tags) + if len(tags) > limit { tags = tags[:limit] } + if useCache { if findCache { logger.Info("finder", zap.String("get_cache", key), @@ -406,6 +448,7 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusInternalServerError http.Error(w, err.Error(), status) + return } @@ -453,19 +496,23 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { defer func() { if rec := recover(); rec != nil { status = http.StatusInternalServerError + logger.Error("panic during eval:", zap.String("requestID", scope.String(r.Context(), "requestID")), zap.Any("reason", rec), zap.Stack("stack"), ) + answer := fmt.Sprintf("%v\nStack trace: %v", rec, zap.Stack("").String) http.Error(w, answer, status) } + d := time.Since(start) dMS := d.Milliseconds() logs.AccessLog(accessLogger, h.config, r, status, d, queueDuration, findCache, queueFail) limiter.SendDuration(queueDuration.Milliseconds()) metrics.SendFindMetrics(metrics.TagsRequestMetric, status, dMS, 0, h.config.Metrics.ExtendedStat, metricsCount) + if !findCache && chReadRows > 0 && chReadBytes > 0 { errored := status != http.StatusOK && status != http.StatusNotFound metrics.SendQueryRead(metrics.AutocompleteQMetric, 0, 0, dMS, metricsCount, int64(len(body)), chReadRows, chReadBytes, errored) @@ -473,6 +520,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { }() r.ParseMultipartForm(1024 * 1024) + tag := r.FormValue("tag") if tag == "name" { tag = "__name__" @@ -487,6 +535,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusBadRequest http.Error(w, err.Error(), status) + return } } @@ -494,6 +543,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { fromDate, untilDate := dateString(h.config.ClickHouse.TaggedAutocompleDays, start) var key string + exprs := r.Form["expr"] // params := taggedValuesQuery(tag, exprs, valuePrefix, limit) @@ -502,12 +552,15 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if useCache { // logger = logger.With(zap.String("use_cache", "true")) key, _ = taggedValuesKey("values;", h.config.Common.FindCacheConfig.FindTimeoutSec, fromDate, untilDate, tag, exprs, valuePrefix, limit) + body, err = h.config.Common.FindCache.Get(key) if err == nil { if metrics.FinderCacheMetrics != nil { metrics.FinderCacheMetrics.CacheHits.Add(1) } + findCache = true + w.Header().Set("X-Cached-Find", 
strconv.Itoa(int(h.config.Common.FindCacheConfig.FindTimeoutSec))) } } @@ -517,10 +570,12 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if err == finder.ErrCostlySeriesByTag { status = http.StatusForbidden http.Error(w, err.Error(), status) + return } else if err != nil { status = http.StatusBadRequest http.Error(w, err.Error(), status) + return } @@ -549,24 +604,31 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { ctx context.Context cancel context.CancelFunc ) + if limiter.Enabled() { ctx, cancel = context.WithTimeout(context.Background(), h.config.ClickHouse.IndexTimeout) defer cancel() err = limiter.Enter(ctx, "tags") queueDuration = time.Since(start) + if err != nil { status = http.StatusServiceUnavailable queueFail = true + logger.Error(err.Error()) http.Error(w, err.Error(), status) + return } + queueDuration = time.Since(start) entered = true + defer func() { if entered { limiter.Leave(ctx, "tags") + entered = false } }() @@ -587,6 +649,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if entered { // release early as possible limiter.Leave(ctx, "tags") + entered = false } @@ -599,6 +662,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if metrics.FinderCacheMetrics != nil { metrics.FinderCacheMetrics.CacheMisses.Add(1) } + h.config.Common.FindCache.Set(key, body, h.config.Common.FindCacheConfig.FindTimeoutSec) } } @@ -609,6 +673,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if len(rows) > 0 && rows[len(rows)-1] == "" { rows = rows[:len(rows)-1] } + metricsCount = int64(len(rows)) } @@ -628,6 +693,7 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusInternalServerError http.Error(w, err.Error(), status) + return } diff --git a/autocomplete/autocomplete_test.go b/autocomplete/autocomplete_test.go index 035c19be4..561340eed 100644 --- a/autocomplete/autocomplete_test.go +++ b/autocomplete/autocomplete_test.go @@ -53,7 +53,9 @@ func TestHandler_ServeValues(t *testing.T) { timeNow = func() time.Time { return time.Unix(1669714247, 0) } + metrics.DisableMetrics() + srv := chtest.NewTestServer() defer srv.Close() @@ -86,6 +88,7 @@ func TestHandler_ServeValues(t *testing.T) { } var queries uint64 + for i, tt := range tests { t.Run(tt.request.URL.RawQuery+"#"+strconv.Itoa(i), func(t *testing.T) { for i := 0; i < 2; i++ { @@ -102,7 +105,9 @@ func TestTagsAutocomplete_ServeValuesCached(t *testing.T) { timeNow = func() time.Time { return time.Unix(1669714247, 0) } + metrics.DisableMetrics() + srv := chtest.NewTestServer() defer srv.Close() @@ -115,7 +120,9 @@ func TestTagsAutocomplete_ServeValuesCached(t *testing.T) { Size: 8192, FindTimeoutSec: 1, } + var err error + cfg.Common.FindCache, err = config.CreateCache("autocomplete", &cfg.Common.FindCacheConfig) if err != nil { t.Fatalf("Failed to create find cache: %v", err) @@ -147,6 +154,7 @@ func TestTagsAutocomplete_ServeValuesCached(t *testing.T) { } var queries uint64 + for i, tt := range tests { t.Run(tt.request.URL.RawQuery+"#"+strconv.Itoa(i), func(t *testing.T) { testResponce(t, 0, h, &tt, "") diff --git a/cache/cache.go b/cache/cache.go index fcbb625f9..a4604d52f 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -25,6 +25,7 @@ type BytesCache interface { func NewExpireCache(maxsize uint64) BytesCache { ec := expirecache.New[string, []byte](maxsize) go ec.ApproximateCleaner(10 * time.Second) + return &ExpireCache{ec: ec} } @@ -62,6 +63,7 
@@ func (m *MemcachedCache) Get(k string) ([]byte, error) { done := make(chan bool, 1) var err error + var item *memcache.Item go func() { @@ -83,6 +85,7 @@ func (m *MemcachedCache) Get(k string) ([]byte, error) { if errors.Is(err, memcache.ErrCacheMiss) { err = ErrNotFound } + return nil, err } @@ -92,6 +95,7 @@ func (m *MemcachedCache) Get(k string) ([]byte, error) { func (m *MemcachedCache) Set(k string, v []byte, expire int32) { key := sha256.Sum256([]byte(k)) hk := hex.EncodeToString(key[:]) + go func() { _ = m.client.Set(&memcache.Item{Key: m.prefix + hk, Value: v, Expiration: expire}) }() diff --git a/capabilities/handler.go b/capabilities/handler.go index 145555487..3e20079e6 100644 --- a/capabilities/handler.go +++ b/capabilities/handler.go @@ -57,6 +57,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } var pv3Request v3pb.CapabilityRequest + err = pv3Request.Unmarshal(body) if err != nil { status = http.StatusBadRequest @@ -67,6 +68,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { hostname = "(unknown)" } + pvResponse := v3pb.CapabilityResponse{ SupportedProtocols: []string{"carbonapi_v3_pb", "carbonapi_v2_pb", "graphite-web-pickle"}, Name: hostname, @@ -77,22 +79,28 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } var data []byte + contentType := "" + switch format { case "json": contentType = "application/json" + data, err = json.Marshal(pvResponse) if err != nil { status = http.StatusInternalServerError http.Error(w, err.Error(), status) + return } case "carbonapi_v3_pb": contentType = "application/x-carbonapi-v3-pb" + data, err = pvResponse.Marshal() if err != nil { status = http.StatusBadRequest http.Error(w, "Bad request (unsupported format)", status) + return } } diff --git a/cmd/e2e-test/carbon-clickhouse.go b/cmd/e2e-test/carbon-clickhouse.go index fcd45db3a..7f6d937ab 100644 --- a/cmd/e2e-test/carbon-clickhouse.go +++ b/cmd/e2e-test/carbon-clickhouse.go @@ -30,10 +30,13 @@ func (c *CarbonClickhouse) Start(testDir, clickhouseURL string) (string, error) if len(c.Version) == 0 { c.Version = "latest" } + if len(c.DockerImage) == 0 { c.DockerImage = "ghcr.io/go-graphite/carbon-clickhouse" } + var err error + c.address, err = getFreeTCPPort("") if err != nil { return "", err @@ -54,11 +57,13 @@ func (c *CarbonClickhouse) Start(testDir, clickhouseURL string) (string, error) name := filepath.Base(c.Template) tpl := path.Join(testDir, c.Template) + tmpl, err := template.New(name).ParseFiles(tpl) if err != nil { c.Cleanup() return "", err } + param := struct { CLICKHOUSE_URL string CCH_ADDR string @@ -68,11 +73,13 @@ func (c *CarbonClickhouse) Start(testDir, clickhouseURL string) (string, error) } configFile := path.Join(c.storeDir, "carbon-clickhouse.conf") + f, err := os.OpenFile(configFile, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { c.Cleanup() return "", err } + err = tmpl.ExecuteTemplate(f, name, param) if err != nil { c.Cleanup() @@ -95,6 +102,7 @@ func (c *CarbonClickhouse) Start(testDir, clickhouseURL string) (string, error) cchStart = append(cchStart, c.DockerImage+":"+c.Version) cmd := exec.Command(DockerBinary, cchStart...) 
+ out, err := cmd.CombinedOutput() if err == nil { dateLocal, _ := exec.Command("date").Output() @@ -119,6 +127,7 @@ func (c *CarbonClickhouse) Stop(delete bool) (string, error) { if err == nil && delete { return c.Delete() } + return string(out), err } diff --git a/cmd/e2e-test/checks.go b/cmd/e2e-test/checks.go index e4b0fcc5f..61f9121e8 100644 --- a/cmd/e2e-test/checks.go +++ b/cmd/e2e-test/checks.go @@ -21,10 +21,12 @@ func isFindCached(header http.Header) (string, bool) { if header == nil { return "", false } + v, exist := header["X-Cached-Find"] if len(v) == 0 { return "", false } + return v[0], exist } @@ -32,10 +34,12 @@ func requestId(header http.Header) string { if header == nil { return "" } + v, exist := header["X-Gch-Request-Id"] if exist && len(v) > 0 { return v[0] } + return "" } @@ -44,13 +48,16 @@ func compareFindMatch(errors *[]string, name, url string, actual, expected []cli if findCached { cacheTTLStr = strconv.Itoa(cacheTTL) } + id := requestId(header) + if header != nil { v, actualFindCached := isFindCached(header) if actualFindCached != findCached || cacheTTLStr != v { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s: X-Cached-Find want '%s', got '%s'", name, id, url, cacheTTLStr, v)) } } + maxLen := compare.Max(len(expected), len(actual)) for i := 0; i < maxLen; i++ { if i > len(actual)-1 { @@ -66,20 +73,26 @@ func compareFindMatch(errors *[]string, name, url string, actual, expected []cli func verifyMetricsFind(ch *Clickhouse, gch *GraphiteClickhouse, check *MetricsFindCheck) []string { var errors []string + httpClient := http.Client{ Timeout: check.Timeout, } address := gch.URL() + for _, format := range check.Formats { name := "" + if url, result, respHeader, err := client.MetricsFind(&httpClient, address, format, check.Query, check.from, check.until); err == nil { id := requestId(respHeader) if check.ErrorRegexp != "" { errors = append(errors, fmt.Sprintf("TRY[%s] %s %s: want error with '%s'", "", id, url, check.ErrorRegexp)) } + compareFindMatch(&errors, name, url, result, check.Result, check.InCache, check.CacheTTL, respHeader) + if len(result) == 0 && len(check.Result) > 0 { gch.Grep(id) + if len(check.DumpIfEmpty) > 0 { for _, q := range check.DumpIfEmpty { if out, err := ch.Query(q); err == nil { @@ -119,13 +132,16 @@ func compareTags(errors *[]string, name, url string, actual, expected []string, if findCached { cacheTTLStr = strconv.Itoa(cacheTTL) } + id := requestId(header) + if header != nil { v, actualFindCached := isFindCached(header) if actualFindCached != findCached || cacheTTLStr != v { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s: X-Cached-Find want '%s', got '%s'", name, id, url, cacheTTLStr, v)) } } + maxLen := compare.Max(len(expected), len(actual)) for i := 0; i < maxLen; i++ { if i > len(actual)-1 { @@ -141,10 +157,12 @@ func compareTags(errors *[]string, name, url string, actual, expected []string, func verifyTags(ch *Clickhouse, gch *GraphiteClickhouse, check *TagsCheck) []string { var errors []string + httpClient := http.Client{ Timeout: check.Timeout, } address := gch.URL() + for _, format := range check.Formats { var ( result []string @@ -154,6 +172,7 @@ func verifyTags(ch *Clickhouse, gch *GraphiteClickhouse, check *TagsCheck) []str ) name := "" + if check.Names { url, result, respHeader, err = client.TagsNames(&httpClient, address, format, check.Query, check.Limits, check.from, check.until) } else { @@ -165,9 +184,12 @@ func verifyTags(ch *Clickhouse, gch *GraphiteClickhouse, check *TagsCheck) []str if check.ErrorRegexp != 
"" { errors = append(errors, fmt.Sprintf("TRY[%s] %s %s: want error with '%s'", "", id, url, check.ErrorRegexp)) } + compareTags(&errors, name, url, result, check.Result, check.InCache, check.CacheTTL, respHeader) + if len(result) == 0 && len(check.Result) > 0 { gch.Grep(id) + if len(check.DumpIfEmpty) > 0 { for _, q := range check.DumpIfEmpty { if out, err := ch.Query(q); err == nil { @@ -182,11 +204,13 @@ func verifyTags(ch *Clickhouse, gch *GraphiteClickhouse, check *TagsCheck) []str if check.CacheTTL > 0 && check.ErrorRegexp == "" { // second query must be find-cached name = "cache" + if check.Names { url, result, respHeader, err = client.TagsNames(&httpClient, address, format, check.Query, check.Limits, check.from, check.until) } else { url, result, respHeader, err = client.TagsValues(&httpClient, address, format, check.Query, check.Limits, check.from, check.until) } + if err == nil { compareTags(&errors, name, url, result, check.Result, true, check.CacheTTL, respHeader) } else { @@ -212,16 +236,20 @@ func compareRender(errors *[]string, name, url string, actual, expected []client if findCached { cacheTTLStr = strconv.Itoa(cacheTTL) } + sort.Slice(actual, func(i, j int) bool { return actual[i].Name < actual[j].Name }) + id := requestId(header) + if header != nil { v, actualFindCached := isFindCached(header) if actualFindCached != findCached || cacheTTLStr != v { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s: X-Cached-Find want '%s', got '%s'", name, id, url, cacheTTLStr, v)) } } + maxLen := compare.Max(len(expected), len(actual)) for i := 0; i < maxLen; i++ { if i > len(actual)-1 { @@ -235,36 +263,47 @@ func compareRender(errors *[]string, name, url string, actual, expected []client if actual[i].PathExpression != expected[i].PathExpression { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].PathExpression, got '%s', want '%s'", name, id, url, actual[i].Name, i, actual[i].PathExpression, expected[i].PathExpression)) } + if actual[i].ConsolidationFunc != expected[i].ConsolidationFunc { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].ConsolidationFunc, got '%s', want '%s'", name, id, url, actual[i].Name, i, actual[i].ConsolidationFunc, expected[i].ConsolidationFunc)) } + if actual[i].ConsolidationFunc != expected[i].ConsolidationFunc { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].ConsolidationFunc, got '%s', want '%s'", name, id, url, actual[i].Name, i, actual[i].ConsolidationFunc, expected[i].ConsolidationFunc)) } + if actual[i].StartTime != expected[i].StartTime { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].StartTime, got %d, want %d", name, id, url, actual[i].Name, i, actual[i].StartTime, expected[i].StartTime)) } + if actual[i].StopTime != expected[i].StopTime { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].StopTime, got %d, want %d", name, id, url, actual[i].Name, i, actual[i].StopTime, expected[i].StopTime)) } + if actual[i].StepTime != expected[i].StepTime { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].StepTime, got %d, want %d", name, id, url, actual[i].Name, i, actual[i].StepTime, expected[i].StepTime)) } + if actual[i].RequestStartTime != expected[i].RequestStartTime { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].RequestStartTime, got %d, want %d", name, id, url, actual[i].Name, i, actual[i].RequestStartTime, expected[i].RequestStartTime)) } + if actual[i].RequestStopTime != 
expected[i].RequestStopTime { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].RequestStopTime, got %d, want %d", name, id, url, actual[i].Name, i, actual[i].RequestStopTime, expected[i].RequestStopTime)) } + if actual[i].HighPrecisionTimestamps != expected[i].HighPrecisionTimestamps { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].HighPrecisionTimestamps, got %v, want %v", name, id, url, actual[i].Name, i, actual[i].HighPrecisionTimestamps, expected[i].HighPrecisionTimestamps)) } + if !reflect.DeepEqual(actual[i].AppliedFunctions, expected[i].AppliedFunctions) { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].AppliedFunctions, got '%s', want '%s'", name, id, url, actual[i].Name, i, actual[i].AppliedFunctions, expected[i].AppliedFunctions)) } + if !compare.NearlyEqual(float64(actual[i].XFilesFactor), float64(expected[i].XFilesFactor)) { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].XFilesFactor, got %g, want %g", name, id, url, actual[i].Name, i, actual[i].XFilesFactor, expected[i].XFilesFactor)) } + if !compare.NearlyEqualSlice(actual[i].Values, expected[i].Values) { *errors = append(*errors, fmt.Sprintf("TRY[%s] %s %s '%s': mismatch [%d].Values, got %g, want %g", name, id, url, actual[i].Name, i, actual[i].Values, expected[i].Values)) } @@ -274,34 +313,43 @@ func compareRender(errors *[]string, name, url string, actual, expected []client func parseFilteringFunctions(strFilteringFuncs []string) ([]*carbonapi_v3_pb.FilteringFunction, error) { res := make([]*carbonapi_v3_pb.FilteringFunction, 0, len(strFilteringFuncs)) + for _, strFF := range strFilteringFuncs { strFFSplit := strings.Split(strFF, "(") if len(strFFSplit) != 2 { return nil, fmt.Errorf("could not parse filtering function: %s", strFF) } + name := strFFSplit[0] + args := strings.Split(strFFSplit[1], ",") for i := range args { args[i] = strings.TrimSpace(args[i]) args[i] = strings.Trim(args[i], ")'") } + res = append(res, &carbonapi_v3_pb.FilteringFunction{Name: name, Arguments: args}) } + return res, nil } func verifyRender(ch *Clickhouse, gch *GraphiteClickhouse, check *RenderCheck, defaultPreision time.Duration) []string { var errors []string + httpClient := http.Client{ Timeout: check.Timeout, } address := gch.URL() from := datetime.TimestampTruncate(check.from, defaultPreision) until := datetime.TimestampTruncate(check.until, defaultPreision) + for _, format := range check.Formats { var filteringFunctions []*carbonapi_v3_pb.FilteringFunction + if format == client.FormatPb_v3 { var err error + filteringFunctions, err = parseFilteringFunctions(check.FilteringFunctions) if err != nil { errors = append(errors, err.Error()) @@ -312,12 +360,16 @@ func verifyRender(ch *Clickhouse, gch *GraphiteClickhouse, check *RenderCheck, d if url, result, respHeader, err := client.Render(&httpClient, address, format, check.Targets, filteringFunctions, check.MaxDataPoints, from, until); err == nil { id := requestId(respHeader) name := "" + if check.ErrorRegexp != "" { errors = append(errors, fmt.Sprintf("TRY[%s] %s %s: want error with '%s'", "", id, url, check.ErrorRegexp)) } + compareRender(&errors, name, url, result, check.result, check.InCache, respHeader, check.CacheTTL) + if len(result) == 0 && len(check.result) > 0 { gch.Grep(id) + if len(check.DumpIfEmpty) > 0 { for _, q := range check.DumpIfEmpty { if out, err := ch.Query(q); err == nil { @@ -361,8 +413,10 @@ func debug(test *TestSchema, ch *Clickhouse, gch *GraphiteClickhouse) { 
fmt.Printf("graphite-clickhouse log: %s , clickhouse container: %s\n", gch.storeDir+"/graphite-clickhouse.log", ch.container) fmt.Println("Some queries was failed, press y for continue after debug test, k for kill graphite-clickhouse:") + in := bufio.NewScanner(os.Stdin) in.Scan() + s := in.Text() if s == "y" || s == "Y" { break diff --git a/cmd/e2e-test/clickhouse.go b/cmd/e2e-test/clickhouse.go index 4a9cbb9f3..52ddeff4c 100644 --- a/cmd/e2e-test/clickhouse.go +++ b/cmd/e2e-test/clickhouse.go @@ -40,9 +40,11 @@ func (c *Clickhouse) CheckConfig(rootDir string) error { if c.Version == "" { c.Version = "latest" } + if len(c.Dir) == 0 { return ErrNoSetDir } + if !strings.HasPrefix(c.Dir, "/") { c.Dir = rootDir + "/" + c.Dir } @@ -52,6 +54,7 @@ func (c *Clickhouse) CheckConfig(rootDir string) error { c.DockerImage = ClickhouseDefaultImage } else { splitV := strings.Split(c.Version, ".") + majorV, err := strconv.Atoi(splitV[0]) if err != nil { c.DockerImage = ClickhouseDefaultImage @@ -62,6 +65,7 @@ func (c *Clickhouse) CheckConfig(rootDir string) error { } } } + return nil } @@ -71,10 +75,12 @@ func (c *Clickhouse) Key() string { func (c *Clickhouse) Start() (string, error) { var err error + c.httpAddress, err = getFreeTCPPort("") if err != nil { return "", err } + port := strings.Split(c.httpAddress, ":")[1] c.url = "http://" + c.httpAddress @@ -93,11 +99,13 @@ func (c *Clickhouse) Start() (string, error) { "-v", c.Dir + "/init.sql:/docker-entrypoint-initdb.d/init.sql", "--network", DockerNetwork, } + if c.TLSEnabled { c.httpsAddress, err = getFreeTCPPort("") if err != nil { return "", err } + port = strings.Split(c.httpsAddress, ":")[1] c.tlsurl = "https://" + c.httpsAddress chStart = append(chStart, @@ -107,6 +115,7 @@ func (c *Clickhouse) Start() (string, error) { "-p", port+":8443", ) } + if c.TZ != "" { chStart = append(chStart, "-e", "TZ="+c.TZ) } @@ -132,6 +141,7 @@ func (c *Clickhouse) Stop(delete bool) (string, error) { if err == nil && delete { return c.Delete() } + return string(out), err } @@ -170,6 +180,7 @@ func (c *Clickhouse) Exec(sql string) (bool, string) { func (c *Clickhouse) Query(sql string) (string, error) { reader := strings.NewReader(sql) + request, err := http.NewRequest(http.MethodPost, c.URL(), reader) if err != nil { return "", err @@ -185,9 +196,11 @@ func (c *Clickhouse) Query(sql string) (string, error) { if err != nil { return "", err } + if resp.StatusCode != http.StatusOK { return "", errors.New(resp.Status + ": " + string(bytes.TrimRight(msg, "\n"))) } + return string(msg), nil } @@ -195,10 +208,12 @@ func (c *Clickhouse) Alive() bool { if len(c.container) == 0 { return false } + req, err := http.DefaultClient.Get(c.URL()) if err != nil { return false } + defer req.Body.Close() return req.StatusCode == http.StatusOK @@ -208,11 +223,13 @@ func (c *Clickhouse) CopyLog(destDir string, tail uint64) error { if len(c.container) == 0 { return nil } + dest := destDir + "/clickhouse-server.log" chArgs := []string{"cp", c.container + ":/var/log/clickhouse-server/clickhouse-server.log", dest} cmd := exec.Command(DockerBinary, chArgs...) 
+ out, err := cmd.CombinedOutput() if err != nil { return errors.New(err.Error() + ": " + string(bytes.TrimRight(out, "\n"))) @@ -230,11 +247,13 @@ func (c *Clickhouse) CopyErrLog(destDir string, tail uint64) error { if len(c.container) == 0 { return nil } + dest := destDir + "/clickhouse-server.err.log" chArgs := []string{"cp", c.container + ":/var/log/clickhouse-server/clickhouse-server.err.log", dest} cmd := exec.Command(DockerBinary, chArgs...) + out, err := cmd.CombinedOutput() if err != nil { return errors.New(err.Error() + ": " + string(bytes.TrimRight(out, "\n"))) diff --git a/cmd/e2e-test/e2etesting.go b/cmd/e2e-test/e2etesting.go index 8333ca7f0..87c7811de 100644 --- a/cmd/e2e-test/e2etesting.go +++ b/cmd/e2e-test/e2etesting.go @@ -165,6 +165,7 @@ func getFreeTCPPort(name string) (string, error) { } else if !strings.Contains(name, ":") { name = name + ":0" } + addr, err := net.ResolveTCPAddr("tcp", name) if err != nil { return name, err @@ -174,7 +175,9 @@ func getFreeTCPPort(name string) (string, error) { if err != nil { return name, err } + defer l.Close() + return l.Addr().String(), nil } @@ -183,32 +186,39 @@ func sendPlain(network, address string, metrics []InputMetric) error { return err } else { bw := bufio.NewWriter(conn) + for _, m := range metrics { conn.SetDeadline(time.Now().Add(time.Second)) + for _, point := range m.Points { if _, err = fmt.Fprintf(bw, "%s %f %d\n", m.Name, point.Value, point.time); err != nil { conn.Close() return err } + if point.Delay > 0 { if err = bw.Flush(); err != nil { conn.Close() return err } + time.Sleep(point.Delay) } } } + if err = bw.Flush(); err != nil { conn.Close() return err } + return conn.Close() } } func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickhouse *Clickhouse, testDir, clickhouseDir string, verbose, breakOnError bool, logger *zap.Logger) (testSuccess bool, verifyCount, verifyFailed int) { testSuccess = true + err := gch.Start(testDir, clickhouse.URL(), test.Proxy.URL(), clickhouse.TLSURL()) if err != nil { logger.Error("starting graphite-clickhouse", @@ -218,12 +228,15 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho zap.String("graphite-clickhouse config", gch.ConfigTpl), zap.Error(err), ) + testSuccess = false + return } for i := 100; i < 1000; i += 200 { time.Sleep(time.Duration(i) * time.Millisecond) + if gch.Alive() { break } @@ -239,11 +252,14 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho if len(check.Formats) == 0 { check.Formats = []client.FormatType{client.FormatPb_v3} } + if errs := verifyMetricsFind(clickhouse, gch, check); len(errs) > 0 { verifyFailed++ + for _, e := range errs { fmt.Fprintln(os.Stderr, e) } + logger.Error("verify metrics find", zap.String("config", test.name), zap.String("clickhouse version", clickhouse.Version), @@ -256,6 +272,7 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho zap.Int64("until", check.until), zap.String("name", check.Name+"["+strconv.Itoa(n)+"]"), ) + if breakOnError { debug(test, clickhouse, gch) } @@ -284,11 +301,14 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho if len(check.Formats) == 0 { check.Formats = []client.FormatType{client.FormatJSON} } + if errs := verifyTags(clickhouse, gch, check); len(errs) > 0 { verifyFailed++ + for _, e := range errs { fmt.Fprintln(os.Stderr, e) } + logger.Error("verify tags", zap.String("config", test.name), zap.String("clickhouse version", clickhouse.Version), @@ 
-302,6 +322,7 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho zap.Int64("until", check.until), zap.String("name", check.Name+"["+strconv.Itoa(n)+"]"), ) + if breakOnError { debug(test, clickhouse, gch) } @@ -331,6 +352,7 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho if len(check.Formats) == 0 { check.Formats = []client.FormatType{client.FormatPb_v3} } + if len(check.Optimize) > 0 { for _, table := range check.Optimize { if success, out := clickhouse.Exec("OPTIMIZE TABLE " + table + " FINAL"); !success { @@ -353,11 +375,14 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho } } } + if errs := verifyRender(clickhouse, gch, check, test.Precision); len(errs) > 0 { verifyFailed++ + for _, e := range errs { fmt.Fprintln(os.Stderr, e) } + logger.Error("verify render", zap.String("config", test.name), zap.String("clickhouse version", clickhouse.Version), @@ -371,6 +396,7 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho zap.Int64("until", check.until), zap.String("name", check.Name+"["+strconv.Itoa(n)+"]"), ) + if breakOnError { debug(test, clickhouse, gch) } @@ -390,8 +416,10 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho ) } } + if verifyFailed > 0 { testSuccess = false + logger.Error("verify", zap.String("config", test.name), zap.String("clickhouse version", clickhouse.Version), @@ -411,6 +439,7 @@ func verifyGraphiteClickhouse(test *TestSchema, gch *GraphiteClickhouse, clickho zap.String("clickhouse config", clickhouseDir), zap.Error(err), ) + testSuccess = false } @@ -429,6 +458,7 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r zap.String("sql", sql), zap.String("out", out), ) + return } } @@ -440,6 +470,7 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r zap.String("clickhouse config", clickhouse.Dir), zap.Error(err), ) + return } @@ -452,6 +483,7 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r zap.Error(err), zap.String("out", out), ) + testSuccess = false } @@ -471,8 +503,10 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r zap.String("clickhouse config", clickhouse.Dir), zap.Error(err), ) + testSuccess = false } + if testSuccess { time.Sleep(2 * time.Second) } @@ -482,6 +516,7 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r stepSuccess, vCount, vFailed := verifyGraphiteClickhouse(test, &gch, clickhouse, testDir, clickhouse.Dir, verbose, breakOnError, logger) verifyCount += vCount verifyFailed += vFailed + if !stepSuccess { testSuccess = false } @@ -498,6 +533,7 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r zap.Error(err), zap.String("out", out), ) + testSuccess = false } @@ -524,22 +560,28 @@ func testGraphiteClickhouse(test *TestSchema, clickhouse *Clickhouse, testDir, r func runTest(cfg *MainConfig, clickhouse *Clickhouse, rootDir string, now time.Time, verbose, breakOnError bool, logger *zap.Logger) (failed, total, verifyCount, verifyFailed int) { var isRunning bool + total++ + if exist, out := containerExist(CchContainerName); exist { logger.Error("carbon-clickhouse already exist", zap.String("container", CchContainerName), zap.String("out", out), ) + isRunning = true } + if isRunning { failed++ return } + success, vCount, vFailed := testGraphiteClickhouse(cfg.Test, clickhouse, cfg.Test.dir, rootDir, 
verbose, breakOnError, logger) if !success { failed++ } + verifyCount += vCount verifyFailed += vFailed @@ -556,15 +598,19 @@ func clickhouseStart(clickhouse *Clickhouse, logger *zap.Logger) bool { zap.String("out", out), ) clickhouse.Stop(true) + return false } + return true } func clickhouseStop(clickhouse *Clickhouse, logger *zap.Logger) (result bool) { result = true + if !clickhouse.Alive() { clickhouse.CopyLog(os.TempDir(), 10) + result = false } @@ -576,8 +622,10 @@ func clickhouseStop(clickhouse *Clickhouse, logger *zap.Logger) (result bool) { zap.Error(err), zap.String("out", out), ) + result = false } + return result } @@ -595,6 +643,7 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr if m.Points[i].time == 0 { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -604,18 +653,22 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.Int("point", i), zap.String("time", m.Points[i].Time), ) + return false } } } + for n, find := range cfg.Test.FindChecks { if find.Timeout == 0 { find.Timeout = 10 * time.Second } + find.from = datetime.DateParamToEpoch(find.From, tz, now, cfg.Test.Precision) if find.from == 0 && find.From != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -624,12 +677,15 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("from", find.From), zap.Int("step", n), ) + return false } + find.until = datetime.DateParamToEpoch(find.Until, tz, now, cfg.Test.Precision) if find.until == 0 && find.Until != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -638,20 +694,25 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("until", find.Until), zap.Int("step", n), ) + return false } + if find.ErrorRegexp != "" { find.errorRegexp = regexp.MustCompile(find.ErrorRegexp) } } + for n, tags := range cfg.Test.TagsChecks { if tags.Timeout == 0 { tags.Timeout = 10 * time.Second } + tags.from = datetime.DateParamToEpoch(tags.From, tz, now, cfg.Test.Precision) if tags.from == 0 && tags.From != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -660,12 +721,15 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("from", tags.From), zap.Int("find", n), ) + return false } + tags.until = datetime.DateParamToEpoch(tags.Until, tz, now, cfg.Test.Precision) if tags.until == 0 && tags.Until != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -675,20 +739,25 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.Int("tags", n), zap.Bool("names", tags.Names), ) + return false } + if tags.ErrorRegexp != "" { tags.errorRegexp = regexp.MustCompile(tags.ErrorRegexp) } } + for n, r := range cfg.Test.RenderChecks { if r.Timeout == 0 { r.Timeout = 10 * time.Second } + r.from = datetime.DateParamToEpoch(r.From, tz, now, cfg.Test.Precision) if r.from == 0 && r.From != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -697,12 +766,15 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr 
zap.String("from", r.From), zap.Int("render", n), ) + return false } + r.until = datetime.DateParamToEpoch(r.Until, tz, now, cfg.Test.Precision) if r.until == 0 && r.Until != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -711,20 +783,25 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("until", r.Until), zap.Int("render", n), ) + return false } + if r.ErrorRegexp != "" { r.errorRegexp = regexp.MustCompile(r.ErrorRegexp) } + sort.Slice(r.Result, func(i, j int) bool { return r.Result[i].Name < r.Result[j].Name }) + r.result = make([]client.Metric, len(r.Result)) for i, result := range r.Result { r.result[i].StartTime = datetime.DateParamToEpoch(result.StartTime, tz, now, cfg.Test.Precision) if r.result[i].StartTime == 0 && result.StartTime != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -734,12 +811,15 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("metric", result.Name), zap.String("start", result.StartTime), ) + return false } + r.result[i].StopTime = datetime.DateParamToEpoch(result.StopTime, tz, now, cfg.Test.Precision) if r.result[i].StopTime == 0 && result.StopTime != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -749,12 +829,15 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("metric", result.Name), zap.String("stop", result.StopTime), ) + return false } + r.result[i].RequestStartTime = datetime.DateParamToEpoch(result.RequestStartTime, tz, now, cfg.Test.Precision) if r.result[i].RequestStartTime == 0 && result.RequestStartTime != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -764,12 +847,15 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("metric", result.Name), zap.String("req_start", result.RequestStartTime), ) + return false } + r.result[i].RequestStopTime = datetime.DateParamToEpoch(result.RequestStopTime, tz, now, cfg.Test.Precision) if r.result[i].RequestStopTime == 0 && result.RequestStopTime != "" { err = ErrTimestampInvalid } + if err != nil { logger.Error("failed to read config", zap.String("config", cfg.Test.name), @@ -779,8 +865,10 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr zap.String("metric", result.Name), zap.String("req_stop", result.RequestStopTime), ) + return false } + r.result[i].StepTime = result.StepTime r.result[i].Name = result.Name r.result[i].PathExpression = result.PathExpression @@ -791,6 +879,7 @@ func initTest(cfg *MainConfig, rootDir string, now time.Time, verbose, breakOnEr r.result[i].Values = result.Values } } + return true } @@ -813,6 +902,7 @@ func loadConfig(config string, rootDir string) (*MainConfig, error) { if cfg.Test == nil { return nil, ErrNoTest } + cfg.Test.chVersions = make(map[string]bool) for i := range cfg.Test.Clickhouse { if err := cfg.Test.Clickhouse[i].CheckConfig(rootDir); err == nil { diff --git a/cmd/e2e-test/graphite-clickhouse.go b/cmd/e2e-test/graphite-clickhouse.go index 9cb8a6a59..96e1a579e 100644 --- a/cmd/e2e-test/graphite-clickhouse.go +++ b/cmd/e2e-test/graphite-clickhouse.go @@ -38,11 +38,13 @@ func (c *GraphiteClickhouse) Start(testDir, chURL, chProxyURL, chTLSURL 
string) if len(c.Binary) == 0 { c.Binary = "./graphite-clickhouse" } + if len(c.ConfigTpl) == 0 { return errors.New("graphite-clickhouse config template not set") } var err error + c.storeDir, err = os.MkdirTemp("", "graphite-clickhouse") if err != nil { return err @@ -60,11 +62,13 @@ func (c *GraphiteClickhouse) Start(testDir, chURL, chProxyURL, chTLSURL string) } name := filepath.Base(c.ConfigTpl) + tmpl, err := template.New(name).ParseFiles(path.Join(testDir, c.ConfigTpl)) if err != nil { c.Cleanup() return err } + param := struct { CLICKHOUSE_URL string CLICKHOUSE_TLS_URL string @@ -83,10 +87,12 @@ func (c *GraphiteClickhouse) Start(testDir, chURL, chProxyURL, chTLSURL string) c.configFile = path.Join(c.storeDir, "graphite-clickhouse.conf") f, err := os.OpenFile(c.configFile, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { c.Cleanup() return err } + err = tmpl.ExecuteTemplate(f, name, param) if err != nil { c.Cleanup() @@ -96,9 +102,11 @@ func (c *GraphiteClickhouse) Start(testDir, chURL, chProxyURL, chTLSURL string) c.cmd = exec.Command(c.Binary, "-config", c.configFile) c.cmd.Stdout = os.Stdout c.cmd.Stderr = os.Stderr + if c.TZ != "" { c.cmd.Env = append(c.cmd.Env, "TZ="+c.TZ) } + err = c.cmd.Start() if err != nil { c.Cleanup() @@ -112,7 +120,9 @@ func (c *GraphiteClickhouse) Alive() bool { if c.cmd == nil { return false } + _, _, _, err := client.MetricsFind(http.DefaultClient, "http://"+c.address+"/alive", client.FormatDefault, "NonExistentTarget", 0, 0) + return err == nil } @@ -124,6 +134,7 @@ func (c *GraphiteClickhouse) Stop(cleanup bool) error { if c.cmd == nil { return nil } + var err error if err = c.cmd.Process.Kill(); err == nil { if err = c.cmd.Wait(); err != nil { @@ -137,6 +148,7 @@ func (c *GraphiteClickhouse) Stop(cleanup bool) error { } } } + return err } diff --git a/cmd/e2e-test/main.go b/cmd/e2e-test/main.go index d804dcb98..83089cfcb 100644 --- a/cmd/e2e-test/main.go +++ b/cmd/e2e-test/main.go @@ -22,6 +22,7 @@ func IsDir(filename string) (bool, error) { } else if err != nil { return false, err } + return info.IsDir(), nil } @@ -51,6 +52,7 @@ func expandFilename(filename string, paths *[]string) error { if len(filename) == 0 { return nil } + isDir, err := IsDir(filename) if err == nil { if isDir { @@ -61,6 +63,7 @@ func expandFilename(filename string, paths *[]string) error { *paths = append(*paths, filename) } } + return err } @@ -75,6 +78,7 @@ func main() { cleanup := flag.Bool("cleanup", false, "delete containers if exists before start") rmi := flag.Bool("rmi", false, "delete images after test end (for low space usage))") flag.Parse() + logger, err := zap.NewProduction() if err != nil { log.Fatal(err) @@ -84,6 +88,7 @@ func main() { if DockerBinary == "" { DockerBinary = "docker" } + if *cleanup { if exist, _ := containerExist(CchContainerName); exist { if ok, out := containerRemove(CchContainerName); !ok { @@ -93,6 +98,7 @@ func main() { ) } } + if exist, _ := containerExist(ClickhouseContainerName); exist { if ok, out := containerRemove(ClickhouseContainerName); !ok { logger.Fatal("failed to cleanup", @@ -101,12 +107,14 @@ func main() { ) } } + if *config == "" { return } } var allConfigs []string + err = expandFilename(*config, &allConfigs) if err != nil { logger.Fatal( @@ -114,19 +122,23 @@ func main() { zap.Error(err), ) } + if len(allConfigs) == 0 { logger.Fatal("config should be non-null") } chVersions := make(map[string]Clickhouse) configs := make([]*MainConfig, 0, len(allConfigs)) + for _, config := range allConfigs { cfg, err := loadConfig(config, 
rootDir) if err == nil { configs = append(configs, cfg) + for _, ch := range cfg.Test.Clickhouse { chVersions[ch.Key()] = ch } + now := time.Now() if !initTest(cfg, rootDir, now, *verbose, *breakOnError, logger) { os.Exit(1) @@ -159,6 +171,7 @@ func main() { for chVersion := range chVersions { ch := chVersions[chVersion] + if exist, out := containerExist(ClickhouseContainerName); exist { logger.Error("clickhouse already exist", zap.String("container", ClickhouseContainerName), @@ -173,20 +186,25 @@ func main() { zap.String("clickhouse config", ch.Dir), zap.String("tz", ch.TZ), ) + if clickhouseStart(&ch, logger) { time.Sleep(100 * time.Millisecond) + for i := 200; i < 3000; i += 200 { if ch.Alive() { break } + time.Sleep(time.Duration(i) * time.Millisecond) } + if !ch.Alive() { logger.Error("starting clickhouse", zap.Any("clickhouse version", ch.Version), zap.String("clickhouse config", ch.Dir), zap.String("error", "clickhouse is down"), ) + failed++ total++ verifyCount++ @@ -210,6 +228,7 @@ func main() { } } } + if !clickhouseStop(&ch, logger) { failed++ verifyFailed++ @@ -220,6 +239,7 @@ func main() { verifyCount++ verifyFailed++ } + if *rmi { if success, out := imageDelete(ch.DockerImage, ch.Version); !success { logger.Error("docker remove image", @@ -229,6 +249,7 @@ func main() { ) } } + if *abortOnError && failed > 0 { break } diff --git a/cmd/e2e-test/rproxy.go b/cmd/e2e-test/rproxy.go index 99290bfd4..53e980f75 100644 --- a/cmd/e2e-test/rproxy.go +++ b/cmd/e2e-test/rproxy.go @@ -35,7 +35,9 @@ func (d *AtomicDuration) UnmarshalText(b []byte) error { if err != nil { return err } + d.Store(val) + return nil } @@ -66,6 +68,7 @@ func (p *HttpReverseProxy) Start(remoteURL string) (err error) { p.srv = httptest.NewUnstartedServer(p) p.wg.Add(1) + go func() { defer p.wg.Done() @@ -79,6 +82,7 @@ func (p *HttpReverseProxy) Stop() { if p.srv == nil { return } + p.srv.CloseClientConnections() p.srv.Close() p.wg.Wait() diff --git a/cmd/graphite-clickhouse-client/main.go b/cmd/graphite-clickhouse-client/main.go index 3d597126a..fedaf5ffc 100644 --- a/cmd/graphite-clickhouse-client/main.go +++ b/cmd/graphite-clickhouse-client/main.go @@ -44,6 +44,7 @@ func main() { timeout := flag.Duration("timeout", time.Minute, "request timeout") var targets StringSlice + flag.Var(&targets, "target", "Target for /render") format := client.FormatDefault @@ -58,6 +59,7 @@ func main() { fmt.Printf("can't get timezone: %s\n", err.Error()) os.Exit(1) } + now := time.Now() from := datetime.DateParamToEpoch(*fromStr, tz, now, 0) @@ -65,15 +67,19 @@ func main() { fmt.Printf("invalid from: %s\n", *fromStr) os.Exit(1) } + var until int64 + if *untilStr == "" && len(targets) > 0 { *untilStr = "now" } + until = datetime.DateParamToEpoch(*untilStr, tz, now, 0) if until == 0 && len(targets) > 0 { fmt.Printf("invalid until: %s\n", *untilStr) os.Exit(1) } + maxDataPoints, err := strconv.ParseInt(*maxDataPointsStr, 10, 64) if err != nil { fmt.Printf("invalid maxDataPoints: %s\n", *maxDataPointsStr) @@ -94,26 +100,32 @@ func main() { if respHeader != nil { fmt.Printf("Responce header: %+v\n", respHeader) } + fmt.Print("'") fmt.Print(queryRaw) fmt.Print("' = ") + if err == nil { if len(r) > 0 { fmt.Println("[") + for i, m := range r { fmt.Printf(" { Path: '%s', IsLeaf: %v }", m.Path, m.IsLeaf) + if i < len(r)-1 { fmt.Println(",") } else { fmt.Println("") } } + fmt.Println("]") } else { fmt.Println("[]") } } else { ec = 1 + fmt.Printf("'%s'\n", strings.TrimRight(err.Error(), "\n")) } } @@ -123,30 +135,37 @@ func main() { if 
formatTags == client.FormatDefault { formatTags = client.FormatJSON } + queryRaw, r, respHeader, err := client.TagsValues(&httpClient, *address, formatTags, *tagsValues, *limit, from, until) if respHeader != nil { fmt.Printf("Responce header: %+v\n", respHeader) } + fmt.Print("'") fmt.Print(queryRaw) fmt.Print("' = ") + if err == nil { if len(r) > 0 { fmt.Println("[") + for i, v := range r { fmt.Printf(" { Value: '%s' }", v) + if i < len(r)-1 { fmt.Println(",") } else { fmt.Println("") } } + fmt.Println("]") } else { fmt.Println("[]") } } else { ec = 1 + fmt.Printf("'%s'\n", strings.TrimRight(err.Error(), "\n")) } } @@ -156,30 +175,37 @@ func main() { if formatTags == client.FormatDefault { formatTags = client.FormatJSON } + queryRaw, r, respHeader, err := client.TagsNames(&httpClient, *address, formatTags, *tagsNames, *limit, from, until) if respHeader != nil { fmt.Printf("Responce header: %+v\n", respHeader) } + fmt.Print("'") fmt.Print(queryRaw) fmt.Print("' = ") + if err == nil { if len(r) > 0 { fmt.Println("[") + for i, v := range r { fmt.Printf(" { Tag: '%s' }", v) + if i < len(r)-1 { fmt.Println(",") } else { fmt.Println("") } } + fmt.Println("]") } else { fmt.Println("[]") } } else { ec = 1 + fmt.Printf("'%s'\n", strings.TrimRight(err.Error(), "\n")) } } @@ -189,34 +215,41 @@ func main() { if formatRender == client.FormatDefault { formatRender = client.FormatPb_v3 } + queryRaw, r, respHeader, err := client.Render(&httpClient, *address, formatRender, targets, []*carbonapi_v3_pb.FilteringFunction{}, maxDataPoints, from, until) if respHeader != nil { fmt.Printf("Responce header: %+v\n", respHeader) } + fmt.Print("'") fmt.Print(queryRaw) fmt.Print("' = ") + if err == nil { if len(r) > 0 { fmt.Println("[") + for i, m := range r { fmt.Println(" {") fmt.Printf(" Name: '%s', PathExpression: '%v',\n", m.Name, m.PathExpression) fmt.Printf(" ConsolidationFunc: %s, XFilesFactor: %f, AppliedFunctions: %s,\n", m.ConsolidationFunc, m.XFilesFactor, m.AppliedFunctions) fmt.Printf(" Start: %d, Stop: %d, Step: %d, RequestStart: %d, RequestStop: %d,\n", m.StartTime, m.StopTime, m.StepTime, m.RequestStartTime, m.RequestStopTime) fmt.Printf(" Values: %+v\n", m.Values) + if i == len(r) { fmt.Println(" }") } else { fmt.Println(" },") } } + fmt.Println("]") } else { fmt.Println("[]") } } else { ec = 1 + fmt.Printf("'%s'\n", strings.TrimRight(err.Error(), "\n")) } } diff --git a/config/config.go b/config/config.go index 622b86680..0b2a2df44 100644 --- a/config/config.go +++ b/config/config.go @@ -50,6 +50,7 @@ func (a *SDType) Set(value string) error { default: return fmt.Errorf("invalid sd type %q", value) } + return nil } @@ -185,10 +186,12 @@ func binarySearchQueryParamLe(a []QueryParam, duration time.Duration, start, end if a[start].Duration > duration { return -1 } + return start } var result int + mid := start + length/2 if a[mid].Duration > duration { result = binarySearchQueryParamLe(a, duration, start, mid) @@ -268,6 +271,7 @@ func clickhouseURLValidate(chURL string) (*url.URL, error) { } else if strings.Contains(u.RawQuery, " ") { return nil, fmt.Errorf("space not allowed in url %q", chURL) } + return u, nil } @@ -437,6 +441,7 @@ func New() *Config { // Compile checks if IndexReverseRule are valid in the IndexReverses and compiles regexps if set func (ir IndexReverses) Compile() error { var err error + for i, n := range ir { if len(n.RegexStr) > 0 { if n.Regex, err = regexp.Compile(n.RegexStr); err != nil { @@ -445,16 +450,19 @@ func (ir IndexReverses) Compile() error { } else if len(n.Prefix) == 
0 && len(n.Suffix) == 0 { return fmt.Errorf("empty index-use-reverses[%d] rule", i) } + if _, ok := IndexReverse[n.Reverse]; !ok { return fmt.Errorf("%s is not a valid value for index-reverses.reverse", n.Reverse) } } + return nil } func newLoggingConfig() zapwriter.Config { cfg := zapwriter.NewConfig() cfg.File = "/var/log/graphite-clickhouse/graphite-clickhouse.log" + return cfg } @@ -486,6 +494,7 @@ func DefaultConfig() (*Config, error) { &IndexReverseRule{Prefix: "prefix", Reverse: "direct"}, &IndexReverseRule{RegexStr: "regex", Reverse: "reversed"}, } + err := cfg.ClickHouse.IndexReverses.Compile() if err != nil { return nil, err @@ -498,6 +507,7 @@ func DefaultConfig() (*Config, error) { // PrintDefaultConfig prints the default config with some additions to be useful func PrintDefaultConfig() error { buf := new(bytes.Buffer) + cfg, err := DefaultConfig() if err != nil { return err @@ -512,12 +522,14 @@ func PrintDefaultConfig() error { out := strings.Replace(buf.String(), "\n", "", 1) fmt.Print(out) + return nil } // ReadConfig reads the content of the file with given name and process it to the *Config func ReadConfig(filename string, exactConfig bool) (*Config, []zap.Field, error) { var err error + var body []byte if filename != "" { body, err = os.ReadFile(filename) @@ -534,10 +546,12 @@ func Unmarshal(body []byte, exactConfig bool) (cfg *Config, warns []zap.Field, e deprecations := make(map[string]error) cfg = New() + if len(body) != 0 { // TODO: remove in v0.14 if bytes.Index(body, []byte("\n[logging]\n")) != -1 || bytes.Index(body, []byte("[logging]")) == 0 { deprecations["logging"] = fmt.Errorf("single [logging] value became multivalue [[logging]]; please adjust your config") + body = bytes.ReplaceAll(body, []byte("\n[logging]\n"), []byte("\n[[logging]]\n")) if bytes.Index(body, []byte("[logging]")) == 0 { body = bytes.Replace(body, []byte("[logging]"), []byte("[[logging]]"), 1) @@ -560,6 +574,7 @@ func Unmarshal(body []byte, exactConfig bool) (cfg *Config, warns []zap.Field, e if cfg.ClickHouse.RenderConcurrentQueries > cfg.ClickHouse.RenderMaxQueries && cfg.ClickHouse.RenderMaxQueries > 0 { cfg.ClickHouse.RenderConcurrentQueries = 0 } + chURL, err := clickhouseURLValidate(cfg.ClickHouse.URL) if err != nil { return nil, nil, err @@ -570,13 +585,16 @@ func Unmarshal(body []byte, exactConfig bool) (cfg *Config, warns []zap.Field, e if err != nil { return nil, nil, err } + if chURL.Scheme == "https" { cfg.ClickHouse.TLSConfig = tlsConfig } else { warnings = append(warnings, "TLS configuration is ignored because the scheme is not HTTPS") } + warns = append(warns, zap.Strings("tls-config", warnings)) } + for i := range cfg.ClickHouse.QueryParams { if cfg.ClickHouse.QueryParams[i].ConcurrentQueries > cfg.ClickHouse.QueryParams[i].MaxQueries && cfg.ClickHouse.QueryParams[i].MaxQueries > 0 { cfg.ClickHouse.QueryParams[i].ConcurrentQueries = 0 @@ -585,13 +603,16 @@ func Unmarshal(body []byte, exactConfig bool) (cfg *Config, warns []zap.Field, e if cfg.ClickHouse.QueryParams[i].Duration == 0 { return nil, nil, fmt.Errorf("query duration param not set for: %+v", cfg.ClickHouse.QueryParams[i]) } + if cfg.ClickHouse.QueryParams[i].DataTimeout == 0 { cfg.ClickHouse.QueryParams[i].DataTimeout = cfg.ClickHouse.DataTimeout } + if cfg.ClickHouse.QueryParams[i].URL == "" { // reuse default url cfg.ClickHouse.QueryParams[i].URL = cfg.ClickHouse.URL } + if _, err = clickhouseURLValidate(cfg.ClickHouse.QueryParams[i].URL); err != nil { return nil, nil, err } @@ -656,6 +677,7 @@ func Unmarshal(body
[]byte, exactConfig bool) (cfg *Config, warns []zap.Field, e if err != nil { return nil, nil, err } + cfg.Common.Blacklist[i] = r } } @@ -672,24 +694,30 @@ func Unmarshal(body []byte, exactConfig bool) (cfg *Config, warns []zap.Field, e if err != nil { return nil, nil, err } + _, port, err := net.SplitHostPort(cfg.Common.Listen) if err != nil { return nil, nil, err } + rawURL = fmt.Sprintf("http://%s:%s/", hostname, port) } + cfg.Prometheus.ExternalURL, err = url.Parse(rawURL) if err != nil { return nil, nil, err } + cfg.Prometheus.ExternalURL.Path = strings.TrimRight(cfg.Prometheus.ExternalURL.Path, "/") checkDeprecations(cfg, deprecations) + if len(deprecations) != 0 { deprecationList := make([]error, 0, len(deprecations)) for name, message := range deprecations { deprecationList = append(deprecationList, errors.Wrap(message, name)) } + warns = append(warns, zap.Errors("config deprecations", deprecationList)) } @@ -731,6 +759,7 @@ func Unmarshal(body []byte, exactConfig bool) (cfg *Config, warns []zap.Field, e metricsEnabled, "render", duration.String(cfg.ClickHouse.QueryParams[i].Duration), ) } + for u, q := range cfg.ClickHouse.UserLimits { q.Limiter = limiter.NewALimiter( q.MaxQueries, q.ConcurrentQueries, q.AdaptiveQueries, metricsEnabled, u, "all", @@ -747,34 +776,44 @@ func (c *Config) NeedLoadAvgColect() bool { if c.Common.DegragedMultiply <= 0 { c.Common.DegragedMultiply = 4.0 } + if c.Common.DegragedLoad <= 0 { c.Common.DegragedLoad = 1.0 } + if c.Common.BaseWeight <= 0 { c.Common.BaseWeight = 100 } + if c.Common.SDNamespace == "" { c.Common.SDNamespace = "graphite" } + if c.Common.SDExpire < 24*time.Hour { c.Common.SDExpire = 24 * time.Hour } + return true } + if c.ClickHouse.RenderAdaptiveQueries > 0 { return true } + if c.ClickHouse.FindAdaptiveQueries > 0 { return true } + if c.ClickHouse.TagsAdaptiveQueries > 0 { return true } + for _, u := range c.ClickHouse.UserLimits { if u.AdaptiveQueries > 0 { return true } } + return false } @@ -794,6 +833,7 @@ func (c *Config) ProcessDataTables() (err error) { if err != nil { return err } + c.DataTable[i].TargetMatchAnyRegexp = r } @@ -802,17 +842,21 @@ func (c *Config) ProcessDataTables() (err error) { if err != nil { return err } + c.DataTable[i].TargetMatchAllRegexp = r } rdp := c.DataTable[i].RollupDefaultPrecision rdf := c.DataTable[i].RollupDefaultFunction + if c.DataTable[i].RollupConf == "auto" || c.DataTable[i].RollupConf == "" { table := c.DataTable[i].Table interval := time.Minute + if c.DataTable[i].RollupAutoTable != "" { table = c.DataTable[i].RollupAutoTable } + if c.DataTable[i].RollupAutoInterval != nil { interval = *c.DataTable[i].RollupAutoInterval } @@ -843,10 +887,12 @@ func (c *Config) ProcessDataTables() (err error) { if !knownDataTableContext[ctx] { return fmt.Errorf("unknown context %#v", ctx) } + c.DataTable[i].ContextMap[ctx] = true } } } + return nil } @@ -860,22 +906,28 @@ func CreateCache(cacheName string, cacheConfig *CacheConfig) (cache.BytesCache, if cacheConfig.DefaultTimeoutSec <= 0 && cacheConfig.ShortTimeoutSec <= 0 && cacheConfig.FindTimeoutSec <= 0 { return nil, nil } + if cacheConfig.DefaultTimeoutSec < cacheConfig.ShortTimeoutSec { cacheConfig.DefaultTimeoutSec = cacheConfig.ShortTimeoutSec } + if cacheConfig.ShortTimeoutSec < 0 || cacheConfig.DefaultTimeoutSec == cacheConfig.ShortTimeoutSec { // broken value, or short timeout not needed because it equals the default cacheConfig.ShortTimeoutSec = 0 } + if cacheConfig.DefaultTimeoutSec < cacheConfig.ShortTimeoutSec { cacheConfig.DefaultTimeoutSec =
cacheConfig.ShortTimeoutSec } + if cacheConfig.ShortDuration == 0 { cacheConfig.ShortDuration = 3 * time.Hour } + if cacheConfig.ShortUntilOffsetSec == 0 { cacheConfig.ShortUntilOffsetSec = 120 } + cacheConfig.DefaultTimeoutStr = strconv.Itoa(int(cacheConfig.DefaultTimeoutSec)) cacheConfig.ShortTimeoutStr = strconv.Itoa(int(cacheConfig.ShortTimeoutSec)) @@ -884,6 +936,7 @@ func CreateCache(cacheName string, cacheConfig *CacheConfig) (cache.BytesCache, if len(cacheConfig.MemcachedServers) == 0 { return nil, fmt.Errorf(cacheName + ": memcache cache requested but no memcache servers provided") } + return cache.NewMemcached("gch-"+cacheName, cacheConfig.MemcachedServers...), nil case "mem": return cache.NewExpireCache(uint64(cacheConfig.Size * 1024 * 1024)), nil @@ -906,9 +959,11 @@ func (c *Config) setupGraphiteMetrics() bool { if c.Metrics.MetricInterval == 0 { c.Metrics.MetricInterval = 60 * time.Second } + if c.Metrics.MetricTimeout == 0 { c.Metrics.MetricTimeout = time.Second } + hostname, _ := os.Hostname() fqdn := strings.ReplaceAll(hostname, ".", "_") hostname = strings.Split(hostname, ".")[0] @@ -922,6 +977,7 @@ func (c *Config) setupGraphiteMetrics() bool { if c.Metrics.Statsd != "" && c.Metrics.ExtendedStat { var err error + config := &statsd.ClientConfig{ Address: c.Metrics.Statsd, Prefix: c.Metrics.MetricPrefix, @@ -929,9 +985,11 @@ func (c *Config) setupGraphiteMetrics() bool { UseBuffered: true, FlushInterval: 300 * time.Millisecond, } + metrics.Gstatsd, err = statsd.NewClientWithConfig(config) if err != nil { metrics.Gstatsd = metrics.NullSender{} + fmt.Fprintf(os.Stderr, "statsd init: %v\n", err) } } @@ -941,12 +999,15 @@ func (c *Config) setupGraphiteMetrics() bool { metrics.AutocompleteQMetric = metrics.InitQueryMetrics("tags", &c.Metrics) metrics.FindQMetric = metrics.InitQueryMetrics("find", &c.Metrics) + for i := 0; i < len(c.DataTable); i++ { c.DataTable[i].QueryMetrics = metrics.InitQueryMetrics(c.DataTable[i].Table, &c.Metrics) } + if c.ClickHouse.IndexTable != "" { metrics.InitQueryMetrics(c.ClickHouse.IndexTable, &c.Metrics) } + if c.ClickHouse.TaggedTable != "" { metrics.InitQueryMetrics(c.ClickHouse.TaggedTable, &c.Metrics) } @@ -960,6 +1021,7 @@ func (c *Config) GetUserFindLimiter(username string) limiter.ServerLimiter { return q.Limiter } } + return c.ClickHouse.FindLimiter } @@ -969,6 +1031,7 @@ func (c *Config) GetUserTagsLimiter(username string) limiter.ServerLimiter { return q.Limiter } } + return c.ClickHouse.TagsLimiter } diff --git a/config/config_test.go b/config/config_test.go index b77688c7d..c67ec336a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -25,10 +25,12 @@ func TestProcessDataTables(t *testing.T) { table DataTable tableLegacy string } + type out struct { tables []DataTable err error } + type ctx map[string]bool regexpCompileWrapper := func(re string) *regexp.Regexp { @@ -199,14 +201,17 @@ func TestProcessDataTables(t *testing.T) { if test.in.table.Table != "" { cfg.DataTable = []DataTable{test.in.table} } + if test.in.tableLegacy != "" { cfg.ClickHouse.DataTableLegacy = test.in.tableLegacy } + err := cfg.ProcessDataTables() if err != nil { assert.Equal(t, test.out.err, err) return } + assert.Equal(t, len(test.out.tables), len(cfg.DataTable)) // it's difficult to check rollup.Rollup because of the Rules.updated field // We explicitly don't check it here test.out.tables[i].Rollup = nil cfg.DataTable[i].Rollup = nil } + assert.Equal(t, test.out.tables, cfg.DataTable) }) }
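
Every hunk in this patch applies the same mechanical rule: the wsl whitespace linter this change enables requires, roughly, a blank line after a cuddled multi-line block (if, for, switch, declaration group) before the next unrelated statement, and before a trailing return in longer function bodies. A minimal sketch of the convention, using a hypothetical function that is not part of this patch (assumes the strconv import):

func parseLimit(s string) (int, error) {
	n, err := strconv.Atoi(s)
	if err != nil {
		return 0, err
	}

	// wsl: the if block above must be separated from the next statement
	if n < 0 {
		n = 0
	}

	// wsl: a blank line before the final return in a longer body
	return n, nil
}

This is why the patch consists almost entirely of added blank lines rather than behavioral changes.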
@@ -322,6 +328,7 @@ sample-thereafter = 12 ) config, _, err := Unmarshal(body, false) expected := New() + require.NoError(t, err) // Common @@ -558,6 +565,7 @@ sample-thereafter = 12 ) config, _, err := Unmarshal(body, false) expected := New() + require.NoError(t, err) assert.NotNil(t, metrics.Graphite) metrics.Graphite = nil @@ -673,17 +681,21 @@ sample-thereafter = 12 expected.ClickHouse.IndexReverses[0] = &IndexReverseRule{"suf", "pref", "", nil, "direct"} r, _ = regexp.Compile("^reg$") expected.ClickHouse.IndexReverses[1] = &IndexReverseRule{"", "", "^reg$", r, "reversed"} + for i := range config.ClickHouse.QueryParams { if _, ok := config.ClickHouse.QueryParams[i].Limiter.(*limiter.WLimiter); ok && config.ClickHouse.QueryParams[i].MaxQueries > 0 && config.ClickHouse.QueryParams[i].ConcurrentQueries > 0 { config.ClickHouse.QueryParams[i].Limiter = nil } } + if _, ok := config.ClickHouse.FindLimiter.(*limiter.WLimiter); ok && config.ClickHouse.FindMaxQueries > 0 && config.ClickHouse.FindConcurrentQueries > 0 { config.ClickHouse.FindLimiter = nil } + if _, ok := config.ClickHouse.TagsLimiter.(*limiter.WLimiter); ok && config.ClickHouse.TagsMaxQueries > 0 && config.ClickHouse.TagsConcurrentQueries > 0 { config.ClickHouse.TagsLimiter = nil } + for u, q := range config.ClickHouse.UserLimits { if _, ok := q.Limiter.(*limiter.WLimiter); ok && q.MaxQueries > 0 && q.ConcurrentQueries > 0 { q.Limiter = nil @@ -874,6 +886,7 @@ sample-thereafter = 12 ) config, _, err := Unmarshal(body, false) expected := New() + require.NoError(t, err) assert.NotNil(t, metrics.Graphite) metrics.Graphite = nil @@ -994,17 +1007,21 @@ sample-thereafter = 12 expected.ClickHouse.IndexReverses[0] = &IndexReverseRule{"suf", "pref", "", nil, "direct"} r, _ = regexp.Compile("^reg$") expected.ClickHouse.IndexReverses[1] = &IndexReverseRule{"", "", "^reg$", r, "reversed"} + for i := range config.ClickHouse.QueryParams { if _, ok := config.ClickHouse.QueryParams[i].Limiter.(*limiter.ALimiter); ok { config.ClickHouse.QueryParams[i].Limiter = nil } } + if _, ok := config.ClickHouse.FindLimiter.(*limiter.WLimiter); ok { config.ClickHouse.FindLimiter = nil } + if _, ok := config.ClickHouse.TagsLimiter.(*limiter.ALimiter); ok { config.ClickHouse.TagsLimiter = nil } + for u, q := range config.ClickHouse.UserLimits { if _, ok := q.Limiter.(*limiter.ALimiter); ok { q.Limiter = nil @@ -1260,6 +1277,7 @@ func TestGetQueryParam(t *testing.T) { for i := range config.ClickHouse.QueryParams { config.ClickHouse.QueryParams[i].Limiter = nil } + for i, duration := range tt.durations { got := GetQueryParam(config.ClickHouse.QueryParams, duration) if config.ClickHouse.QueryParams[got] != tt.wantParams[i] { diff --git a/config/json.go b/config/json.go index 34a3b1a06..9343d023c 100644 --- a/config/json.go +++ b/config/json.go @@ -18,6 +18,7 @@ func (c *ClickHouse) MarshalJSON() ([]byte, error) { if _, isSet := u.User.Password(); isSet { u.User = url.UserPassword(u.User.Username(), "xxxxxx") } + a.URL = u.String() } diff --git a/find/find.go b/find/find.go index 91260db8f..6e874acfc 100644 --- a/find/find.go +++ b/find/find.go @@ -87,6 +87,7 @@ func (f *Find) WritePickle(w io.Writer) error { } p.Stop() + return nil } @@ -185,6 +186,7 @@ func (f *Find) WriteProtobufV3(w io.Writer) error { response, }, } + body, err := proto.Marshal(&multiGlobResponse) if err != nil { return err @@ -203,6 +205,7 @@ func (f *Find) WriteJSON(w io.Writer) error { } var numResults int + var sb stringutils.Builder sb.WriteString("[") diff --git a/find/handler.go 
b/find/handler.go index ea8f55d76..5738d3169 100644 --- a/find/handler.go +++ b/find/handler.go @@ -54,19 +54,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer func() { if rec := recover(); rec != nil { status = http.StatusInternalServerError + logger.Error("panic during eval", zap.String("requestID", scope.String(r.Context(), "requestID")), zap.Any("reason", rec), zap.Stack("stack"), ) + answer := fmt.Sprintf("%v\nStack trace: %v", rec, zap.Stack("").String) http.Error(w, answer, status) } + d := time.Since(start) dMS := d.Milliseconds() logs.AccessLog(accessLogger, h.config, r, status, d, queueDuration, findCache, queueFail) limiter.SendDuration(queueDuration.Milliseconds()) metrics.SendFindMetrics(metrics.FindRequestMetric, status, dMS, 0, h.config.Metrics.ExtendedStat, metricsCount) + if stat.ChReadRows > 0 && stat.ChReadBytes > 0 { errored := status != http.StatusOK && status != http.StatusNotFound metrics.SendQueryRead(metrics.FindQMetric, 0, 0, dMS, metricsCount, stat.ReadBytes, stat.ChReadRows, stat.ChReadBytes, errored) @@ -81,6 +85,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusBadRequest http.Error(w, fmt.Sprintf("Failed to read request body: %v", err), status) + return } @@ -88,12 +93,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := pv3Request.Unmarshal(body); err != nil { status = http.StatusBadRequest http.Error(w, fmt.Sprintf("Failed to unmarshal request: %v", err), status) + return } if len(pv3Request.Metrics) != 1 { status = http.StatusBadRequest http.Error(w, "Multiple metrics in the same find request are not supported yet", status) + return } @@ -108,15 +115,20 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "protobuf": default: logger.Error("unsupported formatter") + status = http.StatusBadRequest http.Error(w, "Failed to parse request: unsupported formatter", status) + return } + query = r.FormValue("query") } + if len(query) == 0 { status = http.StatusBadRequest http.Error(w, "Query not set", status) + return } @@ -126,12 +138,15 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if useCache { ts := utils.TimestampTruncate(time.Now().Unix(), time.Duration(h.config.Common.FindCacheConfig.FindTimeoutSec)*time.Second) key = "1970-02-12;query=" + query + ";ts=" + strconv.FormatInt(ts, 10) + body, err := h.config.Common.FindCache.Get(key) if err == nil { if metrics.FinderCacheMetrics != nil { metrics.FinderCacheMetrics.CacheHits.Add(1) } + findCache = true + w.Header().Set("X-Cached-Find", strconv.Itoa(int(h.config.Common.FindCacheConfig.FindTimeoutSec))) f := NewCached(h.config, body) metricsCount = int64(len(f.result.List())) @@ -140,6 +155,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { zap.Int32("ttl", h.config.Common.FindCacheConfig.FindTimeoutSec)) h.Reply(w, r, f) + return } } @@ -149,24 +165,31 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx context.Context cancel context.CancelFunc ) + if limiter.Enabled() { ctx, cancel = context.WithTimeout(context.Background(), h.config.ClickHouse.IndexTimeout) defer cancel() err := limiter.Enter(ctx, "find") queueDuration = time.Since(start) + if err != nil { status = http.StatusServiceUnavailable queueFail = true + logger.Error(err.Error()) http.Error(w, err.Error(), status) + return } + queueDuration = time.Since(start) entered = true + defer func() { if entered {
limiter.Leave(ctx, "find") + entered = false } }() @@ -177,6 +200,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if entered { // release early as possible limiter.Leave(ctx, "find") + entered = false } @@ -190,6 +214,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if metrics.FinderCacheMetrics != nil { metrics.FinderCacheMetrics.CacheMisses.Add(1) } + h.config.Common.FindCache.Set(key, body, h.config.Common.FindCacheConfig.FindTimeoutSec) logger.Info("finder", zap.String("set_cache", key), zap.Int("metrics", len(f.result.List())), zap.Bool("find_cached", false), @@ -203,6 +228,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *Handler) Reply(w http.ResponseWriter, r *http.Request, f *Find) (status int) { status = http.StatusOK + switch r.FormValue("format") { case "json": f.WriteJSON(w) @@ -218,5 +244,6 @@ func (h *Handler) Reply(w http.ResponseWriter, r *http.Request, f *Find) (status status = http.StatusInternalServerError http.Error(w, "Failed to parse request: unhandled formatter", status) } + return } diff --git a/find/handler_json_test.go b/find/handler_json_test.go index 49a33e937..2b2c12395 100644 --- a/find/handler_json_test.go +++ b/find/handler_json_test.go @@ -51,6 +51,7 @@ func testResponce(t *testing.T, step int, h *Handler, tt *testStruct, wantCached func TestHandler_ServeValuesJSON(t *testing.T) { metrics.DisableMetrics() + srv := clickhouse.NewTestServer() defer srv.Close() @@ -87,6 +88,7 @@ func TestHandler_ServeValuesJSON(t *testing.T) { } var queries uint64 + for i, tt := range tests { t.Run(tt.request.URL.RawQuery+"#"+strconv.Itoa(i), func(t *testing.T) { for i := 0; i < 2; i++ { @@ -112,7 +114,9 @@ func TestHandler_ServeValuesCachedJSON(t *testing.T) { Size: 8192, FindTimeoutSec: 1, } + var err error + cfg.Common.FindCache, err = config.CreateCache("metric-finder", &cfg.Common.FindCacheConfig) if err != nil { t.Fatalf("Failed to create find cache: %v", err) @@ -148,6 +152,7 @@ func TestHandler_ServeValuesCachedJSON(t *testing.T) { } var queries uint64 + for i, tt := range tests { t.Run(tt.request.URL.RawQuery+"#"+strconv.Itoa(i), func(t *testing.T) { testResponce(t, 0, h, &tt, "") diff --git a/finder/base.go b/finder/base.go index 327d0316c..3e4d35ac8 100644 --- a/finder/base.go +++ b/finder/base.go @@ -36,6 +36,7 @@ func (b *BaseFinder) where(query string) *where.Where { w := where.New() w.And(where.Eq("Level", level)) w.And(where.TreeGlob("Path", query)) + return w } @@ -51,6 +52,7 @@ func (b *BaseFinder) Execute(ctx context.Context, config *config.Config, query s ) stat.Table = b.table stat.ReadBytes = int64(len(b.body)) + return } @@ -62,15 +64,18 @@ func (b *BaseFinder) makeList(onlySeries bool) [][]byte { rows := bytes.Split(b.body, []byte{'\n'}) skip := 0 + for i := 0; i < len(rows); i++ { if len(rows[i]) == 0 { skip++ continue } + if onlySeries && rows[i][len(rows[i])-1] == '.' 
{ skip++ continue } + if skip > 0 { rows[i-skip] = rows[i] } diff --git a/finder/date.go b/finder/date.go index 8b38d9e66..9993be5f8 100644 --- a/finder/date.go +++ b/finder/date.go @@ -59,6 +59,7 @@ func (b *DateFinder) Execute(ctx context.Context, config *config.Config, query s nil, ) } + stat.ReadBytes = int64(len(b.body)) stat.Table = b.table diff --git a/finder/date_reverse_test.go b/finder/date_reverse_test.go index 38e9b40b1..5c6092058 100644 --- a/finder/date_reverse_test.go +++ b/finder/date_reverse_test.go @@ -37,10 +37,12 @@ func TestDateFinderV3_whereFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name+" "+time.Unix(tt.from, 0).Format(time.RFC3339), func(t *testing.T) { f := NewDateFinderV3("http://localhost:8123/", "graphite_index", clickhouse.Options{}).(*DateFinderV3) + got, gotDate := f.whereFilter(tt.query, tt.from, tt.until) if got.String() != tt.want { t.Errorf("DateFinderV3.whereFilter()[0] = %v, want %v", got, tt.want) } + if gotDate.String() != tt.wantDate { t.Errorf("DateFinderV3.whereFilter()[1] = %v, want %v", gotDate, tt.wantDate) } diff --git a/finder/finder.go b/finder/finder.go index 9b24103d5..fb0d4b9ac 100644 --- a/finder/finder.go +++ b/finder/finder.go @@ -110,10 +110,12 @@ func newPlainFinder(ctx context.Context, config *config.Config, query string, fr func Find(config *config.Config, ctx context.Context, query string, from int64, until int64, stat *FinderStat) (Result, error) { fnd := newPlainFinder(ctx, config, query, from, until, config.Common.FindCache != nil) + err := fnd.Execute(ctx, config, query, from, until, stat) if err != nil { return nil, err } + return fnd.(Result), nil } @@ -138,10 +140,12 @@ func FindTagged(ctx context.Context, config *config.Config, terms []TaggedTerm, plain := makePlainFromTagged(terms) if plain != nil { plain.wrappedPlain = newPlainFinder(ctx, config, plain.Target(), from, until, useCache) + err := plain.Execute(ctx, config, plain.Target(), from, until, stat) if err != nil { return nil, err } + return Result(plain), nil } diff --git a/finder/index.go b/finder/index.go index 138b2facb..949c01558 100644 --- a/finder/index.go +++ b/finder/index.go @@ -79,14 +79,18 @@ func (idx *IndexFinder) checkReverses(query string) uint8 { if len(rule.Prefix) > 0 && !strings.HasPrefix(query, rule.Prefix) { continue } + if len(rule.Suffix) > 0 && !strings.HasSuffix(query, rule.Suffix) { continue } + if rule.Regex != nil && rule.Regex.FindStringIndex(query) == nil { continue } + return config.IndexReverse[rule.Reverse] } + return idx.confReverse } @@ -106,6 +110,7 @@ func (idx *IndexFinder) useReverse(query string) bool { idx.reverse = queryDirect return idx.useReverse(query) } + firstWildcardNode := strings.Count(query[:w], ".") w = where.IndexLastWildcard(query) @@ -115,7 +120,9 @@ func (idx *IndexFinder) useReverse(query string) bool { idx.reverse = queryReversed return idx.useReverse(query) } + idx.reverse = queryDirect + return idx.useReverse(query) } @@ -125,6 +132,7 @@ func useDaily(dailyEnabled bool, from, until int64) bool { func calculateIndexLevelOffset(useDaily, reverse bool) int { var levelOffset int + if useDaily { if reverse { levelOffset = ReverseLevelOffset @@ -162,6 +170,7 @@ func (idx *IndexFinder) whereFilter(query string, from int64, until int64) *wher w := idx.where(query, levelOffset) addDatesToWhere(w, idx.useDaily, from, until) + return w } @@ -189,6 +198,7 @@ func (idx *IndexFinder) Execute(ctx context.Context, config *config.Config, quer if err != nil { return err } + w := idx.whereFilter(query, from, 
until) idx.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query( @@ -200,6 +210,7 @@ func (idx *IndexFinder) Execute(ctx context.Context, config *config.Config, quer nil, ) stat.Table = idx.table + if err == nil { stat.ReadBytes = int64(len(idx.body)) idx.bodySplit() @@ -246,6 +257,7 @@ func splitIndexBody(body []byte, useReverse, useCache bool) ([]byte, [][]byte, b func (idx *IndexFinder) bodySplit() { setDirect := false idx.body, idx.rows, setDirect = splitIndexBody(idx.body, idx.useReverse(""), idx.useCache) + if setDirect { idx.reverse = queryDirect } diff --git a/finder/index_test.go b/finder/index_test.go index 3e9610dd8..17997dd59 100644 --- a/finder/index_test.go +++ b/finder/index_test.go @@ -189,6 +189,7 @@ func TestIndexFinder_whereFilter(t *testing.T) { if tt.indexReverse == "" { tt.indexReverse = "auto" } + idx := NewIndex("http://localhost:8123/", "graphite_index", tt.dailyEnabled, tt.indexReverse, tt.indexReverses, clickhouse.Options{}, false).(*IndexFinder) if got := idx.whereFilter(tt.query, tt.from, tt.until); got.String() != tt.want { t.Errorf("IndexFinder.whereFilter() = %v, want %v", got, tt.want) diff --git a/finder/plain_from_tagged.go b/finder/plain_from_tagged.go index 95fcd5b3d..e0a139924 100644 --- a/finder/plain_from_tagged.go +++ b/finder/plain_from_tagged.go @@ -21,11 +21,14 @@ type plainFromTaggedFinder struct { func makePlainFromTagged(matchers []TaggedTerm) *plainFromTaggedFinder { var isMetricNameFound bool + var target string + for _, m := range matchers { if m.Key == "__name__" && m.Value == "graphite" && m.Op == TaggedTermEq { isMetricNameFound = true } + if m.Key == "target" && m.Op == TaggedTermEq && m.Value != "" { target = m.Value } @@ -84,6 +87,7 @@ func (f *plainFromTaggedFinder) Abs(value []byte) []byte { lb := []taggedLabel{ {"metric", path}, } + if f.metricName != "" { name = f.metricName } @@ -104,10 +108,12 @@ func (f *plainFromTaggedFinder) Abs(value []byte) []byte { buf.WriteString(name) buf.WriteByte('?') + for i, l := range lb { if i > 0 { buf.WriteByte('&') } + buf.WriteString(url.QueryEscape(l.name)) buf.WriteByte('=') buf.WriteString(url.QueryEscape(l.value)) diff --git a/finder/prefix.go b/finder/prefix.go index 9bbee6fb3..327a61a57 100644 --- a/finder/prefix.go +++ b/finder/prefix.go @@ -29,6 +29,7 @@ func bytesConcat(s1 []byte, s2 []byte) []byte { ret := make([]byte, len(s1)+len(s2)) copy(ret, s1) copy(ret[len(s1):], s2) + return ret } @@ -59,6 +60,7 @@ func (p *PrefixFinder) Execute(ctx context.Context, config *config.Config, query if err != nil { return err } + if !m { // not matched return nil } @@ -68,6 +70,7 @@ func (p *PrefixFinder) Execute(ctx context.Context, config *config.Config, query // prefix matched, but not finished p.part = strings.Join(ps[:len(qs)], ".") + "." 
p.matched = PrefixPartialMathed + return nil } diff --git a/finder/prefix_test.go b/finder/prefix_test.go index 4a855d43b..87e87f8b4 100644 --- a/finder/prefix_test.go +++ b/finder/prefix_test.go @@ -37,6 +37,7 @@ func TestPrefixFinderExecute(t *testing.T) { f := WrapPrefix(m, test.prefix) var stat FinderStat + config := config.New() err := f.Execute(context.Background(), config, test.query, 0, 0, &stat) @@ -88,6 +89,7 @@ func TestPrefixFinderList(t *testing.T) { f := WrapPrefix(m, prefix) var stat FinderStat + config := config.New() f.Execute(context.Background(), config, test.query, 0, 0, &stat) diff --git a/finder/reverse.go b/finder/reverse.go index f4bea1b7a..dcdbaa076 100644 --- a/finder/reverse.go +++ b/finder/reverse.go @@ -21,6 +21,7 @@ type ReverseFinder struct { func ReverseString(target string) string { a := strings.Split(target, ".") l := len(a) + for i := 0; i < l/2; i++ { a[i], a[l-i-1] = a[l-i-1], a[i] } @@ -60,6 +61,7 @@ func (r *ReverseFinder) Execute(ctx context.Context, config *config.Config, quer } r.isUsed = true + return r.baseFinder.Execute(ctx, config, ReverseString(query), from, until, stat) } diff --git a/finder/split.go b/finder/split.go index 4edf43c08..41ef9467b 100644 --- a/finder/split.go +++ b/finder/split.go @@ -112,12 +112,14 @@ func (splitFinder *SplitIndexFinder) Execute( nil, ) stat.Table = splitFinder.table + if err != nil { return err } stat.ReadBytes = int64(len(splitFinder.body)) splitFinder.body, splitFinder.rows, _ = splitIndexBody(splitFinder.body, splitFinder.useReverse, splitFinder.useCache) + return nil } @@ -133,6 +135,7 @@ func splitQuery(query string, maxNodeToSplitIdx int) ([]string, error) { lastClosingBracketIndex := strings.LastIndex(query, "}") reverseNodeCount := strings.Count(query[lastClosingBracketIndex:], ".") + var reverseWildcardIndex int if lastClosingBracketIndex == len(query)-1 { reverseWildcardIndex = -1 @@ -141,17 +144,20 @@ func splitQuery(query string, maxNodeToSplitIdx int) ([]string, error) { } useDirect := true + if directWildcardIndex >= 0 && reverseWildcardIndex >= 0 { return []string{query}, nil } else if directWildcardIndex < 0 && reverseWildcardIndex >= 0 { if directNodeCount > maxNodeToSplitIdx { return []string{query}, nil } + useDirect = true } else if directWildcardIndex >= 0 && reverseWildcardIndex < 0 { if reverseNodeCount > maxNodeToSplitIdx { return []string{query}, nil } + useDirect = false } else { if directNodeCount > maxNodeToSplitIdx && reverseNodeCount > maxNodeToSplitIdx { @@ -177,11 +183,13 @@ func splitQuery(query string, maxNodeToSplitIdx int) ([]string, error) { if directNodeCount > maxNodeToSplitIdx { return []string{query}, nil } + useDirect = true } else if reverseNodeCount > directNodeCount { if reverseNodeCount > maxNodeToSplitIdx { return []string{query}, nil } + useDirect = false } else { if choicesInLeftMost >= choicesInRightMost { @@ -228,6 +236,7 @@ func splitPartOfQuery(prefix, queryPart, suffix string) ([]string, error) { func (splitFinder *SplitIndexFinder) whereFilter(queries []string, from, until int64) (*where.Where, error) { queryWithWildcardIdx := -1 + for i, q := range queries { err := validatePlainQuery(q, splitFinder.wildcardMinDistance) if err != nil { @@ -250,6 +259,7 @@ func (splitFinder *SplitIndexFinder) whereFilter(queries []string, from, until i nonWildcardQueries := make([]string, 0) aggregatedWhere := where.New() + for _, q := range queries { if splitFinder.useReverse { q = ReverseString(q) diff --git a/finder/split_test.go b/finder/split_test.go index 
c1a2e85db..b000432cd 100644 --- a/finder/split_test.go +++ b/finder/split_test.go @@ -348,6 +348,7 @@ func TestSplitIndexFinder_whereFilter(t *testing.T) { got, err := f.whereFilter(tc.givenQueries, tc.givenFrom, tc.givenUntil) assert.Equal(t, tc.expectedErr, err) + if err == nil { assert.Equal(t, tc.expectedWhereStr, got.String()) } diff --git a/finder/tag.go b/finder/tag.go index a5578fc52..c8b709c9d 100644 --- a/finder/tag.go +++ b/finder/tag.go @@ -33,12 +33,15 @@ func (q TagQ) String() string { if q.Param != nil && q.Value != nil { return fmt.Sprintf("{\"param\"=%#v, \"value\"=%#v}", *q.Param, *q.Value) } + if q.Param != nil { return fmt.Sprintf("{\"param\"=%#v}", *q.Param) } + if q.Value != nil { return fmt.Sprintf("{\"value\"=%#v}", *q.Value) } + return "{}" } @@ -46,9 +49,11 @@ func (q *TagQ) Where(field string) string { if q.Param != nil && q.Value != nil && *q.Value != "*" { return where.Eq(field, *q.Param+*q.Value) } + if q.Param != nil { return where.HasPrefix(field, *q.Param) } + if q.Value != nil && *q.Value != "*" { return where.Eq(field, *q.Value) } @@ -152,6 +157,7 @@ func (t *TagFinder) MakeSQL(query string) (string, error) { if len(qs) == 0 { break } + if qs[0] == "_tag" { if len(qs) >= 2 { v := qs[1] @@ -187,6 +193,7 @@ func (t *TagFinder) MakeSQL(query string) (string, error) { } else { t.state = TagList } + return t.tagListSQL() } @@ -196,6 +203,7 @@ func (t *TagFinder) MakeSQL(query string) (string, error) { } t.state = TagListSeries + return t.seriesSQL() } @@ -216,6 +224,7 @@ func (t *TagFinder) Execute(ctx context.Context, config *config.Config, query st } var sql string + sql, err = t.MakeSQL(query) if err != nil || sql == "" { return @@ -246,11 +255,13 @@ func (t *TagFinder) List() [][]byte { rows := bytes.Split(t.body, []byte{'\n'}) skip := 0 + for i := 0; i < len(rows); i++ { if len(rows[i]) == 0 { skip++ continue } + if skip > 0 { rows[i-skip] = rows[i] } @@ -295,15 +306,18 @@ func (t *TagFinder) Series() [][]byte { rows := t.List() skip := 0 + for i := 0; i < len(rows); i++ { if len(rows[i]) == 0 { skip++ continue } + if rows[i][len(rows[i])-1] == '.' 
{ skip++ continue } + if skip > 0 { rows[i-skip] = rows[i] } diff --git a/finder/tag_test.go b/finder/tag_test.go index 5465976bb..4f8764528 100644 --- a/finder/tag_test.go +++ b/finder/tag_test.go @@ -51,6 +51,7 @@ func TestTagsMakeSQL(t *testing.T) { } else { assert.NoError(err) } + assert.Equal(test.sql, sql, testName) } } @@ -61,6 +62,7 @@ func _TestTags(t *testing.T) { mockData := [][]byte{[]byte("mock")} type w []string + mock := w{"mock"} empty := w{} @@ -104,6 +106,7 @@ func _TestTags(t *testing.T) { f := WrapTag(m, srv.URL, "graphite_tag", clickhouse.Options{Timeout: time.Second, ConnectTimeout: time.Second}) var stat FinderStat + config := config.New() f.Execute(context.Background(), config, test.query, 0, 0, &stat) diff --git a/finder/tagged.go b/finder/tagged.go index 4e379c519..22bb87589 100644 --- a/finder/tagged.go +++ b/finder/tagged.go @@ -58,6 +58,7 @@ func (s TaggedTermList) Less(i, j int) bool { if s[i].Op < s[j].Op { return true } + if s[i].Op > s[j].Op { return false } @@ -70,6 +71,7 @@ func (s TaggedTermList) Less(i, j int) bool { if s[i].Key == "__name__" && s[j].Key != "__name__" { return true } + return false } @@ -122,13 +124,16 @@ func TaggedTermWhere1(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags // container_name="" ==> response should not contain container_name return fmt.Sprintf("NOT arrayExists((x) -> %s, Tags)", where.HasPrefix("x", term.Key+"=")), nil } + if strings.Contains(term.Value, "*") { return where.Like("Tag1", term.concatMask()), nil } + var values []string if err := where.GlobExpandSimple(term.Value, term.Key+"=", &values); err != nil { return "", err } + if len(values) == 1 { return where.Eq("Tag1", values[0]), nil } else if len(values) > 1 { @@ -142,18 +147,22 @@ func TaggedTermWhere1(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags // container_name!="" ==> container_name exists and it is not empty return where.HasPrefixAndNotEq("Tag1", term.Key+"="), nil } + var whereLikeAnyVal string if dontMatchMissingTags { whereLikeAnyVal = where.HasPrefix("Tag1", term.Key+"=") + " AND " } + if strings.Contains(term.Value, "*") { whereLike := where.Like("x", term.concatMask()) return fmt.Sprintf("%sNOT arrayExists((x) -> %s, Tags)", whereLikeAnyVal, whereLike), nil } + var values []string if err := where.GlobExpandSimple(term.Value, term.Key+"=", &values); err != nil { return "", err } + if len(values) == 1 { whereEq := where.Eq("x", values[0]) return fmt.Sprintf("%sNOT arrayExists((x) -> %s, Tags)", whereLikeAnyVal, whereEq), nil @@ -171,7 +180,9 @@ func TaggedTermWhere1(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags if dontMatchMissingTags { whereLikeAnyVal = where.HasPrefix("Tag1", term.Key+"=") + " AND " } + whereMatch := where.Match("x", term.Key, term.Value) + return fmt.Sprintf("%sNOT arrayExists((x) -> %s, Tags)", whereLikeAnyVal, whereMatch), nil default: return "", nil @@ -187,13 +198,16 @@ func TaggedTermWhereN(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags // container_name="" ==> response should not contain container_name return fmt.Sprintf("NOT arrayExists((x) -> %s, Tags)", where.HasPrefix("x", term.Key+"=")), nil } + if strings.Contains(term.Value, "*") { return fmt.Sprintf("arrayExists((x) -> %s, Tags)", where.Like("x", term.concatMask())), nil } + var values []string if err := where.GlobExpandSimple(term.Value, term.Key+"=", &values); err != nil { return "", err } + if len(values) == 1 { return where.ArrayHas("Tags", values[0]), nil } else if len(values) > 1 { @@ -201,6 +215,7 @@ func 
TaggedTermWhereN(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags for _, v := range values { w.Or(where.ArrayHas("Tags", v)) } + return w.String(), nil } else { return where.ArrayHas("Tags", term.concat()), nil @@ -211,18 +226,22 @@ func TaggedTermWhereN(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags // container_name!="" ==> container_name exists and it is not empty return fmt.Sprintf("arrayExists((x) -> %s, Tags)", where.HasPrefixAndNotEq("x", term.Key+"=")), nil } + var whereLikeAnyVal string if dontMatchMissingTags { whereLikeAnyVal = fmt.Sprintf("arrayExists((x) -> %s, Tags) AND ", where.HasPrefix("x", term.Key+"=")) } + if strings.Contains(term.Value, "*") { whereLike := where.Like("x", term.concatMask()) return fmt.Sprintf("%sNOT arrayExists((x) -> %s, Tags)", whereLikeAnyVal, whereLike), nil } + var values []string if err := where.GlobExpandSimple(term.Value, term.Key+"=", &values); err != nil { return "", err } + if len(values) == 1 { whereEq := where.Eq("x", values[0]) return fmt.Sprintf("%sNOT arrayExists((x) -> %s, Tags)", whereLikeAnyVal, whereEq), nil @@ -240,7 +259,9 @@ func TaggedTermWhereN(term *TaggedTerm, useCarbonBehaviour, dontMatchMissingTags if dontMatchMissingTags { whereLikeAnyVal = fmt.Sprintf("arrayExists((x) -> %s, Tags) AND ", where.HasPrefix("x", term.Key+"=")) } + whereMatch := where.Match("x", term.Key, term.Value) + return fmt.Sprintf("%sNOT arrayExists((x) -> %s, Tags)", whereLikeAnyVal, whereMatch), nil default: return "", nil @@ -253,9 +274,11 @@ func setCost(term *TaggedTerm, costs *config.Costs) { if cost, ok := costs.ValuesCost[term.Value]; ok { term.Cost = cost term.NonDefaultCost = true + return } } + if term.Op == TaggedTermEq && !term.HasWildcard && costs.Cost != nil { term.Cost = *costs.Cost // only for non-wildcared eq term.NonDefaultCost = true @@ -352,6 +375,7 @@ func parseString(s string) (string, string, error) { func seriesByTagArgs(query string) ([]string, error) { var err error + args := make([]string, 0, 8) // trim spaces @@ -359,13 +383,16 @@ func seriesByTagArgs(query string) ([]string, error) { if !strings.HasPrefix(e, "seriesByTag(") { return nil, ErrSyntaxSeriesByTag } + if e[len(e)-1] != ')' { return nil, ErrSyntaxSeriesByTag } + e = e[12 : len(e)-1] for len(e) > 0 { var arg string + if e[0] == '\'' || e[0] == '"' { if arg, e, err = parseString(e); err != nil { return nil, err @@ -380,6 +407,7 @@ func seriesByTagArgs(query string) ([]string, error) { return nil, errs.NewErrorfWithCode(http.StatusBadRequest, "seriesByTag arg missing quote %q", e) } } + return args, nil } @@ -399,13 +427,16 @@ func ParseSeriesByTag(query string, config *config.Config) ([]TaggedTerm, error) func TaggedWhere(terms []TaggedTerm, useCarbonBehaviour, dontMatchMissingTags bool) (*where.Where, *where.Where, error) { w := where.New() pw := where.New() + x, err := TaggedTermWhere1(&terms[0], useCarbonBehaviour, dontMatchMissingTags) if err != nil { return nil, nil, err } + if terms[0].Op == TaggedTermMatch { pw.And(x) } + w.And(x) for i := 1; i < len(terms); i++ { @@ -413,6 +444,7 @@ func TaggedWhere(terms []TaggedTerm, useCarbonBehaviour, dontMatchMissingTags bo if err != nil { return nil, nil, err } + w.And(and) } @@ -430,6 +462,7 @@ func (t *TaggedFinder) Execute(ctx context.Context, config *config.Config, query if err != nil { return err } + return t.ExecutePrepared(ctx, terms, from, until, stat) } @@ -451,6 +484,7 @@ func (t *TaggedFinder) whereFilter(terms []TaggedTerm, from int64, until int64) date.FromTimestampToDaysFormat(from), 
) } + return w, pw, nil } @@ -464,6 +498,7 @@ func (t *TaggedFinder) ExecutePrepared(ctx context.Context, terms []TaggedTerm, t.body, stat.ChReadRows, stat.ChReadBytes, err = clickhouse.Query(scope.WithTable(ctx, t.table), t.url, sql, t.opts, nil) stat.Table = t.table stat.ReadBytes = int64(len(t.body)) + return err } @@ -475,11 +510,13 @@ func (t *TaggedFinder) List() [][]byte { rows := bytes.Split(t.body, []byte{'\n'}) skip := 0 + for i := 0; i < len(rows); i++ { if len(rows[i]) == 0 { skip++ continue } + if skip > 0 { rows[i-skip] = rows[i] } @@ -499,15 +536,18 @@ func tagsParse(path string) (string, []string, error) { if n == 1 || args == "" { return name, nil, fmt.Errorf("incomplete tags in '%s'", path) } + tags := strings.Split(args, "&") for i := range tags { tags[i] = unescape(tags[i]) } + return unescape(name), tags, nil } func TaggedDecode(v []byte) []byte { s := stringutils.UnsafeString(v) + name, tags, err := tagsParse(s) if err != nil { return v @@ -516,6 +556,7 @@ func TaggedDecode(v []byte) []byte { if len(tags) == 0 { return stringutils.UnsafeStringBytes(&name) } + sort.Strings(tags) var sb stringutils.Builder @@ -528,10 +569,12 @@ func TaggedDecode(v []byte) []byte { sb.Grow(length) sb.WriteString(name) + for _, tag := range tags { sb.WriteString(";") sb.WriteString(tag) } + return sb.Bytes() } @@ -565,6 +608,7 @@ func (t *TaggedFinder) PrepareTaggedTerms(ctx context.Context, cfg *config.Confi if t.metricMightExists || len(t.taggedCosts) != 0 { SetCosts(terms, t.taggedCosts) } + SortTaggedTermsByCost(terms) return terms, nil @@ -615,7 +659,9 @@ func (t *TaggedFinder) SetCostsFromCountTable(ctx context.Context, terms []Tagge if err != nil { return err } + w.Or(sqlTerm) + eqTermCount++ } } @@ -640,6 +686,7 @@ func (t *TaggedFinder) SetCostsFromCountTable(ctx context.Context, terms []Tagge sql := fmt.Sprintf("SELECT Tag1, sum(Count) as cnt FROM %s %s GROUP BY Tag1 FORMAT TabSeparatedRaw", t.tag1CountTable, w.SQL()) var err error + t.body, _, _, err = clickhouse.Query(scope.WithTable(ctx, t.tag1CountTable), t.url, sql, t.opts, nil) if err != nil { return err @@ -649,6 +696,7 @@ func (t *TaggedFinder) SetCostsFromCountTable(ctx context.Context, terms []Tagge // create cost var to validate CH response without writing to t.taggedCosts var costs map[string]*config.Costs + costs, err = chResultToCosts(rows) if err != nil { return err @@ -661,6 +709,7 @@ func (t *TaggedFinder) SetCostsFromCountTable(ctx context.Context, terms []Tagge if len(rows) < eqTermCount { t.body = []byte{} t.metricMightExists = false + return nil } @@ -679,17 +728,22 @@ func SetCosts(terms []TaggedTerm, costs map[string]*config.Costs) { func chResultToCosts(body [][]byte) (map[string]*config.Costs, error) { costs := make(map[string]*config.Costs, 0) + for i := 0; i < len(body); i++ { s := stringutils.UnsafeString(body[i]) + tag, val, count, err := parseTag1CountRow(s) if err != nil { return nil, fmt.Errorf("failed to parse result from clickhouse while querying for tag costs: %s", err.Error()) } + if costs[tag] == nil { costs[tag] = &config.Costs{Cost: nil, ValuesCost: make(map[string]int, 0)} } + costs[tag].ValuesCost[val] = count } + return costs, nil } @@ -699,14 +753,18 @@ func parseTag1CountRow(s string) (string, string, int, error) { cnt, n int err error ) + if tag1, count, n = stringutils.Split2(s, "\t"); n != 2 { return "", "", 0, fmt.Errorf("no tag count") } + if tag, val, n = stringutils.Split2(tag1, "="); n != 2 { return "", "", 0, fmt.Errorf("no '=' in Tag1") } + if cnt, err = strconv.Atoi(count); 
err != nil { return "", "", 0, fmt.Errorf("can't convert count to int") } + return tag, val, cnt, nil } diff --git a/finder/tagged_test.go b/finder/tagged_test.go index c6cb7cb0d..8ebe2894d 100644 --- a/finder/tagged_test.go +++ b/finder/tagged_test.go @@ -98,6 +98,7 @@ func TestTaggedWhere(t *testing.T) { return } } + require.NoError(err, testName+", err") var w, pw *where.Where @@ -198,6 +199,7 @@ func TestTaggedWhere_UseCarbonBehaviourFlag(t *testing.T) { return } } + require.NoError(err, testName+", err") var w, pw *where.Where @@ -300,6 +302,7 @@ func TestTaggedWhere_DontMatchMissingTagsFlag(t *testing.T) { return } } + require.NoError(err, testName+", err") var w, pw *where.Where @@ -402,6 +405,7 @@ func TestTaggedWhere_BothFeatureFlags(t *testing.T) { return } } + require.NoError(err, testName+", err") var w, pw *where.Where @@ -430,10 +434,12 @@ func TestParseSeriesByTag(t *testing.T) { p, err := ParseSeriesByTag(query, config) assert.NoError(err) assert.Equal(len(expected), len(p)) + length := len(expected) if length < len(p) { length = len(p) } + for i := 0; i < length; i++ { if i >= len(p) { t.Errorf("%s\n- [%d]=%+v", query, i, expected[i]) @@ -474,6 +480,7 @@ func TestParseSeriesByTag(t *testing.T) { func newInt(i int) *int { p := new(int) *p = i + return p } @@ -495,10 +502,12 @@ func TestParseSeriesByTagWithCosts(t *testing.T) { SetCosts(terms, config.ClickHouse.TaggedCosts) SortTaggedTermsByCost(terms) assert.NoError(err) + length := len(expected) if length < len(terms) { length = len(terms) } + for i := 0; i < length; i++ { if i >= len(terms) { t.Errorf("%s\n- [%d]=%+v", query, i, expected[i]) @@ -625,6 +634,7 @@ func TestParseSeriesByTagWithCostsFromCountTable(t *testing.T) { cfg, _ := config.DefaultConfig() cfg.ClickHouse.URL = srv.URL + if useTagCostsFromConfig { cfg.ClickHouse.TaggedCosts = taggedCosts } @@ -650,11 +660,13 @@ func TestParseSeriesByTagWithCostsFromCountTable(t *testing.T) { ) stat := &FinderStat{} + terms, err := taggedFinder.PrepareTaggedTerms(context.Background(), cfg, query, from, until, stat) if expectedErr != nil { assert.Equal(expectedErr, err, testName+", err") return } + assert.NoError(err) assert.Equal(metricMightExist, taggedFinder.metricMightExists, testName+", metricMightExist") @@ -662,6 +674,7 @@ func TestParseSeriesByTagWithCostsFromCountTable(t *testing.T) { if length < len(terms) { length = len(terms) } + for i := 0; i < length; i++ { if i >= len(terms) { t.Errorf("%s\n- [%d]=%+v", testName, i, expected[i]) @@ -964,10 +977,12 @@ func TestTaggedFinder_whereFilter(t *testing.T) { t.Run(tt.name+" "+time.Unix(tt.from, 0).Format(time.RFC3339), func(t *testing.T) { config := config.New() config.ClickHouse.TaggedCosts = tt.taggedCosts + terms, err := ParseSeriesByTag(tt.query, config) if err != nil { t.Fatal(err) } + f := NewTagged( "http://localhost:8123/", "graphite_tags", @@ -979,13 +994,16 @@ func TestTaggedFinder_whereFilter(t *testing.T) { clickhouse.Options{}, tt.taggedCosts, ) + got, gotDate, err := f.whereFilter(terms, tt.from, tt.until) if err != nil { t.Fatal(err) } + if got.String() != tt.want { t.Errorf("TaggedFinder.whereFilter()[0] = %v, want %v", got, tt.want) } + if gotDate.String() != tt.wantPre { t.Errorf("TaggedFinder.whereFilter()[1] = %v, want %v", gotDate, tt.wantPre) } @@ -1024,6 +1042,7 @@ func TestTaggedFinder_Abs(t *testing.T) { } else { tf = NewTagged("http:/127.0.0.1:8123", "graphite_tags", "", true, false, false, false, clickhouse.Options{}, nil) } + if got := string(tf.Abs(tt.v)); got != string(tt.want) { 
t.Errorf("TaggedDecode() =\n%q\nwant\n%q", got, string(tt.want)) } diff --git a/finder/unescape.go b/finder/unescape.go index 0e51dba5f..ab3950468 100644 --- a/finder/unescape.go +++ b/finder/unescape.go @@ -11,6 +11,7 @@ func ishex(c byte) bool { case 'A' <= c && c <= 'F': return true } + return false } @@ -23,6 +24,7 @@ func unhex(c byte) byte { case 'A' <= c && c <= 'F': return c - 'A' + 10 } + return 0 } @@ -36,7 +38,9 @@ func unescape(s string) string { if first == -1 { return s } + var t strings.Builder + t.Grow(len(s)) t.WriteString(s[:first]) diff --git a/graphite-clickhouse.go b/graphite-clickhouse.go index 12ab7bc3c..462847933 100644 --- a/graphite-clickhouse.go +++ b/graphite-clickhouse.go @@ -60,6 +60,7 @@ func (w *LogResponseWriter) Status() int { if w.status == 0 { return http.StatusOK } + return w.status } @@ -67,6 +68,7 @@ func WrapResponseWriter(w http.ResponseWriter) *LogResponseWriter { if wrapped, ok := w.(*LogResponseWriter); ok { return wrapped } + return &LogResponseWriter{ResponseWriter: w} } @@ -103,6 +105,7 @@ func sdList(name string, args []string) { flagSet.PrintDefaults() } flagSet.Parse(args) + if *help || flagSet.NArg() > 0 { flagSet.Usage() return @@ -115,6 +118,7 @@ func sdList(name string, args []string) { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { var s sd.SD + logger := zapwriter.Default() if s, err = sd.New(&cfg.Common, "", logger); err != nil { fmt.Fprintf(os.Stderr, "service discovery type %q can be registered", cfg.Common.SDType.String()) @@ -144,6 +148,7 @@ func sdDelete(name string, args []string) { flagSet.PrintDefaults() } flagSet.Parse(args) + if *help || flagSet.NArg() > 0 { flagSet.Usage() return @@ -156,6 +161,7 @@ func sdDelete(name string, args []string) { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { var s sd.SD + logger := zapwriter.Default() if s, err = sd.New(&cfg.Common, "", logger); err != nil { fmt.Fprintf(os.Stderr, "service discovery type %q can be registered", cfg.Common.SDType.String()) @@ -164,6 +170,7 @@ func sdDelete(name string, args []string) { hostname, _ := os.Hostname() hostname, _, _ = strings.Cut(hostname, ".") + if err = s.Clear("", ""); err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) @@ -184,10 +191,12 @@ func sdEvict(name string, args []string) { fmt.Fprintf(os.Stderr, " HOST []string\n List of hostnames\n") } flagSet.Parse(args) + if *help { flagSet.Usage() return } + cfg, _, err := config.ReadConfig(*configFile, *exactConfig) if err != nil { log.Fatal(err) @@ -196,11 +205,13 @@ func sdEvict(name string, args []string) { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { for _, host := range flagSet.Args() { var s sd.SD + logger := zapwriter.Default() if s, err = sd.New(&cfg.Common, host, logger); err != nil { fmt.Fprintf(os.Stderr, "service discovery type %q can be registered", cfg.Common.SDType.String()) os.Exit(1) } + err = s.Clear("", "") } } @@ -218,6 +229,7 @@ func sdExpired(name string, args []string) { flagSet.PrintDefaults() } flagSet.Parse(args) + if *help || flagSet.NArg() > 0 { flagSet.Usage() return @@ -230,6 +242,7 @@ func sdExpired(name string, args []string) { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { var s sd.SD + logger := zapwriter.Default() if s, err = sd.New(&cfg.Common, "", logger); err != nil { fmt.Fprintf(os.Stderr, "service discovery type %q can be registered", cfg.Common.SDType.String()) @@ -255,6 +268,7 @@ func sdClean(name string, args []string) { flagSet.PrintDefaults() } flagSet.Parse(args) + if *help || flagSet.NArg() > 0 { flagSet.Usage() return 
@@ -267,6 +281,7 @@ func sdClean(name string, args []string) { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { var s sd.SD + logger := zapwriter.Default() if s, err = sd.New(&cfg.Common, "", logger); err != nil { fmt.Fprintf(os.Stderr, "service discovery type %q cannot be registered\n", cfg.Common.SDType.String()) @@ -284,10 +299,13 @@ func printMatchedRollupRules(metric string, age uint32, rollupRules *rollup.Rule // check metric rollup rules prec, aggr, aggrPattern, retentionPattern := rollupRules.Lookup(metric, age, true) fmt.Printf(" metric %q, age %d -> precision=%d, aggr=%s\n", metric, age, prec, aggr.Name()) + if aggrPattern != nil { fmt.Printf(" aggr pattern: type=%s, regexp=%q, function=%s", aggrPattern.RuleType.String(), aggrPattern.Regexp, aggrPattern.Function) + if len(aggrPattern.Retention) > 0 { fmt.Print(", retentions:\n") + for i := range aggrPattern.Retention { fmt.Printf(" [age: %d, precision: %d]\n", aggrPattern.Retention[i].Age, aggrPattern.Retention[i].Precision) } @@ -295,8 +313,10 @@ func printMatchedRollupRules(metric string, age uint32, rollupRules *rollup.Rule fmt.Print("\n") } } + if retentionPattern != nil { fmt.Printf(" retention pattern: type=%s, regexp=%q, function=%s, retentions:\n", retentionPattern.RuleType.String(), retentionPattern.Regexp, retentionPattern.Function) + for i := range retentionPattern.Retention { fmt.Printf(" [age: %d, precision: %d]\n", retentionPattern.Retention[i].Age, retentionPattern.Retention[i].Precision) } @@ -321,6 +341,7 @@ func checkRollupMatch(name string, args []string) { fmt.Fprintf(os.Stderr, " METRIC []string\n List of metric names\n") } flagSet.Parse(args) + if *help { flagSet.Usage() return @@ -333,6 +354,7 @@ func checkRollupMatch(name string, args []string) { if *rollupFile != "" { fmt.Printf("rollup file %q\n", *rollupFile) + if rollup, err := rollup.NewXMLFile(*rollupFile, 0, ""); err == nil { for _, metric := range flagSet.Args() { printMatchedRollupRules(metric, uint32(*age), rollup.Rules()) @@ -341,20 +363,25 @@ func checkRollupMatch(name string, args []string) { log.Fatal(err) } } + if *configFile != "" { cfg, _, err := config.ReadConfig(*configFile, *exactConfig) if err != nil { log.Fatal(err) } + ec := 0 + for i := range cfg.DataTable { var rulesTable string + if *table == "" || *table == cfg.DataTable[i].Table { if cfg.DataTable[i].RollupConf == "auto" || cfg.DataTable[i].RollupConf == "" { rulesTable = cfg.DataTable[i].Table if cfg.DataTable[i].RollupAutoTable != "" { rulesTable = cfg.DataTable[i].RollupAutoTable } + fmt.Printf("table %q, rollup rules table %q in ClickHouse\n", cfg.DataTable[i].Table, rulesTable) } else { fmt.Printf("rollup file %q\n", cfg.DataTable[i].RollupConf) @@ -367,10 +394,12 @@ func checkRollupMatch(name string, args []string) { cfg.ClickHouse.TLSConfig, rulesTable) if err != nil { ec = 1 + fmt.Fprintf(os.Stderr, "%v\n", err) } } } + if rules != nil { for _, metric := range flagSet.Args() { printMatchedRollupRules(metric, uint32(*age), rules) @@ -378,6 +407,7 @@ func checkRollupMatch(name string, args []string) { } } } + os.Exit(ec) } } @@ -451,6 +481,7 @@ func main() { if err = config.PrintDefaultConfig(); err != nil { log.Fatal(err) } + return } @@ -472,6 +503,7 @@ func main() { if err != nil { log.Fatal(err) } + logger := localManager.Logger("start") if len(warns) > 0 { @@ -509,6 +541,7 @@ func main() { if *pprof != "" { listen = *pprof } + go func() { log.Fatal(http.ListenAndServe(listen, nil)) }() } @@ -517,6 +550,7 @@ func main() { if err := tagger.Make(cfg); err != nil {
log.Fatal(err) } + return } @@ -551,8 +585,10 @@ func main() { if err != nil { status = http.StatusInternalServerError http.Error(w, err.Error(), status) + return } + w.Write(b) }) @@ -567,6 +603,7 @@ func main() { } var exitWait sync.WaitGroup + srv = &http.Server{ Addr: cfg.Common.Listen, Handler: mux, @@ -576,6 +613,7 @@ func main() { go func() { defer exitWait.Done() + if err := srv.ListenAndServe(); err != http.ErrServerClosed { // unexpected error. port in use? log.Fatalf("ListenAndServe(): %v", err) @@ -585,6 +623,7 @@ func main() { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { go func() { time.Sleep(time.Millisecond * 100) + sdLogger := localManager.Logger("service discovery") sd.Register(&cfg.Common, sdLogger) }() @@ -595,6 +634,7 @@ func main() { signal.Notify(stop, syscall.SIGTERM, syscall.SIGINT) <-stop logger.Info("stopping graphite-clickhouse") + if cfg.Common.SD != "" { // unregister SD sd.Stop() diff --git a/healthcheck/healthcheck.go b/healthcheck/healthcheck.go index 3ba0c4f2f..8c5bd2b28 100644 --- a/healthcheck/healthcheck.go +++ b/healthcheck/healthcheck.go @@ -37,6 +37,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { query string failed int32 ) + if h.config.ClickHouse.IndexTable != "" { // non-existing name with wrong level query = "SELECT Path FROM " + h.config.ClickHouse.IndexTable + " WHERE ((Level=20002) AND (Path IN ('NonExistient','NonExistient.'))) AND (Date='1970-02-12') GROUP BY Path FORMAT TabSeparatedRaw" @@ -44,9 +45,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // non-existing partition query = "SELECT Path FROM " + h.config.ClickHouse.TaggedTable + " WHERE (Tag1='__name__=NonExistient') AND (Date='1970-02-12') GROUP BY Path FORMAT TabSeparatedRaw" } + if query != "" { failed = 1 now := time.Now().Unix() + for { last := atomic.LoadInt64(&h.last) if now-last < 10 { @@ -63,6 +66,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { client := http.Client{ Timeout: 2 * time.Second, } + var u string if pos := strings.Index(h.config.ClickHouse.URL, "/?"); pos > 0 { u = h.config.ClickHouse.URL[:pos+2] + "query=" + url.QueryEscape(query) @@ -71,6 +75,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } req, _ := http.NewRequest(http.MethodGet, u, nil) + resp, err := client.Do(req) if err != nil { logger.Error("healthcheck error", @@ -84,27 +89,34 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { failed = 0 } else { failed = 1 + logger.Error("healthcheck error", zap.String("error", stringutils.UnsafeString(body)), ) } } else { failed = 1 + logger.Error("healthcheck error", zap.Error(err), ) } + resp.Body.Close() } else { failed = 1 + logger.Error("healthcheck error", zap.Error(err), ) } + atomic.StoreInt32(&h.failed, failed) + break } } + if failed > 0 { http.Error(w, "Storage healthcheck failed", http.StatusServiceUnavailable) } else { diff --git a/helper/RowBinary/encode.go b/helper/RowBinary/encode.go index f3b1ab075..ad1a61e71 100644 --- a/helper/RowBinary/encode.go +++ b/helper/RowBinary/encode.go @@ -37,12 +37,14 @@ func (w *Encoder) Uint8(value uint8) error { func (w *Encoder) Uint16(value uint16) error { binary.LittleEndian.PutUint16(w.buffer, value) _, err := w.wrapped.Write(w.buffer[:2]) + return err } func (w *Encoder) Uint32(value uint32) error { binary.LittleEndian.PutUint32(w.buffer, value) _, err := w.wrapped.Write(w.buffer[:4]) + return err } @@ -51,16 +53,19 @@ func (w *Encoder) NullableUint32(value uint32) error { _,
err := w.wrapped.Write([]byte{1}) return err } + _, err := w.wrapped.Write([]byte{0}) if err != nil { return err } + return w.Uint32(value) } func (w *Encoder) Uint64(value uint64) error { binary.LittleEndian.PutUint64(w.buffer, value) _, err := w.wrapped.Write(w.buffer[:8]) + return err } @@ -73,21 +78,25 @@ func (w *Encoder) NullableFloat64(value float64) error { _, err := w.wrapped.Write([]byte{1}) return err } + _, err := w.wrapped.Write([]byte{0}) if err != nil { return err } + return w.Float64(value) } func (w *Encoder) Bytes(value []byte) error { n := binary.PutUvarint(w.buffer, uint64(len(value))) + _, err := w.wrapped.Write(w.buffer[:n]) if err != nil { return err } _, err = w.wrapped.Write(value) + return err } @@ -97,6 +106,7 @@ func (w *Encoder) String(value string) error { func (w *Encoder) StringList(value []string) error { n := binary.PutUvarint(w.buffer, uint64(len(value))) + _, err := w.wrapped.Write(w.buffer[:n]) if err != nil { return err @@ -114,6 +124,7 @@ func (w *Encoder) StringList(value []string) error { func (w *Encoder) Uint32List(value []uint32) error { n := binary.PutUvarint(w.buffer, uint64(len(value))) + _, err := w.wrapped.Write(w.buffer[:n]) if err != nil { return err @@ -131,6 +142,7 @@ func (w *Encoder) Uint32List(value []uint32) error { func (w *Encoder) NullableUint32List(value []uint32) error { n := binary.PutUvarint(w.buffer, uint64(len(value))) + _, err := w.wrapped.Write(w.buffer[:n]) if err != nil { return err @@ -148,6 +160,7 @@ func (w *Encoder) NullableUint32List(value []uint32) error { func (w *Encoder) Float64List(value []float64) error { n := binary.PutUvarint(w.buffer, uint64(len(value))) + _, err := w.wrapped.Write(w.buffer[:n]) if err != nil { return err @@ -165,6 +178,7 @@ func (w *Encoder) Float64List(value []float64) error { func (w *Encoder) NullableFloat64List(value []float64) error { n := binary.PutUvarint(w.buffer, uint64(len(value))) + _, err := w.wrapped.Write(w.buffer[:n]) if err != nil { return err diff --git a/helper/clickhouse/clickhouse.go b/helper/clickhouse/clickhouse.go index b503464c0..5f677b853 100644 --- a/helper/clickhouse/clickhouse.go +++ b/helper/clickhouse/clickhouse.go @@ -63,6 +63,7 @@ func extractClickhouseError(e string) (int, string) { if end := strings.Index(e, " (version "); end != -1 { e = e[0:end] } + return http.StatusForbidden, "Storage read limit " + e } else if start := strings.Index(e, ": Memory limit "); start != -1 { return http.StatusForbidden, "Storage read limit for memory" @@ -72,34 +73,44 @@ func extractClickhouseError(e string) (int, string) { return http.StatusServiceUnavailable, "Storage configuration error" } } + if strings.HasPrefix(e, "clickhouse response status 404: Code: 60. 
DB::Exception: Table default.") { return http.StatusServiceUnavailable, "Storage default tables damaged" } + if strings.HasPrefix(e, "clickhouse response status 500: Code: 427") || strings.HasPrefix(e, "clickhouse response status 400: Code: 427.") { return http.StatusBadRequest, "Incorrect regex syntax" } + return http.StatusServiceUnavailable, "Storage unavailable" } func HandleError(w http.ResponseWriter, err error) (status int, queueFail bool) { status = http.StatusOK errStr := err.Error() + if err == ErrInvalidTimeRange { status = http.StatusBadRequest http.Error(w, errStr, status) + return } + if err == limiter.ErrTimeout || err == limiter.ErrOverflow { queueFail = true status = http.StatusServiceUnavailable http.Error(w, err.Error(), status) + return } + if _, ok := err.(*ErrWithDescr); ok { status, errStr = extractClickhouseError(errStr) http.Error(w, errStr, status) + return } + netErr, ok := err.(net.Error) if ok { if netErr.Timeout() { @@ -117,8 +128,10 @@ func HandleError(w http.ResponseWriter, err error) (status int, queueFail bool) status = http.StatusServiceUnavailable http.Error(w, "Storage network error", status) } + return } + errCode, ok := err.(errs.ErrorWithCode) if ok { if (errCode.Code > 500 && errCode.Code < 512) || @@ -129,8 +142,10 @@ func HandleError(w http.ResponseWriter, err error) (status int, queueFail bool) status = http.StatusInternalServerError http.Error(w, html.EscapeString(errStr), status) } + return } + if errors.Is(err, context.Canceled) { status = http.StatusGatewayTimeout http.Error(w, "Storage read context canceled", status) @@ -139,6 +154,7 @@ func HandleError(w http.ResponseWriter, err error) (status int, queueFail bool) status = http.StatusInternalServerError http.Error(w, html.EscapeString(errStr), status) } + return } @@ -164,15 +180,18 @@ func (r *LoggedReader) Read(p []byte) (int, error) { r.finished = true r.logger.Info("query", zap.String("query_id", r.queryID), zap.Duration("time", time.Since(r.start))) } + return n, err } func (r *LoggedReader) Close() error { err := r.reader.Close() + if !r.finished { r.finished = true r.logger.Info("query", zap.String("query_id", r.queryID), zap.Duration("time", time.Since(r.start))) } + return err } @@ -230,6 +249,7 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e if len(queryForLogger) > 500 { queryForLogger = queryForLogger[:395] + "<...>" + queryForLogger[len(queryForLogger)-100:] } + logger := scope.Logger(ctx).With(zap.String("query", formatSQL(queryForLogger))) defer func() { @@ -245,6 +265,7 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e } var b [8]byte + binary.LittleEndian.PutUint64(b[:], rand.Uint64()) queryID := fmt.Sprintf("%x", b) @@ -257,6 +278,7 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e p.RawQuery = q.Encode() var contentHeader string + if postBody != nil { q := p.Query() q.Set("query", query) @@ -265,6 +287,7 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e q := p.Query() q.Set("query", query) p.RawQuery = q.Encode() + postBody, contentHeader, err = extData.buildBody(ctx, p) if err != nil { return @@ -281,6 +304,7 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e } req.Header.Add("User-Agent", scope.ClickhouseUserAgent(ctx)) + if contentHeader != "" { req.Header.Add("Content-Type", contentHeader) } @@ -306,6 +330,7 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e 
DisableKeepAlives: true, }, } + resp, err := client.Do(req) if err != nil { return @@ -317,14 +342,17 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e summaryHeader := resp.Header.Get("X-Clickhouse-Summary") read_rows := int64(-1) read_bytes := int64(-1) + if len(summaryHeader) > 0 { summary := make(map[string]string) err = json.Unmarshal([]byte(summaryHeader), &summary) + if err == nil { // TODO: use in carbon metrics sender when it will be implemented fields := make([]zapcore.Field, 0, len(summary)) for k, v := range summary { fields = append(fields, zap.String(k, v)) + switch k { case "read_rows": read_rows, _ = strconv.ParseInt(v, 10, 64) @@ -332,9 +360,11 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e read_bytes, _ = strconv.ParseInt(v, 10, 64) } } + sort.Slice(fields, func(i int, j int) bool { return fields[i].Key < fields[j].Key }) + logger = logger.With(fields...) } else { logger.Warn("query", zap.Error(err), zap.String("clickhouse-summary", summaryHeader)) @@ -347,11 +377,13 @@ func reader(ctx context.Context, dsn string, query string, postBody io.Reader, e body, _ := io.ReadAll(resp.Body) resp.Body.Close() err = errs.NewErrorWithCode(string(body), resp.StatusCode) + return } else if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) resp.Body.Close() err = NewErrWithDescr("clickhouse response status "+strconv.Itoa(resp.StatusCode), string(body)) + return } @@ -375,6 +407,7 @@ func do(ctx context.Context, dsn string, query string, postBody io.Reader, encod body, err := io.ReadAll(bodyReader) bodyReader.Close() + if err != nil { return nil, bodyReader.ChReadRows(), bodyReader.ChReadBytes(), err } @@ -384,18 +417,24 @@ func do(ctx context.Context, dsn string, query string, postBody io.Reader, encod func ReadUvarint(array []byte) (uint64, int, error) { var x uint64 + var s uint + l := len(array) - 1 + for i := 0; ; i++ { if i > l { return x, i + 1, ErrUvarintRead } + if array[i] < 0x80 { if i > 9 || i == 9 && array[i] > 1 { return x, i + 1, ErrUvarintOverflow } + return x | uint64(array[i])< 0 { v["from"] = []string{fromStr} } + if until > 0 { v["until"] = []string{untilStr} } @@ -69,19 +74,24 @@ func MetricsFind(client *http.Client, address string, format FormatType, query s } u.RawQuery = v.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), reader) if err != nil { return queryParams, nil, nil, err } + resp, err := client.Do(req) if err != nil { return queryParams, nil, nil, err } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) if err != nil { return queryParams, nil, nil, err } + if resp.StatusCode == http.StatusNotFound { return queryParams, nil, resp.Header, nil } else if resp.StatusCode != http.StatusOK { @@ -89,12 +99,14 @@ func MetricsFind(client *http.Client, address string, format FormatType, query s } var globs []FindMatch + switch format { case FormatProtobuf: var globsv2 protov2.GlobResponse if err = globsv2.Unmarshal(b); err != nil { return queryParams, nil, resp.Header, err } + for _, m := range globsv2.Matches { globs = append(globs, FindMatch{Path: m.Path, IsLeaf: m.IsLeaf}) } @@ -103,6 +115,7 @@ func MetricsFind(client *http.Client, address string, format FormatType, query s if err = globsv3.Unmarshal(b); err != nil { return queryParams, nil, resp.Header, err } + for _, m := range globsv3.Metrics { for _, v := range m.Matches { globs = append(globs, FindMatch{Path: v.Path, IsLeaf: v.IsLeaf}) @@ -111,10 +124,12 @@ func MetricsFind(client *http.Client, 
address string, format FormatType, query s case FormatPickle: reader := bytes.NewReader(b) decoder := pickle.NewDecoder(reader) + p, err := decoder.Decode() if err != nil { return queryParams, nil, resp.Header, err } + for _, v := range p.([]interface{}) { m := v.(map[interface{}]interface{}) path := m["metric_path"].(string) diff --git a/helper/client/render.go index f2ecda036..89c457f4b 100644 --- a/helper/client/render.go +++ b/helper/client/render.go @@ -41,6 +41,7 @@ type Metric struct { // Valid formats are carbonapi_v3_pb, protobuf, pickle, json func Render(client *http.Client, address string, format FormatType, targets []string, filteringFunctions []*protov3.FilteringFunction, maxDataPoints, from, until int64) (string, []Metric, http.Header, error) { rUrl := "/render/" + if format == FormatDefault { format = FormatPb_v3 } @@ -53,9 +54,11 @@ if from <= 0 { return queryParams, nil, nil, ErrInvalidFrom } + if until <= 0 { return queryParams, nil, nil, ErrInvalidUntil } + fromStr := strconv.FormatInt(from, 10) untilStr := strconv.FormatInt(until, 10) maxDataPointsStr := strconv.FormatInt(maxDataPoints, 10) @@ -66,7 +69,9 @@ } var v url.Values + var reader io.Reader + switch format { case FormatPb_v3: v = url.Values{ @@ -75,6 +80,7 @@ u.RawQuery = v.Encode() var body []byte + r := protov3.MultiFetchRequest{ Metrics: make([]protov3.FetchRequest, len(targets)), } @@ -93,6 +99,7 @@ if err != nil { return queryParams, nil, nil, err } + if body != nil { reader = bytes.NewReader(body) } @@ -113,16 +120,19 @@ if err != nil { return queryParams, nil, nil, err } + resp, err := client.Do(req) if err != nil { return queryParams, nil, nil, err } + defer resp.Body.Close() b, err := io.ReadAll(resp.Body) if err != nil { return queryParams, nil, nil, err } + if resp.StatusCode == http.StatusNotFound { return queryParams, nil, resp.Header, nil } else if resp.StatusCode != http.StatusOK { @@ -133,6 +143,7 @@ if err != nil { return queryParams, nil, resp.Header, err } + return queryParams, metrics, resp.Header, nil } @@ -142,13 +153,16 @@ func Decode(b []byte, format FormatType) ([]Metric, error) { metrics []Metric err error ) + switch format { case FormatPb_v3: var r protov3.MultiFetchResponse + err = r.Unmarshal(b) if err != nil { return nil, err } + metrics = make([]Metric, 0, len(r.Metrics)) for _, m := range r.Metrics { metrics = append(metrics, Metric{ @@ -168,11 +182,14 @@ } case FormatPb_v2, FormatProtobuf: var r protov2.MultiFetchResponse + err = r.Unmarshal(b) if err != nil { return nil, err } + metrics = make([]Metric, 0, len(r.Metrics)) + for _, m := range r.Metrics { for i, a := range m.IsAbsent { if a { @@ -191,14 +208,17 @@ case FormatPickle: reader := bytes.NewReader(b) decoder := pickle.NewDecoder(reader) + p, err := decoder.Decode() if err != nil { return nil, err } + for _, v := range p.([]interface{}) { m := v.(map[interface{}]interface{}) vals :=
m["values"].([]interface{}) values := make([]float64, len(vals)) + for i, vv := range vals { if _, isNaN := vv.(pickle.None); isNaN { values[i] = math.NaN() @@ -206,6 +226,7 @@ func Decode(b []byte, format FormatType) ([]Metric, error) { values[i] = vv.(float64) } } + metrics = append(metrics, Metric{ Name: m["name"].(string), PathExpression: m["pathExpression"].(string), @@ -217,13 +238,17 @@ func Decode(b []byte, format FormatType) ([]Metric, error) { } case FormatJSON: var r jsonResponse + err = json.Unmarshal(b, &r) if err != nil { return nil, err } + metrics = make([]Metric, 0, len(r.Metrics)) + for _, m := range r.Metrics { values := make([]float64, len(m.Values)) + for i, v := range m.Values { if v == nil { values[i] = math.NaN() @@ -231,6 +256,7 @@ func Decode(b []byte, format FormatType) ([]Metric, error) { values[i] = *v } } + metrics = append(metrics, Metric{ Name: m.Name, PathExpression: m.PathExpression, @@ -243,6 +269,7 @@ func Decode(b []byte, format FormatType) ([]Metric, error) { default: return nil, ErrUnsupportedFormat } + return metrics, nil } diff --git a/helper/client/tags.go b/helper/client/tags.go index c161aca85..1143d0f75 100644 --- a/helper/client/tags.go +++ b/helper/client/tags.go @@ -38,6 +38,7 @@ func TagsNames(client *http.Client, address string, format FormatType, query str } var tagPrefix string + var exprs []string if query != "" && query != "<>" { @@ -47,6 +48,7 @@ func TagsNames(client *http.Client, address string, format FormatType, query str } exprs = make([]string, 0, len(args)) + for i, arg := range args { delim := strings.IndexRune(arg, '=') if i == 0 && delim == -1 { @@ -60,20 +62,26 @@ func TagsNames(client *http.Client, address string, format FormatType, query str } v := make([]string, 0, 2+len(exprs)) + var rawQuery stringutils.Builder + rawQuery.Grow(128) v = append(v, "format="+format.String()) + rawQuery.WriteString("format=") rawQuery.WriteString(url.QueryEscape(format.String())) if tagPrefix != "" { v = append(v, "tagPrefix="+tagPrefix) + rawQuery.WriteString("&tagPrefix=") rawQuery.WriteString(url.QueryEscape(tagPrefix)) } + for _, expr := range exprs { v = append(v, "expr="+expr) + rawQuery.WriteString("&expr=") rawQuery.WriteString(url.QueryEscape(expr)) } @@ -81,18 +89,23 @@ func TagsNames(client *http.Client, address string, format FormatType, query str if from > 0 { fromStr := strconv.FormatInt(from, 10) v = append(v, "from="+fromStr) + rawQuery.WriteString("&from=") rawQuery.WriteString(fromStr) } + if until > 0 { untilStr := strconv.FormatInt(until, 10) v = append(v, "until="+untilStr) + rawQuery.WriteString("&until=") rawQuery.WriteString(untilStr) } + if limit > 0 { limitStr := strconv.FormatUint(limit, 10) v = append(v, "limit="+limitStr) + rawQuery.WriteString("&limit=") rawQuery.WriteString(limitStr) } @@ -105,6 +118,7 @@ func TagsNames(client *http.Client, address string, format FormatType, query str if err != nil { return queryParams, nil, nil, err } + resp, err := client.Do(req) if err != nil { return queryParams, nil, nil, err @@ -116,6 +130,7 @@ func TagsNames(client *http.Client, address string, format FormatType, query str if err != nil { return queryParams, nil, nil, err } + if resp.StatusCode == http.StatusNotFound { return u.RawQuery, nil, resp.Header, nil } else if resp.StatusCode != http.StatusOK { @@ -123,6 +138,7 @@ func TagsNames(client *http.Client, address string, format FormatType, query str } var values []string + err = json.Unmarshal(b, &values) if err != nil { return queryParams, nil, resp.Header, 
errors.New(err.Error() + ": " + string(b)) @@ -169,6 +185,7 @@ func TagsValues(client *http.Client, address string, format FormatType, query st vals := strings.Split(args[0], "=") tag = vals[0] + if len(vals) > 2 { return queryParams, nil, nil, errors.New("invalid tag: " + args[0]) } else if len(vals) == 2 { @@ -176,35 +193,45 @@ func TagsValues(client *http.Client, address string, format FormatType, query st } exprs = make([]string, 0, len(args)-1) + for i := 1; i < len(args); i++ { expr := args[i] if strings.IndexRune(expr, '=') <= 0 { return queryParams, nil, nil, errors.New("invalid expr: " + expr) } + exprs = append(exprs, expr) } } v := make([]string, 0, 2+len(exprs)) + var rawQuery stringutils.Builder + rawQuery.Grow(128) v = append(v, "format="+format.String()) + rawQuery.WriteString("format=") rawQuery.WriteString(url.QueryEscape(format.String())) if tag != "" { v = append(v, "tag="+tag) + rawQuery.WriteString("&tag=") rawQuery.WriteString(url.QueryEscape(tag)) } + if valuePrefix != "" { v = append(v, "valuePrefix="+valuePrefix) + rawQuery.WriteString("&valuePrefix=") rawQuery.WriteString(url.QueryEscape(valuePrefix)) } + for _, expr := range exprs { v = append(v, "expr="+expr) + rawQuery.WriteString("&expr=") rawQuery.WriteString(url.QueryEscape(expr)) } @@ -212,18 +239,23 @@ func TagsValues(client *http.Client, address string, format FormatType, query st if from > 0 { fromStr := strconv.FormatInt(from, 10) v = append(v, "from="+fromStr) + rawQuery.WriteString("&from=") rawQuery.WriteString(fromStr) } + if until > 0 { untilStr := strconv.FormatInt(until, 10) v = append(v, "until="+untilStr) + rawQuery.WriteString("&until=") rawQuery.WriteString(untilStr) } + if limit > 0 { limitStr := strconv.FormatUint(limit, 10) v = append(v, "limit="+limitStr) + rawQuery.WriteString("&limit=") rawQuery.WriteString(limitStr) } @@ -236,15 +268,19 @@ func TagsValues(client *http.Client, address string, format FormatType, query st if err != nil { return queryParams, nil, nil, err } + resp, err := client.Do(req) if err != nil { return u.RawQuery, nil, nil, err } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) if err != nil { return queryParams, nil, nil, err } + if resp.StatusCode == http.StatusNotFound { return queryParams, nil, resp.Header, nil } else if resp.StatusCode != http.StatusOK { @@ -252,6 +288,7 @@ func TagsValues(client *http.Client, address string, format FormatType, query st } var values []string + err = json.Unmarshal(b, &values) if err != nil { return queryParams, nil, resp.Header, errors.New(err.Error() + ": " + string(b)) diff --git a/helper/client/types.go b/helper/client/types.go index ba8010c00..03eaf4eed 100644 --- a/helper/client/types.go +++ b/helper/client/types.go @@ -41,6 +41,7 @@ func (a *FormatType) Set(value string) error { default: return fmt.Errorf("invalid format type %s", value) } + return nil } diff --git a/helper/date/date.go b/helper/date/date.go index 4ce4c6718..1522ec83b 100644 --- a/helper/date/date.go +++ b/helper/date/date.go @@ -61,6 +61,7 @@ func defaultDate(t time.Time) time.Time { func minLocalAndUTC(t time.Time) time.Time { tu := defaultDate(t.UTC()) td := defaultDate(t) + if tu.Unix() < td.Unix() { return tu } else { @@ -83,6 +84,7 @@ func MinTimeToDaysFormat(t time.Time) string { func maxLocalAndUTC(t time.Time) time.Time { tu := defaultDate(t.UTC()) td := defaultDate(t) + if tu.Unix() > td.Unix() { return tu } else { diff --git a/helper/date/date_test.go b/helper/date/date_test.go index 25b544ce1..99cba44e5 100644 --- 
a/helper/date/date_test.go +++ b/helper/date/date_test.go @@ -15,6 +15,7 @@ func isVerbose() bool { return true } } + return false } diff --git a/helper/datetime/datetime.go b/helper/datetime/datetime.go index 01fce3200..6a3f86713 100644 --- a/helper/datetime/datetime.go +++ b/helper/datetime/datetime.go @@ -76,6 +76,7 @@ func DateParamToEpoch(s string, tz *time.Location, now time.Time, truncate time. yy, mm, dd := now.Date() hh, min, _ := parseTime(s) // error ignored, we know it's valid dt := time.Date(yy, mm, dd, hh, min, 0, 0, tz) + return dt.Unix() } @@ -88,9 +89,11 @@ func DateParamToEpoch(s string, tz *time.Location, now time.Time, truncate time. s = strings.Replace(s, "_", " ", 1) // Go can't parse _ in date strings var ts, ds string + split := strings.Fields(s) var t time.Time + switch { case len(split) == 1: delim := strings.IndexAny(s, "+-") @@ -126,14 +129,17 @@ func DateParamToEpoch(s string, tz *time.Location, now time.Time, truncate time. ts = s[:delim+1] s = s[delim+1:] } + offset, err := parser.IntervalString(ts, 1) if err != nil { offset64, err := strconv.ParseInt(ts, 10, 32) if err != nil { return 0 } + offset = int32(offset64) } + t = t.Add(time.Duration(offset) * time.Second) } @@ -176,8 +182,8 @@ dateStringSwitch: var hour, minute int if ts != "" { - hour, minute, _ = parseTime(ts) // defaults to hour=0, minute=0 on error, which is midnight, which is fine for now + hour, minute, _ = parseTime(ts) } yy, mm, dd := t.Date() @@ -190,6 +196,7 @@ func Timezone(qtz string) (*time.Location, error) { if qtz == "" { qtz = "Local" } + return time.LoadLocation(qtz) } @@ -197,7 +204,9 @@ func TimestampTruncate(ts int64, truncate time.Duration) int64 { if ts == 0 || truncate == 0 { return ts } + tm := time.Unix(ts, 0).UTC() + return tm.Truncate(truncate).UTC().Unix() } @@ -205,5 +214,6 @@ func TimeTruncate(tm time.Time, truncate time.Duration) time.Time { if truncate == 0 { return tm } + return tm.Truncate(truncate) } diff --git a/helper/datetime/datetime_test.go b/helper/datetime/datetime_test.go index a16d62b69..1abc8c7eb 100644 --- a/helper/datetime/datetime_test.go +++ b/helper/datetime/datetime_test.go @@ -40,11 +40,13 @@ func TestDateParamToEpoch(t *testing.T) { want int64 wantTime string ) + if tt.output != "" { ts, err := time.ParseInLocation(shortForm, tt.output, timeZone) if err != nil { t.Fatalf("error parsing time: %q: %v", tt.output, err) } + want = ts.Unix() wantTime = ts.Format(time.RFC3339Nano) } @@ -95,11 +97,13 @@ func TestDateParamToEpochTruncate(t *testing.T) { want int64 wantTime string ) + if tt.output != "" { ts, err := time.ParseInLocation(shortForm, tt.output, timeZone) if err != nil { t.Fatalf("error parsing time: %q: %v", tt.output, err) } + want = ts.Unix() wantTime = ts.Format(time.RFC3339Nano) } diff --git a/helper/headers/headers.go b/helper/headers/headers.go index e748a0ebd..4fb53cb55 100644 --- a/helper/headers/headers.go +++ b/helper/headers/headers.go @@ -5,13 +5,16 @@ import "net/http" func GetHeaders(header *http.Header, keys []string) map[string]string { if len(keys) > 0 { headers := make(map[string]string) + for _, key := range keys { value := header.Get(key) if len(value) > 0 { headers[key] = value } } + return headers } + return nil } diff --git a/helper/pickle/pickle.go b/helper/pickle/pickle.go index c84ad61c2..8db99b47a 100644 --- a/helper/pickle/pickle.go +++ b/helper/pickle/pickle.go @@ -66,7 +66,9 @@ func (p *Writer) String(v string) { func (p *Writer) Uint32(v uint32) { p.w.Write([]byte{'J'}) + var b [4]byte + 
binary.LittleEndian.PutUint32(b[:], v) p.w.Write(b[:]) } diff --git a/helper/point/func.go b/helper/point/func.go index 2ad3b25a6..b17b412b6 100644 --- a/helper/point/func.go +++ b/helper/point/func.go @@ -16,6 +16,7 @@ func CleanUp(points []Point) []Point { squashed++ continue } + if squashed > 0 { points[i-squashed] = points[i] } @@ -27,6 +28,7 @@ func CleanUp(points []Point) []Point { // Uniq removes points with equal metric and time func Uniq(points []Point) []Point { l := len(points) + var i, n int // i - current position of iterator // n - position on first record with current key (metric + time) @@ -55,49 +57,62 @@ func FillNulls(points []Point, from, until, step uint32) (start, stop, count uin if start < from { start += step } + stop = until - (until % step) + step count = (stop - start) / step last := start - step currentPoint := 0 + var metricID uint32 if len(points) > 0 { metricID = points[0].MetricID } + getter = func() (float64, error) { if stop <= last { return 0, ErrTimeGreaterStop } + for i := currentPoint; i < len(points); i++ { point := points[i] if metricID != point.MetricID { return 0, fmt.Errorf("the point MetricID %d differs from other %d: %w", point.MetricID, metricID, ErrWrongMetricID) } + if point.Time < start { // Points begin before request's start currentPoint++ continue } + if point.Time <= last { // This is definitely an error. Possible reason is unsorted points return 0, fmt.Errorf("the time is less or equal to previous %d < %d: %w", point.Time, last, ErrPointsUnsorted) } + if stop <= point.Time { break } + if last+step < point.Time { // There are nulls in slice last += step return math.NaN(), nil } + last = point.Time currentPoint = i + 1 + return point.Value, nil } + if last+step < stop { last += step return math.NaN(), nil } + return 0, ErrTimeGreaterStop } + return } diff --git a/helper/point/func_test.go b/helper/point/func_test.go index 71aa9acf9..3f6f54acb 100644 --- a/helper/point/func_test.go +++ b/helper/point/func_test.go @@ -110,6 +110,7 @@ func TestFillNulls(t *testing.T) { until uint32 step uint32 } + type expected struct { values []float64 start uint32 @@ -117,6 +118,7 @@ func TestFillNulls(t *testing.T) { count uint32 err error } + tests := []struct { name string in @@ -186,36 +188,44 @@ func TestFillNulls(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { result := expected{} + var ( getter GetValueOrNaN value float64 ) + result.start, result.stop, result.count, getter = FillNulls(test.points, test.from, test.until, test.step) result.values = make([]float64, 0, result.count) + for { value, result.err = getter() if result.err != nil { break } + result.values = append(result.values, value) } if !errors.Is(result.err, ErrTimeGreaterStop) { assert.ErrorIs(t, result.err, test.expected.err) } + result.err = nil test.expected.err = nil // Comparing values requires work around NaNs assert.Equal(t, len(result.values), len(test.expected.values), "the length of expected and got values are different") + for i := range result.values { if math.IsNaN(test.expected.values[i]) { assert.True(t, math.IsNaN(result.values[i])) continue } + assert.Equal(t, test.expected.values[i], result.values[i]) } + result.values = []float64{} test.expected.values = []float64{} diff --git a/helper/point/points.go b/helper/point/points.go index cb6d95955..412952ec8 100644 --- a/helper/point/points.go +++ b/helper/point/points.go @@ -45,6 +45,7 @@ func (pp *Points) MetricID(metricName string) uint32 { id = uint32(len(pp.metrics)) 
pp.idMap[metricName] = id } + return id } @@ -60,6 +61,7 @@ func (pp *Points) MetricName(metricID uint32) string { if i < 1 || len(pp.metrics) < i { return "" } + return pp.metrics[i-1] } @@ -79,6 +81,7 @@ func (pp *Points) GetStep(id uint32) (uint32, error) { if i < 1 || len(pp.steps) < i { return 0, fmt.Errorf("wrong id %d for given steps %d: %w", i, len(pp.steps), ErrWrongMetricID) } + return pp.steps[i-1], nil } @@ -89,6 +92,7 @@ func (pp *Points) SetSteps(steps map[uint32][]string) { } pp.steps = make([]uint32, len(pp.metrics)) + for step, mm := range steps { for _, m := range mm { if id, ok := pp.idMap[m]; ok { @@ -104,6 +108,7 @@ func (pp *Points) GetAggregation(id uint32) (string, error) { if i < 1 || len(pp.aggs) < i { return "", fmt.Errorf("wrong id %d for given functions %d: %w", i, len(pp.aggs), ErrWrongMetricID) } + return *pp.aggs[i-1], nil } @@ -111,9 +116,11 @@ func (pp *Points) GetAggregation(id uint32) (string, error) { func (pp *Points) SetAggregations(functions map[string][]string) { pp.aggs = make([]*string, len(pp.metrics)) pp.uniqAgg = make([]string, 0, len(functions)) + for f := range functions { pp.uniqAgg = append(pp.uniqAgg, f) } + for i, f := range pp.uniqAgg { for _, m := range functions[f] { if id, ok := pp.idMap[m]; ok { @@ -153,6 +160,7 @@ func (pp *Points) Uniq() { // It should be called only on sorted and cleaned Points. func (pp *Points) GroupByMetric() NextMetric { var i, n int + l := pp.Len() // i - current position of iterator // n - position of the first record with current metric @@ -160,15 +168,19 @@ func (pp *Points) GroupByMetric() NextMetric { if n == l { return []Point{} } + for i = n; i < l; i++ { if pp.list[i].MetricID != pp.list[n].MetricID { points := pp.list[n:i] n = i + return points } } + points := pp.list[n:i] n = i + return points } } diff --git a/helper/rollup/aggr.go b/helper/rollup/aggr.go index aade03ddc..0727f27c4 100644 --- a/helper/rollup/aggr.go +++ b/helper/rollup/aggr.go @@ -22,6 +22,7 @@ func (ag *Aggr) Name() string { if ag == nil { return "" } + return ag.name } @@ -29,6 +30,7 @@ func (ag *Aggr) String() string { if ag == nil { return "" } + return ag.name } @@ -36,6 +38,7 @@ func (ag *Aggr) Do(points []point.Point) (r float64) { if ag == nil || ag.f == nil { return 0 } + return ag.f(points) } @@ -43,6 +46,7 @@ func AggrSum(points []point.Point) (r float64) { for _, p := range points { r += p.Value } + return } @@ -50,11 +54,13 @@ func AggrMax(points []point.Point) (r float64) { if len(points) > 0 { r = points[0].Value } + for _, p := range points { if p.Value > r { r = p.Value } } + return } @@ -62,11 +68,13 @@ func AggrMin(points []point.Point) (r float64) { if len(points) > 0 { r = points[0].Value } + for _, p := range points { if p.Value < r { r = p.Value } } + return } @@ -74,7 +82,9 @@ func AggrAvg(points []point.Point) (r float64) { if len(points) == 0 { return } + r = AggrSum(points) / float64(len(points)) + return } @@ -82,6 +92,7 @@ func AggrAny(points []point.Point) (r float64) { if len(points) > 0 { r = points[0].Value } + return } @@ -89,5 +100,6 @@ func AggrAnyLast(points []point.Point) (r float64) { if len(points) > 0 { r = points[len(points)-1].Value } + return } diff --git a/helper/rollup/compact.go b/helper/rollup/compact.go index 9478b46f5..83bf0fcf1 100644 --- a/helper/rollup/compact.go +++ b/helper/rollup/compact.go @@ -20,14 +20,17 @@ func parseCompact(body string) (*Rules, error) { if strings.TrimSpace(line) == "" { continue } + p2 := strings.LastIndexByte(line, ';') if p2 < 0 { return nil, 
fmt.Errorf("can't parse line: %#v", line) } + p1 := strings.LastIndexByte(line[:p2], ';') if p1 < 0 { return nil, fmt.Errorf("can't parse line: %#v", line) } + regexp := strings.TrimSpace(line[:p1]) function := strings.TrimSpace(line[p1+1 : p2]) retention := make([]Retention, 0) diff --git a/helper/rollup/remote.go b/helper/rollup/remote.go index 0fe0488f2..1826f7ac0 100644 --- a/helper/rollup/remote.go +++ b/helper/rollup/remote.go @@ -28,6 +28,7 @@ type rollupRulesResponse struct { func parseJson(body []byte) (*Rules, error) { resp := &rollupRulesResponse{} + err := json.Unmarshal(body, resp) if err != nil { return nil, err @@ -55,6 +56,7 @@ func parseJson(body []byte) (*Rules, error) { if len(r.Pattern) == 0 { return nil } + return &r.Pattern[len(r.Pattern)-1] } @@ -67,11 +69,13 @@ func parseJson(body []byte) (*Rules, error) { if d.Function != "" { defaultFunction = d.Function } + if d.Age != "" && d.Precision != "" && d.Precision != "0" { rt, err := makeRetention(&d) if err != nil { return nil, err } + defaultRetention = append(defaultRetention, rt) } } else { @@ -83,11 +87,13 @@ func parseJson(body []byte) (*Rules, error) { Function: d.Function, }) } + if d.Age != "" && d.Precision != "" && d.Precision != "0" { rt, err := makeRetention(&d) if err != nil { return nil, err } + last().Retention = append(last().Retention, rt) } } @@ -108,6 +114,7 @@ var timeoutRulesLoad = 10 * time.Second func RemoteLoad(addr string, tlsConf *tls.Config, table string) (*Rules, error) { var db string + arr := strings.SplitN(table, ".", 2) if len(arr) == 1 { db = "default" @@ -182,5 +189,6 @@ func RemoteLoad(addr string, tlsConf *tls.Config, table string) (*Rules, error) if r != nil { r.Updated = time.Now().Unix() } + return r, err } diff --git a/helper/rollup/rollup.go b/helper/rollup/rollup.go index fcdb1c616..9a1337a00 100644 --- a/helper/rollup/rollup.go +++ b/helper/rollup/rollup.go @@ -34,6 +34,7 @@ func NewAuto(addr string, tlsConfig *tls.Config, table string, interval time.Dur } go r.updateWorker() + return r, nil } @@ -62,6 +63,7 @@ func NewXMLFile(filename string, defaultPrecision uint32, defaultFunction string func NewDefault(defaultPrecision uint32, defaultFunction string) (*Rollup, error) { rules := &Rules{Pattern: []Pattern{}} + rules, err := rules.prepare(defaultPrecision, defaultFunction) if err != nil { return nil, err @@ -78,6 +80,7 @@ func (r *Rollup) Rules() *Rules { r.mu.RLock() rules := r.rules r.mu.RUnlock() + return rules } @@ -97,6 +100,7 @@ func (r *Rollup) update() error { r.mu.Lock() r.rules = rules r.mu.Unlock() + return nil } diff --git a/helper/rollup/rules.go b/helper/rollup/rules.go index 3704f1fa3..fe14c80bc 100644 --- a/helper/rollup/rules.go +++ b/helper/rollup/rules.go @@ -48,6 +48,7 @@ func (r *RuleType) Set(value string) error { default: return fmt.Errorf("invalid rule type %s", value) } + return nil } @@ -56,6 +57,7 @@ func (r *RuleType) UnmarshalJSON(data []byte) error { if strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) { s = s[1 : len(s)-1] } + return r.Set(s) } @@ -77,6 +79,7 @@ func splitTags(tagsStr string) (tags []string) { tags = append(tags, v) } } + return } @@ -109,7 +112,6 @@ func buildTaggedRegex(regexpStr string) string { // * nam.* ; tag1=val1 ; tag2=val2 // * produce // * nam.*\?(.*&)?tag1=val1&(.*&)?tag2=val2(&.*)?$ - tags := splitTags(regexpStr) if strings.Contains(tags[0], "=") { @@ -155,10 +157,12 @@ func NewMockRules(pattern []Pattern, defaultPrecision uint32, defaultFunction st if err != nil { return nil, err } + rules, err = 
rules.prepare(defaultPrecision, defaultFunction) if err != nil { return nil, err } + return rules, nil } @@ -208,6 +212,7 @@ func (r *Rules) compile() (*Rules, error) { if r.Pattern == nil { r.Pattern = make([]Pattern, 0) } + r.PatternPlain = make([]Pattern, 0) r.PatternTagged = make([]Pattern, 0) @@ -216,6 +221,7 @@ func (r *Rules) compile() (*Rules, error) { if err := r.Pattern[i].compile(); err != nil { return r, err } + if !r.Separated && r.Pattern[i].RuleType != RuleAll { r.Separated = true } @@ -243,6 +249,7 @@ func (r *Rules) prepare(defaultPrecision uint32, defaultFunction string) (*Rules if defaultFunction != "" && defaultAggr == nil { return r, fmt.Errorf("unknown function %#v", defaultFunction) } + return r.withDefault(defaultPrecision, defaultAggr).withSuperDefault().setUpdated(), nil } @@ -261,6 +268,7 @@ func (r *Rules) withDefault(defaultPrecision uint32, defaultFunction *Aggr) *Rul Retention: retention, }) n, _ := (&Rules{Pattern: patterns, Updated: r.Updated}).compile() + return n } @@ -279,8 +287,10 @@ func (r *Rules) Lookup(metric string, age uint32, verbose bool) (precision uint3 if strings.Contains(metric, "?") { return lookup(metric, age, r.PatternTagged, verbose) } + return lookup(metric, age, r.PatternPlain, verbose) } + return lookup(metric, age, r.Pattern, verbose) } @@ -303,6 +313,7 @@ func lookup(metric string, age uint32, patterns []Pattern, verbose bool) (precis if verbose { aggrPattern = &patterns[n] } + ag = p.aggr } @@ -312,18 +323,23 @@ func lookup(metric string, age uint32, patterns []Pattern, verbose bool) (precis if i > 0 { precision = p.Retention[i-1].Precision precisionFound = true + if verbose { retentionPattern = &patterns[n] } } + break } + if i == len(p.Retention)-1 { precision = r.Precision precisionFound = true + if verbose { retentionPattern = &patterns[n] } + break } } @@ -353,6 +369,7 @@ func (r *Rules) LookupBytes(metric []byte, age uint32, verbose bool) (precision func doMetricPrecision(points []point.Point, precision uint32, aggr *Aggr) []point.Point { l := len(points) + var i, n int // i - current position of iterator // n - position of the first record with time rounded to precision @@ -377,9 +394,11 @@ func doMetricPrecision(points []point.Point, precision uint32, aggr *Aggr) []poi if i > n+1 { points[n].Value = aggr.Do(points[n:i]) } + n = i } } + if i > n+1 { points[n].Value = aggr.Do(points[n:i]) } @@ -406,9 +425,11 @@ func (r *Rules) RollupMetricAge(metricName string, age uint32, points []point.Po func (r *Rules) RollupMetric(metricName string, from uint32, points []point.Point) ([]point.Point, uint32, error) { now := uint32(timeNow().Unix()) age := uint32(0) + if now > from { age = now - from } + return r.RollupMetricAge(metricName, age, points) } @@ -421,6 +442,7 @@ func (r *Rules) RollupPoints(pp *point.Points, from int64, step int64) error { now := timeNow().Unix() age := int64(0) + if now > from { age = now - from } @@ -432,20 +454,25 @@ func (r *Rules) RollupPoints(pp *point.Points, from int64, step int64) error { if l == 0 { return nil } + oldPoints := pp.List() newPoints := make([]point.Point, 0, pp.Len()) rollup := func(p []point.Point) ([]point.Point, error) { metricName := pp.MetricName(p[0].MetricID) + var err error + if step == 0 { p, _, err = r.RollupMetricAge(metricName, uint32(age), p) } else { _, agg, _, _ := r.Lookup(metricName, uint32(from), false) p = doMetricPrecision(p, uint32(step), agg) } + for i := range p { p[i].MetricID = p[0].MetricID } + return p, err } @@ -455,8 +482,10 @@ func (r *Rules) 
RollupPoints(pp *point.Points, from int64, step int64) error { if err != nil { return err } + newPoints = append(newPoints, points...) n = i + continue } } @@ -465,7 +494,9 @@ func (r *Rules) RollupPoints(pp *point.Points, from int64, step int64) error { if err != nil { return err } + newPoints = append(newPoints, points...) pp.ReplaceList(newPoints) + return nil } diff --git a/helper/rollup/rules_test.go index e3dfdeef7..53e89fd3e 100644 --- a/helper/rollup/rules_test.go +++ b/helper/rollup/rules_test.go @@ -83,6 +83,7 @@ func Test_buildTaggedRegex(t *testing.T) { if tt.match != "" && !re.Match([]byte(tt.match)) { t.Errorf("match(%q, %q) must be true", tt.tagsStr, tt.match) } + if tt.nomatch != "" && re.Match([]byte(tt.nomatch)) { t.Errorf("match(%q, %q) must be false", tt.tagsStr, tt.nomatch) } @@ -706,6 +707,7 @@ func BenchmarkLookupSum(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("test.sum", 1, false) _ = precision @@ -718,6 +720,7 @@ func BenchmarkLookupSumSeparated(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("test.sum", 1, false) _ = precision @@ -730,6 +733,7 @@ func BenchmarkLookupSumTagged(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("sum?env=test&tag=Fake5", 1, false) _ = precision @@ -742,6 +746,7 @@ func BenchmarkLookupSumTaggedSeparated(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("sum?env=test&tag=Fake5", 1, false) _ = precision @@ -754,6 +759,7 @@ func BenchmarkLookupMax(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("test.max", 1, false) _ = precision @@ -766,6 +772,7 @@ func BenchmarkLookupMaxSeparated(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("test.max", 1, false) _ = precision @@ -778,6 +785,7 @@ func BenchmarkLookupMaxTagged(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("max?env=test&tag=Fake5", 1, false) _ = precision @@ -790,6 +798,7 @@ func BenchmarkLookupMaxTaggedSeparated(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("max?env=test&tag=Fake5", 1, false) _ = precision @@ -802,6 +811,7 @@ func BenchmarkLookupDefault(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("test.p95", 1, false) _ = precision @@ -814,6 +824,7 @@ func BenchmarkLookupDefaultSeparated(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("test.p95", 1, false) _ = precision @@ -826,6 +837,7 @@ func BenchmarkLookupDefaultTagged(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("p95?env=test&tag=Fake5", 1, false) _ = precision @@ -838,6 +850,7 @@ func BenchmarkLookupDefaultTaggedSeparated(b *testing.B) { require.NoError(b, err) b.ResetTimer() + for i := 0; i < b.N; i++ { precision, ag, _, _ := r.Lookup("p95?env=test&tag=Fake5", 1, false) _ = precision diff --git a/helper/rollup/xml.go index 5a202dd84..c0dfb83f9 100644 --- a/helper/rollup/xml.go +++ b/helper/rollup/xml.go @@ -78,6 +78,7 @@ func (p
*PatternXML) pattern() Pattern { func parseXML(body []byte) (*Rules, error) { r := &RulesXML{} + err := xml.Unmarshal(body, r) if err != nil { return nil, err @@ -86,10 +87,12 @@ func parseXML(body []byte) (*Rules, error) { // Maybe we've got Clickhouse's graphite.xml? if r.Default == nil && r.Pattern == nil { y := &ClickhouseRollupXML{} + err = xml.Unmarshal(body, y) if err != nil { return nil, err } + r = &y.Rules } diff --git a/helper/tests/clickhouse/server.go b/helper/tests/clickhouse/server.go index 2792ff459..567154ee5 100644 --- a/helper/tests/clickhouse/server.go +++ b/helper/tests/clickhouse/server.go @@ -40,6 +40,7 @@ func (h *TestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { for k, v := range resp.Headers { w.Header().Set(k, v) } + if resp.Code == 0 || resp.Code == http.StatusOK { w.Write(resp.Body) } else { diff --git a/helper/tests/compare/compare.go b/helper/tests/compare/compare.go index 4e37a312d..c24560156 100644 --- a/helper/tests/compare/compare.go +++ b/helper/tests/compare/compare.go @@ -14,6 +14,7 @@ func NearlyEqualSlice(a, b []float64) bool { if math.IsNaN(a[i]) && math.IsNaN(b[i]) { continue } + if math.IsNaN(a[i]) || math.IsNaN(b[i]) { // unexpected NaN return false @@ -31,10 +32,12 @@ func NearlyEqual(a, b float64) bool { if math.IsNaN(a) && math.IsNaN(b) { return true } + if math.IsNaN(a) || math.IsNaN(b) { // unexpected NaN return false } + if math.Abs(a-b) > eps { return false } @@ -46,5 +49,6 @@ func Max(a, b int) int { if a >= b { return a } + return b } diff --git a/helper/tests/compare/expand/expand.go b/helper/tests/compare/expand/expand.go index ff2734829..21de90d67 100644 --- a/helper/tests/compare/expand/expand.go +++ b/helper/tests/compare/expand/expand.go @@ -11,9 +11,11 @@ func ExpandTimestamp(fs *token.FileSet, s string, replace map[string]string) (in if s == "" { return 0, nil } + for k, v := range replace { s = strings.ReplaceAll(s, k, v) } + if tv, err := types.Eval(fs, nil, token.NoPos, s); err == nil { return strconv.ParseInt(tv.Value.String(), 10, 32) } else { diff --git a/helper/utils/utils_test.go b/helper/utils/utils_test.go index c19721037..f5a78cb62 100644 --- a/helper/utils/utils_test.go +++ b/helper/utils/utils_test.go @@ -8,7 +8,6 @@ import ( func TestTimestampTruncate(t *testing.T) { // reverse sorted - tests := []struct { ts int64 duration time.Duration diff --git a/index/handler.go b/index/handler.go index 8c8eab528..027774164 100644 --- a/index/handler.go +++ b/index/handler.go @@ -36,8 +36,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusBadRequest http.Error(w, err.Error(), status) + return } + i.WriteJSON(w) i.Close() } diff --git a/index/index.go b/index/index.go index 281a187e6..5ab89c605 100644 --- a/index/index.go +++ b/index/index.go @@ -22,6 +22,7 @@ type Index struct { func New(config *config.Config, ctx context.Context) (*Index, error) { var reader io.ReadCloser + var err error opts := clickhouse.Options{ @@ -73,11 +74,13 @@ func (i *Index) WriteJSON(w http.ResponseWriter) error { s := bufio.NewScanner(i.rowsReader) idx := 0 + for s.Scan() { b := s.Bytes() if len(b) == 0 { continue } + if b[len(b)-1] == '.' 
{ continue } @@ -86,6 +89,7 @@ func (i *Index) WriteJSON(w http.ResponseWriter) error { if err != nil { return err } + jsonParts := [][]byte{ nil, json_b, @@ -93,18 +97,22 @@ func (i *Index) WriteJSON(w http.ResponseWriter) error { if idx != 0 { jsonParts[0] = []byte{','} } + jsonified := bytes.Join(jsonParts, []byte("")) _, err = w.Write(jsonified) if err != nil { return err } + idx++ } + if err := s.Err(); err != nil { return err } _, err = w.Write([]byte("]")) + return err } diff --git a/index/index_test.go b/index/index_test.go index 1e37380eb..f97f15532 100644 --- a/index/index_test.go +++ b/index/index_test.go @@ -17,13 +17,16 @@ func TestWriteJSONEmptyRows(t *testing.T) { "testing.leaf.node", "", } + metrics, err := writeRows(rows) if err != nil { t.Fatalf("Error during transform or unmarshal: %s", err) } + if len(metrics) != 2 { t.Fatalf("Wrong metrics slice length = %d: %s", len(metrics), metrics) } + if metrics[0] != "testing.leaf" || metrics[1] != "testing.leaf.node" { t.Fatalf("Wrong metrics contents: %s", metrics) } @@ -36,13 +39,16 @@ func TestWriteJSONNonleafRows(t *testing.T) { "testing.leaf.node", "testing.\"broken\".node", } + metrics, err := writeRows(rows) if err != nil { t.Fatalf("Error during transform or unmarshal: %s", err) } + if len(metrics) != 3 { t.Fatalf("Wrong metrics slice length = %d: %s", len(metrics), metrics) } + if metrics[0] != "testing.leaf" || metrics[1] != "testing.leaf.node" || metrics[2] != "testing.\"broken\".node" { t.Fatalf("Wrong metrics contents: %s", metrics) } @@ -50,10 +56,12 @@ func TestWriteJSONNonleafRows(t *testing.T) { func TestWriteJSONEmptyIndex(t *testing.T) { rows := []string{} + metrics, err := writeRows(rows) if err != nil { t.Fatalf("Error during transform or unmarshal: %s", err) } + if len(metrics) != 0 { t.Fatalf("Wrong metrics slice length = %d: %s", len(metrics), metrics) } @@ -61,6 +69,7 @@ func TestWriteJSONEmptyIndex(t *testing.T) { func indexForBytes(b []byte) *Index { buffer := bytes.NewBuffer(b) + return &Index{ config: nil, rowsReader: io.NopCloser(buffer), @@ -71,12 +80,14 @@ func writeRows(rows []string) ([]string, error) { rowsBytes := []byte(strings.Join(rows, string('\n'))) index := indexForBytes(rowsBytes) mockResponse := httptest.NewRecorder() + err := index.WriteJSON(mockResponse) if err != nil { return nil, err } var metrics []string + err = json.Unmarshal(mockResponse.Body.Bytes(), &metrics) if err != nil { return nil, err diff --git a/limiter/alimiter.go b/limiter/alimiter.go index dc2510081..67e4d92a6 100644 --- a/limiter/alimiter.go +++ b/limiter/alimiter.go @@ -18,6 +18,7 @@ func getWeighted(n, max int) int { if n <= 0 { return 0 } + loadAvg := load_avg.Load() if loadAvg < 0.6 { return 0 @@ -28,6 +29,7 @@ func getWeighted(n, max int) int { if max <= 1 { return 1 } + return max - 1 } @@ -49,9 +51,11 @@ func NewALimiter(capacity, concurrent, n int, enableMetrics bool, scope, sub str if capacity <= 0 && concurrent <= 0 { return NoopLimiter{} } + if n >= concurrent { n = concurrent - 1 } + if n <= 0 { return NewWLimiter(capacity, concurrent, enableMetrics, scope, sub) } @@ -69,8 +73,10 @@ func NewALimiter(capacity, concurrent, n int, enableMetrics bool, scope, sub str func (sl *ALimiter) balance() int { var last int + for { start := time.Now() + n := getWeighted(sl.n, sl.concurrent) if n > last { for i := 0; i < n-last; i++ { @@ -78,13 +84,16 @@ func (sl *ALimiter) balance() int { break } } + last = n } else if n < last { for i := 0; i < last-n; i++ { sl.concurrentLimiter.leave(ctxMain, "balance") } + last = 
n } + delay := time.Since(start) if delay < checkDelay { time.Sleep(checkDelay - delay) @@ -103,16 +112,21 @@ func (sl *ALimiter) Enter(ctx context.Context, s string) (err error) { return } } + if sl.concurrentLimiter.cap > 0 { if sl.concurrentLimiter.enter(ctx, s) != nil { if sl.limiter.cap > 0 { sl.limiter.leave(ctx, s) } + sl.m.WaitErrors.Add(1) + err = ErrTimeout } } + sl.m.Requests.Add(1) + return } @@ -124,16 +138,21 @@ func (sl *ALimiter) TryEnter(ctx context.Context, s string) (err error) { return } } + if sl.concurrentLimiter.cap > 0 { if sl.concurrentLimiter.tryEnter(ctx, s) != nil { if sl.limiter.cap > 0 { sl.limiter.leave(ctx, s) } + sl.m.WaitErrors.Add(1) + err = ErrTimeout } } + sl.m.Requests.Add(1) + return } @@ -142,6 +161,7 @@ func (sl *ALimiter) Leave(ctx context.Context, s string) { if sl.limiter.cap > 0 { sl.limiter.leave(ctx, s) } + sl.concurrentLimiter.leave(ctx, s) } diff --git a/limiter/alimiter_test.go b/limiter/alimiter_test.go index 74d5dede4..0dbe67fcd 100644 --- a/limiter/alimiter_test.go +++ b/limiter/alimiter_test.go @@ -39,6 +39,7 @@ func Test_getWeighted(t *testing.T) { for n, tt := range tests { t.Run(strconv.Itoa(n), func(t *testing.T) { load_avg.Store(tt.loadAvg) + if got := getWeighted(tt.n, tt.max); got != tt.want { t.Errorf("load avg = %f getWeighted(%d, %d) = %v, want %v", tt.loadAvg, tt.n, tt.max, got, tt.want) } @@ -57,6 +58,7 @@ func TestNewALimiter(t *testing.T) { load_avg.Store(0) var i int + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) for i = 0; i < concurrent; i++ { @@ -73,11 +75,13 @@ func TestNewALimiter(t *testing.T) { // load_avg 0.5 load_avg.Store(0.5) + k := getWeighted(n, concurrent) require.Equal(t, 0, k) // load_avg 0.6 load_avg.Store(0.6) + k = getWeighted(n, concurrent) require.Equal(t, n*6/10, k) @@ -98,6 +102,7 @@ func TestNewALimiter(t *testing.T) { // // load_avg 1 load_avg.Store(1) + k = getWeighted(n, concurrent) require.Equal(t, n, k) @@ -143,6 +148,7 @@ func Benchmark_Limiter_Parallel(b *testing.B) { } load_avg.Store(0.5) + for _, tt := range tests { b.Run(fmt.Sprintf("L%d_C%d_N%d_CONCURRENCY%d", tt.l, tt.c, tt.n, tt.concurrencyLevel), func(b *testing.B) { var ( @@ -153,13 +159,16 @@ func Benchmark_Limiter_Parallel(b *testing.B) { wgStart := sync.WaitGroup{} wg := sync.WaitGroup{} + wgStart.Add(tt.concurrencyLevel) ctx := context.Background() b.ResetTimer() + for i := 0; i < tt.concurrencyLevel; i++ { wg.Add(1) + go func() { wgStart.Done() wgStart.Wait() diff --git a/limiter/limiter.go b/limiter/limiter.go index 34b8e042c..7dd632faa 100644 --- a/limiter/limiter.go +++ b/limiter/limiter.go @@ -41,7 +41,9 @@ func (sl *Limiter) Enter(ctx context.Context, s string) (err error) { if err = sl.limiter.enter(ctx, s); err != nil { sl.metrics.WaitErrors.Add(1) } + sl.metrics.Requests.Add(1) + return } @@ -50,7 +52,9 @@ func (sl *Limiter) TryEnter(ctx context.Context, s string) (err error) { if err = sl.limiter.tryEnter(ctx, s); err != nil { sl.metrics.WaitErrors.Add(1) } + sl.metrics.Requests.Add(1) + return } diff --git a/limiter/wlimiter.go b/limiter/wlimiter.go index 45e63fa25..4977e967c 100644 --- a/limiter/wlimiter.go +++ b/limiter/wlimiter.go @@ -18,6 +18,7 @@ func NewWLimiter(capacity, concurrent int, enableMetrics bool, scope, sub string if capacity <= 0 && concurrent <= 0 { return NoopLimiter{} } + if concurrent <= 0 { return NewLimiter(capacity, enableMetrics, scope, sub) } @@ -29,10 +30,12 @@ func NewWLimiter(capacity, concurrent int, enableMetrics bool, scope, sub string w.limiter.ch = 
make(chan struct{}, capacity) w.limiter.cap = capacity } + if concurrent > 0 { w.concurrentLimiter.ch = make(chan struct{}, concurrent) w.concurrentLimiter.cap = concurrent } + return w } @@ -47,16 +50,21 @@ func (sl *WLimiter) Enter(ctx context.Context, s string) (err error) { return } } + if sl.concurrentLimiter.cap > 0 { if sl.concurrentLimiter.enter(ctx, s) != nil { if sl.limiter.cap > 0 { sl.limiter.leave(ctx, s) } + sl.metrics.WaitErrors.Add(1) + err = ErrTimeout } } + sl.metrics.Requests.Add(1) + return } @@ -68,16 +76,21 @@ func (sl *WLimiter) TryEnter(ctx context.Context, s string) (err error) { return } } + if sl.concurrentLimiter.cap > 0 { if sl.concurrentLimiter.tryEnter(ctx, s) != nil { if sl.limiter.cap > 0 { sl.limiter.leave(ctx, s) } + sl.metrics.WaitErrors.Add(1) + err = ErrTimeout } } + sl.metrics.Requests.Add(1) + return } @@ -86,6 +99,7 @@ func (sl *WLimiter) Leave(ctx context.Context, s string) { if sl.limiter.cap > 0 { sl.limiter.leave(ctx, s) } + sl.concurrentLimiter.leave(ctx, s) } diff --git a/load_avg/load_avg.go b/load_avg/load_avg.go index 9e6945a36..8c9dfef59 100644 --- a/load_avg/load_avg.go +++ b/load_avg/load_avg.go @@ -24,14 +24,18 @@ func Weight(weight int, degraged, degragedLoadAvg, normalizedLoadAvg float64) in if normalizedLoadAvg > degragedLoadAvg { normalizedLoadAvg *= degraged } + normalizedLoadAvg = math.Round(10*normalizedLoadAvg) / 10 if normalizedLoadAvg == 0 { return 2 * int64(weight) } + normalizedLoadAvg = math.Log10(normalizedLoadAvg) + w := int64(weight) - int64(float64(weight)*normalizedLoadAvg) if w <= 0 { return 1 } + return w } diff --git a/load_avg/load_avg_test.go b/load_avg/load_avg_test.go index e632f3f7b..48eff1f70 100644 --- a/load_avg/load_avg_test.go +++ b/load_avg/load_avg_test.go @@ -76,9 +76,11 @@ func TestWeight(t *testing.T) { if tt.degraged == 0 { tt.degraged = 4 // default } + if tt.degragedLoadAvg == 0 { tt.degragedLoadAvg = 1.0 // default } + t.Run(fmt.Sprintf("%d#%f#%f#%f", tt.weight, tt.degraged, tt.degragedLoadAvg, tt.loadAvg), func(t *testing.T) { if got := Weight(tt.weight, tt.degraged, tt.degragedLoadAvg, tt.loadAvg); got != tt.want { t.Errorf("Weight(%d, %f, %f, %f) = %v, want %v", tt.weight, tt.degraged, tt.degragedLoadAvg, tt.loadAvg, got, tt.want) diff --git a/metrics/metrics.go b/metrics/metrics.go index 42334278e..734a12fc6 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -80,6 +80,7 @@ func NewWaitMetric(enable bool, scope, sub string) WaitMetric { return w } + return WaitMetric{ WaitErrors: metrics.NilCounter{}, Requests: metrics.NilCounter{}, @@ -183,6 +184,7 @@ func initFindMetrics(scope string, c *Config, waitQueue bool) *FindMetrics { requestMetric.RequestsH = metrics.NewVSumHistogram(c.BucketsWidth, c.BucketsLabels).SetNameTotal("") metrics.Register(scope+".all.requests", requestMetric.RequestsH) metrics.Register(scope+".all.errors", requestMetric.Errors) + if c.ExtendedStat { metrics.Register(scope+".all.requests_status_code.200", requestMetric.Requests200) metrics.Register(scope+".all.requests_status_code.400", requestMetric.Requests400) @@ -194,10 +196,12 @@ func initFindMetrics(scope string, c *Config, waitQueue bool) *FindMetrics { metrics.Register(scope+".all.requests_status_code.504", requestMetric.Requests504) metrics.Register(scope+".all.requests_status_code.5xx", requestMetric.Requests5xx) } + if len(c.FindRangeS) > 0 { requestMetric.RangeS = c.FindRangeS requestMetric.RangeNames = c.FindRangeNames requestMetric.RangeMetrics = make([]ReqMetric, len(c.FindRangeS)) + for i := range 
c.FindRangeS { requestMetric.RangeMetrics[i].RequestsH = metrics.NewVSumHistogram(c.BucketsWidth, c.BucketsLabels).SetNameTotal("") requestMetric.RangeMetrics[i].Errors = metrics.NewCounter() @@ -205,6 +209,7 @@ func initFindMetrics(scope string, c *Config, waitQueue bool) *FindMetrics { requestMetric.RangeMetrics[i].PointsCountName = scope + "." + requestMetric.RangeNames[i] + ".points" metrics.Register(scope+"."+c.FindRangeNames[i]+".requests", requestMetric.RangeMetrics[i].RequestsH) metrics.Register(scope+"."+c.FindRangeNames[i]+".errors", requestMetric.RangeMetrics[i].Errors) + if c.ExtendedStat { requestMetric.RangeMetrics[i].Requests200 = metrics.NewCounter() requestMetric.RangeMetrics[i].Requests400 = metrics.NewCounter() @@ -273,6 +278,7 @@ func initRenderMetrics(scope string, c *Config) *RenderMetrics { metrics.Register(scope+".all.requests", requestMetric.RequestsH) metrics.Register(scope+".all.requests_finder", requestMetric.FinderH) metrics.Register(scope+".all.errors", requestMetric.Errors) + if c.ExtendedStat { metrics.Register(scope+".all.requests_status_code.200", requestMetric.Requests200) metrics.Register(scope+".all.requests_status_code.400", requestMetric.Requests400) @@ -284,10 +290,12 @@ func initRenderMetrics(scope string, c *Config) *RenderMetrics { metrics.Register(scope+".all.requests_status_code.504", requestMetric.Requests504) metrics.Register(scope+".all.requests_status_code.5xx", requestMetric.Requests5xx) } + if len(c.RangeS) > 0 { requestMetric.RangeS = c.RangeS requestMetric.RangeNames = c.RangeNames requestMetric.RangeMetrics = make([]RenderMetric, len(c.RangeS)) + for i := range c.RangeS { requestMetric.RangeMetrics[i].RequestsH = metrics.NewVSumHistogram(c.BucketsWidth, c.BucketsLabels).SetNameTotal("") requestMetric.RangeMetrics[i].FinderH = metrics.NewVSumHistogram(c.BucketsWidth, c.BucketsLabels).SetNameTotal("") @@ -297,6 +305,7 @@ func initRenderMetrics(scope string, c *Config) *RenderMetrics { metrics.Register(scope+"."+c.RangeNames[i]+".requests", requestMetric.RangeMetrics[i].RequestsH) metrics.Register(scope+"."+c.RangeNames[i]+".requests_finder", requestMetric.RangeMetrics[i].FinderH) metrics.Register(scope+"."+c.RangeNames[i]+".errors", requestMetric.RangeMetrics[i].Errors) + if c.ExtendedStat { requestMetric.RangeMetrics[i].Requests200 = metrics.NewCounter() requestMetric.RangeMetrics[i].Requests400 = metrics.NewCounter() @@ -332,15 +341,19 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 if len(r.RangeS) > 0 { fromPos = metrics.SearchInt64Le(r.RangeS, untilFromS) } + r.RequestsH.Add(durationMs) + if fromPos >= 0 { r.RangeMetrics[fromPos].RequestsH.Add(durationMs) } + switch statusCode { case 200: if extended { r.Requests200.Add(1) Gstatsd.Timing(r.MetricsCountName, metricsCount, 1.0) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests200.Add(1) Gstatsd.Timing(r.RangeMetrics[fromPos].MetricsCountName, metricsCount, 1.0) @@ -348,8 +361,10 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 } case 400: r.Errors.Add(1) + if extended { r.Requests400.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests400.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -357,8 +372,10 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 } case 403: r.Errors.Add(1) + if extended { r.Requests403.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests403.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -368,6 +385,7 @@ func SendFindMetrics(r *FindMetrics, 
statusCode int, durationMs, untilFromS int6 if extended { r.Requests404.Add(1) Gstatsd.Timing(r.MetricsCountName, metricsCount, 1.0) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests404.Add(1) Gstatsd.Timing(r.RangeMetrics[fromPos].MetricsCountName, metricsCount, 1.0) @@ -375,8 +393,10 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 } case 500: r.Errors.Add(1) + if extended { r.Requests500.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests500.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -384,8 +404,10 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 } case 503: r.Errors.Add(1) + if extended { r.Requests503.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests503.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -393,8 +415,10 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 } case 504: r.Errors.Add(1) + if extended { r.Requests504.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests504.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -404,18 +428,21 @@ func SendFindMetrics(r *FindMetrics, statusCode int, durationMs, untilFromS int6 if extended { if statusCode > 500 { r.Requests5xx.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests5xx.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) } } else { r.Requests4xx.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests4xx.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) } } } + r.Errors.Add(1) } } @@ -425,12 +452,15 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. if len(r.RangeS) > 0 { fromPos = metrics.SearchInt64Le(r.RangeS, untilFromS) } + startMs := start.UnixMilli() endMs := end.UnixMilli() + var ( durFinderMs int64 durFetchMs int64 ) + durMs := endMs - startMs if fetch.IsZero() { durFinderMs = durMs @@ -439,31 +469,39 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. durFinderMs = fetchMs - startMs durFetchMs = endMs - fetchMs } + r.RequestsH.Add(durMs) r.FinderH.Add(durFinderMs) + if fromPos >= 0 { r.RangeMetrics[fromPos].RequestsH.Add(durMs) r.RangeMetrics[fromPos].FinderH.Add(durFinderMs) } + switch statusCode { case 200: if extended { r.Requests200.Add(1) Gstatsd.Timing(r.MetricsCountName, metricsCount, 1.0) Gstatsd.Timing(r.PointsCountName, points, 1.0) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests200.Add(1) Gstatsd.Timing(r.RangeMetrics[fromPos].MetricsCountName, metricsCount, 1.0) + if durFetchMs > 0 { Gstatsd.Timing(r.RangeMetrics[fromPos].PointsCountName, points, 1.0) } + r.RangeMetrics[fromPos].FinderH.Add(durFinderMs) } } case 400: r.Errors.Add(1) + if extended { r.Requests400.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests400.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -471,8 +509,10 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. } case 403: r.Errors.Add(1) + if extended { r.Requests403.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests403.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -482,20 +522,25 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. 
if extended { r.Requests404.Add(1) Gstatsd.Timing(r.MetricsCountName, metricsCount, 1.0) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests404.Add(1) Gstatsd.Timing(r.RangeMetrics[fromPos].MetricsCountName, metricsCount, 1.0) Gstatsd.Timing(r.RangeMetrics[fromPos].PointsCountName, points, 1.0) + if durFetchMs > 0 { Gstatsd.Timing(r.RangeMetrics[fromPos].PointsCountName, points, 1.0) } + r.RangeMetrics[fromPos].FinderH.Add(durFinderMs) } } case 500: r.Errors.Add(1) + if extended { r.Requests500.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests500.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -503,8 +548,10 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. } case 503: r.Errors.Add(1) + if extended { r.Requests503.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests503.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -512,8 +559,10 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. } case 504: r.Errors.Add(1) + if extended { r.Requests504.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests504.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) @@ -523,18 +572,21 @@ func SendRenderMetrics(r *RenderMetrics, statusCode int, start, fetch, end time. if extended { if statusCode > 500 { r.Requests5xx.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests5xx.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) } } else { r.Requests4xx.Add(1) + if fromPos >= 0 { r.RangeMetrics[fromPos].Requests4xx.Add(1) r.RangeMetrics[fromPos].Errors.Add(1) } } } + r.Errors.Add(1) } } @@ -548,10 +600,13 @@ func InitMetrics(c *Config, findWaitQueue, tagsWaitQueue bool) { if c != nil && Graphite != nil { metrics.RegisterRuntimeMemStats(nil) go metrics.CaptureRuntimeMemStats(c.MetricInterval) + if len(c.BucketsWidth) == 0 { c.BucketsWidth = []int64{200, 500, 1000, 2000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000, 40000, 50000, 60000} } + labels := make([]string, len(c.BucketsWidth)+1) + for i := 0; i <= len(c.BucketsWidth); i++ { if i >= len(c.BucketsLabels) || c.BucketsLabels[i] == "" { if i < len(c.BucketsWidth) { @@ -563,11 +618,13 @@ func InitMetrics(c *Config, findWaitQueue, tagsWaitQueue bool) { labels[i] = c.BucketsLabels[i] } } + c.BucketsLabels = labels if len(c.Ranges) > 0 { // c.RangeS = make([]int64, 0, len(c.Range)+1) untilFrom := make([]rangeName, 0, len(c.Ranges)+1) + for name, v := range c.Ranges { if v <= 0 { untilFrom = append(untilFrom, rangeName{name: name, v: math.MaxInt64}) @@ -575,14 +632,18 @@ func InitMetrics(c *Config, findWaitQueue, tagsWaitQueue bool) { untilFrom = append(untilFrom, rangeName{name: name, v: int64(v.Seconds())}) } } + sort.Slice(untilFrom, func(i, j int) bool { return untilFrom[i].v < untilFrom[j].v }) + if untilFrom[len(untilFrom)-1].v != math.MaxInt64 { untilFrom = append(untilFrom, rangeName{name: "history", v: math.MaxInt64}) } + c.RangeS = make([]int64, len(untilFrom)) c.RangeNames = make([]string, len(untilFrom)) + for i := range untilFrom { c.RangeNames[i] = untilFrom[i].name c.RangeS[i] = untilFrom[i].v @@ -592,6 +653,7 @@ func InitMetrics(c *Config, findWaitQueue, tagsWaitQueue bool) { if len(c.FindRanges) > 0 { // c.RangeS = make([]int64, 0, len(c.Range)+1) untilFrom := make([]rangeName, 0, len(c.Ranges)+1) + for name, v := range c.FindRanges { if v <= 0 { untilFrom = append(untilFrom, rangeName{name: name, v: math.MaxInt64}) @@ -599,20 +661,25 @@ func InitMetrics(c *Config, findWaitQueue, tagsWaitQueue bool) { untilFrom = append(untilFrom, rangeName{name: name, v: 
int64(v.Seconds())}) } } + sort.Slice(untilFrom, func(i, j int) bool { return untilFrom[i].v < untilFrom[j].v }) + if untilFrom[len(untilFrom)-1].v != math.MaxInt64 { untilFrom = append(untilFrom, rangeName{name: "history", v: math.MaxInt64}) } + c.FindRangeS = make([]int64, len(untilFrom)) c.FindRangeNames = make([]string, len(untilFrom)) + for i := range untilFrom { c.FindRangeNames[i] = untilFrom[i].name c.FindRangeS[i] = untilFrom[i].v } } } + initFindCacheMetrics(c) FindRequestMetric = initFindMetrics("find", c, findWaitQueue) TagsRequestMetric = initFindMetrics("tags", c, tagsWaitQueue) @@ -621,6 +688,7 @@ func InitMetrics(c *Config, findWaitQueue, tagsWaitQueue bool) { func DisableMetrics() { metrics.UseNilMetrics = true + InitMetrics(nil, false, false) } diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index d6f2f576f..74e749c10 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -15,6 +15,7 @@ func max(a, b int) int { if a > b { return a } + return b } @@ -437,11 +438,16 @@ func TestInitMetrics(t *testing.T) { FindRequestMetric = nil TagsRequestMetric = nil RenderRequestMetric = nil + UnregisterAll() + c := tt.c Graphite = &graphite.Graphite{} + InitMetrics(&c, tt.findWaitQueue, tt.tagsWaitQueue) + Graphite = nil + assert.Equal(t, tt.want, c) // FindRequestH compareInterface(t, "find.all.requests", FindRequestMetric.RequestsH, true) @@ -457,6 +463,7 @@ func TestInitMetrics(t *testing.T) { compareInterface(t, "find.all.requests_status_code.5xx", FindRequestMetric.Requests5xx, c.ExtendedStat) //FindRequestMetric assert.Equal(t, tt.wantFindCountName, FindRequestMetric.MetricsCountName) + for i := 0; i < max(len(c.FindRangeS), len(tt.wantFindRangesMetricsCountNames)); i++ { if i < len(c.FindRangeNames) { // FindRequestH @@ -472,15 +479,19 @@ func TestInitMetrics(t *testing.T) { compareInterface(t, "find."+c.FindRangeNames[i]+".requests_status_code.504", FindRequestMetric.RangeMetrics[i].Requests504, c.ExtendedStat) compareInterface(t, "find."+c.FindRangeNames[i]+".requests_status_code.5xx", FindRequestMetric.RangeMetrics[i].Requests5xx, c.ExtendedStat) } + var want, got string if i < len(tt.wantFindRangesMetricsCountNames) { want = tt.wantFindRangesMetricsCountNames[i] } + if i < len(FindRequestMetric.RangeMetrics) { got = FindRequestMetric.RangeMetrics[i].MetricsCountName } + assert.Equal(t, want, got) } + assert.Equal(t, tt.want.FindRangeS, FindRequestMetric.RangeS) assert.Equal(t, tt.want.FindRangeNames, FindRequestMetric.RangeNames) assert.Equalf(t, len(tt.want.FindRangeS), len(FindRequestMetric.RangeMetrics), "FindRequestMetric.RangeMetrics") @@ -500,8 +511,10 @@ func TestInitMetrics(t *testing.T) { // RenderRequestMetric assert.Equal(t, tt.wantRenderMetricsCountName, RenderRequestMetric.MetricsCountName) assert.Equal(t, tt.wantRenderPointsCountName, RenderRequestMetric.PointsCountName) + for i := 0; i < max(len(c.RangeS), len(tt.wantRenderRangesMetricsCountNames)); i++ { var want, got string + if i < len(c.RangeNames) { // FindRequestH compareInterface(t, "render."+c.RangeNames[i]+".requests", RenderRequestMetric.RangeMetrics[i].RequestsH, true) @@ -517,28 +530,35 @@ func TestInitMetrics(t *testing.T) { compareInterface(t, "render."+c.RangeNames[i]+".requests_status_code.504", RenderRequestMetric.RangeMetrics[i].Requests504, c.ExtendedStat) compareInterface(t, "render."+c.RangeNames[i]+".requests_status_code.5xx", RenderRequestMetric.RangeMetrics[i].Requests5xx, c.ExtendedStat) } + if i < len(tt.wantRenderRangesMetricsCountNames) { want = 
tt.wantRenderRangesMetricsCountNames[i] } + if i < len(RenderRequestMetric.RangeMetrics) { got = RenderRequestMetric.RangeMetrics[i].MetricsCountName } + assert.Equalf(t, want, got, strconv.Itoa(i)) if i < len(tt.wantRenderRangesPointsCountNames) { want = tt.wantRenderRangesPointsCountNames[i] } + if i < len(RenderRequestMetric.RangeMetrics) { got = RenderRequestMetric.RangeMetrics[i].PointsCountName } + assert.Equalf(t, want, got, strconv.Itoa(i)) } + assert.Equal(t, tt.want.RangeS, RenderRequestMetric.RangeS) assert.Equal(t, tt.want.RangeNames, RenderRequestMetric.RangeNames) assert.Equalf(t, len(tt.want.RangeS), len(RenderRequestMetric.RangeMetrics), "RenderRequestMetric.RangeMetrics") // cleanup global vars FindRequestMetric = nil RenderRequestMetric = nil + UnregisterAll() }) } diff --git a/metrics/query_metrics.go index 2d2827191..b2b9a1f73 100644 --- a/metrics/query_metrics.go +++ b/metrics/query_metrics.go @@ -28,6 +28,7 @@ func InitQueryMetrics(table string, c *Config) *QueryMetrics { if table == "" { table = "default" } + if q, exist := QMetrics[table]; exist { return q } @@ -46,10 +47,12 @@ func InitQueryMetrics(table string, c *Config) *QueryMetrics { queryMetric.RequestsH = metrics.NewVSumHistogram(c.BucketsWidth, c.BucketsLabels).SetNameTotal("") metrics.Register("query."+table+".all.requests", queryMetric.RequestsH) metrics.Register("query."+table+".all.errors", queryMetric.Errors) + if len(c.RangeS) > 0 { queryMetric.RangeS = c.RangeS queryMetric.RangeNames = c.RangeNames queryMetric.RangeMetrics = make([]QueryMetric, len(c.RangeS)) + for i := range c.RangeS { queryMetric.RangeMetrics[i].RequestsH = metrics.NewVSumHistogram(c.BucketsWidth, c.BucketsLabels).SetNameTotal("") metrics.Register("query."+table+"."+queryMetric.RangeNames[i]+".requests", queryMetric.RangeMetrics[i].RequestsH) @@ -64,6 +67,7 @@ func InitQueryMetrics(table string, c *Config) *QueryMetrics { } else { queryMetric.RequestsH = metrics.NilHistogram{} } + QMetrics[table] = queryMetric return queryMetric @@ -71,23 +75,28 @@ func InitQueryMetrics(table string, c *Config) *QueryMetrics { func SendQueryRead(r *QueryMetrics, from, until, durationMs, read_rows, read_bytes, ch_read_rows, ch_read_bytes int64, err bool) { r.RequestsH.Add(durationMs) + if ch_read_rows > 0 { Gstatsd.Timing(r.ChReadBytesName, ch_read_bytes, 1.0) Gstatsd.Timing(r.ChReadRowsName, ch_read_rows, 1.0) } + if err { r.Errors.Add(1) } else { Gstatsd.Timing(r.ReadBytesName, read_bytes, 1.0) Gstatsd.Timing(r.ReadRowsName, read_rows, 1.0) } + if len(r.RangeS) > 0 { fromPos := metrics.SearchInt64Le(r.RangeS, until-from) r.RangeMetrics[fromPos].RequestsH.Add(durationMs) + if ch_read_rows > 0 { Gstatsd.Timing(r.RangeMetrics[fromPos].ChReadBytesName, ch_read_bytes, 1.0) Gstatsd.Timing(r.RangeMetrics[fromPos].ChReadRowsName, ch_read_rows, 1.0) } + if err { r.RangeMetrics[fromPos].Errors.Add(1) } else { diff --git a/pkg/alias/map.go index 7eab353ef..777c6d633 100644 --- a/pkg/alias/map.go +++ b/pkg/alias/map.go @@ -39,16 +39,20 @@ func (m *Map) MergeTarget(r finder.Result, target string, saveCache bool) []byte series := r.Series() buf.Grow(len(series) * 24) + for i := 0; i < len(series); i++ { if saveCache { buf.Write(series[i]) buf.WriteByte('\n') } + key := string(series[i]) if len(key) == 0 { continue } + abs := string(r.Abs(series[i])) + m.lock.Lock() if x, ok := m.data[key]; ok { m.data[key] = append(x, Value{Target: target, DisplayName: abs}) @@ -69,23 +73,28 @@ func (m *Map) MergeTarget(r
finder.Result, target string, saveCache bool) []byte func (m *Map) Len() int { m.lock.RLock() defer m.lock.RUnlock() + return len(m.data) } // Size returns count of values func (m *Map) Size() int { s := 0 + m.lock.RLock() defer m.lock.RUnlock() + for _, v := range m.data { s += len(v) } + return s } // Series returns keys of aliases map func (m *Map) Series(isReverse bool) []string { series := make([]string, 0, m.Len()) + for k := range m.data { if isReverse { series = append(series, reverse.String(k)) @@ -93,17 +102,20 @@ func (m *Map) Series(isReverse bool) []string { series = append(series, k) } } + return series } // DisplayNames returns DisplayName from all Values func (m *Map) DisplayNames() []string { dn := make([]string, 0, m.Size()) + for _, v := range m.data { for _, a := range v { dn = append(dn, a.DisplayName) } } + return dn } diff --git a/pkg/alias/map_tagged_test.go b/pkg/alias/map_tagged_test.go index f0c4135b7..876ce9d70 100644 --- a/pkg/alias/map_tagged_test.go +++ b/pkg/alias/map_tagged_test.go @@ -20,11 +20,13 @@ var taggedTarget string = "seriesByTag('name=cpu.loadavg)" func createAMTagged() *Map { am := New() am.MergeTarget(taggedResult, taggedTarget, false) + return am } func TestCreationTagged(t *testing.T) { am := createAMTagged() + for _, m := range taggedResult.List() { metric := string(m) v, ok := am.data[metric] @@ -51,6 +53,7 @@ func TestAsyncMergeTagged(t *testing.T) { am := New() wg := sync.WaitGroup{} wg.Add(2) + go func() { am.MergeTarget(finder.NewMockTagged(testEnvResult), targetTest, false) wg.Done() @@ -59,6 +62,7 @@ func TestAsyncMergeTagged(t *testing.T) { am.MergeTarget(finder.NewMockTagged(prodEnvResult), targetProd, false) wg.Done() }() + resultAM := &Map{ data: map[string][]Value{ "cpu.loadavg?env=test&host=host1": { @@ -75,15 +79,20 @@ func TestAsyncMergeTagged(t *testing.T) { }, }, } + wg.Wait() + if !assert.Equal(t, resultAM.Len(), am.Len()) { t.FailNow() } + for i := range am.data { var dv Values = am.data[i] + sort.Sort(&dv) am.data[i] = dv } + assert.Equal(t, resultAM, am) } diff --git a/pkg/alias/map_test.go b/pkg/alias/map_test.go index 317c5cb03..8d3a1fdcc 100644 --- a/pkg/alias/map_test.go +++ b/pkg/alias/map_test.go @@ -36,11 +36,13 @@ var findTarget string = "*.name.*" func createAM() *Map { am := New() am.MergeTarget(finderResult, findTarget, false) + return am } func TestCreation(t *testing.T) { am := createAM() + for _, m := range finderResult.List() { metric := string(m) v, ok := am.data[metric] @@ -55,6 +57,7 @@ func TestAsyncMerge(t *testing.T) { target2 := "5*.name.*" wg := sync.WaitGroup{} wg.Add(2) + go func() { am.MergeTarget(finderResult, findTarget, false) wg.Done() @@ -68,6 +71,7 @@ func TestAsyncMerge(t *testing.T) { am.MergeTarget(finder.NewMockFinder(result), target2, false) wg.Done() }() + resultAM := &Map{ data: map[string][]Value{ "5_sec.name.max": { @@ -89,21 +93,27 @@ func TestAsyncMerge(t *testing.T) { }, }, } + wg.Wait() + if !assert.Equal(t, resultAM.Len(), am.Len()) { t.FailNow() } + for i := range am.data { var dv Values = am.data[i] + sort.Sort(&dv) am.data[i] = dv } + assert.Equal(t, resultAM, am) } func TestLen(t *testing.T) { am := createAM() assert.Equal(t, 4, am.Len()) + result := [][]byte{ []byte("5_sec.name.any"), []byte("5_min.name.min"), // it's repeated @@ -115,6 +125,7 @@ func TestLen(t *testing.T) { func TestSize(t *testing.T) { am := createAM() assert.Equal(t, 4, am.Size()) + result := [][]byte{ []byte("5_sec.name.any"), []byte("5_min.name.min"), // it's repeated, but it increases Size @@ 
-130,6 +141,7 @@ func TestDisplayNames(t *testing.T) { expectedSeries := finderResult.Strings() sort.Strings(expectedSeries) assert.Equal(t, expectedSeries, sortedDisplayNames) + anotherFinderResult := finder.NewMockFinder([][]byte{ []byte("5_sec.name.any"), []byte("5_min.name.min"), // it's repeated, but it increases Size diff --git a/pkg/dry/math.go b/pkg/dry/math.go index 9c2bc54f1..2d9c6c51e 100644 --- a/pkg/dry/math.go +++ b/pkg/dry/math.go @@ -5,6 +5,7 @@ func Max(x, y int64) int64 { if x > y { return x } + return y } @@ -13,6 +14,7 @@ func Min(x, y int64) int64 { if x < y { return x } + return y } @@ -22,6 +24,7 @@ func Ceil(x, d int64) int64 { if x <= 0 || d <= 0 { return int64(0) } + return (x + d - 1) / d } @@ -37,6 +40,7 @@ func FloorToMultiplier(x, m int64) int64 { if x <= 0 || m <= 0 { return int64(0) } + return x / m * m } @@ -45,12 +49,14 @@ func GCD(a, b int64) int64 { if b < 0 { b = -b } + var t int64 for b != 0 { t = b b = a % b a = t } + return a } @@ -59,5 +65,6 @@ func LCM(a, b int64) int64 { if a*b < 0 { return -a / GCD(a, b) * b } + return a / GCD(a, b) * b } diff --git a/pkg/dry/strings.go b/pkg/dry/strings.go index 15ee0f8c4..05fd9d7f2 100644 --- a/pkg/dry/strings.go +++ b/pkg/dry/strings.go @@ -3,11 +3,13 @@ package dry // RemoveEmptyStrings removes empty strings from list and returns truncated slice func RemoveEmptyStrings(stringList []string) []string { rm := 0 + for i := 0; i < len(stringList); i++ { if stringList[i] == "" { rm++ continue } + if rm > 0 { stringList[i-rm] = stringList[i] } diff --git a/pkg/reverse/reverse.go b/pkg/reverse/reverse.go index 76241df50..514acaecd 100644 --- a/pkg/reverse/reverse.go +++ b/pkg/reverse/reverse.go @@ -24,6 +24,7 @@ func String(path string) string { func reverse(m []byte) { i := 0 j := len(m) - 1 + for i < j { m[i], m[j] = m[j], m[i] i++ @@ -39,6 +40,7 @@ func Inplace(path []byte) { reverse(path) var a, b int + l := len(path) for b = 0; b < l; b++ { if path[b] == '.' 
{ @@ -46,6 +48,7 @@ func Inplace(path []byte) { a = b + 1 } } + reverse(path[a:b]) } @@ -55,6 +58,7 @@ func Bytes(path []byte) []byte { if bytes.IndexByte(path, '?') >= 0 { return path } + r := make([]byte, len(path)) copy(r, path) Inplace(r) diff --git a/pkg/scope/http_request.go index 140eae71e..ea686b344 100644 --- a/pkg/scope/http_request.go +++ b/pkg/scope/http_request.go @@ -25,6 +25,7 @@ func HttpRequest(r *http.Request) *http.Request { requestID := r.Header.Get("X-Request-Id") if requestID == "" || !requestIdRegexp.MatchString(requestID) { var b [16]byte + binary.LittleEndian.PutUint64(b[:], rand.Uint64()) binary.LittleEndian.PutUint64(b[8:], rand.Uint64()) requestID = fmt.Sprintf("%x", b) @@ -62,5 +63,6 @@ func Grafana(ctx context.Context) string { if o != "" || d != "" || p != "" { return fmt.Sprintf("Org:%s; Dashboard:%s; Panel:%s", o, d, p) } + return "" } diff --git a/pkg/scope/key.go index fa442dac1..16cb70df3 100644 --- a/pkg/scope/key.go +++ b/pkg/scope/key.go @@ -18,6 +18,7 @@ func String(ctx context.Context, key string) string { if value, ok := ctx.Value(scopeKey(key)).(string); ok { return value } + return "" } @@ -26,6 +27,7 @@ func Bool(ctx context.Context, key string) bool { if _, ok := ctx.Value(scopeKey(key)).(bool); ok { return true } + return false } @@ -65,5 +67,6 @@ func ClickhouseUserAgent(ctx context.Context) string { if grafana != "" { return fmt.Sprintf("Graphite-Clickhouse/%s (table:%s) Grafana(%s)", Version, Table(ctx), grafana) } + return fmt.Sprintf("Graphite-Clickhouse/%s (table:%s)", Version, Table(ctx)) } diff --git a/pkg/scope/logger.go index bac70b395..271441b6a 100644 --- a/pkg/scope/logger.go +++ b/pkg/scope/logger.go @@ -17,13 +17,16 @@ var ( // Logger returns zap.Logger instance func Logger(ctx context.Context) *zap.Logger { logger := ctx.Value(scopeKey("logger")) + var zapLogger *zap.Logger + if logger != nil { if zl, ok := logger.(*zap.Logger); ok { zapLogger = zl return zapLogger } } + if zapLogger == nil { zapLogger = zapwriter.Default() } @@ -39,13 +42,16 @@ // LoggerWithHeaders returns zap.Logger instance func LoggerWithHeaders(ctx context.Context, r *http.Request, headersToLog []string) *zap.Logger { logger := ctx.Value(scopeKey("logger")) + var zapLogger *zap.Logger + if logger != nil { if zl, ok := logger.(*zap.Logger); ok { zapLogger = zl return zapLogger } } + if zapLogger == nil { zapLogger = zapwriter.Default() } @@ -59,6 +65,7 @@ func LoggerWithHeaders(ctx context.Context, r *http.Request, headersToLog []stri if carbonapiUUID != "" { zapLogger = zapLogger.With(zap.String("carbonapi_uuid", carbonapiUUID)) } + requestHeaders := headers.GetHeaders(&r.Header, headersToLog) if len(requestHeaders) > 0 { zapLogger = zapLogger.With(zap.Any("request_headers", requestHeaders)) diff --git a/pkg/where/match.go index c6907e33a..9d0e841e9 100644 --- a/pkg/where/match.go +++ b/pkg/where/match.go @@ -10,12 +10,14 @@ var opEq = "=" // ClearGlob cleans up grafana globs like {name} func ClearGlob(query string) string { p := 0 + s := strings.IndexAny(query, "{[") if s == -1 { return query } found := false + var builder strings.Builder for {
builder.WriteString(query[s+1 : e-1]) p = e @@ -44,33 +50,41 @@ func ClearGlob(query string) string { break } else { symbols := 0 + for _, c := range query[s+1 : s+e+1] { _ = c // for loop over runes + symbols++ if symbols == 2 { break } } + if symbols <= 1 { if !found { builder.Grow(len(query) - 2) + found = true } + builder.WriteString(query[p:s]) builder.WriteString(query[s+1 : s+e+1]) p = e + s + 2 } } + e += s + 2 } if e >= len(query) { break } + s = strings.IndexAny(query[e:], "{[") if s == -1 { break } + s += e } @@ -78,8 +92,10 @@ func ClearGlob(query string) string { if p < len(query) { builder.WriteString(query[p:]) } + return builder.String() } + return query } @@ -95,13 +111,16 @@ func HasUnmatchedBrackets(query string) bool { if c == '{' || c == '[' { stack = append(stack, c) } + if c == '}' || c == ']' { if len(stack) == 0 || stack[len(stack)-1] != matchingBracket[c] { return true } + stack = stack[:len(stack)-1] } } + return len(stack) != 0 } @@ -173,11 +192,13 @@ func TreeGlob(field string, query string) string { func ConcatMatchKV(key, value string) string { startLine := value[0] == '^' endLine := value[len(value)-1] == '$' + if startLine && endLine { return key + opEq + value[1:] } else if startLine { return key + opEq + value[1:] + "\\\\%" } + return key + opEq + "\\\\%" + value } @@ -185,8 +206,10 @@ func Match(field string, key, value string) string { if len(value) == 0 || value == "*" { return Like(field, key+"=%") } + expr := ConcatMatchKV(key, value) simplePrefix := NonRegexpPrefix(expr) + if len(simplePrefix) == len(expr) { return Eq(field, expr) } else if len(simplePrefix) == len(expr)-1 && expr[len(expr)-1] == '$' { diff --git a/pkg/where/match_test.go b/pkg/where/match_test.go index 485c8b690..c04720989 100644 --- a/pkg/where/match_test.go +++ b/pkg/where/match_test.go @@ -57,6 +57,7 @@ func Test_HasUnmatchedBrackets(t *testing.T) { func TestGlob(t *testing.T) { field := "test" + tests := []struct { query string want string diff --git a/pkg/where/where.go b/pkg/where/where.go index f0bab382a..6326eb8b4 100644 --- a/pkg/where/where.go +++ b/pkg/where/where.go @@ -31,18 +31,23 @@ func GlobExpandSimple(value, prefix string, result *[]string) error { if end <= 1 { return errs.NewErrorWithCode("malformed glob: "+value, http.StatusBadRequest) } + if end == -1 || strings.IndexAny(value[start+1:start+end], "{}") != -1 { return errs.NewErrorWithCode("malformed glob: "+value, http.StatusBadRequest) } + if start > 0 { prefix = prefix + value[0:start] } + g := value[start+1 : start+end] values := strings.Split(g, ",") + var postfix string if end+start-1 < len(value) { postfix = value[start+end+1:] } + for _, v := range values { if err := GlobExpandSimple(postfix, prefix+v, result); err != nil { return err @@ -62,6 +67,7 @@ func GlobToRegexp(g string) string { s = strings.ReplaceAll(s, "?", "[^.]") s = strings.ReplaceAll(s, ",", "|") s = strings.ReplaceAll(s, "*", "([^.]*?)") + return s } @@ -100,15 +106,18 @@ func NonRegexpPrefix(expr string) string { return expr[:eq+1] } } + return expr[:i] } } + return expr } func escape(s string) string { s = strings.ReplaceAll(s, `\`, `\\`) s = strings.ReplaceAll(s, `'`, `\'`) + return s } @@ -117,6 +126,7 @@ func escapeRegex(s string) string { if strings.Contains(s, "|") { s = "(" + s + ")" } + return s } @@ -125,6 +135,7 @@ func likeEscape(s string) string { s = strings.ReplaceAll(s, `%`, `\%`) s = strings.ReplaceAll(s, `\`, `\\`) s = strings.ReplaceAll(s, `'`, `\'`) + return s } @@ -148,6 +159,7 @@ func quoteRegex(key, value string) 
string { if startLine { return fmt.Sprintf("'^%s%s%s'", key, opEq, escapeRegex(value[1:])) } + return fmt.Sprintf("'^%s%s.*%s'", key, opEq, escapeRegex(value)) } @@ -181,15 +193,20 @@ func In(field string, list []string) string { } var buf strings.Builder + buf.WriteString(field) buf.WriteString(" IN (") + for i, v := range list { if i > 0 { buf.WriteByte(',') } + buf.WriteString(quote(v)) } + buf.WriteByte(')') + return buf.String() } @@ -220,6 +237,7 @@ func (w *Where) And(exp string) { if exp == "" { return } + if w.where != "" { w.where = fmt.Sprintf("(%s) AND (%s)", w.where, exp) } else { @@ -231,6 +249,7 @@ func (w *Where) Or(exp string) { if exp == "" { return } + if w.where != "" { w.where = fmt.Sprintf("(%s) OR (%s)", w.where, exp) } else { @@ -250,6 +269,7 @@ func (w *Where) SQL() string { if w.where == "" { return "" } + return "WHERE " + w.where } @@ -257,5 +277,6 @@ func (w *Where) PreWhereSQL() string { if w.where == "" { return "" } + return "PREWHERE " + w.where } diff --git a/pkg/where/where_test.go b/pkg/where/where_test.go index ca09a0042..0393e796b 100644 --- a/pkg/where/where_test.go +++ b/pkg/where/where_test.go @@ -25,12 +25,14 @@ func TestGlobExpandSimple(t *testing.T) { for _, tt := range tests { t.Run(tt.value, func(t *testing.T) { var got []string + err := GlobExpandSimple(tt.value, "", &got) if tt.wantErr { assert.Error(t, err, "Expand() not returns error for %v", tt.value) } else { assert.NoErrorf(t, err, "Expand() returns error %v for %v", err, tt.value) } + assert.Equal(t, tt.want, got, "Expand() result") }) } diff --git a/prometheus/labels.go b/prometheus/labels.go index eb9910598..48d55c5cd 100644 --- a/prometheus/labels.go +++ b/prometheus/labels.go @@ -16,10 +16,12 @@ func urlParse(rawurl string) (*url.URL, error) { if p < 0 { return url.Parse(rawurl) } + m, err := url.Parse(rawurl[p:]) if m != nil { m.Path = rawurl[:p] } + return m, err } diff --git a/prometheus/logger.go b/prometheus/logger.go index 0f2613573..bfe9b2635 100644 --- a/prometheus/logger.go +++ b/prometheus/logger.go @@ -17,11 +17,14 @@ type logger struct { func (l *logger) Log(keyvals ...interface{}) error { lg := l.z + var msg string + var level errorLevel for i := 1; i < len(keyvals); i += 2 { keyObj := keyvals[i-1] + keyStr, ok := keyObj.(string) if !ok { l.z.Error("can't handle log, wrong key", zap.Any("keyvals", keyvals)) @@ -34,6 +37,7 @@ func (l *logger) Log(keyvals ...interface{}) error { l.z.Error("can't handle log, wrong level", zap.Any("keyvals", keyvals)) return nil } + continue } @@ -43,6 +47,7 @@ func (l *logger) Log(keyvals ...interface{}) error { l.z.Error("can't handle log, wrong msg", zap.Any("keyvals", keyvals)) return nil } + continue } @@ -62,5 +67,6 @@ func (l *logger) Log(keyvals ...interface{}) error { l.z.Error("can't handle log, unknown level", zap.Any("keyvals", keyvals)) return nil } + return nil } diff --git a/prometheus/matcher.go b/prometheus/matcher.go index 1d4b9b62c..0d8931d6e 100644 --- a/prometheus/matcher.go +++ b/prometheus/matcher.go @@ -28,14 +28,17 @@ var promqlMatchMap = map[labels.MatchType]finder.TaggedTermOp{ func makeTaggedFromPromPB(matchers []*prompb.LabelMatcher) ([]finder.TaggedTerm, error) { terms := make([]finder.TaggedTerm, 0, len(matchers)) + for i := 0; i < len(matchers); i++ { if matchers[i] == nil { continue } + op, ok := prompbMatchMap[matchers[i].Type] if !ok { return nil, fmt.Errorf("unknown matcher type %#v", matchers[i].GetType()) } + terms = append(terms, finder.TaggedTerm{ Key: matchers[i].Name, Value: matchers[i].Value, @@ 
-50,20 +53,24 @@ func makeTaggedFromPromPB(matchers []*prompb.LabelMatcher) ([]finder.TaggedTerm, func makeTaggedFromPromQL(matchers []*labels.Matcher) ([]finder.TaggedTerm, error) { terms := make([]finder.TaggedTerm, 0, len(matchers)) + for i := 0; i < len(matchers); i++ { if matchers[i] == nil { continue } + op, ok := promqlMatchMap[matchers[i].Type] if !ok { return nil, fmt.Errorf("unknown matcher type %#v", matchers[i].Type) } + terms = append(terms, finder.TaggedTerm{ Key: matchers[i].Name, Value: matchers[i].Value, Op: op, }) } + sort.Sort(finder.TaggedTermList(terms)) return terms, nil diff --git a/prometheus/querier_select.go b/prometheus/querier_select.go index 850934592..176277aa7 100644 --- a/prometheus/querier_select.go +++ b/prometheus/querier_select.go @@ -24,23 +24,28 @@ func (q *Querier) lookup(ctx context.Context, from, until int64, qlimiter limite if err != nil { return nil, err } + var ( stat finder.FinderStat limitCtx context.Context cancel context.CancelFunc ) + if qlimiter.Enabled() { limitCtx, cancel = context.WithTimeout(ctx, q.config.ClickHouse.IndexTimeout) defer cancel() + start := time.Now() err = qlimiter.Enter(limitCtx, "render") *queueDuration += time.Since(start) + if err != nil { // status = http.StatusServiceUnavailable // queueFail = true // http.Error(w, err.Error(), status) return nil, err } + defer qlimiter.Leave(limitCtx, "render") } // TODO: implement use stat for Prometheus queries @@ -52,6 +57,7 @@ func (q *Querier) lookup(ctx context.Context, from, until int64, qlimiter limite am := alias.New() am.Merge(fndResult, false) + return am, nil } @@ -62,6 +68,7 @@ func (q *Querier) timeRange(hints *storage.SelectHints) (int64, int64) { if hints != nil && hints.Start > 0 && hints.Start < 5662310400000 { from = time.Unix(hints.Start/1000, (hints.Start%1000)*1000000) } + if hints != nil && hints.End > 0 && hints.End < 5662310400000 { until = time.Unix(hints.End/1000, (hints.End%1000)*1000000) } @@ -90,8 +97,10 @@ func (q *Querier) Select(ctx context.Context, sortSeries bool, hints *storage.Se var ( queueDuration time.Duration ) + from, until := q.timeRange(hints) qlimiter := data.GetQueryLimiterFrom("", q.config, from, until) + am, err := q.lookup(ctx, from, until, qlimiter, &queueDuration, labelsMatcher...) if err != nil { return nil //, nil, err @TODO @@ -120,6 +129,7 @@ func (q *Querier) Select(ctx context.Context, sortSeries bool, hints *storage.Se MaxDataPoints: maxDataPoints, }: data.NewTargets([]string{}, am), } + reply, err := multiTarget.Fetch(ctx, q.config, config.ContextPrometheus, qlimiter, &queueDuration) if err != nil { return nil // , nil, err @TODO diff --git a/prometheus/querier_select_test.go b/prometheus/querier_select_test.go index 50c321f41..5022d27b4 100644 --- a/prometheus/querier_select_test.go +++ b/prometheus/querier_select_test.go @@ -22,6 +22,7 @@ func TestQuerier_timeRange(t *testing.T) { TaggedAutocompleDays: 4, }, } + tests := []struct { name string @@ -81,12 +82,14 @@ func TestQuerier_timeRange(t *testing.T) { // Querier returns a new Querier on the storage. 
sq, err := s.Querier(tt.mint, tt.maxt) require.NoError(t, err) + q := sq.(*Querier) gotFrom, gotUntil := q.timeRange(tt.hints) if gotFrom != tt.wantFrom { t.Errorf("Querier.timeRange().from got = %v, want %v", gotFrom, tt.wantFrom) } + if gotUntil != tt.wantUntil { t.Errorf("Querier.timeRange().until got = %v, want %v", gotUntil, tt.wantUntil) } diff --git a/prometheus/series_set.go index ceda48496..42b95dfeb 100644 --- a/prometheus/series_set.go +++ b/prometheus/series_set.go @@ -52,6 +52,7 @@ func makeSeriesSet(data *data.Data, step int64) (storage.SeriesSet, error) { } nextMetric := data.GroupByMetric() + for { points := nextMetric() if len(points) == 0 { @@ -100,9 +101,11 @@ func (sit *seriesIterator) At() (t int64, v float64) { if index == len(sit.points) { return int64(sit.points[len(sit.points)-1].Time)*1000 + sit.step, math.NaN() } + if index < 0 || index >= len(sit.points) { index = 0 } + p := sit.points[index] // sit.logger().Debug("seriesIterator.At", zap.Int64("t", int64(p.Time)*1000), zap.Float64("v", p.Value)) return int64(p.Time) * 1000, p.Value @@ -139,6 +142,7 @@ func (sit *seriesIterator) Next() chunkenc.ValueType { if sit.step == 0 && sit.current == len(sit.points)-1 { return chunkenc.ValNone } + sit.current++ // sit.logger().Debug("seriesIterator.Next", zap.Bool("ret", true)) return chunkenc.ValFloat @@ -158,6 +162,7 @@ func (ss *seriesSet) At() storage.Series { // zap.L().Debug("seriesSet.At", zap.String("metricName", "nil")) return nil } + s := &ss.series[ss.current] // zap.L().Debug("seriesSet.At", zap.String("metricName", s.name())) return s diff --git a/render/data/carbonlink.go index a74f27c9b..48068b79b 100644 --- a/render/data/carbonlink.go +++ b/render/data/carbonlink.go @@ -29,9 +29,11 @@ func setCarbonlinkClient(config *config.Carbonlink) { if carbonlink != nil { return } + if config.Server == "" { return } + carbonlink = &carbonlinkClient{ graphitePickle.NewCarbonlinkClient( config.Server, @@ -42,12 +44,14 @@ func setCarbonlinkClient(config *config.Carbonlink) { ), config.TotalTimeout, } + return } // queryCarbonlink returns a callable result fetcher func queryCarbonlink(parentCtx context.Context, carbonlink *carbonlinkClient, metrics []string) func() *point.Points { logger := scope.Logger(parentCtx) + if carbonlink == nil { return func() *point.Points { return nil } } diff --git a/render/data/carbonlink_test.go index 2db8330dd..dc989fc4e 100644 --- a/render/data/carbonlink_test.go +++ b/render/data/carbonlink_test.go @@ -24,6 +24,7 @@ func (c *carbonlinkMocked) CacheQueryMulti(ctx context.Context, metrics []string func TestSetCarbonlingClient(t *testing.T) { assert.Nil(t, carbonlink, "client is set in the beginning of tests") + cfg := config.New() cfg.Carbonlink.Server = "localhost:0" setCarbonlinkClient(&cfg.Carbonlink) @@ -47,6 +48,7 @@ func TestQueryCarbonlink(t *testing.T) { Value: 14, }, } + for _, m := range metrics { res[m] = dataPoints } @@ -60,6 +62,7 @@ func TestQueryCarbonlink(t *testing.T) { // Result points.metrics are not ordered pMetrics := []string{points.MetricName(1), points.MetricName(2)} i := 0 + for _, m := range pMetrics { for _, dp := range dataPoints { // There is a tiny chance that point will have greater Timestamp than now.
Here we test it's at most the next second @@ -67,6 +70,7 @@ func TestQueryCarbonlink(t *testing.T) { expectedPoint := point.Point{MetricID: points.MetricID(m), Value: dp.Value, Time: uint32(dp.Timestamp), Timestamp: points.List()[i].Timestamp} assert.Equal(t, expectedPoint, points.List()[i], "point is not correct") + i++ } } diff --git a/render/data/ch_response.go b/render/data/ch_response.go index 02414e57b..d85421ac2 100644 --- a/render/data/ch_response.go +++ b/render/data/ch_response.go @@ -36,6 +36,7 @@ func (c *CHResponse) ToMultiFetchResponseV2() (*v2pb.MultiFetchResponse, error) start, stop, count, getValue := point.FillNulls(points, from, until, step) values := make([]float64, 0, count) isAbsent := make([]bool, 0, count) + for { value, err := getValue() if err != nil { @@ -45,6 +46,7 @@ func (c *CHResponse) ToMultiFetchResponseV2() (*v2pb.MultiFetchResponse, error) // if err is not point.ErrTimeGreaterStop, the points are corrupted return err } + if math.IsNaN(value) { values = append(values, 0) isAbsent = append(isAbsent, true) @@ -53,6 +55,7 @@ func (c *CHResponse) ToMultiFetchResponseV2() (*v2pb.MultiFetchResponse, error) isAbsent = append(isAbsent, false) } } + for _, a := range data.AM.Get(name) { fr := v2pb.FetchResponse{ Name: a.DisplayName, @@ -64,24 +67,29 @@ func (c *CHResponse) ToMultiFetchResponseV2() (*v2pb.MultiFetchResponse, error) } mfr.Metrics = append(mfr.Metrics, fr) } + return nil } // process metrics with points writtenMetrics := make(map[string]struct{}) nextMetric := data.GroupByMetric() + for { points := nextMetric() if len(points) == 0 { break } + id := points[0].MetricID name := data.MetricName(id) writtenMetrics[name] = struct{}{} + step, err := data.GetStep(id) if err != nil { return nil, err } + if err := addResponse(name, step, points); err != nil { return nil, err } @@ -97,19 +105,23 @@ func (c *CHResponse) ToMultiFetchResponseV2() (*v2pb.MultiFetchResponse, error) } } } + return mfr, nil } // ToMultiFetchResponseV2 returns protobuf v2pb.MultiFetchResponse message for given CHResponses func (cc *CHResponses) ToMultiFetchResponseV2() (*v2pb.MultiFetchResponse, error) { mfr := &v2pb.MultiFetchResponse{Metrics: make([]v2pb.FetchResponse, 0)} + for _, c := range *cc { m, err := c.ToMultiFetchResponseV2() if err != nil { return nil, err } + mfr.Metrics = append(mfr.Metrics, m.Metrics...) 
} + return mfr, nil } @@ -121,6 +133,7 @@ func (c *CHResponse) ToMultiFetchResponseV3() (*v3pb.MultiFetchResponse, error) from, until := uint32(c.From), uint32(c.Until) start, stop, count, getValue := point.FillNulls(points, from, until, step) values := make([]float64, 0, count) + for { value, err := getValue() if err != nil { @@ -130,8 +143,10 @@ func (c *CHResponse) ToMultiFetchResponseV3() (*v3pb.MultiFetchResponse, error) // if err is not point.ErrTimeGreaterStop, the points are corrupted return err } + values = append(values, value) } + for _, a := range data.AM.Get(name) { fr := v3pb.FetchResponse{ Name: a.DisplayName, @@ -149,28 +164,34 @@ func (c *CHResponse) ToMultiFetchResponseV3() (*v3pb.MultiFetchResponse, error) } mfr.Metrics = append(mfr.Metrics, fr) } + return nil } // process metrics with points writtenMetrics := make(map[string]struct{}) nextMetric := data.GroupByMetric() + for { points := nextMetric() if len(points) == 0 { break } + id := points[0].MetricID name := data.MetricName(id) writtenMetrics[name] = struct{}{} + consolidationFunc, err := data.GetAggregation(id) if err != nil { return nil, err } + step, err := data.GetStep(id) if err != nil { return nil, err } + if err := addResponse(name, consolidationFunc, step, points); err != nil { return nil, err } @@ -186,18 +207,22 @@ func (c *CHResponse) ToMultiFetchResponseV3() (*v3pb.MultiFetchResponse, error) } } } + return mfr, nil } // ToMultiFetchResponseV3 returns protobuf v3pb.MultiFetchResponse message for given CHResponses func (cc *CHResponses) ToMultiFetchResponseV3() (*v3pb.MultiFetchResponse, error) { mfr := &v3pb.MultiFetchResponse{Metrics: make([]v3pb.FetchResponse, 0)} + for _, c := range *cc { m, err := c.ToMultiFetchResponseV3() if err != nil { return nil, err } + mfr.Metrics = append(mfr.Metrics, m.Metrics...) 
} + return mfr, nil } diff --git a/render/data/common_step.go b/render/data/common_step.go index e2bda39fa..231013afa 100644 --- a/render/data/common_step.go +++ b/render/data/common_step.go @@ -30,6 +30,7 @@ func (c *commonStep) calculateUnsafe(a, b int64) int64 { if a == 0 || b == 0 { return dry.Max(a, b) } + return dry.LCM(a, b) } @@ -43,6 +44,7 @@ func (c *commonStep) calculate(value int64) { func (c *commonStep) getResult() int64 { ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) defer cancel() + ch := make(chan int64) go func(ch chan int64) { c.wg.Wait() diff --git a/render/data/common_step_test.go b/render/data/common_step_test.go index a9edf8e97..dcd61eb95 100644 --- a/render/data/common_step_test.go +++ b/render/data/common_step_test.go @@ -36,11 +36,13 @@ func newWrapper() *wrapper { func TestCommonStepWorker(t *testing.T) { w := newWrapper() w.addTargets(4) + go func() { lastStep := int64(0) for i := 0; i < 20000; i++ { w.calculateUnsafe(lastStep, 0) } + w.calc(0) assert.Equal(t, int64(120), w.commonStep.getResult()) }() @@ -49,6 +51,7 @@ func TestCommonStepWorker(t *testing.T) { for i := 0; i < 30000; i++ { w.calculateUnsafe(lastStep, 6) } + w.calc(6) assert.Equal(t, int64(120), w.commonStep.getResult()) }() @@ -57,6 +60,7 @@ func TestCommonStepWorker(t *testing.T) { for i := 0; i < 40000; i++ { w.calculateUnsafe(lastStep, 8) } + w.calc(8) assert.Equal(t, int64(120), w.commonStep.getResult()) }() @@ -65,6 +69,7 @@ func TestCommonStepWorker(t *testing.T) { for i := 0; i < 50000; i++ { w.calculateUnsafe(lastStep, 10) } + w.calc(10) assert.Equal(t, int64(120), w.commonStep.getResult()) }() diff --git a/render/data/data.go b/render/data/data.go index 07a2a622c..9bb7bdbff 100644 --- a/render/data/data.go +++ b/render/data/data.go @@ -45,6 +45,7 @@ func (d *Data) GetStep(id uint32) (uint32, error) { if 0 < d.CommonStep { return uint32(d.CommonStep), nil } + return d.Points.GetStep(id) } @@ -54,6 +55,7 @@ func (d *Data) GetAggregation(id uint32) (string, error) { if err != nil { return function, err } + switch function { case "any": return "first", nil @@ -119,12 +121,14 @@ func prepareData(ctx context.Context, targets int, fetcher func() *point.Points) ) } } + return case <-ctx.Done(): data.e <- fmt.Errorf("prepareData failed: %w", ctx.Err()) return } }() + return data } @@ -134,6 +138,7 @@ func (d *data) setSteps(cond *conditions) { d.CommonStep = cond.step return } + d.Points.SetSteps(cond.steps) } @@ -161,18 +166,21 @@ func dataSplitAggregated(data []byte, atEOF bool) (advance int, token []byte, er nameLen, readBytes, err := ReadUvarint(data) tokenLen := readBytes + int(nameLen) + if err != nil || len(data) < tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } timeLen, readBytes, err := ReadUvarint(data[tokenLen:]) tokenLen += readBytes + int(timeLen)*4 + if err != nil || len(data) < tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } valueLen, readBytes, err := ReadUvarint(data[tokenLen:]) tokenLen += readBytes + int(valueLen)*8 + if err != nil || len(data) < tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } @@ -193,24 +201,28 @@ func dataSplitUnaggregated(data []byte, atEOF bool) (advance int, token []byte, nameLen, readBytes, err := ReadUvarint(data) tokenLen := readBytes + int(nameLen) + if err != nil || len(data) < tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } timeLen, readBytes, err := ReadUvarint(data[tokenLen:]) tokenLen += readBytes + int(timeLen)*4 + if err != nil || len(data) < 
tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } valueLen, readBytes, err := ReadUvarint(data[tokenLen:]) tokenLen += readBytes + int(valueLen)*8 + if err != nil || len(data) < tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } timestampLen, readBytes, err := ReadUvarint(data[tokenLen:]) tokenLen += readBytes + int(timestampLen)*4 + if err != nil || len(data) < tokenLen { return splitErrorHandler(&data, atEOF, tokenLen, err) } @@ -226,6 +238,7 @@ func dataSplitUnaggregated(data []byte, atEOF bool) (advance int, token []byte, // It is expected that on error the context will be cancelled at the upper level. func (d *data) parseResponse(ctx context.Context, bodyReader io.ReadCloser, cond *conditions) error { pp := d.Points + dataSplit := dataSplitUnaggregated if cond.aggregated { dataSplit = dataSplitAggregated @@ -244,6 +257,7 @@ func (d *data) parseResponse(ctx context.Context, bodyReader io.ReadCloser, cond } var metricID uint32 + d.mut.Lock() defer func() { d.mut.Unlock() @@ -306,6 +320,7 @@ func (d *data) parseResponse(ctx context.Context, bodyReader io.ReadCloser, cond if !cond.aggregated { timestamps = make([]uint32, 0, arrayLen) row = row[readBytes:] + for i := uint64(0); i < arrayLen; i++ { timestamps = append(timestamps, binary.LittleEndian.Uint32(row[:4])) row = row[4:] @@ -316,6 +331,7 @@ func (d *data) parseResponse(ctx context.Context, bodyReader io.ReadCloser, cond pp.AppendPoint(metricID, values[i], times[i], timestamps[i]) } } + d.spent += time.Since(start) err := scanner.Err() @@ -325,7 +341,9 @@ func (d *data) parseResponse(ctx context.Context, bodyReader io.ReadCloser, cond // format the full error string; sometimes the parse failure is not at the start of the error string dataErr.PrependDescription(string(rowStart)) } + bodyReader.Close() + return err } diff --git a/render/data/data_parse_test.go index 71b1d9809..f7657a063 100644 --- a/render/data/data_parse_test.go +++ b/render/data/data_parse_test.go @@ -60,6 +60,7 @@ func testCarbonlinkReaderNil() *point.Points { func TestUnaggregatedDataParse(t *testing.T) { ctx := context.Background() cond := &conditions{Targets: &Targets{isReverse: false}, aggregated: false} + t.Run("empty response", func(t *testing.T) { body := []byte{} r := io.NopCloser(bytes.NewReader(body)) @@ -67,6 +68,7 @@ func TestUnaggregatedDataParse(t *testing.T) { err := d.parseResponse(ctx, r, cond) assert.NoError(t, err) + werr := d.wait(ctx) assert.NoError(t, werr) assert.Empty(t, d.Points.List()) @@ -99,21 +101,25 @@ func TestUnaggregatedDataParse(t *testing.T) { err := d.parseResponse(ctx, r, cond) assert.NoError(t, err) + werr := d.wait(ctx) assert.NoError(t, werr) // point number p := 0 + for j := 0; j < len(table[i]); j++ { for m := 0; m < len(table[i][j].PointValues.Times); m++ { assert.Equal(t, table[i][j].Metric, d.Points.MetricName(d.Points.List()[p].MetricID)) assert.Equal(t, table[i][j].PointValues.Times[m], d.Points.List()[p].Time) assert.Equal(t, table[i][j].PointValues.Values[m], d.Points.List()[p].Value) assert.Equal(t, table[i][j].PointValues.Timestamps[m], d.Points.List()[p].Timestamp) + p++ } } }) } + for i := 0; i < len(table); i++ { t.Run(fmt.Sprintf("reversed #%d", i), func(t *testing.T) { cond := &conditions{Targets: &Targets{isReverse: true}, aggregated: false} @@ -124,16 +130,19 @@ func TestUnaggregatedDataParse(t *testing.T) { err := d.parseResponse(ctx, r, cond) assert.NoError(t, err) + werr := d.wait(ctx) assert.NoError(t, werr) // point number p := 0 + for j := 0; j < len(table[i]);
j++ { for m := 0; m < len(table[i][j].PointValues.Times); m++ { assert.Equal(t, table[i][j].Metric, reverse.String(d.Points.MetricName(d.Points.List()[p].MetricID))) assert.Equal(t, table[i][j].PointValues.Times[m], d.Points.List()[p].Time) assert.Equal(t, table[i][j].PointValues.Values[m], d.Points.List()[p].Value) assert.Equal(t, table[i][j].PointValues.Timestamps[m], d.Points.List()[p].Timestamp) + p++ } } @@ -185,6 +194,7 @@ func TestUnaggregatedDataParse(t *testing.T) { // length of the first metric continue } + r := io.NopCloser(bytes.NewReader(body[:i])) d := prepareData(ctx, 1, testCarbonlinkReaderNil) @@ -198,6 +208,7 @@ func TestUnaggregatedDataParse(t *testing.T) { func TestAggregatedDataParse(t *testing.T) { ctx := context.Background() cond := &conditions{Targets: &Targets{isReverse: false}, aggregated: true} + t.Run("empty response", func(t *testing.T) { body := []byte{} d := prepareData(ctx, 1, testCarbonlinkReaderNil) @@ -233,6 +244,7 @@ func TestAggregatedDataParse(t *testing.T) { // length of the first metric continue } + r := io.NopCloser(bytes.NewReader(body[:i])) d := prepareData(ctx, 1, testCarbonlinkReaderNil) @@ -288,6 +300,7 @@ func TestAggregatedDataParse(t *testing.T) { {MetricID: 2, Value: 42.1, Time: 1520056686, Timestamp: 1520056686}, {MetricID: 2, Value: 43, Time: 1520056690, Timestamp: 1520056690}, } + assert.NoError(t, err) assert.Equal(t, result, d.Points.List()) }) @@ -323,6 +336,7 @@ func TestAggregatedDataParse(t *testing.T) { func TestPrepareDataParse(t *testing.T) { ctx := context.Background() + t.Run("empty datapoints", func(t *testing.T) { data := prepareData(ctx, 1, testCarbonlinkReaderNil) err := data.wait(ctx) @@ -333,6 +347,7 @@ func TestPrepareDataParse(t *testing.T) { t.Run("cancelled context", func(t *testing.T) { ctx, cancel := context.WithCancel(ctx) cancel() + data := prepareData(ctx, 1, testCarbonlinkReaderNil) err := data.wait(ctx) assert.ErrorIs(t, err, context.Canceled) @@ -346,6 +361,7 @@ func TestPrepareDataParse(t *testing.T) { extraPoints.MetricID("some.metric2") extraPoints.AppendPoint(1, 1, 3, 3) extraPoints.AppendPoint(2, 1, 3, 3) + reader := func() *point.Points { time.Sleep(1 * time.Millisecond) return extraPoints @@ -376,9 +392,12 @@ func TestAsyncDataParse(t *testing.T) { extraPoints.MetricID("some.metric2") extraPoints.AppendPoint(1, 1, 3, 3) extraPoints.AppendPoint(2, 1, 3, 3) + reader := func() *point.Points { return extraPoints } + ctx, cancel := context.WithTimeout(ctx, -1*time.Nanosecond) defer cancel() + d := prepareData(ctx, 1, reader) assert.Len(t, d.Points.List(), 0, "timeout should prevent points parsing") @@ -407,12 +426,15 @@ func TestAsyncDataParse(t *testing.T) { extraPoints.MetricID("some.metric2") extraPoints.AppendPoint(1, 1, 3, 3) extraPoints.AppendPoint(2, 1, 3, 3) + reader := func() *point.Points { time.Sleep(1 * time.Second) return extraPoints } + ctx, cancel := context.WithTimeout(ctx, 50*time.Nanosecond) defer cancel() + d := prepareData(ctx, 1, reader) err := d.wait(ctx) assert.Len(t, d.Points.List(), 0, "timeout should prevent points parsing") @@ -433,6 +455,7 @@ func TestAsyncDataParse(t *testing.T) { // fails after context is cancelled err = d.wait(ctx) assert.ErrorIs(t, err, context.Canceled) + r = io.NopCloser(bytes.NewReader(body)) err = d.parseResponse(ctx, r, cond) assert.ErrorIs(t, err, context.Canceled) diff --git a/render/data/multi_target.go b/render/data/multi_target.go index 7d1b7ad40..a62831f0e 100644 --- a/render/data/multi_target.go +++ b/render/data/multi_target.go @@ -43,11 +43,13 
@@ func MFRToMultiTarget(v3Request *v3pb.MultiFetchRequest) MultiTarget { } else { multiTarget[tf] = NewTargetsOne(m.PathExpression, len(v3Request.Metrics), alias.New()) } + if len(m.FilterFunctions) > 0 { multiTarget[tf].SetFilteringFunctions(m.PathExpression, m.FilterFunctions) } } } + return multiTarget } @@ -56,18 +58,22 @@ func (m *MultiTarget) checkMetricsLimitExceeded(num int) error { // zero or negative means unlimited return nil } + for _, t := range *m { if num < t.AM.Len() { return errs.NewErrorWithCode(fmt.Sprintf("metrics limit exceeded: %d < %d", num, t.AM.Len()), http.StatusForbidden) } } + return nil } func getDataTimeout(cfg *config.Config, m *MultiTarget) time.Duration { dataTimeout := cfg.ClickHouse.DataTimeout + if len(cfg.ClickHouse.QueryParams) > 1 { var maxDuration time.Duration + for tf := range *m { duration := time.Second * time.Duration(tf.Until-tf.From) if duration >= maxDuration { @@ -76,6 +82,7 @@ func getDataTimeout(cfg *config.Config, m *MultiTarget) time.Duration { } n := config.GetQueryParam(cfg.ClickHouse.QueryParams, maxDuration) + return cfg.ClickHouse.QueryParams[n].DataTimeout } @@ -84,6 +91,7 @@ func getDataTimeout(cfg *config.Config, m *MultiTarget) time.Duration { func GetQueryLimiter(username string, cfg *config.Config, m *MultiTarget) (string, limiter.ServerLimiter) { n := 0 + if username != "" && len(cfg.ClickHouse.UserLimits) > 0 { if u, ok := cfg.ClickHouse.UserLimits[username]; ok { return username, u.Limiter @@ -92,6 +100,7 @@ func GetQueryLimiter(username string, cfg *config.Config, m *MultiTarget) (strin if len(cfg.ClickHouse.QueryParams) > 1 { var maxDuration time.Duration + for tf := range *m { duration := time.Second * time.Duration(tf.Until-tf.From) if duration >= maxDuration { @@ -107,6 +116,7 @@ func GetQueryLimiter(username string, cfg *config.Config, m *MultiTarget) (strin func GetQueryLimiterFrom(username string, cfg *config.Config, from, until int64) limiter.ServerLimiter { n := 0 + if username != "" && len(cfg.ClickHouse.UserLimits) > 0 { if u, ok := cfg.ClickHouse.UserLimits[username]; ok { return u.Limiter @@ -125,6 +135,7 @@ func GetQueryParam(username string, cfg *config.Config, m *MultiTarget) (*config if len(cfg.ClickHouse.QueryParams) > 1 { var maxDuration time.Duration + for tf := range *m { duration := time.Second * time.Duration(tf.Until-tf.From) if duration >= maxDuration { @@ -145,7 +156,9 @@ func (m *MultiTarget) Fetch(ctx context.Context, cfg *config.Config, chContext s wg sync.WaitGroup entered int ) + logger := scope.Logger(ctx) + setCarbonlinkClient(&cfg.Carbonlink) err := m.checkMetricsLimitExceeded(cfg.Common.MaxMetricsPerTarget) @@ -161,6 +174,7 @@ func (m *MultiTarget) Fetch(ctx context.Context, cfg *config.Config, chContext s for i := 0; i < entered; i++ { qlimiter.Leave(ctxTimeout, "render") } + cancel() }() @@ -169,6 +183,7 @@ func (m *MultiTarget) Fetch(ctx context.Context, cfg *config.Config, chContext s for tf, targets := range *m { tf, targets := tf, targets + cond := &conditions{TimeFrame: &tf, Targets: targets, aggregated: cfg.ClickHouse.InternalAggregation, @@ -177,18 +192,22 @@ func (m *MultiTarget) Fetch(ctx context.Context, cfg *config.Config, chContext s if cond.MaxDataPoints <= 0 || int64(cfg.ClickHouse.MaxDataPoints) < cond.MaxDataPoints { cond.MaxDataPoints = int64(cfg.ClickHouse.MaxDataPoints) } + err := cond.selectDataTable(cfg, cond.TimeFrame, chContext) if err != nil { lock.Lock() errors = append(errors, err) lock.Unlock() logger.Error("data tables is not specified", zap.Error(err)) + 
return EmptyResponse(), err } + if qlimiter.Enabled() { start := time.Now() err = qlimiter.Enter(ctxTimeout, "render") *queueDuration += time.Since(start) + if err != nil { // status = http.StatusServiceUnavailable // queueFail = true @@ -196,23 +215,31 @@ func (m *MultiTarget) Fetch(ctx context.Context, cfg *config.Config, chContext s lock.Lock() errors = append(errors, err) lock.Unlock() + break } + entered++ } + wg.Add(1) + go func(cond *conditions) { defer wg.Done() + err := query.getDataPoints(ctxTimeout, cond) if err != nil { lock.Lock() errors = append(errors, err) lock.Unlock() + return } }(cond) } + wg.Wait() + for len(errors) != 0 { return EmptyResponse(), errors[0] } diff --git a/render/data/query.go b/render/data/query.go index 55d2a4cd8..150e74fd1 100644 --- a/render/data/query.go +++ b/render/data/query.go @@ -136,9 +136,11 @@ func (q *query) getParam(from, until int64) (string, time.Duration) { func (q *query) getDataPoints(ctx context.Context, cond *conditions) error { logger := scope.Logger(ctx) + var err error cond.prepareMetricsLists() + if len(cond.metricsRequested) == 0 { q.cStep.doneTarget() return nil @@ -152,26 +154,35 @@ func (q *query) getDataPoints(ctx context.Context, cond *conditions) error { logger.Error("prepare_lookup", zap.Error(err)) return errs.NewErrorWithCode(err.Error(), http.StatusBadRequest) } + cond.setStep(q.cStep) + if cond.step < 1 { return ErrSetStepTimeout } + cond.setFromUntil() cond.setPrewhere() cond.setWhere() queryContext, queryCancel := context.WithCancel(ctx) defer queryCancel() + data := prepareData(queryContext, len(cond.extDataBodies), carbonlinkResponseRead) var ch_read_bytes, ch_read_rows int64 + for agg, extTableBody := range cond.extDataBodies { extData := q.metricsListExtData(extTableBody) query := cond.generateQuery(agg) + data.wg.Add(1) + go func() { defer data.wg.Done() + chURL, chDataTimeout := q.getParam(cond.from, cond.until) + body, err := clickhouse.Reader( scope.WithTable(ctx, cond.pointsTable), chURL, @@ -186,15 +197,18 @@ func (q *query) getDataPoints(ctx context.Context, cond *conditions) error { if err == nil { atomic.AddInt64(&ch_read_bytes, body.ChReadBytes()) atomic.AddInt64(&ch_read_rows, body.ChReadRows()) + err = data.parseResponse(queryContext, body, cond) if err != nil { logger.Error("reader", zap.Error(err)) data.e <- err + queryCancel() } } else { logger.Error("reader", zap.Error(err)) data.e <- err + queryCancel() } }() @@ -202,13 +216,16 @@ func (q *query) getDataPoints(ctx context.Context, cond *conditions) error { err = data.wait(queryContext) metrics.SendQueryRead(cond.queryMetrics, cond.from, cond.until, data.spent.Milliseconds(), int64(data.Points.Len()), int64(data.length), ch_read_rows, ch_read_bytes, err != nil) + if err != nil { logger.Error( "data_parser", zap.Error(err), zap.Int("read_bytes", data.length), zap.String("runtime", data.spent.String()), zap.Duration("runtime_ns", data.spent), ) + return err } + logger.Info( "data_parse", zap.Int("read_bytes", data.length), zap.Int("read_points", data.Points.Len()), zap.String("runtime", data.spent.String()), zap.Duration("runtime_ns", data.spent), @@ -221,17 +238,22 @@ func (q *query) getDataPoints(ctx context.Context, cond *conditions) error { // But if carbonlink is used, we still need to sort, filter and rollup points if !cond.aggregated || carbonlink != nil { sortStart := time.Now() + data.Points.Sort() + d := time.Since(sortStart) logger.Debug("sort", zap.String("runtime", d.String()), zap.Duration("runtime_ns", d)) data.Points.Uniq() + 
rollupStart := time.Now() + err = cond.rollupRules.RollupPoints(data.Points, cond.From, data.CommonStep) if err != nil { logger.Error("rollup failed", zap.Error(err)) return err } + rollupTime := time.Since(rollupStart) logger.Debug( "rollup", @@ -249,6 +271,7 @@ func (q *query) getDataPoints(ctx context.Context, cond *conditions) error { AppendOutEmptySeries: cond.appendEmptySeries, AppliedFunctions: cond.appliedFunctions, }) + return nil } @@ -265,6 +288,7 @@ func (q *query) metricsListExtData(body *strings.Builder) *clickhouse.ExternalDa extData := clickhouse.NewExternalData(extTable) extData.SetDebug(q.debugDir, q.debugExtDataPerm) + return extData } @@ -305,9 +329,11 @@ func (c *conditions) prepareLookup() error { if err != nil { return fmt.Errorf("failed to choose appropriate aggregation for '%s': %s", alias.Target, err.Error()) } + if requestedAgg != "" { agg = rollup.AggrMap[requestedAgg] c.appliedFunctions[alias.Target] = []string{graphiteConsolidationFunction} + break } } @@ -332,6 +358,7 @@ func (c *conditions) prepareLookup() error { if c.aggregated { aggName = agg.Name() } + if mm, ok := c.extDataBodies[aggName]; ok { mm.WriteString(c.metricsRequested[i] + "\n") } else { @@ -340,6 +367,7 @@ func (c *conditions) prepareLookup() error { mm.WriteString(c.metricsRequested[i] + "\n") } } + return nil } @@ -347,12 +375,15 @@ var ErrSetStepTimeout = errors.New("unexpected error, setStep timeout") func (c *conditions) setStep(cStep *commonStep) { step := int64(0) + if !c.aggregated { // Use max(steps) for s := range c.steps { step = dry.Max(step, int64(s)) } + c.step = step + return } @@ -362,14 +393,18 @@ func (c *conditions) setStep(cStep *commonStep) { for s := range c.steps { step = cStep.calculateUnsafe(step, int64(s)) } + cStep.calculate(step) + rStep := cStep.getResult() if rStep == -1 { c.step = -1 return } + step = dry.Max(rStep, dry.Ceil(c.Until-c.From, c.MaxDataPoints)) c.step = dry.CeilToMultiplier(step, rStep) + return } @@ -395,6 +430,7 @@ func (c *conditions) generateQuery(agg string) string { if c.aggregated { return c.generateQueryaAggregated(agg) } + return c.generateQueryUnaggregated() } diff --git a/render/data/query_test.go b/render/data/query_test.go index 28772a39e..0a258e1e8 100644 --- a/render/data/query_test.go +++ b/render/data/query_test.go @@ -31,6 +31,7 @@ var finderResult *finder.MockFinder = finder.NewMockFinder([][]byte{ func newAM() *alias.Map { am := alias.New() am.MergeTarget(finderResult, "*.name.*", false) + return am } @@ -39,7 +40,9 @@ func newRules(reversed bool) *rollup.Rules { oneMin := []rollup.Retention{{Age: 0, Precision: 60}, {Age: 3600, Precision: 300}} fiveMin := []rollup.Retention{{Age: 0, Precision: 300}, {Age: 3600, Precision: 1200}} emptyRet := make([]rollup.Retention, 0) + var pattern []rollup.Pattern + if reversed { pattern = []rollup.Pattern{ genPattern("[.]5_sec$", "", fiveSec), @@ -59,7 +62,9 @@ func newRules(reversed bool) *rollup.Rules { genPattern("[.]avg$", "avg", emptyRet), } } + rules, _ := rollup.NewMockRules(pattern, 30, "avg") + return rules } @@ -73,6 +78,7 @@ func newCondition(fromAge, untilAge, maxDataPoints int64) *conditions { tt := NewTargets([]string{"*.name.*"}, newAM()) tt.pointsTable = "graphite.data" tt.rollupRules = newRules(false) + return &conditions{TimeFrame: &tf, Targets: tt} } @@ -81,6 +87,7 @@ func extTableString(et map[string]*strings.Builder) map[string]string { for a := range et { ett[a] = et[a].String() } + return ett } @@ -90,6 +97,7 @@ func TestPrepareMetricsLists(t *testing.T) { cond.isReverse 
= false cond.rollupUseReverted = false cond.prepareMetricsLists() + expectedSeries := finderResult.Strings() sort.Strings(expectedSeries) sort.Strings(cond.metricsLookup) @@ -115,15 +123,19 @@ func TestPrepareMetricsLists(t *testing.T) { cond.isReverse = true cond.rollupUseReverted = false cond.prepareMetricsLists() + for i := range cond.metricsRequested { assert.Equal(t, cond.metricsRequested[i], reverse.String(cond.metricsUnreverse[i])) } + expectedSeries := finderResult.Strings() sort.Strings(expectedSeries) + expectedSeriesReversed := make([]string, len(expectedSeries)) for i := range expectedSeries { expectedSeriesReversed[i] = reverse.String(expectedSeries[i]) } + sort.Strings(expectedSeriesReversed) sort.Strings(cond.metricsLookup) sort.Strings(cond.metricsRequested) @@ -134,9 +146,11 @@ func TestPrepareMetricsLists(t *testing.T) { cond.rollupUseReverted = true cond.prepareMetricsLists() + for i := range cond.metricsRequested { assert.Equal(t, cond.metricsRequested[i], reverse.String(cond.metricsUnreverse[i])) } + sort.Strings(cond.metricsLookup) sort.Strings(cond.metricsRequested) sort.Strings(cond.metricsUnreverse) @@ -163,6 +177,7 @@ func TestPrepareLookup(t *testing.T) { sort.Strings(cond.metricsRequested) sort.Strings(cond.metricsUnreverse) cond.prepareLookup() + aggregations := map[string][]string{ "avg": {"10_min.name.any", "1_min.name.avg"}, "max": {"5_sec.name.max"}, @@ -177,15 +192,18 @@ func TestPrepareLookup(t *testing.T) { 1200: {}, } assert.Equal(t, steps, cond.steps) + bodies := make(map[string]string) for a, m := range aggregations { bodies[a] = strings.Join(m, "\n") + "\n" } + assert.Equal(t, bodies, extTableString(cond.extDataBodies)) cond.From = ageToTimestamp(1800) cond.Until = ageToTimestamp(0) cond.prepareLookup() + steps = map[uint32][]string{ 30: {}, 60: {}, @@ -206,6 +224,7 @@ func TestPrepareLookup(t *testing.T) { sort.Strings(cond.metricsRequested) sort.Strings(cond.metricsUnreverse) cond.prepareLookup() + aggregations := map[string][]string{ "avg": {"10_min.name.any", "1_min.name.avg"}, "max": {"5_sec.name.max"}, @@ -220,12 +239,14 @@ func TestPrepareLookup(t *testing.T) { 1200: {"5_min.name.min"}, } assert.Equal(t, steps, cond.steps) + bodies := map[string]string{"": "10_min.name.any\n1_min.name.avg\n5_min.name.min\n5_sec.name.max\n"} assert.Equal(t, bodies, extTableString(cond.extDataBodies)) cond.From = ageToTimestamp(1800) cond.Until = ageToTimestamp(0) cond.prepareLookup() + steps = map[uint32][]string{ 5: {"5_sec.name.max"}, 30: {"10_min.name.any"}, @@ -245,6 +266,7 @@ func TestPrepareLookup(t *testing.T) { sort.Strings(cond.metricsUnreverse) sort.Strings(cond.metricsRequested) cond.prepareLookup() + aggregations := map[string][]string{ "avg": {"10_min.name.any", "1_min.name.avg", "5_min.name.min", "5_sec.name.max"}, } @@ -254,6 +276,7 @@ func TestPrepareLookup(t *testing.T) { 30: {"10_min.name.any", "1_min.name.avg", "5_min.name.min", "5_sec.name.max"}, } assert.Equal(t, steps, cond.steps) + bodies := map[string]string{"": "any.name.10_min\navg.name.1_min\nmax.name.5_sec\nmin.name.5_min\n"} assert.Equal(t, bodies, extTableString(cond.extDataBodies)) @@ -272,9 +295,11 @@ func TestPrepareLookup(t *testing.T) { cond.isReverse = true cond.prepareMetricsLists() cond.prepareLookup() + for a := range cond.aggregations { sort.Strings(cond.aggregations[a]) } + aggregations := map[string][]string{ "avg": {"10_min.name.any", "1_min.name.avg"}, "max": {"5_sec.name.max"}, @@ -293,15 +318,18 @@ func TestPrepareLookup(t *testing.T) { cond.From = 
ageToTimestamp(1800) cond.Until = ageToTimestamp(0) cond.prepareLookup() + steps = map[uint32][]string{ 5: {"5_sec.name.max"}, 30: {"10_min.name.any"}, 60: {"1_min.name.avg"}, 300: {"5_min.name.min"}, } + for a := range cond.aggregations { sort.Strings(cond.aggregations[a]) } + assert.Equal(t, steps, cond.steps) assert.Equal(t, aggregations, cond.aggregations) }) @@ -315,13 +343,16 @@ func TestPrepareLookup(t *testing.T) { sort.Strings(cond.metricsLookup) sort.Strings(cond.metricsRequested) sort.Strings(cond.metricsUnreverse) + var aggregations map[string][]string + for _, aggrStr := range []string{"avg", "min", "max", "sum"} { cond.SetFilteringFunctions( "*.name.*", []*v3pb.FilteringFunction{{Name: "consolidateBy", Arguments: []string{aggrStr}}}, ) cond.prepareLookup() + aggregations = map[string][]string{ aggrStr: {"10_min.name.any", "1_min.name.avg", "5_min.name.min", "5_sec.name.max"}, } @@ -336,15 +367,18 @@ func TestPrepareLookup(t *testing.T) { 1200: {}, } assert.Equal(t, steps, cond.steps) + bodies := make(map[string]string) for a, m := range aggregations { bodies[a] = strings.Join(m, "\n") + "\n" } + assert.Equal(t, bodies, extTableString(cond.extDataBodies)) cond.From = ageToTimestamp(1800) cond.Until = ageToTimestamp(0) cond.prepareLookup() + steps = map[uint32][]string{ 30: {}, 60: {}, @@ -363,12 +397,15 @@ func TestSetStep(t *testing.T) { cond.prepareMetricsLists() cond.prepareLookup() cond.setStep(nil) + var step int64 = 300 + assert.Equal(t, step, cond.step) cond.From = ageToTimestamp(5400) cond.prepareLookup() cond.setStep(nil) + step = 1200 assert.Equal(t, step, cond.step) }) @@ -387,7 +424,9 @@ func TestSetStep(t *testing.T) { cStep.addTargets(1) cond.setStep(cStep) + var step int64 = 1800 / 2 + assert.Equal(t, step, cond.step) cStep.addTargets(1) @@ -396,6 +435,7 @@ func TestSetStep(t *testing.T) { cond.Until = ageToTimestamp(700) cond.MaxDataPoints = 5 cond.setStep(cStep) + step = 300 assert.Equal(t, step, cond.step) @@ -404,6 +444,7 @@ func TestSetStep(t *testing.T) { cond.MaxDataPoints = 10 cond.steps = map[uint32][]string{1: {}, 5: {}, 3: {}, 4: {}} cond.setStep(cStep) + step = 60 assert.Equal(t, step, cond.step) @@ -412,12 +453,15 @@ func TestSetStep(t *testing.T) { cond.MaxDataPoints = 7 cond.steps = map[uint32][]string{1: {}, 5: {}, 8: {}, 4: {}} cond.setStep(cStep) + step = 80 assert.Equal(t, step, cond.step) cStep.addTargets(1) + cond.MaxDataPoints = 6 cond.setStep(cStep) + step = 120 assert.Equal(t, step, cond.step) }) @@ -429,10 +473,12 @@ func TestSetFromUntil(t *testing.T) { until int64 step int64 } + type out struct { from int64 until int64 } + tests := []struct { in in out out @@ -460,12 +506,14 @@ func TestSetFromUntil(t *testing.T) { // prewhere, where and both generators are checked here func TestGenerateQuery(t *testing.T) { table := "graphite.table" + type in struct { from int64 until int64 step int64 agg string } + tests := []struct { in in aggregated string @@ -519,6 +567,7 @@ func TestGenerateQuery(t *testing.T) { cond.setWhere() unaggQuery := cond.generateQuery(test.in.agg) assert.Equal(t, test.unaggregated, unaggQuery) + cond.aggregated = true aggQuery := cond.generateQuery(test.in.agg) assert.Equal(t, test.aggregated, aggQuery) diff --git a/render/data/targets.go b/render/data/targets.go index 00de1a284..de602d0be 100644 --- a/render/data/targets.go +++ b/render/data/targets.go @@ -46,6 +46,7 @@ func NewTargets(list []string, am *alias.Map) *Targets { AM: am, filteringFunctionsByTarget: make(FilteringFunctionsByTarget), } + return targets } @@ 
-58,6 +59,7 @@ func NewTargetsOne(target string, capacity int, am *alias.Map) *Targets { AM: am, filteringFunctionsByTarget: make(FilteringFunctionsByTarget), } + return targets } diff --git a/render/data/targets_test.go b/render/data/targets_test.go index d717fc72f..3bc086f20 100644 --- a/render/data/targets_test.go +++ b/render/data/targets_test.go @@ -47,6 +47,7 @@ func TestSelectDataTableTime(t *testing.T) { } err := cfg.ProcessDataTables() assert.NoError(t, err) + tg := NewTargets([]string{"metric"}, nil) tests := []struct { @@ -117,6 +118,7 @@ func TestSelectDataTableMatch(t *testing.T) { } err := cfg.ProcessDataTables() assert.NoError(t, err) + tf := &TimeFrame{ageToTimestamp(3600*24 - 1), ageToTimestamp(1800), 1} tests := []struct { diff --git a/render/handler.go b/render/handler.go index 8f6feffe3..8c4b13629 100644 --- a/render/handler.go +++ b/render/handler.go @@ -46,6 +46,7 @@ func getCacheTimeout(now time.Time, from, until int64, cacheConfig *config.Cache if cacheConfig.ShortDuration == 0 { return cacheConfig.DefaultTimeoutSec, cacheConfig.DefaultTimeoutStr, metrics.DefaultCacheMetrics } + duration := time.Second * time.Duration(until-from) if duration > cacheConfig.ShortDuration || now.Unix()-until > cacheConfig.ShortUntilOffsetSec { return cacheConfig.DefaultTimeoutSec, cacheConfig.DefaultTimeoutStr, metrics.DefaultCacheMetrics @@ -57,12 +58,17 @@ func getCacheTimeout(now time.Time, from, until int64, cacheConfig *config.Cache // try to fetch cached finder queries func (h *Handler) finderCached(ts time.Time, fetchRequests data.MultiTarget, logger *zap.Logger, metricsLen *int) (cachedFind int, maxCacheTimeoutStr string, err error) { var lock sync.RWMutex + var maxCacheTimeout int32 + errors := make([]error, 0, len(fetchRequests)) + var wg sync.WaitGroup + for tf, targets := range fetchRequests { for i, expr := range targets.List { wg.Add(1) + go func(tf data.TimeFrame, target string, targets *data.Targets, n int) { defer wg.Done() @@ -72,12 +78,15 @@ func (h *Handler) finderCached(ts time.Time, fetchRequests data.MultiTarget, log maxCacheTimeout = targets.Cache[n].Timeout maxCacheTimeoutStr = targets.Cache[n].TimeoutStr } + targets.Cache[n].TS = utils.TimestampTruncate(ts.Unix(), time.Duration(targets.Cache[n].Timeout)*time.Second) targets.Cache[n].Key = targetKey(tf.From, tf.Until, target, targets.Cache[n].TimeoutStr) + body, err := h.config.Common.FindCache.Get(targets.Cache[n].Key) if err == nil { if len(body) > 0 { targets.Cache[n].M.CacheHits.Add(1) + var f finder.Finder if strings.HasPrefix(target, "seriesByTag(") { f = finder.NewCachedTags(body) @@ -90,6 +99,7 @@ func (h *Handler) finderCached(ts time.Time, fetchRequests data.MultiTarget, log amLen := targets.AM.Len() *metricsLen += amLen lock.Unlock() + targets.Cache[n].Cached = true logger.Info("finder", zap.String("get_cache", targets.Cache[n].Key), zap.Time("timestamp_cached", time.Unix(targets.Cache[n].TS, 0)), @@ -97,29 +107,37 @@ func (h *Handler) finderCached(ts time.Time, fetchRequests data.MultiTarget, log zap.String("ttl", targets.Cache[n].TimeoutStr), zap.Int64("from", tf.From), zap.Int64("until", tf.Until)) } + return } } }(tf, expr, targets, i) } } + wg.Wait() + if len(errors) != 0 { err = errors[0] return } + for _, targets := range fetchRequests { var cached int + for _, c := range targets.Cache { if c.Cached { cached++ } } + cachedFind += cached + if cached == len(targets.Cache) { targets.Cached = true } } + return } @@ -132,6 +150,7 @@ func (h *Handler) finder(fetchRequests data.MultiTarget, ctx 
context.Context, lo limitCtx context.Context cancel context.CancelFunc ) + if qlimiter.Enabled() { // no reason wait longer than index-timeout limitCtx, cancel = context.WithTimeout(ctx, h.config.ClickHouse.IndexTimeout) @@ -139,53 +158,68 @@ func (h *Handler) finder(fetchRequests data.MultiTarget, ctx context.Context, lo for i := 0; i < entered; i++ { qlimiter.Leave(limitCtx, "render") } + defer cancel() }() } errors := make([]error, 0, len(fetchRequests)) + for tf, targets := range fetchRequests { for i, expr := range targets.List { d := tf.Until - tf.From if maxDuration < d { maxDuration = d } + if targets.Cache[i].Cached { continue } + if qlimiter.Enabled() { start := time.Now() err = qlimiter.Enter(limitCtx, "render") *queueDuration += time.Since(start) + if err != nil { lock.Lock() errors = append(errors, err) lock.Unlock() + break } + entered++ } + wg.Add(1) + go func(tf data.TimeFrame, target string, targets *data.Targets, n int) { defer wg.Done() var fndResult finder.Result + var err error // Search in small index table first var stat finder.FinderStat + fStart := time.Now() fndResult, err = finder.Find(h.config, ctx, target, tf.From, tf.Until, &stat) d := time.Since(fStart).Milliseconds() + if err != nil { metrics.SendQueryReadByTable(stat.Table, tf.From, tf.Until, d, 0, 0, stat.ChReadRows, stat.ChReadBytes, true) logger.Error("find", zap.Error(err)) lock.Lock() errors = append(errors, err) lock.Unlock() + return } + body := targets.AM.MergeTarget(fndResult, target, useCache) + cacheTimeout := targets.Cache[n].Timeout if useCache && cacheTimeout > 0 { cacheTimeoutStr := targets.Cache[n].TimeoutStr @@ -197,18 +231,23 @@ func (h *Handler) finder(fetchRequests data.MultiTarget, ctx context.Context, lo zap.String("ttl", cacheTimeoutStr), zap.Int64("from", tf.From), zap.Int64("until", tf.Until)) } + lock.Lock() rows := targets.AM.Len() lock.Unlock() + *metricsLen += rows metrics.SendQueryReadByTable(stat.Table, tf.From, tf.Until, d, int64(rows), stat.ReadBytes, stat.ChReadRows, stat.ChReadBytes, false) }(tf, expr, targets, i) } } + wg.Wait() + if len(errors) != 0 { err = errors[0] } + return } @@ -226,6 +265,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fetchRequests data.MultiTarget luser string ) + start := time.Now() status := http.StatusOK accessLogger := scope.LoggerWithHeaders(r.Context(), r, h.config.Common.HeadersToLog).Named("http") @@ -234,19 +274,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { r = r.WithContext(scope.WithLogger(r.Context(), logger)) username := r.Header.Get("X-Forwarded-User") + var qlimiter limiter.ServerLimiter = limiter.NoopLimiter{} defer func() { if rec := recover(); rec != nil { status = http.StatusInternalServerError + logger.Error("panic during eval:", zap.String("requestID", scope.String(r.Context(), "requestID")), zap.Any("reason", rec), zap.Stack("stack"), ) + answer := fmt.Sprintf("%v\nStack trace: %v", rec, zap.Stack("").String) http.Error(w, answer, status) } + end := time.Now() logs.AccessLog(accessLogger, h.config, r, status, end.Sub(start), queueDuration, cachedFind, queueFail) qlimiter.SendDuration(queueDuration.Milliseconds()) @@ -254,11 +298,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { }() r.ParseMultipartForm(1024 * 1024) + formatter, err := reply.GetFormatter(r) if err != nil { status = http.StatusBadRequest + logger.Error("formatter", zap.Error(err)) http.Error(w, fmt.Sprintf("Failed to parse request: %v", err.Error()), status) + return } @@ -266,8 
+313,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { status = http.StatusBadRequest http.Error(w, fmt.Sprintf("Failed to parse request: %v", err.Error()), status) + return } + for tf, targets := range fetchRequests { if tf.From >= tf.Until { // wrong duration @@ -276,6 +325,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } + targetsLen += len(targets.List) } @@ -283,22 +333,28 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { logger.Debug("use user limiter", zap.String("username", username), zap.String("luser", luser)) var maxCacheTimeoutStr string + useCache := h.config.Common.FindCache != nil && !parser.TruthyBool(r.FormValue("noCache")) if useCache { var cached int + cached, maxCacheTimeoutStr, err = h.finderCached(start, fetchRequests, logger, &metricsLen) if err != nil { status, _ = clickhouse.HandleError(w, err) return } + if cached > 0 { if cached == targetsLen && metricsLen == 0 { // all from cache and no metric status = http.StatusNotFound + formatter.Reply(w, r, data.EmptyResponse()) + return } + cachedFind = true } } @@ -314,9 +370,12 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if cachedFind { w.Header().Set("X-Cached-Find", maxCacheTimeoutStr) } + if metricsLen == 0 { status = http.StatusNotFound + formatter.Reply(w, r, data.EmptyResponse()) + return } @@ -330,15 +389,20 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if len(reply) == 0 { status = http.StatusNotFound + formatter.Reply(w, r, reply) + return } for i := range reply { pointsCount += int64(reply[i].Data.Len()) } + rStart := time.Now() + formatter.Reply(w, r, reply) + d := time.Since(rStart) logger.Debug("reply", zap.String("runtime", d.String()), zap.Duration("runtime_ns", d)) } diff --git a/render/handler_test.go b/render/handler_test.go index f464bb849..93075a492 100644 --- a/render/handler_test.go +++ b/render/handler_test.go @@ -83,6 +83,7 @@ func Test_getCacheTimeout(t *testing.T) { if got != tt.want { t.Errorf("getCacheTimeout() = %v, want %v", got, tt.want) } + if gotStr != tt.wantStr { t.Errorf("getCacheTimeout() = %q, want %q", gotStr, tt.wantStr) } diff --git a/render/reply/formatter.go b/render/reply/formatter.go index 234094fb4..966825f49 100644 --- a/render/reply/formatter.go +++ b/render/reply/formatter.go @@ -34,15 +34,19 @@ func GetFormatter(r *http.Request) (Formatter, error) { case "carbonapi_v2_pb": return &V2PB{}, nil } + err := fmt.Errorf("format %v is not supported, supported formats: carbonapi_v3_pb, pickle, protobuf (aka carbonapi_v2_pb)", format) if !scope.Debug(r.Context(), "Output") { return nil, err } + switch format { case "json": return &JSON{}, nil } + err = fmt.Errorf("%w\n(formats available for output debug: json)", err) + return nil, err } diff --git a/render/reply/formatter_test.go b/render/reply/formatter_test.go index 1a0dcaf63..e64c615de 100644 --- a/render/reply/formatter_test.go +++ b/render/reply/formatter_test.go @@ -61,6 +61,7 @@ func TestFormatterReply(t *testing.T) { {&JSON{}, "json", client.FormatJSON}, {&Pickle{}, "pickle", client.FormatPickle}, } + tests := []struct { name string input data.CHResponses @@ -105,6 +106,7 @@ func TestFormatterReply(t *testing.T) { }, results[1:]...), }, } + for _, formatter := range formatters { t.Run(fmt.Sprintf("format=%s", formatter.name), func(t *testing.T) { for _, tt := range tests { @@ -112,17 +114,21 @@ func TestFormatterReply(t *testing.T) { // case 1: test for 
AppendOutEmptySeries = true for i := 0; i < 2; i++ { var expected []client.Metric + var testName string + switch i { case 0: expected = tt.expectedWithoutEmpty testName = fmt.Sprintf("NoAppend: %s", tt.name) + for j := range tt.input { tt.input[j].AppendOutEmptySeries = false } case 1: expected = tt.expectedWithEmpty testName = fmt.Sprintf("WithAppend: %s", tt.name) + for j := range tt.input { tt.input[j].AppendOutEmptySeries = true } @@ -134,12 +140,14 @@ func TestFormatterReply(t *testing.T) { // ctx = scope.WithDebug(ctx, "Protobuf") // } w := httptest.NewRecorder() + r, err := http.NewRequestWithContext(ctx, "", "", nil) if err != nil { require.NoErrorf(t, err, "failed to create request") } formatter.impl.Reply(w, r, tt.input) + response := w.Result() defer response.Body.Close() @@ -149,6 +157,7 @@ func TestFormatterReply(t *testing.T) { require.NoError(t, err) got, err := client.Decode(data, formatter.format) require.NoError(t, err) + if !equalMetrics(expected, got) { t.Errorf("metrics not equal: expected:\n%#v\ngot:\n%#v\n", expected, got) } @@ -168,20 +177,24 @@ func prepareCHResponses(from, until int64, indices [][]byte, points map[string][ // points pts := point.NewPoints() + stringIndex := make([]string, 0, len(indices)) for _, each := range indices { stringIndex = append(stringIndex, string(each)) } + for k, v := range points { id := pts.MetricID(k) for _, eachPoint := range v { pts.AppendPoint(id, eachPoint.Value, eachPoint.Time, eachPoint.Timestamp) } } + pts.SetAggregations(map[string][]string{ "avg": stringIndex, }) sort.Sort(pts) + return data.CHResponses{{ Data: &data.Data{ Points: pts, @@ -199,6 +212,7 @@ func emptyValues(size int) []float64 { for i := 0; i < size; i++ { arr = append(arr, math.NaN()) } + return arr } @@ -208,12 +222,14 @@ func equalMetrics(m1, m2 []client.Metric) bool { if len(m1) != len(m2) { return false } + sort.Slice(m1, func(i, j int) bool { return m1[i].Name < m1[j].Name }) sort.Slice(m2, func(i, j int) bool { return m2[i].Name < m2[j].Name }) + for i := 0; i < len(m1); i++ { // compare props if m1[i].Name != m2[i].Name || @@ -226,15 +242,18 @@ func equalMetrics(m1, m2 []client.Metric) bool { if len(m1[i].Values) != len(m2[i].Values) { return false } + for j := 0; j < len(m1[i].Values); j++ { a, b := m1[i].Values[j], m2[i].Values[j] if math.IsNaN(a) && math.IsNaN(b) { continue } + if a != b { return false } } } + return true } diff --git a/render/reply/json.go b/render/reply/json.go index 8c8f0dd0f..d1db1d330 100644 --- a/render/reply/json.go +++ b/render/reply/json.go @@ -22,45 +22,59 @@ type JSON struct{} func marshalJSON(mfr *v3pb.MultiFetchResponse) []byte { buf := bytes.Buffer{} buf.WriteString(`{"metrics":[`) + for _, m := range mfr.Metrics { buf.WriteRune('{') + if m.Name != "" { buf.WriteString(fmt.Sprintf(`"name":%q,`, m.Name)) } + if m.PathExpression != "" { buf.WriteString(fmt.Sprintf(`"pathExpression":%q,`, m.PathExpression)) } + if m.ConsolidationFunc != "" { buf.WriteString(fmt.Sprintf(`"consolidationFunc":%q,`, m.ConsolidationFunc)) } + buf.WriteString(fmt.Sprintf(`"startTime":%d,`, m.StartTime)) buf.WriteString(fmt.Sprintf(`"stopTime":%d,`, m.StopTime)) buf.WriteString(fmt.Sprintf(`"stepTime":%d,`, m.StepTime)) buf.WriteString(fmt.Sprintf(`"xFilesFactor":%f,`, m.XFilesFactor)) + if m.HighPrecisionTimestamps { buf.WriteString(`"highPrecisionTimestamp":true,`) } + if len(m.Values) != 0 { buf.WriteString(`"values":[`) + for _, v := range m.Values { if math.IsNaN(v) || math.IsInf(v, 0) { buf.WriteString("null,") continue } + 
buf.WriteString(fmt.Sprintf("%f,", v)) } + buf.Truncate(buf.Len() - 1) buf.WriteString("],") } + buf.WriteString(fmt.Sprintf(`"requestStartTime":%d,`, m.RequestStartTime)) buf.WriteString(fmt.Sprintf(`"requestStopTime":%d,`, m.RequestStopTime)) buf.Truncate(buf.Len() - 1) buf.WriteString("},") } + if len(mfr.Metrics) != 0 { buf.Truncate(buf.Len() - 1) } + buf.WriteString("]}") + return buf.Bytes() } @@ -68,11 +82,14 @@ func parseJSONBody(r *http.Request) (data.MultiTarget, error) { logger := scope.Logger(r.Context()).Named("json_parser") var pv3Request v3pb.MultiFetchRequest + err := json.NewDecoder(r.Body).Decode(&pv3Request) if err != nil { return nil, err } + fetchRequests := data.MFRToMultiTarget(&pv3Request) + if len(pv3Request.Metrics) > 0 { for _, m := range pv3Request.Metrics { logger.Info( @@ -84,6 +101,7 @@ func parseJSONBody(r *http.Request) (data.MultiTarget, error) { ) } } + return fetchRequests, nil } @@ -93,10 +111,12 @@ func (*JSON) ParseRequest(r *http.Request) (data.MultiTarget, error) { if !scope.Debug(r.Context(), "Output") { return nil, errors.New("json format is only enabled for debugging purposes, pass 'X-Gch-Debug-Output: true' header") } + fetchRequests, err := parseJSONBody(r) if err == nil { return fetchRequests, err } + return parseRequestForms(r) } @@ -107,6 +127,7 @@ func (*JSON) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHResp http.Error(w, fmt.Sprintf("failed to convert response to v3pb.MultiFetchResponse: %v", err), http.StatusInternalServerError) return } + response := marshalJSON(mfr) w.Write(response) } diff --git a/render/reply/pickle.go b/render/reply/pickle.go index 067e13d67..4a241f1e3 100644 --- a/render/reply/pickle.go +++ b/render/reply/pickle.go @@ -54,6 +54,7 @@ func (*Pickle) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe writeAlias := func(name string, pathExpression string, points []point.Point, step uint32) { pickleStart := time.Now() + p.Dict() p.String("name") @@ -72,6 +73,7 @@ func (*Pickle) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe p.String("values") p.List() + for { value, err := getValue() if err != nil { @@ -81,12 +83,15 @@ func (*Pickle) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe // if err is not point.ErrTimeGreaterStop, the points are corrupted return } + if !math.IsNaN(value) { p.AppendFloat64(value) continue } + p.AppendNulls(1) } + p.SetItem() p.String("start") @@ -98,6 +103,7 @@ func (*Pickle) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe p.SetItem() p.Append() + pickleTime += time.Since(pickleStart) } @@ -105,15 +111,19 @@ func (*Pickle) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe writeMetric := func(points []point.Point, writeMap map[string]struct{}) error { metricName := data.MetricName(points[0].MetricID) writeMap[metricName] = struct{}{} + step, err := data.GetStep(points[0].MetricID) if err != nil { logger.Error("fail to get step", zap.Error(err)) http.Error(w, fmt.Sprintf("failed to get step for metric: %v", data.MetricName(points[0].MetricID)), http.StatusInternalServerError) + return err } + for _, a := range data.AM.Get(metricName) { writeAlias(a.DisplayName, a.Target, points, step) } + return nil } @@ -125,6 +135,7 @@ func (*Pickle) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe if len(points) == 0 { break } + if err := writeMetric(points, writtenMetrics); err != nil { return } diff --git a/render/reply/protobuf.go b/render/reply/protobuf.go index 5f180c5ae..7a051ff59 
100644 --- a/render/reply/protobuf.go +++ b/render/reply/protobuf.go @@ -38,6 +38,7 @@ func replyProtobuf(p pb, w http.ResponseWriter, r *http.Request, multiData data. p.initBuffer() totalWritten := 0 + for _, d := range multiData { data := d.Data from := uint32(d.From) @@ -54,18 +55,23 @@ func replyProtobuf(p pb, w http.ResponseWriter, r *http.Request, multiData data. if len(points) == 0 { break } + metricName := data.MetricName(points[0].MetricID) writtenMetrics[metricName] = struct{}{} + step, err := data.GetStep(points[0].MetricID) if err != nil { logger.Error("fail to get step", zap.Error(err)) http.Error(w, fmt.Sprintf("failed to get step for metric: %v", data.MetricName(points[0].MetricID)), http.StatusInternalServerError) + return } + function, err := data.GetAggregation(points[0].MetricID) if err != nil { logger.Error("fail to get function", zap.Error(err)) http.Error(w, fmt.Sprintf("failed to get function for metric: %v", data.MetricName(points[0].MetricID)), http.StatusInternalServerError) + return } @@ -105,13 +111,17 @@ func init() { func VarintEncode(x uint64) []byte { var buf [protobufMaxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { buf[n] = 0x80 | uint8(x&0x7F) x >>= 7 } + buf[n] = uint8(x) n++ + return buf[0:n] } @@ -130,13 +140,16 @@ func VarintLen(x uint64) uint64 { if x < 128 { return 1 } + if x < 16384 { return 2 } + j := uint64(2) for i := uint64(16384); i <= x; i *= 128 { j++ } + return j } diff --git a/render/reply/v2_pb.go b/render/reply/v2_pb.go index 1fed28e0a..fc235bf7f 100644 --- a/render/reply/v2_pb.go +++ b/render/reply/v2_pb.go @@ -29,6 +29,7 @@ func (v *V2PB) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe if scope.Debug(r.Context(), "Protobuf") { v.replyDebug(w, r, multiData) } + replyProtobuf(v, w, r, multiData) } @@ -42,10 +43,12 @@ func (v *V2PB) replyDebug(w http.ResponseWriter, r *http.Request, multiData data if err != nil { http.Error(w, fmt.Sprintf("failed to convert response to v2pb.MultiFetchResponse: %v", err), http.StatusInternalServerError) } + response, err := mfr.Marshal() if err != nil { http.Error(w, fmt.Sprintf("failed to marshal v2pb.MultiFetchResponse: %v", err), http.StatusInternalServerError) } + w.Write(response) } @@ -90,11 +93,14 @@ func (v *V2PB) writeBody(writer *bufio.Writer, target, name, function string, fr // if err is not point.ErrTimeGreaterStop, the points are corrupted return } + if !math.IsNaN(value) { ProtobufWriteDouble(v.b1, value) v.b2.WriteByte(0) + continue } + ProtobufWriteDouble(v.b1, 0) v.b2.WriteByte(1) } diff --git a/render/reply/v3_pb.go b/render/reply/v3_pb.go index 33109064c..8e7bc7d92 100644 --- a/render/reply/v3_pb.go +++ b/render/reply/v3_pb.go @@ -38,6 +38,7 @@ func (*V3PB) ParseRequest(r *http.Request) (data.MultiTarget, error) { } multiTarget := data.MFRToMultiTarget(&pv3Request) + if len(pv3Request.Metrics) > 0 { for _, m := range pv3Request.Metrics { logger.Info( @@ -65,6 +66,7 @@ func (v *V3PB) Reply(w http.ResponseWriter, r *http.Request, multiData data.CHRe if scope.Debug(r.Context(), "Protobuf") { v.replyDebug(w, r, multiData) } + replyProtobuf(v, w, r, multiData) } @@ -77,10 +79,12 @@ func (v *V3PB) replyDebug(w http.ResponseWriter, r *http.Request, multiData data if err != nil { http.Error(w, fmt.Sprintf("failed to convert response to v3pb.MultiFetchResponse: %v", err), http.StatusInternalServerError) } + response, err := mfr.Marshal() if err != nil { http.Error(w, fmt.Sprintf("failed to marshal v3pb.MultiFetchResponse: %v", err), http.StatusInternalServerError) } + 
w.Write(response) } @@ -129,6 +133,7 @@ func (v *V3PB) writeBody(writer *bufio.Writer, target, name, function string, fr // Values header VarintWrite(v.b, (9<<3)+repeated) // tag VarintWrite(v.b, uint64(8*count)) + for { value, err := getValue() if err != nil { @@ -138,6 +143,7 @@ func (v *V3PB) writeBody(writer *bufio.Writer, target, name, function string, fr // if err is not point.ErrTimeGreaterStop, the points are corrupted return } + ProtobufWriteDouble(v.b, value) } diff --git a/sd/nginx/nginx.go b/sd/nginx/nginx.go index 09d4d7077..fba4cad4e 100644 --- a/sd/nginx/nginx.go +++ b/sd/nginx/nginx.go @@ -31,16 +31,20 @@ var ( func splitNode(node string) (dc, host, listen string, ok bool) { var v string + dc, v, ok = strings.Cut(node, "/") if !ok { return } + host, v, ok = strings.Cut(v, "/") if !ok { return } + listen, _, ok = strings.Cut(v, "/") ok = !ok + return } @@ -62,6 +66,7 @@ func New(url, namespace, hostname string, logger *zap.Logger) *Nginx { if namespace == "" { namespace = "graphite" } + sd := &Nginx{ logger: logger, body: make([]byte, 128), @@ -74,10 +79,12 @@ func New(url, namespace, hostname string, logger *zap.Logger) *Nginx { sd.url.WriteString(url) sd.url.WriteByte('/') + if namespace != "" { sd.url.WriteString(namespace) sd.url.WriteByte('/') } + sd.pos = sd.url.Len() return sd @@ -87,6 +94,7 @@ func (sd *Nginx) setWeight(weight int64) { if weight <= 0 { weight = 1 } + if sd.weight != weight { sd.weight = weight sd.body = sd.body[:0] @@ -103,15 +111,19 @@ func (sd *Nginx) Namespace() string { func (sd *Nginx) List() (nodes []string, err error) { sd.url.Truncate(sd.pos) sd.url.WriteString("?recurse") + var data []byte + data, err = utils.HttpGet(sd.url.String()) if err != nil { return } + var iNodes []interface{} if err = json.Unmarshal(data, &iNodes); err != nil { return nil, err } + nodes = make([]string, 0, len(iNodes)) for _, i := range iNodes { @@ -121,6 +133,7 @@ func (sd *Nginx) List() (nodes []string, err error) { if strings.HasPrefix(s, sd.nsEnd) { s = s[len(sd.nsEnd):] _, host, _, ok := splitNode(s) + if ok && host == sd.hostname { nodes = append(nodes, s) } @@ -142,15 +155,19 @@ func (sd *Nginx) List() (nodes []string, err error) { func (sd *Nginx) ListMap() (nodes map[string]string, err error) { sd.url.Truncate(sd.pos) sd.url.WriteString("?recurse") + var data []byte + data, err = utils.HttpGet(sd.url.String()) if err != nil { return } + var iNodes []interface{} if err = json.Unmarshal(data, &iNodes); err != nil { return nil, err } + nodes = make(map[string]string) for _, i := range iNodes { @@ -159,6 +176,7 @@ func (sd *Nginx) ListMap() (nodes map[string]string, err error) { if s, ok := i.(string); ok { if strings.HasPrefix(s, sd.nsEnd) { s = s[len(sd.nsEnd):] + _, host, _, ok := splitNode(s) if ok && host == sd.hostname { if i, ok := jNode["Value"]; ok { @@ -167,6 +185,7 @@ func (sd *Nginx) ListMap() (nodes map[string]string, err error) { if err != nil { return nil, err } + nodes[s] = stringutils.UnsafeString(d) } else { nodes[s] = "" @@ -193,15 +212,19 @@ func (sd *Nginx) ListMap() (nodes map[string]string, err error) { func (sd *Nginx) Nodes() (nodes []utils.KV, err error) { sd.url.Truncate(sd.pos) sd.url.WriteString("?recurse") + var data []byte + data, err = utils.HttpGet(sd.url.String()) if err != nil { return } + var iNodes []interface{} if err = json.Unmarshal(data, &iNodes); err != nil { return nil, err } + nodes = make([]utils.KV, 0, 3) for _, i := range iNodes { @@ -211,15 +234,18 @@ func (sd *Nginx) Nodes() (nodes []utils.KV, err error) { if 
strings.HasPrefix(s, sd.nsEnd) { s = s[len(sd.nsEnd):] kv := utils.KV{Key: s} + if i, ok := jNode["Value"]; ok { if v, ok := i.(string); ok { d, err := base64.StdEncoding.DecodeString(v) if err != nil { return nil, err } + kv.Value = stringutils.UnsafeString(d) } } + if i, ok := jNode["Flags"]; ok { switch v := i.(type) { case float64: @@ -230,6 +256,7 @@ func (sd *Nginx) Nodes() (nodes []utils.KV, err error) { kv.Flags = v } } + nodes = append(nodes, kv) } else { return nil, ErrInvalidKey{key: sd.nsEnd, val: s} @@ -252,9 +279,11 @@ func (sd *Nginx) update(ip, port string, dc []string) (err error) { sd.url.WriteString("_/") sd.url.WriteString(sd.hostname) sd.url.WriteByte('/') + if ip != "" { sd.url.WriteString(ip) } + sd.url.WriteString(port) // add custom query flags @@ -279,9 +308,11 @@ func (sd *Nginx) update(ip, port string, dc []string) (err error) { sd.url.WriteString(sd.hostname) sd.url.WriteByte('/') n := sd.url.Len() + if ip != "" { sd.url.WriteString(ip) } + sd.url.WriteString(port) // add custom query flags @@ -292,6 +323,7 @@ func (sd *Nginx) update(ip, port string, dc []string) (err error) { sd.logger.Error( "put", zap.String("address", sd.url.String()[n:]), zap.String("dc", dc[i]), zap.Error(nErr), ) + err = nErr } } else { @@ -299,6 +331,7 @@ func (sd *Nginx) update(ip, port string, dc []string) (err error) { sd.logger.Error( "put", zap.String("address", sd.url.String()[n:]), zap.String("dc", dc[i]), zap.Error(nErr), ) + err = nErr } } @@ -331,9 +364,11 @@ func (sd *Nginx) Delete(ip, port string, dc []string) (err error) { sd.url.WriteString("_/") sd.url.WriteString(sd.hostname) sd.url.WriteByte('/') + if ip != "" { sd.url.WriteString(ip) } + sd.url.WriteString(port) if err = utils.HttpDelete(sd.url.String()); err != nil { @@ -348,15 +383,18 @@ func (sd *Nginx) Delete(ip, port string, dc []string) (err error) { sd.url.WriteString(sd.hostname) sd.url.WriteByte('/') n := sd.url.Len() + if ip != "" { sd.url.WriteString(ip) } + sd.url.WriteString(port) if nErr := utils.HttpDelete(sd.url.String()); nErr != nil { sd.logger.Error( "delete", zap.String("address", sd.url.String()[n:]), zap.String("dc", dc[i]), zap.Error(nErr), ) + err = nErr } } @@ -367,27 +405,36 @@ func (sd *Nginx) Delete(ip, port string, dc []string) (err error) { func (sd *Nginx) Clear(preserveIP, preservePort string) (err error) { var nodes []string + nodes, err = sd.List() if err != nil { sd.logger.Error( "list", zap.String("address", sd.url.String()[sd.pos:]), zap.Error(err), ) + return } + if len(nodes) == 0 { return } + preserveListen := preserveIP + preservePort + sd.url.WriteByte('/') + for _, node := range nodes { sd.url.Truncate(sd.pos) + _, host, listen, _ := splitNode(node) if host == sd.hostname && listen != preserveListen { sd.url.WriteString(node) + if nErr := utils.HttpDelete(sd.url.String()); nErr != nil { sd.logger.Error( "delete", zap.String("address", sd.url.String()), zap.Error(nErr), ) + err = nErr } } diff --git a/sd/register.go b/sd/register.go index 0521abaab..3d575d622 100644 --- a/sd/register.go +++ b/sd/register.go @@ -58,12 +58,14 @@ func Register(cfg *config.Common, logger *zap.Logger) { load float64 w int64 ) + if cfg.SD != "" { if strings.HasPrefix(cfg.Listen, ":") { registerFirst = true listenIP = utils.GetLocalIP() prevIP = listenIP } + hostname, _ = os.Hostname() hostname, _, _ = strings.Cut(hostname, ".") @@ -71,6 +73,7 @@ func Register(cfg *config.Common, logger *zap.Logger) { if err != nil { panic("serive discovery type not registered") } + load, err = load_avg.Normalized() 
if err == nil { load_avg.Store(load) @@ -135,6 +138,7 @@ func Stop() { func Cleanup(cfg *config.Common, sd SD, checkOnly bool) error { if cfg.SD != "" && cfg.SDExpire > 0 { ts := time.Now().Unix() - int64(cfg.SDExpire.Seconds()) + if nodes, err := sd.Nodes(); err == nil { for _, node := range nodes { if node.Flags > 0 { @@ -145,6 +149,7 @@ func Cleanup(cfg *config.Common, sd SD, checkOnly bool) error { if err = sd.DeleteNode(node.Key); err != nil { return err } + fmt.Printf("%s: %s (%s), deleted\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) } } @@ -156,5 +161,6 @@ func Cleanup(cfg *config.Common, sd SD, checkOnly bool) error { return err } } + return nil } diff --git a/sd/utils/utils.go b/sd/utils/utils.go index f106830dc..464552471 100644 --- a/sd/utils/utils.go +++ b/sd/utils/utils.go @@ -21,6 +21,7 @@ type KV struct { func HttpGet(url string) ([]byte, error) { client := &http.Client{Timeout: 2 * time.Second} + resp, err := client.Get(url) if err != nil { return nil, err @@ -28,12 +29,15 @@ func HttpGet(url string) ([]byte, error) { data, err := io.ReadAll(resp.Body) resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { return nil, ErrNotFound } + if resp.StatusCode != http.StatusOK { return nil, errs.NewErrorWithCode(string(data), resp.StatusCode) } + return data, err } @@ -42,20 +46,27 @@ func HttpPut(url string, body []byte) error { if err != nil { return err } + req.Header.Set("Content-Type", "application/json") + client := &http.Client{Timeout: 2 * time.Second} + resp, err := client.Do(req) if err != nil { return err } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { return ErrNotFound } + if resp.StatusCode != http.StatusOK { data, _ := io.ReadAll(resp.Body) return errs.NewErrorWithCode(string(data), resp.StatusCode) } + return nil } @@ -64,19 +75,25 @@ func HttpDelete(url string) error { if err != nil { return err } + client := &http.Client{Timeout: 2 * time.Second} + resp, err := client.Do(req) if err != nil { return err } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { return ErrNotFound } + if resp.StatusCode != http.StatusOK { data, _ := io.ReadAll(resp.Body) return errs.NewErrorWithCode(string(data), resp.StatusCode) } + return nil } @@ -86,6 +103,7 @@ func GetLocalIP() string { if err != nil { return "" } + for _, address := range addrs { // check the address type and if it is not a loopback the display it if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { @@ -94,5 +112,6 @@ func GetLocalIP() string { } } } + return "" } diff --git a/tagger/metric.go b/tagger/metric.go index 0f4b00191..0cffa4e5a 100644 --- a/tagger/metric.go +++ b/tagger/metric.go @@ -31,6 +31,7 @@ func (m *Metric) IsLeaf() uint8 { if len(m.Path) > 0 && m.Path[len(m.Path)-1] == '.' 
{ return 0 } + return 1 } diff --git a/tagger/rule.go b/tagger/rule.go index cc4ed36f1..256fe4c88 100644 --- a/tagger/rule.go +++ b/tagger/rule.go @@ -93,15 +93,19 @@ func Parse(content string) (*Rules, error) { if err != nil { return nil, err } + if rule.Equal != "" { rule.BytesEqual = []byte(rule.Equal) } + if rule.Contains != "" { rule.BytesContains = []byte(rule.Contains) } + if rule.HasPrefix != "" { rule.BytesHasPrefix = []byte(rule.HasPrefix) } + if rule.HasSuffix != "" { rule.BytesHasSuffix = []byte(rule.HasSuffix) } @@ -132,6 +136,7 @@ func (r *Rules) Match(m *Metric) { func matchByPrefix(path []byte, tree *Tree, m *Metric) { x := tree i := 0 + for { if i >= len(path) { break @@ -155,6 +160,7 @@ func matchByPrefix(path []byte, tree *Tree, m *Metric) { func matchBySuffix(path []byte, tree *Tree, m *Metric) { x := tree i := len(path) - 1 + for { if i <= 0 { break diff --git a/tagger/rule_test.go b/tagger/rule_test.go index 0ae29b0b3..041efc53a 100644 --- a/tagger/rule_test.go +++ b/tagger/rule_test.go @@ -90,7 +90,9 @@ func TestRules(t *testing.T) { if expected == nil { expected = []string{} } + sort.Strings(expected) + tags := m.Tags.List() sort.Strings(tags) diff --git a/tagger/set.go b/tagger/set.go index 9092d6236..ed5bee22d 100644 --- a/tagger/set.go +++ b/tagger/set.go @@ -67,6 +67,7 @@ func (s *Set) MarshalJSON() ([]byte, error) { } var err error + s.json, err = json.Marshal(s.list) if err != nil { return nil, err diff --git a/tagger/tagger.go b/tagger/tagger.go index 19b897e57..83ca2a844 100644 --- a/tagger/tagger.go +++ b/tagger/tagger.go @@ -34,8 +34,11 @@ func (nopCloser) Close() error { return nil } func countMetrics(body []byte) (int, error) { var namelen uint64 + bodyLen := len(body) + var count, offset, readBytes int + var err error for { @@ -43,6 +46,7 @@ func countMetrics(body []byte) (int, error) { if offset == bodyLen { return count, nil } + return 0, clickhouse.ErrClickHouseResponse } @@ -50,6 +54,7 @@ func countMetrics(body []byte) (int, error) { if err != nil { return 0, err } + offset += readBytes + int(namelen) count++ } @@ -69,6 +74,7 @@ func pathLevel(path []byte) int { func Make(cfg *config.Config) error { var start time.Time + var block string logger := zapwriter.Logger("tagger") @@ -81,12 +87,15 @@ func Make(cfg *config.Config) error { begin := func(b string, fields ...zapcore.Field) { block = b start = time.Now() + logger.Info(fmt.Sprintf("begin %s", block), fields...) 
} end := func() { var m runtime.MemStats + runtime.ReadMemStats(&m) + d := time.Since(start) logger.Info(fmt.Sprintf("end %s", block), zap.Duration("time", d), @@ -104,6 +113,7 @@ func Make(cfg *config.Config) error { // Parse rules begin("parse rules") + rules, err := ParseGlob(cfg.Tags.Rules) if err != nil { return err @@ -113,6 +123,7 @@ func Make(cfg *config.Config) error { if err != nil { return err } + end() selectChunksCount := SelectChunksCount @@ -130,13 +141,16 @@ func Make(cfg *config.Config) error { if err != nil { return err } + bodies = [][]byte{body} } else { bodies = make([][]byte, selectChunksCount) + extraWhere := "" if cfg.Tags.ExtraWhere != "" { extraWhere = fmt.Sprintf("AND (%s)", cfg.Tags.ExtraWhere) } + for i := 0; i < selectChunksCount; i++ { bodies[i], _, _, err = clickhouse.Query( scope.New(context.Background()).WithLogger(logger).WithTable(cfg.ClickHouse.IndexTable), @@ -168,6 +182,7 @@ func Make(cfg *config.Config) error { if err != nil { return err } + count += c } @@ -179,8 +194,11 @@ func Make(cfg *config.Config) error { for i := 0; i < len(bodies); i++ { body := bodies[i] + var namelen uint64 + bodyLen := len(body) + var offset, readBytes int for ; ; index++ { @@ -188,6 +206,7 @@ func Make(cfg *config.Config) error { if offset == bodyLen { break } + return clickhouse.ErrClickHouseResponse } @@ -206,15 +225,20 @@ func Make(cfg *config.Config) error { offset += readBytes + int(namelen) } } + end() begin("sort") + start = time.Now() + sort.Slice(metricList, func(i, j int) bool { return bytes.Compare(metricList[i].Path, metricList[j].Path) < 0 }) end() begin("make map") + levelMap := make([]int, maxLevel+1) + for index := 0; index < len(metricList); index++ { m := &metricList[index] levelMap[m.Level] = index @@ -228,9 +252,11 @@ func Make(cfg *config.Config) error { } } } + end() begin("match", zap.Int("metrics_count", len(metricList))) + for index := 0; index < count; index++ { m := &metricList[index] @@ -242,10 +268,12 @@ func Make(cfg *config.Config) error { rules.Match(m) } + end() // copy from childs to parents begin("copy tags from childs to parents") + for index := 0; index < count; index++ { m := &metricList[index] @@ -253,18 +281,24 @@ func Make(cfg *config.Config) error { metricList[p].Tags = metricList[p].Tags.Merge(m.Tags) } } + end() begin("remove metrics without tags", zap.Int("metrics_count", len(metricList))) + i := 0 + for _, m := range metricList { if m.Tags == nil || m.Tags.Len() == 0 { continue } + metricList[i] = m i++ } + metricList = metricList[:i] + end() if len(metricList) == 0 { @@ -275,6 +309,7 @@ func Make(cfg *config.Config) error { begin("cut metrics into parts", zap.Int("metrics_count", len(metricList))) metricListParts, tagsCount := cutMetricsIntoParts(metricList, cfg.Tags.Threads) threads := len(metricListParts) + end() begin("marshal RowBinary", @@ -287,56 +322,70 @@ func Make(cfg *config.Config) error { eg := new(errgroup.Group) eg.SetLimit(cfg.Common.MaxCPU) + for i := 0; i < threads; i++ { binaryParts[i] = new(bytes.Buffer) + wc, err := wrapWithCompressor(cfg, binaryParts[i]) if err != nil { return err } + metricList := metricListParts[i] + eg.Go(func() error { return encodeMetricsToRowBinary(metricList, date, version, wc) }) } + err = eg.Wait() if err != nil { return err } emptyRecord := new(bytes.Buffer) + wc, err := wrapWithCompressor(cfg, emptyRecord) if err != nil { return err } + err = encodeEmptyMetricToRowBinary(date, version, wc) if err != nil { return err } + end() if cfg.Tags.OutputFile != "" { 
begin(fmt.Sprintf("write to %#v", cfg.Tags.OutputFile)) + f, err := os.Create(cfg.Tags.OutputFile) if err != nil { return err } + for i := 0; i < threads; i++ { // just concatenate the parts because zstd and gzip allow it _, err = binaryParts[i].WriteTo(f) if err != nil { return err } } + _, err = emptyRecord.WriteTo(f) if err != nil { return err } + err = f.Close() if err != nil { return err } + end() } else { begin("upload to clickhouse", zap.Int("threads", threads)) + upload := func(outBuf *bytes.Buffer) error { _, _, _, err := clickhouse.PostWithEncoding( scope.New(context.Background()).WithLogger(logger).WithTable(cfg.ClickHouse.TagTable), @@ -347,23 +396,29 @@ func Make(cfg *config.Config) error { chOpts, nil, ) + return err } eg := new(errgroup.Group) + for i := 0; i < threads; i++ { outBuf := binaryParts[i] + eg.Go(func() error { return upload(outBuf) }) } + err = eg.Wait() if err != nil { return err } + err = upload(emptyRecord) if err != nil { return err } + end() } @@ -375,12 +430,15 @@ func cutMetricsIntoParts(metricList []Metric, threads int) ([][]Metric, int) { for _, m := range metricList { tagsCount += m.Tags.Len() } + if threads < 2 { return [][]Metric{metricList}, tagsCount } + parts := make([][]Metric, 0, threads) i := 0 partSize := (tagsCount-1)/threads + 1 // round up for cases like 99/50 + cnt := 0 for j, m := range metricList { // assert m.Tags != nil && m.Tags.Len() != 0 @@ -391,15 +449,19 @@ func cutMetricsIntoParts(metricList []Metric, threads int) ([][]Metric, int) { cnt = 0 } } + if i < len(metricList) { parts = append(parts, metricList[i:]) } + return parts, tagsCount } func wrapWithCompressor(cfg *config.Config, writer io.Writer) (io.WriteCloser, error) { var wc io.WriteCloser + var err error + switch cfg.Tags.Compression { case clickhouse.ContentEncodingNone: wc = nopCloser{writer} @@ -413,6 +475,7 @@ func wrapWithCompressor(cfg *config.Config, writer io.Writer) (io.WriteCloser, e default: return nil, fmt.Errorf("unknown compression: %s", cfg.Tags.Compression) } + return wc, nil } @@ -477,6 +540,7 @@ func encodeMetricsToRowBinary(metricList []Metric, date time.Time, version uint3 } wc.Close() + return nil } @@ -523,5 +587,6 @@ func encodeEmptyMetricToRowBinary(date time.Time, version uint32, wc io.WriteClo } wc.Close() + return nil } diff --git a/tagger/tagger_test.go b/tagger/tagger_test.go index b461e2816..91af1a63e 100644 --- a/tagger/tagger_test.go +++ b/tagger/tagger_test.go @@ -293,39 +293,53 @@ func TestCutMetricsIntoParts(t *testing.T) { func TestCutMetricsIntoPartsRandom(t *testing.T) { require := require.New(t) + rand.Seed(time.Now().UnixNano()) + for n := 0; n < 1000; n++ { metricList := make([]Metric, rand.Intn(100)) tagsMax := rand.Intn(100) + 1 tagsCnt := 0 + for i := range metricList { tags := make([]string, rand.Intn(tagsMax)+1) tagsCnt += len(tags) + for j := range tags { tags[j] = fmt.Sprintf("tag%d", j) } + metricList[i].Tags = new(Set).Add(tags...) 
} + threads := rand.Intn(110) parts, _ := cutMetricsIntoParts(metricList, threads) + if threads == 0 { threads = 1 } + if len(parts) > threads { v, _ := json.MarshalIndent(parts, "", " ") fmt.Println(string(v)) } + require.LessOrEqual(len(parts), threads, fmt.Sprint(tagsCnt, len(metricList), len(parts), threads)) + if len(metricList) > 0 { require.LessOrEqual(len(parts), len(metricList), fmt.Sprint(tagsCnt, len(metricList), len(parts), threads)) } + i := 0 + for _, p := range parts { for _, m := range p { require.Equal(metricList[i], m) + i++ } } + require.Equal(len(metricList), i) } } From d240addb7730adcc8668d7dc1850fcabb20a696a Mon Sep 17 00:00:00 2001 From: Xenia Nisskhen Date: Tue, 20 May 2025 20:42:58 +0500 Subject: [PATCH 2/2] feat(linter): Add wsl linter --- load_avg/load_avg_linux.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/load_avg/load_avg_linux.go b/load_avg/load_avg_linux.go index e6de3f297..6db26c360 100644 --- a/load_avg/load_avg_linux.go +++ b/load_avg/load_avg_linux.go @@ -13,6 +13,7 @@ import ( func Normalized() (float64, error) { var info syscall.Sysinfo_t + err := syscall.Sysinfo(&info) if err != nil { return 0, err } @@ -25,6 +26,7 @@ const si_load_shift = 16 load := float64(info.Loads[0]) / float64(1<<si_load_shift)
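
Note for reviewers unfamiliar with wsl: the linter enabled in .golangci.yml above ("whitespace linter") requires blank lines between "cuddled" statement groups, which is what nearly every hunk in this series adds. Roughly: an assignment may stay attached to a following if only when the if uses the assigned variable (which is why the many `err := ...; if err != nil` pairs are left untouched), while a loop after an unrelated declaration or a return that follows a multi-line block must be set off by a blank line. A minimal sketch of the enforced style (illustrative code, not taken from this repository):

    package main

    import "fmt"

    func sum(values []int) int {
    	var total int

    	// wsl: a loop must not cuddle with the declaration above it; this is
    	// the rule behind the blank lines inserted after declarations such as
    	// `var maxDuration time.Duration` in the hunks above.
    	for _, v := range values {
    		total += v
    	}

    	// wsl: a return after a multi-line block needs a preceding blank line,
    	// hence the many hunks that are a lone `+` before `return`.
    	return total
    }

    func main() {
    	fmt.Println(sum([]int{1, 2, 3}))
    }

With the .golangci.yml change in place, a plain `golangci-lint run` picks wsl up from the config; the flags for narrowing a local run to a single linter vary between golangci-lint major versions.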
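
Unrelated to the whitespace changes themselves, the VarintEncode/VarintLen helpers touched in render/reply/protobuf.go implement the standard protobuf base-128 varint, so their output can be sanity-checked against the standard library (a standalone sketch, not part of the patch):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// binary.PutUvarint emits the same base-128 encoding as VarintEncode:
    	// 300 -> [0xac 0x02], which also matches VarintLen(300) == 2.
    	buf := make([]byte, binary.MaxVarintLen64)
    	n := binary.PutUvarint(buf, 300)
    	fmt.Printf("% x\n", buf[:n]) // ac 02
    }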
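
On the second patch's only file: Linux sysinfo(2) reports load averages as fixed-point integers scaled by 1 << SI_LOAD_SHIFT (i.e. 65536), and dividing that factor back out is exactly what the `si_load_shift` division in Normalized does. A worked example with an assumed raw reading:

    package main

    import "fmt"

    func main() {
    	const siLoadShift = 16 // mirrors si_load_shift in load_avg_linux.go
    	raw := 98304.0         // hypothetical info.Loads[0] value, for illustration

    	fmt.Println(raw / float64(1<<siLoadShift)) // 1.5, i.e. a load average of 1.5
    }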