diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 02e2a3a..65b45bd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,8 +34,8 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 - - name: skip generated files - run: rm -rf geminiql/parser.go + - name: skip generated and lib files + run: rm -rf geminiql/parser.go lib - name: licenselint uses: hezhangjian/licenselint@main with: diff --git a/cmd/subcmd/export.go b/cmd/subcmd/export.go new file mode 100644 index 0000000..fac066f --- /dev/null +++ b/cmd/subcmd/export.go @@ -0,0 +1,1874 @@ +// Copyright 2025 openGemini Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package subcmd + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "crypto/tls" + "encoding/binary" + "encoding/json" + "flag" + "fmt" + "io" + "io/fs" + "log" + "math" + "net" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding" + "github.com/golang/snappy" + "github.com/openGemini/openGemini-cli/core" + "github.com/openGemini/openGemini/engine" + "github.com/openGemini/openGemini/engine/immutable" + "github.com/openGemini/openGemini/engine/index/tsi" + "github.com/openGemini/openGemini/lib/bufferpool" + "github.com/openGemini/openGemini/lib/config" + "github.com/openGemini/openGemini/lib/errno" + "github.com/openGemini/openGemini/lib/fileops" + "github.com/openGemini/openGemini/lib/index" + "github.com/openGemini/openGemini/lib/record" + "github.com/openGemini/openGemini/lib/util" + "github.com/openGemini/openGemini/lib/util/lifted/vm/protoparser/influx" + "github.com/openGemini/opengemini-client-go/opengemini" + "github.com/vbauerster/mpb/v7" + "github.com/vbauerster/mpb/v7/decor" +) + +const ( + tsspFileExtension = "tssp" + walFileExtension = "wal" + csvFormatExporter = "csv" + txtFormatExporter = "txt" + remoteFormatExporter = "remote" + resumeFilePrefix = "resume_" + dirNameSeparator = "_" +) + +var ( + MpbProgress = mpb.New(mpb.WithWidth(100)) + ResumeJsonPath string + ProgressedFilesPath string +) + +type ExportConfig struct { + *core.CommandLineConfig + Export bool + Format string `json:"format"` + Out string `json:"out"` + DataDir string `json:"data"` + WalDir string `json:"wal"` + Remote string `json:"remote"` + RemoteUsername string `json:"-"` + RemotePassword string `json:"-"` + RemoteSsl bool `json:"remotessl"` + DBFilter string `json:"dbfilter"` + RetentionFilter string `json:"retentionfilter"` + MeasurementFilter string `json:"mstfilter"` + TimeFilter string `json:"timefilter"` + Compress bool `json:"compress"` + Resume bool +} + +type ExportCommand struct { + 
cfg *ExportConfig + exportCmd *Exporter +} + +func (c *ExportCommand) Run(config *ExportConfig) error { + if err := flag.CommandLine.Parse([]string{"-loggerLevel=ERROR"}); err != nil { + return err + } + c.cfg = config + c.exportCmd = NewExporter() + + return c.process() +} + +func (c *ExportCommand) process() error { + if c.cfg.Resume { + if err := ReadLatestProgressFile(); err != nil { + return err + } + oldConfig, err := getResumeConfig(c.cfg) + if err != nil { + return err + } + progressedFiles, err := getProgressedFiles() + if err != nil { + return err + } + return c.exportCmd.Export(oldConfig, progressedFiles) + } else { + if err := CreateNewProgressFolder(); err != nil { + return err + } + return c.exportCmd.Export(c.cfg, nil) + } +} + +func getResumeConfig(options *ExportConfig) (*ExportConfig, error) { + jsonData, err := os.ReadFile(ResumeJsonPath) + if err != nil { + return nil, err + } + var config ExportConfig + err = json.Unmarshal(jsonData, &config) + if err != nil { + return nil, err + } + config.Resume = true + config.RemoteUsername = options.RemoteUsername + config.RemotePassword = options.RemotePassword + return &config, nil +} + +func getProgressedFiles() (map[string]struct{}, error) { + file, err := os.Open(ProgressedFilesPath) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + lineSet := make(map[string]struct{}) + + for scanner.Scan() { + line := scanner.Text() + lineSet[line] = struct{}{} + } + + if err := scanner.Err(); err != nil { + return nil, err + } + return lineSet, nil +} + +// CreateNewProgressFolder init ResumeJsonPath and ProgressedFilesPath +func CreateNewProgressFolder() error { + home, err := os.UserHomeDir() + if err != nil { + return err + } + targetPath := filepath.Join(home, ".ts-cli", time.Now().Format("2006-01-02_15-04-05.000000000")) + err = os.MkdirAll(targetPath, os.ModePerm) + if err != nil { + return err + } + // create progress.json + progressJson := 
filepath.Join(targetPath, "progress.json") + ResumeJsonPath = progressJson + // create progressedFiles + progressedFiles := filepath.Join(targetPath, "progressedFiles") + ProgressedFilesPath = progressedFiles + return nil +} + +// ReadLatestProgressFile reads and processes the latest folder +func ReadLatestProgressFile() error { + home, err := os.UserHomeDir() + if err != nil { + return err + } + baseDir := filepath.Join(home, ".ts-cli") + var dirs []string + err = filepath.Walk(baseDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() || path == baseDir { + return nil + } + dirs = append(dirs, path) + return nil + }) + if err != nil { + return err + } + sort.Strings(dirs) + latestDir := dirs[len(dirs)-1] + // read progress.json + ResumeJsonPath = filepath.Join(latestDir, "progress.json") + // read progressedFiles + ProgressedFilesPath = filepath.Join(latestDir, "progressedFiles") + return nil +} + +type dataFilter struct { + database string + retention string + measurement string + startTime int64 + endTime int64 +} + +func newDataFilter() *dataFilter { + return &dataFilter{ + database: "", + measurement: "", + startTime: math.MinInt64, + endTime: math.MaxInt64, + } +} + +func (d *dataFilter) parseTime(clc *ExportConfig) error { + var start, end string + timeSlot := strings.Split(clc.TimeFilter, "~") + if len(timeSlot) == 2 { + start = timeSlot[0] + end = timeSlot[1] + } else if clc.TimeFilter != "" { + return fmt.Errorf("invalid time filter %q", clc.TimeFilter) + } + + if start != "" { + st, err := convertTime(start) + if err != nil { + return err + } + d.startTime = st + } + + if end != "" { + ed, err := convertTime(end) + if err != nil { + return err + } + d.endTime = ed + } + + if d.startTime > d.endTime { + return fmt.Errorf("start time `%q` > end time `%q`", start, end) + } + + return nil +} + +func (d *dataFilter) parseDatabase(dbFilter string) { + if dbFilter == "" { + return + } + d.database = 
dbFilter +} + +func (d *dataFilter) parseRetention(retentionFilter string) { + if retentionFilter == "" { + return + } + d.retention = retentionFilter +} + +func (d *dataFilter) parseMeasurement(mstFilter string) error { + if mstFilter == "" { + return nil + } + if mstFilter != "" && d.database == "" { + return fmt.Errorf("measurement filter %q requires database filter", mstFilter) + } + d.measurement = mstFilter + return nil +} + +// timeFilter [startTime, endTime] +func (d *dataFilter) timeFilter(t int64) bool { + return t >= d.startTime && t <= d.endTime +} + +func (d *dataFilter) isBelowMinTimeFilter(t int64) bool { + return t < d.startTime +} + +func (d *dataFilter) isAboveMaxTimeFilter(t int64) bool { + return t > d.endTime +} + +type DatabaseDiskInfo struct { + dbName string // ie. "NOAA_water_database" + rps map[string]struct{} // ie. ["0:autogen","1:every_one_day"] + dataDir string // ie. "/tmp/openGemini/data/data/NOAA_water_database" + walDir string // ie. "/tmp/openGemini/data/wal/NOAA_water_database" + rpToTsspDirMap map[string]string // ie. {"0:autogen", "/tmp/openGemini/data/data/NOAA_water_database/0/autogen"} + rpToWalDirMap map[string]string // ie. {"0:autogen", "/tmp/openGemini/data/wal/NOAA_water_database/0/autogen"} + rpToIndexDirMap map[string]string // ie. 
{"0:autogen", "/tmp/openGemini/data/data/NOAA_water_database/0/autogen/index"} +} + +func newDatabaseDiskInfo() *DatabaseDiskInfo { + return &DatabaseDiskInfo{ + rps: make(map[string]struct{}), + rpToTsspDirMap: make(map[string]string), + rpToWalDirMap: make(map[string]string), + rpToIndexDirMap: make(map[string]string), + } +} + +func (d *DatabaseDiskInfo) init(actualDataDir string, actualWalDir string, databaseName string, retentionPolicy string) error { + d.dbName = databaseName + + // check whether the database is in actualDataPath + dataDir := filepath.Join(actualDataDir, databaseName) + if _, err := os.Stat(dataDir); err != nil { + return err + } + // check whether the database is in actualWalPath + walDir := filepath.Join(actualWalDir, databaseName) + if _, err := os.Stat(walDir); err != nil { + return err + } + + // ie. /tmp/openGemini/data/data/my_db /tmp/openGemini/data/wal/my_db + d.dataDir, d.walDir = dataDir, walDir + + ptDirs, err := os.ReadDir(d.dataDir) + if err != nil { + return err + } + for _, ptDir := range ptDirs { + // ie. /tmp/openGemini/data/data/my_db/0 + ptTsspPath := filepath.Join(d.dataDir, ptDir.Name()) + // ie. /tmp/openGemini/data/wal/my_db/0 + ptWalPath := filepath.Join(d.walDir, ptDir.Name()) + + if retentionPolicy != "" { + ptWithRp := ptDir.Name() + ":" + retentionPolicy + // ie. /tmp/openGemini/data/data/my_db/0/autogen + rpTsspPath := filepath.Join(ptTsspPath, retentionPolicy) + if _, err := os.Stat(rpTsspPath); err != nil { + return fmt.Errorf("retention policy %q invalid : %s", retentionPolicy, err) + } else { + d.rps[ptWithRp] = struct{}{} + d.rpToTsspDirMap[ptWithRp] = rpTsspPath + d.rpToIndexDirMap[ptWithRp] = filepath.Join(rpTsspPath, "index") + } + // ie. 
/tmp/openGemini/data/wal/my_db/0/autogen + rpWalPath := filepath.Join(ptWalPath, retentionPolicy) + if _, err := os.Stat(rpWalPath); err != nil { + return fmt.Errorf("retention policy %q invalid : %s", retentionPolicy, err) + } else { + d.rpToWalDirMap[ptWithRp] = rpWalPath + } + continue + } + + rpTsspDirs, err1 := os.ReadDir(ptTsspPath) + if err1 != nil { + return err1 + } + for _, rpDir := range rpTsspDirs { + if !rpDir.IsDir() { + continue + } + ptWithRp := ptDir.Name() + ":" + rpDir.Name() + rpPath := filepath.Join(ptTsspPath, rpDir.Name()) + d.rps[ptWithRp] = struct{}{} + d.rpToTsspDirMap[ptWithRp] = rpPath + d.rpToIndexDirMap[ptWithRp] = filepath.Join(rpPath, "index") + } + + rpWalDirs, err2 := os.ReadDir(ptWalPath) + if err2 != nil { + return err2 + } + for _, rpDir := range rpWalDirs { + ptWithRp := ptDir.Name() + ":" + rpDir.Name() + if !rpDir.IsDir() { + continue + } + rpPath := filepath.Join(ptWalPath, rpDir.Name()) + d.rpToWalDirMap[ptWithRp] = rpPath + } + } + return nil +} + +type Exporter struct { + exportFormat string + databaseDiskInfos []*DatabaseDiskInfo + filesTotalCount int + actualDataPath string + actualWalPath string + outPutPath string + filter *dataFilter + compress bool + lineCount uint64 + resume bool + progress map[string]struct{} + remote string + remoteExporter *remoteExporter + parser + + stderrLogger *log.Logger + stdoutLogger *log.Logger + defaultLogger *log.Logger + + manifest map[string]struct{} // {dbName:rpName, struct{}{}} + rpNameToMeasurementTsspFilesMap map[string]map[string][]string // {dbName:rpName, {measurementName, tssp file absolute path}} + rpNameToIdToIndexMap map[string]map[uint64]*tsi.MergeSetIndex // {dbName:rpName, {indexId, *mergeSetIndex}} + rpNameToWalFilesMap map[string][]string // {dbName:rpName:shardDurationRange, wal file absolute path} + + Stderr io.Writer + Stdout io.Writer + bar *mpb.Bar +} + +func NewExporter() *Exporter { + return &Exporter{ + resume: false, + progress: make(map[string]struct{}), + 
+ stderrLogger: log.New(os.Stderr, "export: ", log.LstdFlags), + stdoutLogger: log.New(os.Stdout, "export: ", log.LstdFlags), + + manifest: make(map[string]struct{}), + rpNameToMeasurementTsspFilesMap: make(map[string]map[string][]string), + rpNameToIdToIndexMap: make(map[string]map[uint64]*tsi.MergeSetIndex), + rpNameToWalFilesMap: make(map[string][]string), + remoteExporter: newRemoteExporter(), + + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// parseActualDir transforms user puts in datadir and waldir to actual dirs +func (e *Exporter) parseActualDir(clc *ExportConfig) error { + actualDataDir := filepath.Join(clc.DataDir, config.DataDirectory) + if _, err := os.Stat(actualDataDir); err != nil { + return err + } else { + e.actualDataPath = actualDataDir + } + + actualWalDir := filepath.Join(clc.WalDir, config.WalDirectory) + if _, err := os.Stat(actualWalDir); err != nil { + return err + } else { + e.actualWalPath = actualWalDir + } + + return nil +} + +// parseDatabaseInfos get all path infos for export. 
+func (e *Exporter) parseDatabaseInfos() error { + dbName := e.filter.database + + dbDiskInfo := newDatabaseDiskInfo() + err := dbDiskInfo.init(e.actualDataPath, e.actualWalPath, dbName, e.filter.retention) + if err != nil { + return fmt.Errorf("can't find database files for %s : %s", dbName, err) + } + e.databaseDiskInfos = append(e.databaseDiskInfos, dbDiskInfo) + return nil +} + +// Init inits the Exporter instance ues CommandLineConfig specific by user +func (e *Exporter) Init(clc *ExportConfig, progressedFiles map[string]struct{}) error { + if clc.Format == "" { + return fmt.Errorf("export flag format is required") + } + if clc.DataDir == "" { + return fmt.Errorf("export flag data is required") + } + if clc.DBFilter == "" { + return fmt.Errorf("export flag dbfilter is required") + } + if clc.Format != csvFormatExporter && clc.Format != txtFormatExporter && clc.Format != remoteFormatExporter { + return fmt.Errorf("unsupported export format %q", clc.Format) + } + if clc.Format != remoteFormatExporter && clc.Out == "" { + return fmt.Errorf("execute -export cmd, not using remote format, --out is required") + } + if clc.Format == remoteFormatExporter { + if err := e.remoteExporter.Init(clc); err != nil { + return err + } + } + e.exportFormat = clc.Format + if e.exportFormat == txtFormatExporter || e.exportFormat == remoteFormatExporter { + e.parser = newTxtParser() + } else if e.exportFormat == csvFormatExporter { + e.parser = newCsvParser() + } + e.outPutPath = clc.Out + e.compress = clc.Compress + e.remote = clc.Remote + e.defaultLogger = e.stdoutLogger + if clc.Resume { + e.resume = true + e.progress = progressedFiles + e.defaultLogger.Printf("starting resume export file, you have exported %d files \n", len(e.progress)) + } + if err := e.writeProgressJson(clc); err != nil { + return err + } + // filter db, mst, time + e.filter = newDataFilter() + e.filter.parseDatabase(clc.DBFilter) + e.filter.parseRetention(clc.RetentionFilter) + if err := 
e.filter.parseTime(clc); err != nil { + return err + } + if err := e.filter.parseMeasurement(clc.MeasurementFilter); err != nil { + return err + } + // ie. dataDir=/tmp/openGemini/data walDir=/tmp/openGemini/data + // actualDataPath=/tmp/openGemini/data/data actualWalPath=/tmp/openGemini/data/wal + if err := e.parseActualDir(clc); err != nil { + return err + } + + // Get all dir infos that we need,like all database/rp/tsspDirs and database/rp/walDirs + if err := e.parseDatabaseInfos(); err != nil { + return err + } + + return nil +} + +// Export exports all data user want. +func (e *Exporter) Export(clc *ExportConfig, progressedFiles map[string]struct{}) error { + err := e.Init(clc, progressedFiles) + if err != nil { + return err + } + for _, dbDiskInfo := range e.databaseDiskInfos { + err = e.walkDatabase(dbDiskInfo) + if err != nil { + return err + } + } + e.bar, err = e.newBar() + if err != nil { + return err + } + return e.write() +} + +// walkDatabase gets all db's tssp filepath, wal filepath, and index filepath. 
+func (e *Exporter) walkDatabase(dbDiskInfo *DatabaseDiskInfo) error { + if err := e.walkTsspFile(dbDiskInfo); err != nil { + return err + } + if err := e.walkIndexFiles(dbDiskInfo); err != nil { + return err + } + if err := e.walkWalFile(dbDiskInfo); err != nil { + return err + } + return nil +} + +func (e *Exporter) newBar() (*mpb.Bar, error) { + for _, measurementToTsspFileMap := range e.rpNameToMeasurementTsspFilesMap { + for _, tsspFiles := range measurementToTsspFileMap { + e.filesTotalCount += len(tsspFiles) + } + } + for _, walFiles := range e.rpNameToWalFilesMap { + e.filesTotalCount += len(walFiles) + } + if e.filesTotalCount == 0 { + return nil, fmt.Errorf("no files to export.check your filter or datapath") + } + bar := MpbProgress.New(int64(e.filesTotalCount), + mpb.BarStyle().Lbound("[").Filler("=").Tip(">").Padding("-").Rbound("]"), + mpb.PrependDecorators( + decor.Name("Exporting Data:", decor.WC{W: 20, C: decor.DidentRight}), + decor.CountersNoUnit("%d/%d", decor.WC{W: 15, C: decor.DidentRight}), + decor.OnComplete( + decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 6}), + "complete", + ), + ), + mpb.AppendDecorators( + decor.Percentage(), + ), + ) + return bar, nil +} + +// write writes data to output fd user specifics. +func (e *Exporter) write() error { + var outputWriter, metaWriter io.Writer + var err error + if e.remoteExporter.isExist { + outputWriter = io.Discard + } else { + err = os.MkdirAll(filepath.Dir(e.outPutPath), 0755) + if err != nil { + return err + } + var outputFile *os.File // open file descriptor. 
+ if e.resume { + exportDir := filepath.Dir(e.outPutPath) + exportFilePath := filepath.Join(exportDir, resumeFilePrefix+time.Now().Format("2006-01-02_15-04-05.000000000")+filepath.Ext(e.outPutPath)) + outputFile, err = os.OpenFile(exportFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return err + } + } else { + outputFile, err = os.OpenFile(e.outPutPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return err + } + } + defer outputFile.Close() + + outputWriter = outputFile + } + + if e.compress { + if e.remoteExporter.isExist { + return fmt.Errorf("remote format can't compress") + } + gzipWriter := gzip.NewWriter(outputWriter) + defer gzipWriter.Close() + outputWriter = gzipWriter + } + + // metaWriter to write information that are not line-protocols + if e.remoteExporter.isExist { + metaWriter = io.Discard + } else { + metaWriter = outputWriter + } + + return e.writeFull(metaWriter, outputWriter) +} + +// writeFull writes all DDL and DML +func (e *Exporter) writeFull(metaWriter io.Writer, outputWriter io.Writer) error { + start, end := time.Unix(0, e.filter.startTime).UTC().Format(time.RFC3339), time.Unix(0, e.filter.endTime).UTC().Format(time.RFC3339) + e.parser.writeMetaInfo(metaWriter, 0, fmt.Sprintf("# openGemini EXPORT: %s - %s", start, end)) + e.defaultLogger.Printf("Exporting data total %d files\n", e.filesTotalCount) + if err := e.writeDDL(metaWriter, outputWriter); err != nil { + return err + } + + if err := e.writeDML(metaWriter, outputWriter); err != nil { + return err + } + e.defaultLogger.Printf("Summarize %d line protocol\n", e.lineCount) + return nil +} + +// walkTsspFile walk all tssp files for every database. 
+func (e *Exporter) walkTsspFile(dbDiskInfo *DatabaseDiskInfo) error { + for ptWithRp := range dbDiskInfo.rps { + rpDir := dbDiskInfo.rpToTsspDirMap[ptWithRp] + if err := filepath.Walk(rpDir, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) != "."+tsspFileExtension { + return nil + } + // search .tssp file + tsspPathSplits := strings.Split(path, string(byte(os.PathSeparator))) + measurementDirWithVersion := tsspPathSplits[len(tsspPathSplits)-2] // measurement_version: m_0000 + measurementName := influx.GetOriginMstName(measurementDirWithVersion) + // filter measurement + if len(e.filter.measurement) != 0 && e.filter.measurement != measurementName { + return nil + } + // eg. "0:autogen" to ["0","autogen"] + splitPtWithRp := strings.Split(ptWithRp, ":") + key := dbDiskInfo.dbName + ":" + splitPtWithRp[1] + e.manifest[key] = struct{}{} + if _, ok := e.rpNameToMeasurementTsspFilesMap[key]; !ok { // db:rp + e.rpNameToMeasurementTsspFilesMap[key] = make(map[string][]string) + } + e.rpNameToMeasurementTsspFilesMap[key][measurementName] = append(e.rpNameToMeasurementTsspFilesMap[key][measurementName], path) + return nil + }); err != nil { + return err + } + } + return nil +} + +func (e *Exporter) walkWalFile(dbDiskInfo *DatabaseDiskInfo) error { + for ptWithRp := range dbDiskInfo.rps { + rpDir := dbDiskInfo.rpToWalDirMap[ptWithRp] + if err := filepath.Walk(rpDir, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) != "."+walFileExtension { + return nil + } + // eg. 
"0:autogen" to ["0","autogen"] + splitPtWithRp := strings.Split(ptWithRp, ":") + key := dbDiskInfo.dbName + ":" + splitPtWithRp[1] + e.manifest[key] = struct{}{} + e.rpNameToWalFilesMap[key] = append(e.rpNameToWalFilesMap[key], path) + return nil + }); err != nil { + return err + } + } + return nil +} + +func (e *Exporter) walkIndexFiles(dbDiskInfo *DatabaseDiskInfo) error { + for ptWithRp := range dbDiskInfo.rps { + indexPath := dbDiskInfo.rpToIndexDirMap[ptWithRp] + files, err := os.ReadDir(indexPath) + if err != nil { + return err + } + for _, file := range files { + if !file.IsDir() { + continue + } + indexId, err2 := parseIndexDir(file.Name()) + if err2 != nil { + return err2 + } + // eg. "0:autogen" to ["0","autogen"] + splitPtWithRp := strings.Split(ptWithRp, ":") + key := dbDiskInfo.dbName + ":" + splitPtWithRp[1] + lockPath := "" + opt := &tsi.Options{} + opt.Path(filepath.Join(indexPath, file.Name())).IndexType(index.MergeSet).Lock(&lockPath) + if _, ok := e.rpNameToIdToIndexMap[key]; !ok { // db:rp + e.rpNameToIdToIndexMap[key] = make(map[uint64]*tsi.MergeSetIndex) + } + e.manifest[key] = struct{}{} + if e.rpNameToIdToIndexMap[key][indexId], err = tsi.NewMergeSetIndex(opt); err != nil { + return err + } + } + } + return nil +} + +// writeDDL write every "database:retention policy" DDL +func (e *Exporter) writeDDL(metaWriter io.Writer, outputWriter io.Writer) error { + e.parser.writeMetaInfo(metaWriter, 0, "# DDL") + for _, dbDiskInfo := range e.databaseDiskInfos { + avoidRepetition := map[string]struct{}{} + databaseName := dbDiskInfo.dbName + e.parser.writeOutputInfo(outputWriter, fmt.Sprintf("CREATE DATABASE %s\n", databaseName)) + if e.remoteExporter.isExist { + // write DDL to remote + if err := e.remoteExporter.createDatabase(databaseName); err != nil { + return err + } + } + for ptWithRp := range dbDiskInfo.rps { + rpName := strings.Split(ptWithRp, ":")[1] + if _, ok := avoidRepetition[rpName]; !ok { + if e.remoteExporter.isExist { + // write DDL 
to remote + if err := e.remoteExporter.createRetentionPolicy(databaseName, rpName); err != nil { + return err + } + } + e.parser.writeOutputInfo(outputWriter, fmt.Sprintf("CREATE RETENTION POLICY %s ON %s DURATION 0s REPLICATION 1\n", rpName, databaseName)) + avoidRepetition[rpName] = struct{}{} + } + } + e.parser.writeMetaInfo(metaWriter, 0, "") + } + return nil +} + +// writeDML write every "database:retention policy" DML +func (e *Exporter) writeDML(metaWriter io.Writer, outputWriter io.Writer) error { + e.parser.writeMetaInfo(metaWriter, 0, "# DML") + var curDatabaseName string + // write DML for every item which key = "database:retention policy" + for key := range e.manifest { + keySplits := strings.Split(key, ":") + + if keySplits[0] != curDatabaseName { + e.parser.writeMetaInfo(metaWriter, InfoTypeDatabase, keySplits[0]) + curDatabaseName = keySplits[0] + } + e.remoteExporter.database = curDatabaseName + + // shardKeyToIndexMap stores all indexes for this "database:retention policy" + shardKeyToIndexMap, ok := e.rpNameToIdToIndexMap[key] + if !ok { + return fmt.Errorf("cant find rpNameToIdToIndexMap for %q", key) + } + e.remoteExporter.retentionPolicy = keySplits[1] + + e.parser.writeMetaInfo(metaWriter, InfoTypeRetentionPolicy, keySplits[1]) + // Write all tssp files from this "database:retention policy" + if measurementToTsspFileMap, ok := e.rpNameToMeasurementTsspFilesMap[key]; ok { + if err := e.writeAllTsspFilesInRp(metaWriter, outputWriter, measurementToTsspFileMap, shardKeyToIndexMap); err != nil { + return err + } + } + // Write all wal files from this "database:retention policy" + if files, ok := e.rpNameToWalFilesMap[key]; ok { + if err := e.writeAllWalFilesInRp(metaWriter, outputWriter, files, curDatabaseName); err != nil { + return err + } + } + } + MpbProgress.Wait() + return nil +} + +// writeProgressJson writes progress to json file +func (e *Exporter) writeProgressJson(clc *ExportConfig) error { + output, err := json.MarshalIndent(clc, "", 
"\t") + if err != nil { + return err + } + err = os.WriteFile(ResumeJsonPath, output, 0644) + if err != nil { + return err + } + return nil +} + +// writeProgressedFiles writes progressed file name +func (e *Exporter) writeProgressedFiles(filename string) error { + file, err := os.OpenFile(ProgressedFilesPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer file.Close() + + _, err = file.WriteString(filename + "\n") + if err != nil { + return err + } + return nil +} + +// writeAllTsspFilesInRp writes all tssp files in a "database:retention policy" +func (e *Exporter) writeAllTsspFilesInRp(metaWriter io.Writer, outputWriter io.Writer, measurementFilesMap map[string][]string, indexesMap map[uint64]*tsi.MergeSetIndex) error { + e.parser.writeMetaInfo(metaWriter, 0, "# FROM TSSP FILE") + var isOrder bool + hasWrittenMstInfo := make(map[string]bool) + for measurementName, files := range measurementFilesMap { + e.parser.writeMetaInfo(metaWriter, InfoTypeMeasurement, measurementName) + hasWrittenMstInfo[measurementName] = false + for _, file := range files { + if _, ok := e.progress[file]; ok { + e.bar.Increment() + continue + } + splits := strings.Split(file, string(os.PathSeparator)) + var shardDir string + if strings.Contains(file, "out-of-order") { + isOrder = false + // ie./tmp/openGemini/data/data/db1/0/autogen/1_1567382400000000000_1567987200000000000_1/tssp/average_temperature_0000/out-of-order/00000002-0000-00000000.tssp + shardDir = splits[len(splits)-5] + } else { + isOrder = true + // ie./tmp/openGemini/data/data/db1/0/autogen/1_1567382400000000000_1567987200000000000_1/tssp/average_temperature_0000/00000002-0000-00000000.tssp + shardDir = splits[len(splits)-4] + } + _, dirStartTime, dirEndTime, indexId, err := parseShardDir(shardDir) + if err != nil { + return err + } + if err = indexesMap[indexId].Open(); err != nil { + return err + } + if !hasWrittenMstInfo[measurementName] { + if err := 
e.parser.writeMstInfoFromTssp(metaWriter, outputWriter, file, isOrder, indexesMap[indexId]); err != nil { + return err + } + hasWrittenMstInfo[measurementName] = true + } + if e.filter.isBelowMinTimeFilter(dirEndTime) || e.filter.isAboveMaxTimeFilter(dirStartTime) { + e.bar.Increment() + continue + } + if err := e.writeSingleTsspFile(file, outputWriter, indexesMap[indexId], isOrder); err != nil { + return err + } + if err = indexesMap[indexId].Close(); err != nil { + return err + } + e.bar.Increment() + } + fmt.Fprintf(outputWriter, "\n") + } + return nil +} + +// writeSingleTsspFile writes a single tssp file's all records. +func (e *Exporter) writeSingleTsspFile(filePath string, outputWriter io.Writer, index *tsi.MergeSetIndex, isOrder bool) error { + lockPath := "" + tsspFile, err := immutable.OpenTSSPFile(filePath, &lockPath, isOrder) + defer util.MustClose(tsspFile) + + if err != nil { + return err + } + fi := immutable.NewFileIterator(tsspFile, immutable.CLog) + itr := immutable.NewChunkIterator(fi) + itrChunk := immutable.NewChunkIterator(fi) + itrChunk.NextChunkMeta() + var maxTime int64 + var minTime int64 + minTime, maxTime = fi.GetCurtChunkMeta().MinMaxTime() + // Check if the maximum and minimum time of records that the SID points to are in the filter range of e.filter + if e.filter.isBelowMinTimeFilter(maxTime) || e.filter.isAboveMaxTimeFilter(minTime) { + return nil + } + for { + if !itr.Next() { + break + } + sid := itr.GetSeriesID() + if sid == 0 { + return fmt.Errorf("series ID is zero") + } + rec := itr.GetRecord() + record.CheckRecord(rec) + + maxTime = rec.MaxTime(true) + minTime = rec.MinTime(true) + + // Check if the maximum and minimum time of records that the SID points to are in the filter range of e.filter + if e.filter.isBelowMinTimeFilter(maxTime) || e.filter.isAboveMaxTimeFilter(minTime) { + continue + } + + if err := e.writeSeriesRecords(outputWriter, sid, rec, index); err != nil { + return err + } + } + err = 
e.writeProgressedFiles(filePath) + if err != nil { + return err + } + return nil +} + +// writeSeriesRecords writes all records pointed to by one sid. +func (e *Exporter) writeSeriesRecords(outputWriter io.Writer, sid uint64, rec *record.Record, index *tsi.MergeSetIndex) error { + + var combineKey []byte + var seriesKeys [][]byte + var isExpectSeries []bool + var err error + // Use sid get series key's []byte + if seriesKeys, _, isExpectSeries, err = index.SearchSeriesWithTagArray(sid, seriesKeys, nil, combineKey, isExpectSeries, nil); err != nil { + return err + } + series := make([][]byte, 1) + point := &opengemini.Point{} + sIndex := 0 + for i := range seriesKeys { + if !isExpectSeries[i] { + continue + } + if sIndex >= 1 { + bufSeries := influx.GetBytesBuffer() + bufSeries, err = e.parser.parse2SeriesKeyWithoutVersion(seriesKeys[i], bufSeries, false, point) + if err != nil { + return err + } + series = append(series, bufSeries) + } else { + if series[sIndex] == nil { + series[sIndex] = influx.GetBytesBuffer() + } + series[sIndex], err = e.parser.parse2SeriesKeyWithoutVersion(seriesKeys[i], series[sIndex][:0], false, point) + if err != nil { + return err + } + sIndex++ + } + } + var recs []record.Record + recs = rec.Split(recs, 1) + buf := influx.GetBytesBuffer() + defer influx.PutBytesBuffer(buf) + for _, r := range recs { + pointWithTag := &opengemini.Point{ + Measurement: point.Measurement, + Tags: point.Tags, + } + if buf, err = e.writeSingleRecord(outputWriter, series, r, buf, pointWithTag); err != nil { + return err + } + } + if e.remoteExporter.isExist { + err := e.remoteExporter.writeAllPoints() + if err != nil { + return err + } + } + for _, bufSeries := range series { + influx.PutBytesBuffer(bufSeries) + } + return nil +} + +// writeSingleRecord parses a record and a series key to line protocol, and writes it. 
+func (e *Exporter) writeSingleRecord(outputWriter io.Writer, seriesKey [][]byte, rec record.Record, buf []byte, point *opengemini.Point) ([]byte, error) { + tm := rec.Times()[0] + if !e.filter.timeFilter(tm) { + return buf, nil + } + buf = bytes.Join(seriesKey, []byte(",")) + buf, err := e.parser.appendFields(rec, buf, point) + if err != nil { + return nil, err + } + if e.remoteExporter.isExist { + e.remoteExporter.points = append(e.remoteExporter.points, point) + } else { + if _, err := outputWriter.Write(buf); err != nil { + return buf, err + } + } + e.lineCount++ + buf = buf[:0] + return buf, nil +} + +// writeAllWalFilesInRp writes all wal files in a "database:retention policy" +func (e *Exporter) writeAllWalFilesInRp(metaWriter io.Writer, outputWriter io.Writer, files []string, currentDatabase string) error { + e.parser.writeMetaInfo(metaWriter, 0, "# FROM WAL FILE") + var currentMeasurement string + for _, file := range files { + if _, ok := e.progress[file]; ok { + e.bar.Increment() + continue + } + if err := e.writeSingleWalFile(file, metaWriter, outputWriter, currentDatabase, ¤tMeasurement); err != nil { + return err + } + e.bar.Increment() + if err := e.writeProgressedFiles(file); err != nil { + return err + } + } + fmt.Fprintf(outputWriter, "\n") + return nil +} + +// writeSingleWalFile writes a single wal file's all rows. 
+func (e *Exporter) writeSingleWalFile(file string, metaWriter io.Writer, outputWriter io.Writer, currentDatabase string, currentMeasurement *string) error { + lockPath := fileops.FileLockOption("") + priority := fileops.FilePriorityOption(fileops.IO_PRIORITY_NORMAL) + fd, err := fileops.OpenFile(file, os.O_RDONLY, 0640, lockPath, priority) + defer util.MustClose(fd) + if err != nil { + return err + } + + stat, err := fd.Stat() + if err != nil { + return err + } + fileSize := stat.Size() + if fileSize == 0 { + return nil + } + recordCompBuff := bufferpool.NewByteBufferPool(engine.WalCompBufSize, 0, bufferpool.MaxLocalCacheLen).Get() + var offset int64 = 0 + var rows []influx.Row + for { + rows, offset, recordCompBuff, err = e.readWalRows(fd, offset, fileSize, recordCompBuff) + if err != nil { + if err == io.EOF { + return nil + } + return nil + } + if e.lineCount == 0 { + measurementWithVersion := rows[0].Name + *currentMeasurement = influx.GetOriginMstName(measurementWithVersion) + *currentMeasurement = EscapeMstName(*currentMeasurement) + e.parser.writeMetaInfo(metaWriter, InfoTypeMeasurement, *currentMeasurement) + if err := e.parser.writeMstInfoFromWal(metaWriter, outputWriter, rows[0], currentDatabase); err != nil { + return err + } + } + if err = e.writeRows(rows, metaWriter, outputWriter, currentDatabase, currentMeasurement); err != nil { + return err + } + } +} + +// readWalRows read some rows from the fd, and reuse recordCompBuff to save memory. 
// readWalRows read some rows from the fd, and reuse recordCompBuff to save memory.
// WAL record layout: a fixed-size header (whose bytes [1:HeadSize] hold the
// big-endian compressed body length) followed by a snappy-compressed block of rows.
// Any read/decode failure is logged to stderrLogger and reported as io.EOF so the
// caller stops reading this file instead of aborting the export.
func (e *Exporter) readWalRows(fd fileops.File, offset, fileSize int64, recordCompBuff []byte) ([]influx.Row, int64, []byte, error) {
	if offset >= fileSize {
		return nil, offset, recordCompBuff, io.EOF
	}

	// read record header
	var recordHeader [engine.WalRecordHeadSize]byte
	n, err := fd.ReadAt(recordHeader[:], offset)
	if err != nil {
		e.stderrLogger.Println(errno.NewError(errno.ReadWalFileFailed, fd.Name(), offset, "record header").Error())
		return nil, offset, recordCompBuff, io.EOF
	}
	if n != engine.WalRecordHeadSize {
		e.stderrLogger.Println(errno.NewError(errno.WalRecordHeaderCorrupted, fd.Name(), offset).Error())
		return nil, offset, recordCompBuff, io.EOF
	}
	offset += int64(len(recordHeader))

	// prepare record memory: grow (or reuse) the compressed-body buffer.
	compBinaryLen := binary.BigEndian.Uint32(recordHeader[1:engine.WalRecordHeadSize])
	recordCompBuff = bufferpool.Resize(recordCompBuff, int(compBinaryLen))

	// read record body
	var recordBuff []byte
	n, err = fd.ReadAt(recordCompBuff, offset)
	if err == nil || err == io.EOF {
		offset += int64(n)
		var innerErr error
		recordBuff, innerErr = snappy.Decode(recordBuff, recordCompBuff)
		if innerErr != nil {
			e.stderrLogger.Println(errno.NewError(errno.DecompressWalRecordFailed, fd.Name(), offset, innerErr.Error()).Error())
			return nil, offset, recordCompBuff, io.EOF
		}
		var rows []influx.Row
		var tagPools []influx.Tag
		var fieldPools []influx.Field
		var indexKeyPools []byte
		var indexOptionPools []influx.IndexOption
		// NOTE(review): this inner `var err error` shadows the outer err (which may
		// be io.EOF from the final ReadAt) and is never assigned, so the success
		// return below always carries a nil error. The next call then reports EOF
		// via the offset >= fileSize guard. Convoluted but apparently functional.
		var err error
		rows, _, _, _, _, innerErr = influx.FastUnmarshalMultiRows(recordBuff, rows, tagPools, fieldPools, indexOptionPools, indexKeyPools)

		if innerErr == nil {
			return rows, offset, recordCompBuff, err
		}
		// NOTE(review): rows are returned alongside a non-nil error here, but the
		// caller discards rows whenever err != nil — confirm no data is lost.
		return rows, offset, recordCompBuff, innerErr
	}
	e.stderrLogger.Println(errno.NewError(errno.ReadWalFileFailed, fd.Name(), offset, "record body").Error())
	return nil, offset, recordCompBuff, io.EOF
}

// writeRows process a cluster of rows
func (e *Exporter)
writeRows(rows []influx.Row, metaWriter io.Writer, outputWriter io.Writer, currentDatabase string, currentMeasurement *string) error {
	// One pooled buffer is reused across all rows; writeSingleRow truncates it
	// after each emitted line.
	buf := influx.GetBytesBuffer()
	defer influx.PutBytesBuffer(buf)
	var err error
	for _, r := range rows {
		point := &opengemini.Point{}
		if buf, err = e.writeSingleRow(r, metaWriter, outputWriter, buf, point, currentDatabase, currentMeasurement); err != nil {
			return err
		}
	}
	// In remote mode the rows were accumulated as points; flush them in one batch.
	if e.remoteExporter.isExist {
		err := e.remoteExporter.writeAllPoints()
		if err != nil {
			return err
		}
	}
	return nil
}

// writeSingleRow parse a single row to line protocol, and writes it.
// Rows are dropped when they fail the measurement or time filters. When the
// measurement changes, a new meta section is written first. Returns buf
// truncated for reuse by the caller.
func (e *Exporter) writeSingleRow(row influx.Row, metaWriter io.Writer, outputWriter io.Writer, buf []byte,
	point *opengemini.Point, currentDatabase string, mstName *string) ([]byte, error) {
	measurementWithVersion := row.Name
	measurementName := influx.GetOriginMstName(measurementWithVersion)
	measurementName = EscapeMstName(measurementName)
	tm := row.Timestamp
	// filter measurement
	if len(e.filter.measurement) != 0 && e.filter.measurement != measurementName {
		return buf, nil
	}
	if !e.filter.timeFilter(tm) {
		return buf, nil
	}

	if measurementName != *mstName {
		e.parser.writeMetaInfo(metaWriter, InfoTypeMeasurement, measurementName)
		if err := e.parser.writeMstInfoFromWal(metaWriter, outputWriter, row, currentDatabase); err != nil {
			return buf, err
		}
		*mstName = measurementName
	}
	buf, err := e.parser.getRowBuf(buf, measurementName, row, point)
	if err != nil {
		return nil, err
	}
	if e.remoteExporter.isExist {
		// Remote mode: accumulate the point; the batch is flushed by the caller.
		e.remoteExporter.points = append(e.remoteExporter.points, point)
	} else {
		if _, err := outputWriter.Write(buf); err != nil {
			return buf, err
		}
	}
	e.lineCount++
	buf = buf[:0]
	return buf, nil
}

// parser renders index keys, records and WAL rows into a concrete export
// format (txt line protocol, csv, or remote points).
type parser interface {
	parse2SeriesKeyWithoutVersion(key []byte, dst []byte, splitWithNull bool, point *opengemini.Point) ([]byte, error)
	appendFields(rec record.Record, buf []byte, point
*opengemini.Point) ([]byte, error)
	writeMstInfoFromTssp(metaWriter io.Writer, outputWriter io.Writer, filePath string, isOrder bool, index *tsi.MergeSetIndex) error
	writeMstInfoFromWal(metaWriter io.Writer, outputWriter io.Writer, row influx.Row, curDatabase string) error
	writeMetaInfo(metaWriter io.Writer, infoType InfoType, info string)
	writeOutputInfo(outputWriter io.Writer, info string)
	getRowBuf(buf []byte, measurementName string, row influx.Row, point *opengemini.Point) ([]byte, error)
}

// txtParser renders data as InfluxDB line protocol ("txt" export format).
type txtParser struct{}

func newTxtParser() *txtParser {
	return &txtParser{}
}

// parse2SeriesKeyWithoutVersion parse encoded index key to line protocol series key,without version and escape special characters
// encoded index key format: [total len][ms len][ms][tagkey1 len][tagkey1 val]...]
// parse to line protocol format: mst,tagkey1=tagval1,tagkey2=tagval2...
func (t *txtParser) parse2SeriesKeyWithoutVersion(key []byte, dst []byte, splitWithNull bool, point *opengemini.Point) ([]byte, error) {
	msName, src, err := influx.MeasurementName(key)
	// NOTE(review): msName is consumed before the error check below; benign today
	// (string(nil) is ""), but checking err first would be cleaner.
	originMstName := influx.GetOriginMstName(string(msName))
	originMstName = EscapeMstName(originMstName)
	if err != nil {
		return []byte{}, err
	}
	// split[0] separates key=value, split[1] separates pairs (or both are the
	// internal ByteSplit marker when splitWithNull is set).
	var split [2]byte
	if splitWithNull {
		split[0], split[1] = influx.ByteSplit, influx.ByteSplit
	} else {
		split[0], split[1] = '=', ','
	}
	point.Measurement = originMstName
	dst = append(dst, originMstName...)
	dst = append(dst, ',')
	tagsN := encoding.UnmarshalUint16(src)
	src = src[2:]
	var i uint16
	for i = 0; i < tagsN; i++ {
		keyLen := encoding.UnmarshalUint16(src)
		src = src[2:]
		tagKey := EscapeTagKey(string(src[:keyLen]))
		dst = append(dst, tagKey...)
		dst = append(dst, split[0])
		src = src[keyLen:]

		valLen := encoding.UnmarshalUint16(src)
		src = src[2:]
		tagVal := EscapeTagValue(string(src[:valLen]))
		dst = append(dst, tagVal...)
		dst = append(dst, split[1])
		src = src[valLen:]

		point.AddTag(tagKey, tagVal)
	}
	// Drop the trailing pair separator appended after the last tag.
	return dst[:len(dst)-1], nil
}

// appendFields appends " field1=v1,field2=v2 <timestamp>\n" for the single-row
// record rec, mirroring each field into point for the remote exporter.
func (t *txtParser) appendFields(rec record.Record, buf []byte, point *opengemini.Point) ([]byte, error) {
	buf = append(buf, ' ')
	for i, field := range rec.Schema {
		if field.Name == "time" {
			continue
		}
		buf = append(buf, EscapeFieldKey(field.Name)+"="...)
		switch field.Type {
		case influx.Field_Type_Float:
			buf = strconv.AppendFloat(buf, rec.Column(i).FloatValues()[0], 'g', -1, 64)
			point.AddField(EscapeFieldKey(field.Name), strconv.FormatFloat(rec.Column(i).FloatValues()[0], 'g', -1, 64))
		case influx.Field_Type_Int:
			buf = strconv.AppendInt(buf, rec.Column(i).IntegerValues()[0], 10)
			point.AddField(EscapeFieldKey(field.Name), strconv.FormatInt(rec.Column(i).IntegerValues()[0], 10))
		case influx.Field_Type_Boolean:
			buf = strconv.AppendBool(buf, rec.Column(i).BooleanValues()[0])
			point.AddField(EscapeFieldKey(field.Name), strconv.FormatBool(rec.Column(i).BooleanValues()[0]))
		case influx.Field_Type_String:
			var str []string
			str = rec.Column(i).StringValues(str)
			buf = append(buf, '"')
			buf = append(buf, EscapeStringFieldValue(str[0])...)
			buf = append(buf, '"')
			point.AddField(EscapeFieldKey(field.Name), str[0])
		default:
			// This shouldn't be possible, but we'll format it anyway.
			buf = append(buf, fmt.Sprintf("%v", rec.Column(i))...)
			point.AddField(EscapeFieldKey(field.Name), fmt.Sprintf("%v", rec.Column(i)))
		}
		// assumes "time" is the last schema column, so the last field is at
		// index rec.Len()-2 — TODO confirm against record layout.
		if i != rec.Len()-2 {
			buf = append(buf, ',')
		} else {
			buf = append(buf, ' ')
		}
	}
	buf = strconv.AppendInt(buf, rec.Times()[0], 10)
	buf = append(buf, '\n')
	point.Timestamp = rec.Times()[0] // point.Time = time.Unix(0, rec.Times()[0])
	return buf, nil
}

// writeMstInfoFromTssp is a no-op for the txt format (line protocol carries no
// per-measurement schema header).
func (t *txtParser) writeMstInfoFromTssp(_ io.Writer, _ io.Writer, _ string, _ bool, _ *tsi.MergeSetIndex) error {
	return nil
}

// writeMstInfoFromWal is a no-op for the txt format.
func (t *txtParser) writeMstInfoFromWal(_ io.Writer, _ io.Writer, _ influx.Row, _ string) error {
	return nil
}

// getRowBuf renders a WAL row as one line-protocol line into buf and mirrors
// measurement/tags/fields into point for the remote exporter.
func (t *txtParser) getRowBuf(buf []byte, measurementName string, row influx.Row, point *opengemini.Point) ([]byte, error) {
	point.Measurement = measurementName
	tags := row.Tags
	fields := row.Fields
	tm := row.Timestamp

	buf = append(buf, measurementName...)
	buf = append(buf, ',')
	for i, tag := range tags {
		buf = append(buf, EscapeTagKey(tag.Key)+"="...)
		buf = append(buf, EscapeTagValue(tag.Value)...)
		if i != len(tags)-1 {
			buf = append(buf, ',')
		} else {
			buf = append(buf, ' ')
		}
		point.AddTag(EscapeTagKey(tag.Key), EscapeTagValue(tag.Value))
	}
	for i, field := range fields {
		buf = append(buf, EscapeFieldKey(field.Key)+"="...)
		switch field.Type {
		case influx.Field_Type_Float:
			buf = strconv.AppendFloat(buf, field.NumValue, 'g', -1, 64)
			point.AddField(EscapeFieldKey(field.Key), strconv.FormatFloat(field.NumValue, 'g', -1, 64))
		case influx.Field_Type_Int:
			buf = strconv.AppendInt(buf, int64(field.NumValue), 10)
			point.AddField(EscapeFieldKey(field.Key), strconv.FormatInt(int64(field.NumValue), 10))
		case influx.Field_Type_Boolean:
			buf = strconv.AppendBool(buf, field.NumValue == 1)
			point.AddField(EscapeFieldKey(field.Key), strconv.FormatBool(field.NumValue == 1))
		case influx.Field_Type_String:
			buf = append(buf, '"')
			buf = append(buf, EscapeStringFieldValue(field.StrValue)...)
			buf = append(buf, '"')
			point.AddField(EscapeFieldKey(field.Key), field.StrValue)
		default:
			// This shouldn't be possible, but we'll format it anyway.
			buf = append(buf, fmt.Sprintf("%v", field)...)
			point.AddField(EscapeFieldKey(field.Key), fmt.Sprintf("%v", field))
		}
		if i != len(fields)-1 {
			buf = append(buf, ',')
		} else {
			buf = append(buf, ' ')
		}
	}
	buf = strconv.AppendInt(buf, tm, 10)
	buf = append(buf, '\n')
	point.Timestamp = tm // point.Time = time.Unix(0, tm)
	return buf, nil
}

// InfoType selects which meta-context header writeMetaInfo emits.
type InfoType int

const (
	InfoTypeDatabase InfoType = 1 + iota
	InfoTypeRetentionPolicy
	InfoTypeMeasurement
)

// writeMetaInfo emits a "# CONTEXT-..." header line for the txt format; any
// unknown InfoType (e.g. the literal 0 used for free-form headers) writes the
// info string verbatim.
func (t *txtParser) writeMetaInfo(metaWriter io.Writer, infoType InfoType, info string) {
	switch infoType {
	case InfoTypeDatabase:
		fmt.Fprintf(metaWriter, "# CONTEXT-DATABASE: %s\n", info)
	case InfoTypeRetentionPolicy:
		fmt.Fprintf(metaWriter, "# CONTEXT-RETENTION-POLICY: %s\n", info)
	case InfoTypeMeasurement:
		fmt.Fprintf(metaWriter, "# CONTEXT-MEASUREMENT: %s\n", info)
	default:
		fmt.Fprintf(metaWriter, "%s\n", info)
	}
}

func (t *txtParser) writeOutputInfo(outputWriter io.Writer, info string) {
	fmt.Fprint(outputWriter, info)
}

// csvParser renders data as annotated CSV ("csv" export format). It caches the
// field-name order per database/measurement so rows can be emitted column-aligned.
type csvParser struct {
	fieldsName     map[string]map[string][]string // database -> measurement -> []field
	curDatabase    string
	curMeasurement string
}

func newCsvParser() *csvParser {
	return &csvParser{
		fieldsName: make(map[string]map[string][]string),
	}
}

// parse2SeriesKeyWithoutVersion parse encoded index key to csv series key,without version and escape special characters
// encoded index key format: [total len][ms len][ms][tagkey1 len][tagkey1 val]...]
// parse to csv format: mst,tagval1,tagval2...
// parse2SeriesKeyWithoutVersion decodes the index key and appends only the tag
// VALUES (comma-separated, escaped) to dst — the csv layout carries tag keys in
// the header row, not per line. The measurement name and tag keys are skipped.
func (c *csvParser) parse2SeriesKeyWithoutVersion(key []byte, dst []byte, splitWithNull bool, _ *opengemini.Point) ([]byte, error) {
	_, src, err := influx.MeasurementName(key)
	if err != nil {
		return []byte{}, err
	}
	var split [2]byte
	if splitWithNull {
		split[0], split[1] = influx.ByteSplit, influx.ByteSplit
	} else {
		// split[0] is unused in the csv layout; only the pair separator applies.
		split[0], split[1] = '=', ','
	}

	tagsN := encoding.UnmarshalUint16(src)
	src = src[2:]
	var i uint16
	for i = 0; i < tagsN; i++ {
		// Skip the tag key — csv only emits values.
		keyLen := encoding.UnmarshalUint16(src)
		src = src[2:]
		src = src[keyLen:]

		valLen := encoding.UnmarshalUint16(src)
		src = src[2:]
		tagVal := EscapeTagValue(string(src[:valLen]))
		dst = append(dst, tagVal...)
		// Note: a trailing separator is left after the last value; the field
		// columns are appended directly after it.
		dst = append(dst, split[1])
		src = src[valLen:]
	}
	return dst, nil

}

// appendFields appends the field columns (in the cached header order for the
// current database/measurement) followed by the timestamp. Missing fields
// produce an empty column.
func (c *csvParser) appendFields(rec record.Record, buf []byte, _ *opengemini.Point) ([]byte, error) {
	curFieldsName := c.fieldsName[c.curDatabase][c.curMeasurement]
	for _, fieldName := range curFieldsName {
		if fieldName == "time" {
			continue
		}
		k, ok := getFieldNameIndexFromRecord(rec.Schema, fieldName)
		if !ok {
			buf = append(buf, ',')
		} else {
			switch rec.Schema[k].Type {
			case influx.Field_Type_Float:
				buf = strconv.AppendFloat(buf, rec.Column(k).FloatValues()[0], 'g', -1, 64)
			case influx.Field_Type_Int:
				buf = strconv.AppendInt(buf, rec.Column(k).IntegerValues()[0], 10)
			case influx.Field_Type_Boolean:
				buf = strconv.AppendBool(buf, rec.Column(k).BooleanValues()[0])
			case influx.Field_Type_String:
				var str []string
				str = rec.Column(k).StringValues(str)
				buf = append(buf, '"')
				buf = append(buf, EscapeStringFieldValue(str[0])...)
				buf = append(buf, '"')
			default:
				// This shouldn't be possible, but we'll format it anyway.
				buf = append(buf, fmt.Sprintf("%v", rec.Column(k))...)
+ } + if k != rec.Len()-1 { + buf = append(buf, ',') + } + } + } + buf = strconv.AppendInt(buf, rec.Times()[0], 10) + buf = append(buf, '\n') + return buf, nil +} + +func (c *csvParser) writeMstInfoFromTssp(metaWriter io.Writer, outputWriter io.Writer, filePath string, isOrder bool, index *tsi.MergeSetIndex) error { + tsspPathSplits := strings.Split(filePath, string(byte(os.PathSeparator))) + measurementDirWithVersion := tsspPathSplits[len(tsspPathSplits)-2] + measurementName := influx.GetOriginMstName(measurementDirWithVersion) + dbName := tsspPathSplits[len(tsspPathSplits)-7] + lockPath := "" + tsspFile, err := immutable.OpenTSSPFile(filePath, &lockPath, isOrder) + defer util.MustClose(tsspFile) + if err != nil { + return err + } + // search tags + fiTag := immutable.NewFileIterator(tsspFile, immutable.CLog) + itrTag := immutable.NewChunkIterator(fiTag) + itrTag.Next() + sid := itrTag.GetSeriesID() + if sid == 0 { + return fmt.Errorf("series ID is zero") + } + var combineKey []byte + var seriesKeys [][]byte + var isExpectSeries []bool + // Use sid get series key's []byte + if seriesKeys, _, _, err = index.SearchSeriesWithTagArray(sid, seriesKeys, nil, combineKey, isExpectSeries, nil); err != nil { + return err + } + _, src, err := influx.MeasurementName(seriesKeys[0]) + if err != nil { + return err + } + tagsN := encoding.UnmarshalUint16(src) + src = src[2:] + var i uint16 + var tags, fields, tagsType, fieldsType []string + for i = 0; i < tagsN; i++ { + keyLen := encoding.UnmarshalUint16(src) + src = src[2:] + tagKey := EscapeTagKey(string(src[:keyLen])) + tags = append(tags, tagKey) + src = src[keyLen:] + + valLen := encoding.UnmarshalUint16(src) + src = src[2:] + src = src[valLen:] + } + for i := 0; i < len(tags); i++ { + tagsType = append(tagsType, "tag") + } + // search fields + fiField := immutable.NewFileIterator(tsspFile, immutable.CLog) + itrField := immutable.NewChunkIterator(fiField) + itrField.NextChunkMeta() + for _, colMeta := range 
fiField.GetCurtChunkMeta().GetColMeta() { + fields = append(fields, colMeta.Name()) + if colMeta.Name() == "time" { + fieldsType = append(fieldsType, "dateTime:timeStamp") + } else { + fieldsType = append(fieldsType, influx.FieldTypeString(int32(colMeta.Type()))) + } + } + c.fieldsName[dbName] = make(map[string][]string) + c.fieldsName[dbName][measurementName] = fields + c.curDatabase = dbName + c.curMeasurement = measurementName + // write datatype + fmt.Fprintf(metaWriter, "#datatype %s,%s\n", strings.Join(tagsType, ","), strings.Join(fieldsType, ",")) + // write tags and fields name + buf := influx.GetBytesBuffer() + defer influx.PutBytesBuffer(buf) + buf = append(buf, strings.Join(tags, ",")...) + buf = append(buf, ',') + buf = append(buf, strings.Join(fields, ",")...) + buf = append(buf, '\n') + _, err = outputWriter.Write(buf) + if err != nil { + return err + } + return nil +} + +func (c *csvParser) writeMstInfoFromWal(metaWriter io.Writer, outputWriter io.Writer, row influx.Row, currentDatabase string) error { + tagsN := row.Tags + fieldsN := row.Fields + var tags, fields, tagsType, fieldsType []string + for _, tag := range tagsN { + tags = append(tags, tag.Key) + tagsType = append(tagsType, "tag") + } + for _, field := range fieldsN { + fields = append(fields, field.Key) + fieldsType = append(fieldsType, influx.FieldTypeString(field.Type)) + } + fieldsType = append(fieldsType, "dateTime:timeStamp") + measurementWithVersion := row.Name + measurementName := influx.GetOriginMstName(measurementWithVersion) + measurementName = EscapeMstName(measurementName) + c.fieldsName[currentDatabase] = make(map[string][]string) + c.fieldsName[currentDatabase][measurementName] = fields + c.curDatabase = currentDatabase + c.curMeasurement = measurementName + // write datatype + fmt.Fprintf(metaWriter, "#datatype %s,%s\n", strings.Join(tagsType, ","), strings.Join(fieldsType, ",")) + // write tags and fields name + buf := influx.GetBytesBuffer() + defer 
influx.PutBytesBuffer(buf)
	// NOTE(review): the deferred Put above captured the slice header at defer
	// time; if the appends below reallocate, the grown buffer is not the one
	// returned to the pool — harmless but worth confirming.
	buf = append(buf, strings.Join(tags, ",")...)
	buf = append(buf, ',')
	buf = append(buf, strings.Join(fields, ",")...)
	buf = append(buf, ',')
	buf = append(buf, "time"...)
	buf = append(buf, '\n')
	_, err := outputWriter.Write(buf)
	if err != nil {
		return err
	}
	return nil
}

// getRowBuf renders one WAL row as a csv line: tag values in row order, then
// field columns in the cached header order, then the timestamp. Missing fields
// produce an empty column.
func (c *csvParser) getRowBuf(buf []byte, measurementName string, row influx.Row, _ *opengemini.Point) ([]byte, error) {
	tags := row.Tags
	fields := row.Fields
	tm := row.Timestamp

	for _, tag := range tags {
		buf = append(buf, EscapeTagValue(tag.Value)...)
		buf = append(buf, ',')
	}
	curFieldsName := c.fieldsName[c.curDatabase][c.curMeasurement]
	for _, fieldName := range curFieldsName {
		if fieldName == "time" {
			continue
		}
		k, ok := getFieldNameIndexFromRow(fields, fieldName)
		if !ok {
			buf = append(buf, ',')
		} else {
			switch fields[k].Type {
			case influx.Field_Type_Float:
				buf = strconv.AppendFloat(buf, fields[k].NumValue, 'g', -1, 64)
			case influx.Field_Type_Int:
				buf = strconv.AppendInt(buf, int64(fields[k].NumValue), 10)
			case influx.Field_Type_Boolean:
				buf = strconv.AppendBool(buf, fields[k].NumValue == 1)
			case influx.Field_Type_String:
				buf = append(buf, '"')
				buf = append(buf, EscapeStringFieldValue(fields[k].StrValue)...)
				buf = append(buf, '"')
			default:
				// This shouldn't be possible, but we'll format it anyway.
				buf = append(buf, fmt.Sprintf("%v", fields[k])...)
			}
			buf = append(buf, ',')
		}
	}
	buf = strconv.AppendInt(buf, tm, 10)
	buf = append(buf, '\n')
	return buf, nil
}

// writeMetaInfo emits csv "#constant" annotation rows; unknown InfoTypes are ignored.
func (c *csvParser) writeMetaInfo(metaWriter io.Writer, infoType InfoType, info string) {
	switch infoType {
	case InfoTypeDatabase:
		fmt.Fprintf(metaWriter, "#constant database,%s\n", info)
	case InfoTypeRetentionPolicy:
		fmt.Fprintf(metaWriter, "#constant retention_policy,%s\n", info)
	case InfoTypeMeasurement:
		fmt.Fprintf(metaWriter, "#constant measurement,%s\n", info)
	default:
		return
	}
}

// writeOutputInfo is a no-op for csv (free-form info lines would corrupt the table).
func (c *csvParser) writeOutputInfo(_ io.Writer, _ string) {
}

// remoteExporter batches points and writes them to a remote openGemini instance
// ("remote" export format). isExist doubles as the "remote mode enabled" flag.
type remoteExporter struct {
	isExist         bool
	client          opengemini.Client
	database        string
	retentionPolicy string
	points          []*opengemini.Point
}

func newRemoteExporter() *remoteExporter {
	return &remoteExporter{
		isExist: false,
	}
}

// Init parses --remote host:port, builds the client config (optional basic auth,
// optional TLS) and connects. On success it marks the exporter active and pings
// the remote endpoint.
func (re *remoteExporter) Init(clc *ExportConfig) error {
	if len(clc.Remote) == 0 {
		return fmt.Errorf("execute -export cmd, using remote format, --remote is required")
	}
	h, p, err := net.SplitHostPort(clc.Remote)
	if err != nil {
		return err
	}
	port, err := strconv.Atoi(p)
	if err != nil {
		return fmt.Errorf("invalid port number :%s", err)
	}
	var authConfig *opengemini.AuthConfig
	if clc.RemoteUsername != "" {
		authConfig = &opengemini.AuthConfig{
			AuthType: 0,
			Username: clc.RemoteUsername,
			Password: clc.RemotePassword,
		}
	} else {
		authConfig = nil
	}
	var remoteConfig *opengemini.Config
	if clc.RemoteSsl {
		remoteConfig = &opengemini.Config{
			Addresses: []opengemini.Address{
				{
					Host: h,
					Port: port,
				},
			},
			AuthConfig: authConfig,
			// SECURITY NOTE(review): certificate verification is disabled for the
			// remote TLS connection — consider a flag to supply a CA instead.
			TlsConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		}
	} else {
		remoteConfig = &opengemini.Config{
			Addresses: []opengemini.Address{
				{
					Host: h,
					Port: port,
				},
			},
			AuthConfig: authConfig,
		}
	}

	cli, err := opengemini.NewClient(remoteConfig)
	if err != nil {
		return err
	}
	re.isExist = true
	re.client = cli
	if err =
re.client.Ping(0); err != nil {
		return err
	}
	return nil
}

// createDatabase creates dbName on the remote instance.
func (re *remoteExporter) createDatabase(dbName string) error {
	err := re.client.CreateDatabase(dbName)
	if err != nil {
		return fmt.Errorf("error writing command: %s", err)
	}
	return nil
}

// createRetentionPolicy creates rpName (infinite duration, non-default) under dbName.
func (re *remoteExporter) createRetentionPolicy(dbName string, rpName string) error {
	err := re.client.CreateRetentionPolicy(dbName, opengemini.RpConfig{
		Name:     rpName,
		Duration: "0s",
	}, false)
	if err != nil {
		return fmt.Errorf("error writing command: %s", err)
	}
	return nil
}

// writeAllPoints flushes the accumulated batch to the remote database/rp and
// truncates the batch (keeping capacity) for reuse.
func (re *remoteExporter) writeAllPoints() error {
	err := re.client.WriteBatchPointsWithRp(context.Background(), re.database, re.retentionPolicy, re.points)
	if err != nil {
		return err
	}
	re.points = re.points[:0]
	return nil
}

// parseShardDir splits a shard directory name of the form
// "<shardID>_<startTime>_<endTime>_<indexID>" into its numeric parts.
func parseShardDir(shardDirName string) (uint64, int64, int64, uint64, error) {
	shardDir := strings.Split(shardDirName, dirNameSeparator)
	if len(shardDir) != 4 {
		return 0, 0, 0, 0, errno.NewError(errno.InvalidDataDir)
	}
	shardID, err := strconv.ParseUint(shardDir[0], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, errno.NewError(errno.InvalidDataDir)
	}
	dirStartTime, err := strconv.ParseInt(shardDir[1], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, errno.NewError(errno.InvalidDataDir)
	}
	dirEndTime, err := strconv.ParseInt(shardDir[2], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, errno.NewError(errno.InvalidDataDir)
	}
	indexID, err := strconv.ParseUint(shardDir[3], 10, 64)
	if err != nil {
		return 0, 0, 0, 0, errno.NewError(errno.InvalidDataDir)
	}
	return shardID, dirStartTime, dirEndTime, indexID, nil
}

// parseIndexDir extracts the index ID from an index directory name of the form
// "<indexID>_<startTime>_<endTime>".
func parseIndexDir(indexDirName string) (uint64, error) {
	indexDir := strings.Split(indexDirName, dirNameSeparator)
	if len(indexDir) != 3 {
		return 0, errno.NewError(errno.InvalidDataDir)
	}

	indexID, err := strconv.ParseUint(indexDir[0], 10, 64)
	if err != nil {
		return 0, errno.NewError(errno.InvalidDataDir)
	}
	return indexID,
nil +} + +var escapeFieldKeyReplacer = strings.NewReplacer(`,`, `\,`, `=`, `\=`, ` `, `\ `) +var escapeTagKeyReplacer = strings.NewReplacer(`,`, `\,`, `=`, `\=`, ` `, `\ `) +var escapeTagValueReplacer = strings.NewReplacer(`,`, `\,`, `=`, `\=`, ` `, `\ `) +var escapeMstNameReplacer = strings.NewReplacer(`=`, `\=`, ` `, `\ `) +var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +// EscapeFieldKey returns a copy of in with any comma or equal sign or space +// with escaped values. +func EscapeFieldKey(in string) string { + return escapeFieldKeyReplacer.Replace(in) +} + +// EscapeStringFieldValue returns a copy of in with any double quotes or +// backslashes with escaped values. +func EscapeStringFieldValue(in string) string { + return escapeStringFieldReplacer.Replace(in) +} + +// EscapeTagKey returns a copy of in with any "comma" or "equal sign" or "space" +// with escaped values. +func EscapeTagKey(in string) string { + return escapeTagKeyReplacer.Replace(in) +} + +// EscapeTagValue returns a copy of in with any "comma" or "equal sign" or "space" +// with escaped values +func EscapeTagValue(in string) string { + return escapeTagValueReplacer.Replace(in) +} + +// EscapeMstName returns a copy of in with any "equal sign" or "space" +// with escaped values. 
+func EscapeMstName(in string) string { + return escapeMstNameReplacer.Replace(in) +} + +// getFieldNameIndexFromRecord returns the index of a field in a slice +func getFieldNameIndexFromRecord(slice []record.Field, str string) (int, bool) { + for i, v := range slice { + if v.Name == str { + return i, true + } + } + return 0, false +} + +func getFieldNameIndexFromRow(slice []influx.Field, str string) (int, bool) { + for i, v := range slice { + if v.Key == str { + return i, true + } + } + return 0, false +} + +func convertTime(input string) (int64, error) { + t, err := time.Parse(time.RFC3339, input) + if err == nil { + return t.UnixNano(), nil + } + + timestamp, err := strconv.ParseInt(input, 10, 64) + if err == nil { + return timestamp, nil + } + + return 0, err +} diff --git a/cmd/subcmd/import.go b/cmd/subcmd/import.go index 2eccdde..6685890 100644 --- a/cmd/subcmd/import.go +++ b/cmd/subcmd/import.go @@ -55,6 +55,7 @@ const ( importTokenTags = "# CONTEXT-TAGS:" importTokenFields = "# CONTEXT-FIELDS:" importTokenTimeField = "# CONTEXT-TIME:" + timeFilterToken = "# openGemini EXPORT:" ) var ( @@ -321,7 +322,7 @@ func (fsm *ImportFileFSM) clearBuffer() FSMCall { } func (fsm *ImportFileFSM) processLineProtocol(data string) (FSMCall, error) { - if strings.HasPrefix(data, importTokenDDL) { + if strings.HasPrefix(data, importTokenDDL) || strings.HasPrefix(data, timeFilterToken) { fsm.state = importStateDDL return FSMCallEmpty, nil } @@ -511,7 +512,7 @@ func (c *ImportCommand) excuteByLPBuffer(ctx context.Context) error { } }() var lines = strings.Join(c.fsm.batchLPBuffer[:min(c.cfg.BatchSize, len(c.fsm.batchLPBuffer))], "\n") - fmt.Println("---", lines) + if c.cfg.ColumnWrite { var builderName = c.fsm.database + "." 
+ c.fsm.retentionPolicy builder, ok := builderEntities[builderName] diff --git a/cmd/ts-cli/cli.go b/cmd/ts-cli/cli.go index 0b6931a..a9464ff 100644 --- a/cmd/ts-cli/cli.go +++ b/cmd/ts-cli/cli.go @@ -124,10 +124,55 @@ func (m *Command) importCommand() { m.cmd.AddCommand(cmd) } +func (m *Command) exportCommand() { + var config = subcmd.ExportConfig{CommandLineConfig: new(core.CommandLineConfig)} + cmd := &cobra.Command{ + Use: "export", + Short: "(EXPERIMENTAL) Export data from openGemini", + Long: `(EXPERIMENTAL) Export data from openGemini to file or remote`, + Example: ` + $ ts-cli export --format txt --out /tmp/openGemini/export/export.txt --data /tmp/openGemini/data --wal /tmp/openGemini/data + --dbfilter NOAA_water_database + + $ ts-cli export --format csv --out /tmp/openGemini/export/export.csv --data /tmp/openGemini/data --wal /tmp/openGemini/data + --dbfilter NOAA_water_database --mstfilter h2o_pH --timefilter "2019-08-25T09:18:00Z~2019-08-26T07:48:00Z" + + $ ts-cli export --format remote --remote ${host}:8086 --data /tmp/openGemini/data --wal /tmp/openGemini/data + --dbfilter NOAA_water_database --mstfilter h2o_feet`, + CompletionOptions: cobra.CompletionOptions{ + DisableDefaultCmd: true, + DisableDescriptions: true, + DisableNoDescFlag: true, + }, + RunE: func(cmd *cobra.Command, args []string) error { + exportCmd := new(subcmd.ExportCommand) + return exportCmd.Run(&config) + }, + } + + cmd.Flags().StringVar(&config.Format, "format", "txt", "Export data format, support csv, txt, remote.") + cmd.Flags().StringVar(&config.Out, "out", "", "Destination file to export to.") + cmd.Flags().StringVar(&config.DataDir, "data", "", "Data storage path to export.") + cmd.Flags().StringVar(&config.WalDir, "wal", "", "WAL storage path to export.") + cmd.Flags().StringVar(&config.Remote, "remote", "", "Remote address to export data.") + cmd.Flags().StringVar(&config.DBFilter, "dbfilter", "", "Database to export") + cmd.Flags().StringVar(&config.RetentionFilter, 
"retentionfilter", "", "Optional. Retention policy to export.") + cmd.Flags().StringVar(&config.MeasurementFilter, "mstfilter", "", "Optional.Measurement to export.") + cmd.Flags().StringVar(&config.TimeFilter, "timefilter", "", "Optional.Export time range, support 'start~end'") + cmd.Flags().BoolVar(&config.Compress, "compress", false, "Optional. Compress the export output.") + cmd.Flags().StringVarP(&config.RemoteUsername, "remoteusername", "u", "", "Remote export Optional.Username to connect to remote openGemini.") + cmd.Flags().StringVarP(&config.RemotePassword, "remotepassword", "p", "", "Remote export Optional.Password to connect to remote openGemini.") + cmd.Flags().BoolVar(&config.RemoteSsl, "remotessl", false, "Remote export Optional.Use https for connecting to remote openGemini.") + cmd.Flags().BoolVar(&config.Resume, "resume", false, "Resume the export progress from the last point.") + + m.cmd.AddCommand(cmd) +} + func (m *Command) load() { m.rootCommand() m.versionCommand() m.importCommand() + m.exportCommand() } func (m *Command) Execute() error { diff --git a/go.mod b/go.mod index dedf7f6..5b655fc 100644 --- a/go.mod +++ b/go.mod @@ -3,47 +3,166 @@ module github.com/openGemini/openGemini-cli go 1.24 require ( + github.com/VictoriaMetrics/VictoriaMetrics v1.102.1 + github.com/golang/snappy v1.0.0 github.com/mattn/go-runewidth v0.0.16 github.com/olekukonko/tablewriter v1.0.9 github.com/openGemini/go-prompt v0.0.0-20250603013942-a2bf30109e15 + github.com/openGemini/openGemini v1.4.3 github.com/openGemini/opengemini-client-go v0.9.1 github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.11.1 github.com/valyala/fastjson v1.6.4 + github.com/vbauerster/mpb/v7 v7.3.2 golang.org/x/term v0.34.0 google.golang.org/grpc v1.74.2 ) require ( + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect + github.com/RoaringBitmap/roaring v1.9.4 // indirect + github.com/VictoriaMetrics/fastcache 
v1.12.2 // indirect + github.com/VictoriaMetrics/metrics v1.35.1 // indirect + github.com/VividCortex/ewma v1.2.0 // indirect + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect + github.com/andybalholm/brotli v1.1.0 // indirect + github.com/apache/arrow/go/v13 v13.0.0-20230630125530-5a06b2ec2a8e // indirect + github.com/apache/thrift v0.16.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.12.0 // indirect + github.com/bits-and-blooms/bloom/v3 v3.5.0 // indirect + github.com/bytedance/sonic v1.13.3 // indirect + github.com/bytedance/sonic/loader v0.2.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/golang/snappy v1.0.0 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + 
github.com/google/btree v1.0.1 // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack v0.5.3 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-sockaddr v1.0.0 // indirect + github.com/hashicorp/go-uuid v1.0.1 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/memberlist v0.5.0 // indirect + github.com/hashicorp/raft v1.7.0 // indirect + github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/influxdata/influxdb v1.11.5 // indirect + github.com/influxdata/influxql v1.2.0 // indirect + github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 // indirect + github.com/jsternberg/zap-logfmt v1.2.0 // indirect + github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/libgox/gocollections v0.1.1 // indirect github.com/libgox/unicodex v0.1.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-tty v0.0.7 // indirect + github.com/miekg/dns v1.1.59 // indirect + github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect + github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect + 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mschoch/smat v0.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/errors v1.1.0 // indirect github.com/olekukonko/ll v0.0.9 // indirect + github.com/philhofer/fwd v1.1.2 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pingcap/errors v0.11.4 // indirect + github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pkg/term v1.2.0-beta.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prometheus v0.53.1 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 // indirect + github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/spf13/pflag v1.0.6 // indirect + github.com/tinylib/msgp v1.1.8 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/valyala/fastrand v1.1.0 // indirect + github.com/valyala/gozstd v1.21.1 // indirect + 
github.com/valyala/histogram v1.2.0 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + go.etcd.io/etcd/raft/v3 v3.5.10 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect + golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.40.0 // indirect + golang.org/x/sync v0.14.0 // indirect golang.org/x/sys v0.35.0 // indirect golang.org/x/text v0.25.0 // indirect + golang.org/x/time v0.6.0 // indirect + golang.org/x/tools v0.24.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.55.3 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/sqlite v1.34.5 // indirect +) + +replace ( + github.com/VictoriaMetrics/VictoriaMetrics => ./lib/VictoriaMetrics + github.com/influxdata/influxdb => ./lib/influxdb ) diff --git a/go.sum b/go.sum index 998db99..ed917fa 100644 --- a/go.sum +++ b/go.sum @@ -1,46 +1,275 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod 
h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ= +github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/VictoriaMetrics/metrics v1.35.1 h1:o84wtBKQbzLdDy14XeskkCZih6anG+veZ1SwJHFGwrU= +github.com/VictoriaMetrics/metrics v1.35.1/go.mod h1:r7hveu6xMdUACXvB8TYdAj8WEsKzWB0EkpJN+RDtOf8= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= 
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/agiledragon/gomonkey/v2 v2.11.0 h1:5oxSgA+tC1xuGsrIorR+sYiziYltmJyEZ9qA25b6l5U= +github.com/agiledragon/gomonkey/v2 v2.11.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/apache/arrow/go/v13 v13.0.0-20230630125530-5a06b2ec2a8e h1:4qZRsjdW3DLHIzZ+aFW8iT3/CxlPQZPiP2EkLioceqQ= +github.com/apache/arrow/go/v13 v13.0.0-20230630125530-5a06b2ec2a8e/go.mod h1:W69eByFNO0ZR30q1/7Sr9d83zcVZmF2MiP3fFYAWJOc= +github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= +github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= +github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bloom/v3 v3.5.0 h1:AKDvi1V3xJCmSR6QhcBfHbCN4Vf8FfxeWkMNQfmAGhY= +github.com/bits-and-blooms/bloom/v3 v3.5.0/go.mod h1:Y8vrn7nk1tPIlmLtW2ZPV+W7StdVMor6bC1xgpjMZFs= +github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0= +github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= +github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= 
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod 
h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= +github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 
h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= +github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= +github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= 
+github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ= +github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxql v1.2.0 h1:EkgnTLCmaXeZKEjA6G+B7a/HH+Gl7GVLO0k2AoZbJMU= +github.com/influxdata/influxql v1.2.0/go.mod h1:nISAma2m+CbSt/y3GrehnHKWJRXdTTMZn+iSGroMmJw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 h1:9Nu54bhS/H/Kgo2/7xNSUuC5G28VR8ljfrLKU2G4IjU= +github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12/go.mod h1:TBzl5BIHNXfS9+C35ZyJaklL7mLDbgUkcgXzSLa8Tk0= +github.com/jsternberg/zap-logfmt v1.2.0 h1:1v+PK4/B48cy8cfQbxL4FmmNZrjnIMr2BsnyEmXqv2o= +github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/libgox/gocollections v0.1.1 h1:u102d/xMBF+8Cf/5UuFpcM/iP0NgvWlOR9tVo14Fs6s= github.com/libgox/gocollections v0.1.1/go.mod h1:Y4udpR8lStv1f67hVWbMCrcTyTvf98bFFsu/ZXvAvZ0= github.com/libgox/unicodex v0.1.0 h1:l7kBlt5yO/PLX4QmaOV6GLO7W2jFUECQsyxGWQPhwq8= 
github.com/libgox/unicodex v0.1.0/go.mod h1:RaB9wNp/oOS0Ew5+Wml7WePjztZ3njXiNid08KOmgjs= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-tty v0.0.7 h1:KJ486B6qI8+wBO7kQxYgmmEFDaFEE96JMBQ7h400N8Q= github.com/mattn/go-tty v0.0.7/go.mod h1:f2i5ZOvXBU/tCABmLmOfzLz9azMo5wdAaElRNnJKr+k= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= +github.com/minio/asm2plan9s 
v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod 
h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM= github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= github.com/olekukonko/ll v0.0.9 h1:Y+1YqDfVkqMWuEQMclsF9HUR5+a82+dxJuL1HHSRpxI= @@ -49,38 +278,146 @@ github.com/olekukonko/tablewriter v1.0.9 h1:XGwRsYLC2bY7bNd93Dk51bcPZksWZmLYuaTH github.com/olekukonko/tablewriter v1.0.9/go.mod h1:5c+EBPeSqvXnLLgkm9isDdzR3wjfBkHR9Nhfp3NWrzo= github.com/openGemini/go-prompt v0.0.0-20250603013942-a2bf30109e15 h1:VoQIZoxRXabyT1WdbypHym6cTSTiixZP2DhP9qDHNBc= github.com/openGemini/go-prompt v0.0.0-20250603013942-a2bf30109e15/go.mod h1:d77nLK1BQoE1hIbtC5lQiIOYjClRbxqDAfz6rd/olYo= +github.com/openGemini/openGemini v1.4.3 h1:ZNmVA3uYt/KwbjaeafLGWpCPJnTsM3AsdRz/L3eIhI0= +github.com/openGemini/openGemini v1.4.3/go.mod h1:ECfKtcWaJkRyTmACSF7VNFDfeRD5T677/oUSWgwE4ig= github.com/openGemini/opengemini-client-go v0.9.1 h1:fsgtgiw0LCMTRiyi7/6IurvzHhoTU+mDWlJNNJ1V+tk= github.com/openGemini/opengemini-client-go v0.9.1/go.mod h1:u8UW2jfh6sp7CQGWuyzsJuqId+4u6hQyiDi280GkW8c= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgWM9fSBIvaxsJHuGP0uM74HXtv3MyyGQ= +github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw= github.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod 
h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.53.1 h1:B0xu4VuVTKYrIuBMn/4YSUoIPYxs956qsOfcS4rqCuA= +github.com/prometheus/prometheus v0.53.1/go.mod h1:RZDkzs+ShMBDkAPQkLEaLBXpjmDcjhNxU2drUVPgKUU= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4= +github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8= +github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d h1:Q+gqLBOPkFGHyCJxXMRqtUgUbTjI8/Ze8vu8GGyNFwo= +github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff 
v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= +github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= 
+github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= +github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= +github.com/valyala/gozstd v1.21.1 h1:TQFZVTk5zo7iJcX3o4XYBJujPdO31LFb4fVImwK873A= +github.com/valyala/gozstd v1.21.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ= +github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= +github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= +github.com/vbauerster/mpb/v7 v7.3.2 h1:tCuxMy8G9cLdjb61b6wO7I1vRT/LyMEzRbr3xCC0JPU= +github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod 
h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= @@ -93,24 +430,180 @@ go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFw go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670 h1:18EFjUmQOcUvxNYSkA6jO9VAiXCnxFY6NyDX0bHDmkU= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 
v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= +modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y= +modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod 
h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= +modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g= +modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= diff --git a/lib/VictoriaMetrics/LICENSE b/lib/VictoriaMetrics/LICENSE new file mode 100644 index 0000000..84f8ad8 --- /dev/null +++ b/lib/VictoriaMetrics/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2019-2021 VictoriaMetrics, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lib/VictoriaMetrics/go.mod b/lib/VictoriaMetrics/go.mod new file mode 100644 index 0000000..6f9a235 --- /dev/null +++ b/lib/VictoriaMetrics/go.mod @@ -0,0 +1,21 @@ +module github.com/VictoriaMetrics/VictoriaMetrics + +go 1.22 + +require ( + github.com/VictoriaMetrics/fastcache v1.12.2 + github.com/VictoriaMetrics/metrics v1.24.0 + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb + github.com/klauspost/compress v1.17.4 + github.com/valyala/gozstd v1.20.1 + golang.org/x/sys v0.25.0 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/valyala/fastrand v1.1.0 // indirect + github.com/valyala/histogram v1.2.0 // indirect + + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect +) diff --git a/lib/VictoriaMetrics/lib/bytesutil/ast_string_matcher_test.go b/lib/VictoriaMetrics/lib/bytesutil/ast_string_matcher_test.go new file mode 100644 index 0000000..3643f75 --- /dev/null +++ b/lib/VictoriaMetrics/lib/bytesutil/ast_string_matcher_test.go @@ -0,0 +1,51 @@ +package bytesutil + +import ( + "strings" + "sync/atomic" + "testing" +) + +func TestFastStringMatcher(t *testing.T) { + fsm := NewFastStringMatcher(func(s string) bool { + return strings.HasPrefix(s, "foo") + }) + f := func(s string, resultExpected bool) { + t.Helper() + for i := 0; i < 10; i++ { + result := 
fsm.Match(s) + if result != resultExpected { + t.Fatalf("unexpected result for Match(%q) at iteration %d; got %v; want %v", s, i, result, resultExpected) + } + } + } + f("", false) + f("foo", true) + f("a_b-C", false) + f("foobar", true) +} + +func TestNeedCleanup(t *testing.T) { + f := func(lastCleanupTime, currentTime uint64, resultExpected bool) { + t.Helper() + var lct atomic.Uint64 + lct.Store(lastCleanupTime) + result := needCleanup(&lct, currentTime) + if result != resultExpected { + t.Fatalf("unexpected result for needCleanup(%d, %d); got %v; want %v", lastCleanupTime, currentTime, result, resultExpected) + } + if result { + if n := lct.Load(); n != currentTime { + t.Fatalf("unexpected value for lct; got %d; want currentTime=%d", n, currentTime) + } + } else { + if n := lct.Load(); n != lastCleanupTime { + t.Fatalf("unexpected value for lct; got %d; want lastCleanupTime=%d", n, lastCleanupTime) + } + } + } + f(0, 0, false) + f(0, 61, false) + f(0, 62, true) + f(10, 100, true) +} diff --git a/lib/VictoriaMetrics/lib/bytesutil/bytebuffer.go b/lib/VictoriaMetrics/lib/bytesutil/bytebuffer.go new file mode 100644 index 0000000..4ec50f9 --- /dev/null +++ b/lib/VictoriaMetrics/lib/bytesutil/bytebuffer.go @@ -0,0 +1,118 @@ +package bytesutil + +import ( + "io" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// ByteBuffer implements a simple byte buffer. +type ByteBuffer struct { + // B is the underlying byte slice. + B []byte +} + +// Reset resets bb. +func (bb *ByteBuffer) Reset() { + bb.B = bb.B[:0] +} + +// Write appends p to bb. +func (bb *ByteBuffer) Write(p []byte) (int, error) { + bb.B = append(bb.B, p...) + return len(p), nil +} + +// MustReadAt reads len(p) bytes starting from the given offset. 
+func (bb *ByteBuffer) MustReadAt(p []byte, offset int64) { + if offset < 0 { + logger.Panicf("BUG: cannot read at negative offset=%d", offset) + } + if offset > int64(len(bb.B)) { + logger.Panicf("BUG: too big offset=%d; cannot exceed len(bb.B)=%d", offset, len(bb.B)) + } + if n := copy(p, bb.B[offset:]); n < len(p) { + logger.Panicf("BUG: EOF occurred after reading %d bytes out of %d bytes at offset %d", n, len(p), offset) + } +} + +// ReadFrom reads all the data from r to bb until EOF. +func (bb *ByteBuffer) ReadFrom(r io.Reader) (int64, error) { + b := bb.B + bLen := len(b) + b = Resize(b, 4*1024) + b = b[:cap(b)] + offset := bLen + for { + if free := len(b) - offset; free < offset { + n := len(b) + b = append(b, make([]byte, n)...) + } + n, err := r.Read(b[offset:]) + offset += n + if err != nil { + bb.B = b[:offset] + if err == io.EOF { + err = nil + } + return int64(offset - bLen), err + } + } +} + +// MustClose closes bb for subsequent re-use. +func (bb *ByteBuffer) MustClose() { + // Do nothing, since certain code rely on bb reading after MustClose call. +} + +// NewReader returns new reader for the given bb. +func (bb *ByteBuffer) NewReader() *reader { + return &reader{ + bb: bb, + } +} + +type reader struct { + bb *ByteBuffer + + // readOffset is the offset in bb.B for read. + readOffset int +} + +// Read reads up to len(p) bytes from bb. +func (r *reader) Read(p []byte) (int, error) { + var err error + n := copy(p, r.bb.B[r.readOffset:]) + if n < len(p) { + err = io.EOF + } + r.readOffset += n + return n, err +} + +// MustClose closes bb for subsequent re-use. +func (r *reader) MustClose() { + r.bb = nil + r.readOffset = 0 +} + +// ByteBufferPool is a pool of ByteBuffers. +type ByteBufferPool struct { + p sync.Pool +} + +// Get obtains a ByteBuffer from bbp. +func (bbp *ByteBufferPool) Get() *ByteBuffer { + bbv := bbp.p.Get() + if bbv == nil { + return &ByteBuffer{} + } + return bbv.(*ByteBuffer) +} + +// Put puts bb into bbp. 
+func (bbp *ByteBufferPool) Put(bb *ByteBuffer) { + bb.Reset() + bbp.p.Put(bb) +} diff --git a/lib/VictoriaMetrics/lib/bytesutil/bytesutil.go b/lib/VictoriaMetrics/lib/bytesutil/bytesutil.go new file mode 100644 index 0000000..2c6c635 --- /dev/null +++ b/lib/VictoriaMetrics/lib/bytesutil/bytesutil.go @@ -0,0 +1,30 @@ +package bytesutil + +import ( + "unsafe" +) + +// Resize resizes b to n bytes and returns b (which may be newly allocated). +func Resize(b []byte, n int) []byte { + if nn := n - cap(b); nn > 0 { + b = append(b[:cap(b)], make([]byte, nn)...) + } + return b[:n] +} + +// ToUnsafeString converts b to string without memory allocations. +// +// The returned string is valid only until b is reachable and unmodified. +func ToUnsafeString(b []byte) string { + if len(b) == 0 { + return "" + } + return unsafe.String(&b[0], len(b)) +} + +// ToUnsafeBytes converts s to a byte slice without memory allocations. +// +// The returned byte slice is valid only until s is reachable and unmodified. +func ToUnsafeBytes(s string) (b []byte) { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} diff --git a/lib/VictoriaMetrics/lib/bytesutil/fast_string_matcher.go b/lib/VictoriaMetrics/lib/bytesutil/fast_string_matcher.go new file mode 100644 index 0000000..31159bd --- /dev/null +++ b/lib/VictoriaMetrics/lib/bytesutil/fast_string_matcher.go @@ -0,0 +1,94 @@ +package bytesutil + +import ( + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" +) + +var cacheExpireDuration = 60 * time.Minute + +// FastStringMatcher implements fast matcher for strings. +// +// It caches string match results and returns them back on the next calls +// without calling the matchFunc, which may be expensive. 
+type FastStringMatcher struct { + lastCleanupTime atomic.Uint64 + + m sync.Map + + matchFunc func(s string) bool +} + +type fsmEntry struct { + lastAccessTime atomic.Uint64 + ok bool +} + +// NewFastStringMatcher creates new matcher, which applies matchFunc to strings passed to Match() +// +// matchFunc must return the same result for the same input. +func NewFastStringMatcher(matchFunc func(s string) bool) *FastStringMatcher { + fsm := &FastStringMatcher{ + matchFunc: matchFunc, + } + fsm.lastCleanupTime.Store(fasttime.UnixTimestamp()) + return fsm +} + +// Match applies matchFunc to s and returns the result. +func (fsm *FastStringMatcher) Match(s string) bool { + ct := fasttime.UnixTimestamp() + v, ok := fsm.m.Load(s) + if ok { + // Fast path - s match result is found in the cache. + e := v.(*fsmEntry) + if e.lastAccessTime.Load()+10 < ct { + // Reduce the frequency of e.lastAccessTime update to once per 10 seconds + // in order to improve the fast path speed on systems with many CPU cores. + e.lastAccessTime.Store(ct) + } + return e.ok + } + // Slow path - run matchFunc for s and store the result in the cache. + b := fsm.matchFunc(s) + e := &fsmEntry{ + ok: b, + } + e.lastAccessTime.Store(ct) + // Make a copy of s in order to limit memory usage to the s length, + // since the s may point to bigger string. + // This also protects from the case when s contains unsafe string, which points to a temporary byte slice. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3227 + s = strings.Clone(s) + fsm.m.Store(s, e) + + if needCleanup(&fsm.lastCleanupTime, ct) { + // Perform a global cleanup for fsm.m by removing items, which weren't accessed during the last 5 minutes. 
+ m := &fsm.m + deadline := ct - uint64(cacheExpireDuration.Seconds()) + m.Range(func(k, v any) bool { + e := v.(*fsmEntry) + if e.lastAccessTime.Load() < deadline { + m.Delete(k) + } + return true + }) + } + + return b +} + +func needCleanup(lastCleanupTime *atomic.Uint64, currentTime uint64) bool { + lct := lastCleanupTime.Load() + if lct+61 >= currentTime { + return false + } + // Atomically compare and swap the current time with the lastCleanupTime + // in order to guarantee that only a single goroutine out of multiple + // concurrently executing goroutines gets true from the call. + return lastCleanupTime.CompareAndSwap(lct, currentTime) +} diff --git a/lib/VictoriaMetrics/lib/bytesutil/fast_string_matcher_timing_test.go b/lib/VictoriaMetrics/lib/bytesutil/fast_string_matcher_timing_test.go new file mode 100644 index 0000000..b8af8d1 --- /dev/null +++ b/lib/VictoriaMetrics/lib/bytesutil/fast_string_matcher_timing_test.go @@ -0,0 +1,35 @@ +package bytesutil + +import ( + "strings" + "sync/atomic" + "testing" +) + +func BenchmarkFastStringMatcher(b *testing.B) { + for _, s := range []string{"", "foo", "foo-bar-baz", "http_requests_total"} { + b.Run(s, func(b *testing.B) { + benchmarkFastStringMatcher(b, s) + }) + } +} + +func benchmarkFastStringMatcher(b *testing.B, s string) { + fsm := NewFastStringMatcher(func(s string) bool { + return strings.HasPrefix(s, "foo") + }) + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + n := uint64(0) + for pb.Next() { + v := fsm.Match(s) + if v { + n++ + } + } + GlobalSink.Add(n) + }) +} + +var GlobalSink atomic.Uint64 diff --git a/lib/VictoriaMetrics/lib/cgroup/cpu.go b/lib/VictoriaMetrics/lib/cgroup/cpu.go new file mode 100644 index 0000000..3d98c63 --- /dev/null +++ b/lib/VictoriaMetrics/lib/cgroup/cpu.go @@ -0,0 +1,143 @@ +package cgroup + +import ( + "fmt" + "os" + "runtime" + "strconv" + "strings" +) + +// AvailableCPUs returns the number of available CPU cores for the app. 
+func AvailableCPUs() int { + return runtime.GOMAXPROCS(-1) +} + +func init() { + updateGOMAXPROCSToCPUQuota() +} + +// updateGOMAXPROCSToCPUQuota updates GOMAXPROCS to cgroup CPU quota if GOMAXPROCS isn't set in environment var. +func updateGOMAXPROCSToCPUQuota() { + if v := os.Getenv("GOMAXPROCS"); v != "" { + // Do not override explicitly set GOMAXPROCS. + return + } + q := getCPUQuota() + if q <= 0 { + // Do not change GOMAXPROCS + return + } + gomaxprocs := int(q + 0.5) + numCPU := runtime.NumCPU() + if gomaxprocs > numCPU { + // There is no sense in setting more GOMAXPROCS than the number of available CPU cores. + return + } + if gomaxprocs <= 0 { + gomaxprocs = 1 + } + runtime.GOMAXPROCS(gomaxprocs) +} + +func getCPUQuota() float64 { + cpuQuota, err := getCPUQuotaGeneric() + if err != nil { + return 0 + } + if cpuQuota <= 0 { + // The quota isn't set. This may be the case in multilevel containers. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728 + return getOnlineCPUCount() + } + return cpuQuota +} + +func getCPUQuotaGeneric() (float64, error) { + quotaUS, err := getCPUStat("cpu.cfs_quota_us") + if err == nil { + periodUS, err := getCPUStat("cpu.cfs_period_us") + if err == nil { + return float64(quotaUS) / float64(periodUS), nil + } + } + return getCPUQuotaV2("/sys/fs/cgroup", "/proc/self/cgroup") +} + +func getCPUStat(statName string) (int64, error) { + return getStatGeneric(statName, "/sys/fs/cgroup/cpu", "/proc/self/cgroup", "cpu,") +} + +func getOnlineCPUCount() float64 { + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728 + data, err := os.ReadFile("/sys/devices/system/cpu/online") + if err != nil { + return -1 + } + n := float64(countCPUs(string(data))) + if n <= 0 { + return -1 + } + return n +} + +func getCPUQuotaV2(sysPrefix, cgroupPath string) (float64, error) { + data, err := getFileContents("cpu.max", sysPrefix, cgroupPath, "") + if err != nil { + return 0, err 
+ } + data = strings.TrimSpace(data) + n, err := parseCPUMax(data) + if err != nil { + return 0, fmt.Errorf("cannot parse cpu.max file contents: %w", err) + } + return n, nil +} + +// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#cpu +func parseCPUMax(data string) (float64, error) { + bounds := strings.Split(data, " ") + if len(bounds) != 2 { + return 0, fmt.Errorf("unexpected line format: want 'quota period'; got: %s", data) + } + if bounds[0] == "max" { + return -1, nil + } + quota, err := strconv.ParseUint(bounds[0], 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse quota: %w", err) + } + period, err := strconv.ParseUint(bounds[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse period: %w", err) + } + return float64(quota) / float64(period), nil +} + +func countCPUs(data string) int { + data = strings.TrimSpace(data) + n := 0 + for _, s := range strings.Split(data, ",") { + n++ + if !strings.Contains(s, "-") { + if _, err := strconv.Atoi(s); err != nil { + return -1 + } + continue + } + bounds := strings.Split(s, "-") + if len(bounds) != 2 { + return -1 + } + start, err := strconv.Atoi(bounds[0]) + if err != nil { + return -1 + } + end, err := strconv.Atoi(bounds[1]) + if err != nil { + return -1 + } + n += end - start + } + return n +} diff --git a/lib/VictoriaMetrics/lib/cgroup/mem.go b/lib/VictoriaMetrics/lib/cgroup/mem.go new file mode 100644 index 0000000..2fa34a1 --- /dev/null +++ b/lib/VictoriaMetrics/lib/cgroup/mem.go @@ -0,0 +1,86 @@ +package cgroup + +import ( + "os" + "runtime/debug" + "strconv" +) + +// GetGOGC returns GOGC value for the currently running process. 
+// +// See https://golang.org/pkg/runtime/#hdr-Environment_Variables for more details about GOGC +func GetGOGC() int { + return gogc +} + +func init() { + initGOGC() +} + +func initGOGC() { + if v := os.Getenv("GOGC"); v != "" { + n, err := strconv.Atoi(v) + if err != nil { + n = 100 + } + gogc = n + } else { + // Set GOGC to 50% by default if it isn't set yet. + // This should reduce memory usage for typical workloads for VictoriaMetrics components. + gogc = 50 + debug.SetGCPercent(gogc) + } +} + +var gogc int + +// GetMemoryLimit returns cgroup memory limit +func GetMemoryLimit() int64 { + // Try determining the amount of memory inside docker container. + // See https://stackoverflow.com/questions/42187085/check-mem-limit-within-a-docker-container + // + // Read memory limit according to https://unix.stackexchange.com/questions/242718/how-to-find-out-how-much-memory-lxc-container-is-allowed-to-consume + // This should properly determine the limit inside lxc container. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/84 + n, err := getMemStat("memory.limit_in_bytes") + if err == nil { + return n + } + n, err = getMemStatV2("memory.max") + if err != nil { + return 0 + } + return n +} + +func getMemStatV2(statName string) (int64, error) { + // See https: //www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#memory-interface-files + return getStatGeneric(statName, "/sys/fs/cgroup", "/proc/self/cgroup", "") +} + +func getMemStat(statName string) (int64, error) { + return getStatGeneric(statName, "/sys/fs/cgroup/memory", "/proc/self/cgroup", "memory") +} + +// GetHierarchicalMemoryLimit returns hierarchical memory limit +// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt +func GetHierarchicalMemoryLimit() int64 { + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/699 + n, err := getHierarchicalMemoryLimit("/sys/fs/cgroup/memory", "/proc/self/cgroup") + if err != nil { + return 0 + } + return n +} + +func 
getHierarchicalMemoryLimit(sysfsPrefix, cgroupPath string) (int64, error) { + data, err := getFileContents("memory.stat", sysfsPrefix, cgroupPath, "memory") + if err != nil { + return 0, err + } + memStat, err := grepFirstMatch(data, "hierarchical_memory_limit", 1, " ") + if err != nil { + return 0, err + } + return strconv.ParseInt(memStat, 10, 64) +} diff --git a/lib/VictoriaMetrics/lib/cgroup/util.go b/lib/VictoriaMetrics/lib/cgroup/util.go new file mode 100644 index 0000000..aa1c80a --- /dev/null +++ b/lib/VictoriaMetrics/lib/cgroup/util.go @@ -0,0 +1,59 @@ +package cgroup + +import ( + "fmt" + "os" + "path" + "strconv" + "strings" +) + +func getStatGeneric(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (int64, error) { + data, err := getFileContents(statName, sysfsPrefix, cgroupPath, cgroupGrepLine) + if err != nil { + return 0, err + } + data = strings.TrimSpace(data) + n, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse %q: %w", cgroupPath, err) + } + return n, nil +} + +func getFileContents(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (string, error) { + filepath := path.Join(sysfsPrefix, statName) + data, err := os.ReadFile(filepath) + if err == nil { + return string(data), nil + } + cgroupData, err := os.ReadFile(cgroupPath) + if err != nil { + return "", err + } + subPath, err := grepFirstMatch(string(cgroupData), cgroupGrepLine, 2, ":") + if err != nil { + return "", fmt.Errorf("cannot find cgroup path for %q in %q: %w", cgroupGrepLine, cgroupPath, err) + } + filepath = path.Join(sysfsPrefix, subPath, statName) + data, err = os.ReadFile(filepath) + if err != nil { + return "", err + } + return string(data), nil +} + +// grepFirstMatch searches match line at data and returns item from it by index with given delimiter. 
+func grepFirstMatch(data string, match string, index int, delimiter string) (string, error) { + lines := strings.Split(string(data), "\n") + for _, s := range lines { + if !strings.Contains(s, match) { + continue + } + parts := strings.Split(s, delimiter) + if index < len(parts) { + return strings.TrimSpace(parts[index]), nil + } + } + return "", fmt.Errorf("cannot find %q in %q", match, data) +} diff --git a/lib/VictoriaMetrics/lib/encoding/compress.go b/lib/VictoriaMetrics/lib/encoding/compress.go new file mode 100644 index 0000000..b8acf54 --- /dev/null +++ b/lib/VictoriaMetrics/lib/encoding/compress.go @@ -0,0 +1,40 @@ +package encoding + +import ( + "fmt" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd" + "github.com/VictoriaMetrics/metrics" +) + +// CompressZSTDLevel appends compressed src to dst and returns +// the appended dst. +// +// The given compressLevel is used for the compression. +func CompressZSTDLevel(dst, src []byte, compressLevel int) []byte { + compressCalls.Inc() + originalBytes.Add(len(src)) + dstLen := len(dst) + dst = zstd.CompressLevel(dst, src, compressLevel) + compressedBytes.Add(len(dst) - dstLen) + return dst +} + +// DecompressZSTD decompresses src, appends the result to dst and returns +// the appended dst. 
+func DecompressZSTD(dst, src []byte) ([]byte, error) {
+	decompressCalls.Inc()
+	b, err := zstd.Decompress(dst, src)
+	if err != nil {
+		return b, fmt.Errorf("cannot decompress zstd block with len=%d to a buffer with len=%d: %w; block data (hex): %X", len(src), len(dst), err, src)
+	}
+	return b, nil
+}
+
+var (
+	compressCalls   = metrics.NewCounter(`vm_zstd_block_compress_calls_total`)
+	decompressCalls = metrics.NewCounter(`vm_zstd_block_decompress_calls_total`)
+
+	originalBytes   = metrics.NewCounter(`vm_zstd_block_original_bytes_total`)
+	compressedBytes = metrics.NewCounter(`vm_zstd_block_compressed_bytes_total`)
+)
diff --git a/lib/VictoriaMetrics/lib/encoding/int.go b/lib/VictoriaMetrics/lib/encoding/int.go
new file mode 100644
index 0000000..28f16ba
--- /dev/null
+++ b/lib/VictoriaMetrics/lib/encoding/int.go
@@ -0,0 +1,297 @@
+package encoding
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sync"
+)
+
+// MarshalUint16 appends marshaled v to dst and returns the result.
+func MarshalUint16(dst []byte, u uint16) []byte {
+	return append(dst, byte(u>>8), byte(u))
+}
+
+// UnmarshalUint16 returns unmarshaled uint16 from src.
+func UnmarshalUint16(src []byte) uint16 {
+	// This is faster than the manual conversion.
+	return binary.BigEndian.Uint16(src)
+}
+
+// MarshalUint32 appends marshaled v to dst and returns the result.
+func MarshalUint32(dst []byte, u uint32) []byte {
+	return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
+}
+
+// UnmarshalUint32 returns unmarshaled uint32 from src.
+func UnmarshalUint32(src []byte) uint32 {
+	// This is faster than the manual conversion.
+	return binary.BigEndian.Uint32(src)
+}
+
+// MarshalUint64 appends marshaled v to dst and returns the result.
+func MarshalUint64(dst []byte, u uint64) []byte {
+	return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
+}
+
+// UnmarshalUint64 returns unmarshaled uint64 from src.
+func UnmarshalUint64(src []byte) uint64 {
+	// This is faster than the manual conversion.
+	return binary.BigEndian.Uint64(src)
+}
+
+// MarshalInt16 appends marshaled v to dst and returns the result.
+func MarshalInt16(dst []byte, v int16) []byte {
+	// Such encoding for negative v must improve compression.
+	v = (v << 1) ^ (v >> 15) // zig-zag encoding without branching.
+	u := uint16(v)
+	return append(dst, byte(u>>8), byte(u))
+}
+
+// UnmarshalInt16 returns unmarshaled int16 from src.
+func UnmarshalInt16(src []byte) int16 {
+	// This is faster than the manual conversion.
+	u := binary.BigEndian.Uint16(src)
+	v := int16(u>>1) ^ (int16(u<<15) >> 15) // zig-zag decoding without branching.
+	return v
+}
+
+// MarshalInt64 appends marshaled v to dst and returns the result.
+func MarshalInt64(dst []byte, v int64) []byte {
+	// Such encoding for negative v must improve compression.
+	v = (v << 1) ^ (v >> 63) // zig-zag encoding without branching.
+	u := uint64(v)
+	return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
+}
+
+// UnmarshalInt64 returns unmarshaled int64 from src.
+func UnmarshalInt64(src []byte) int64 {
+	// This is faster than the manual conversion.
+	u := binary.BigEndian.Uint64(src)
+	v := int64(u>>1) ^ (int64(u<<63) >> 63) // zig-zag decoding without branching.
+	return v
+}
+
+// MarshalVarInt64 appends marshaled v to dst and returns the result.
+func MarshalVarInt64(dst []byte, v int64) []byte {
+	var tmp [1]int64
+	tmp[0] = v
+	return MarshalVarInt64s(dst, tmp[:])
+}
+
+// MarshalVarInt64s appends marshaled vs to dst and returns the result.
+func MarshalVarInt64s(dst []byte, vs []int64) []byte {
+	for _, v := range vs {
+		if v < 0x40 && v > -0x40 {
+			// Fast path
+			c := int8(v)
+			v := (c << 1) ^ (c >> 7) // zig-zag encoding without branching.
+			dst = append(dst, byte(v))
+			continue
+		}
+
+		v = (v << 1) ^ (v >> 63) // zig-zag encoding without branching.
+ u := uint64(v) + for u > 0x7f { + dst = append(dst, 0x80|byte(u)) + u >>= 7 + } + dst = append(dst, byte(u)) + } + return dst +} + +// UnmarshalVarInt64 returns unmarshaled int64 from src and returns +// the remaining tail from src. +func UnmarshalVarInt64(src []byte) ([]byte, int64, error) { + var tmp [1]int64 + tail, err := UnmarshalVarInt64s(tmp[:], src) + return tail, tmp[0], err +} + +// UnmarshalVarInt64s unmarshals len(dst) int64 values from src to dst +// and returns the remaining tail from src. +func UnmarshalVarInt64s(dst []int64, src []byte) ([]byte, error) { + idx := uint(0) + for i := range dst { + if idx >= uint(len(src)) { + return nil, fmt.Errorf("cannot unmarshal varint from empty data") + } + c := src[idx] + idx++ + if c < 0x80 { + // Fast path + v := int8(c>>1) ^ (int8(c<<7) >> 7) // zig-zag decoding without branching. + dst[i] = int64(v) + continue + } + + // Slow path + u := uint64(c & 0x7f) + startIdx := idx - 1 + shift := uint8(0) + for c >= 0x80 { + if idx >= uint(len(src)) { + return nil, fmt.Errorf("unexpected end of encoded varint at byte %d; src=%x", idx-startIdx, src[startIdx:]) + } + if idx-startIdx > 9 { + return src[idx:], fmt.Errorf("too long encoded varint; the maximum allowed length is 10 bytes; got %d bytes; src=%x", + (idx-startIdx)+1, src[startIdx:]) + } + c = src[idx] + idx++ + shift += 7 + u |= uint64(c&0x7f) << shift + } + v := int64(u>>1) ^ (int64(u<<63) >> 63) // zig-zag decoding without branching. + dst[i] = v + } + return src[idx:], nil +} + +// MarshalVarUint64 appends marshaled u to dst and returns the result. +func MarshalVarUint64(dst []byte, u uint64) []byte { + var tmp [1]uint64 + tmp[0] = u + return MarshalVarUint64s(dst, tmp[:]) +} + +// MarshalVarUint64s appends marshaled us to dst and returns the result. 
+func MarshalVarUint64s(dst []byte, us []uint64) []byte { + for _, u := range us { + if u < 0x80 { + // Fast path + dst = append(dst, byte(u)) + continue + } + for u > 0x7f { + dst = append(dst, 0x80|byte(u)) + u >>= 7 + } + dst = append(dst, byte(u)) + } + return dst +} + +// UnmarshalVarUint64 returns unmarshaled uint64 from src and returns +// the remaining tail from src. +func UnmarshalVarUint64(src []byte) ([]byte, uint64, error) { + var tmp [1]uint64 + tail, err := UnmarshalVarUint64s(tmp[:], src) + return tail, tmp[0], err +} + +// UnmarshalVarUint64s unmarshals len(dst) uint64 values from src to dst +// and returns the remaining tail from src. +func UnmarshalVarUint64s(dst []uint64, src []byte) ([]byte, error) { + idx := uint(0) + for i := range dst { + if idx >= uint(len(src)) { + return nil, fmt.Errorf("cannot unmarshal varuint from empty data") + } + c := src[idx] + idx++ + if c < 0x80 { + // Fast path + dst[i] = uint64(c) + continue + } + + // Slow path + u := uint64(c & 0x7f) + startIdx := idx - 1 + shift := uint8(0) + for c >= 0x80 { + if idx >= uint(len(src)) { + return nil, fmt.Errorf("unexpected end of encoded varint at byte %d; src=%x", idx-startIdx, src[startIdx:]) + } + if idx-startIdx > 9 { + return src[idx:], fmt.Errorf("too long encoded varint; the maximum allowed length is 10 bytes; got %d bytes; src=%x", + (idx-startIdx)+1, src[startIdx:]) + } + c = src[idx] + idx++ + shift += 7 + u |= uint64(c&0x7f) << shift + } + dst[i] = u + } + return src[idx:], nil +} + +// MarshalBytes appends marshaled b to dst and returns the result. +func MarshalBytes(dst, b []byte) []byte { + dst = MarshalVarUint64(dst, uint64(len(b))) + dst = append(dst, b...) + return dst +} + +// UnmarshalBytes returns unmarshaled bytes from src. 
+func UnmarshalBytes(src []byte) ([]byte, []byte, error) { + tail, n, err := UnmarshalVarUint64(src) + if err != nil { + return nil, nil, fmt.Errorf("cannot unmarshal string size: %w", err) + } + src = tail + if uint64(len(src)) < n { + return nil, nil, fmt.Errorf("src is too short for reading string with size %d; len(src)=%d", n, len(src)) + } + return src[n:], src[:n], nil +} + +// GetInt64s returns an int64 slice with the given size. +// The slice contents isn't initialized - it may contain garbage. +func GetInt64s(size int) *Int64s { + v := int64sPool.Get() + if v == nil { + return &Int64s{ + A: make([]int64, size), + } + } + is := v.(*Int64s) + if n := size - cap(is.A); n > 0 { + is.A = append(is.A[:cap(is.A)], make([]int64, n)...) + } + is.A = is.A[:size] + return is +} + +// PutInt64s returns is to the pool. +func PutInt64s(is *Int64s) { + int64sPool.Put(is) +} + +// Int64s holds an int64 slice +type Int64s struct { + A []int64 +} + +var int64sPool sync.Pool + +// GetUint64s returns an uint64 slice with the given size. +// The slice contents isn't initialized - it may contain garbage. +func GetUint64s(size int) *Uint64s { + v := uint64sPool.Get() + if v == nil { + return &Uint64s{ + A: make([]uint64, size), + } + } + is := v.(*Uint64s) + if n := size - cap(is.A); n > 0 { + is.A = append(is.A[:cap(is.A)], make([]uint64, n)...) + } + is.A = is.A[:size] + return is +} + +// PutUint64s returns is to the pool. 
+func PutUint64s(is *Uint64s) { + uint64sPool.Put(is) +} + +// Uint64s holds an uint64 slice +type Uint64s struct { + A []uint64 +} + +var uint64sPool sync.Pool diff --git a/lib/VictoriaMetrics/lib/encoding/zstd/zstd_cgo.go b/lib/VictoriaMetrics/lib/encoding/zstd/zstd_cgo.go new file mode 100644 index 0000000..b38a48e --- /dev/null +++ b/lib/VictoriaMetrics/lib/encoding/zstd/zstd_cgo.go @@ -0,0 +1,20 @@ +//go:build cgo +// +build cgo + +package zstd + +import ( + "github.com/valyala/gozstd" +) + +// Decompress appends decompressed src to dst and returns the result. +func Decompress(dst, src []byte) ([]byte, error) { + return gozstd.Decompress(dst, src) +} + +// CompressLevel appends compressed src to dst and returns the result. +// +// The given compressionLevel is used for the compression. +func CompressLevel(dst, src []byte, compressionLevel int) []byte { + return gozstd.CompressLevel(dst, src, compressionLevel) +} diff --git a/lib/VictoriaMetrics/lib/encoding/zstd/zstd_pure.go b/lib/VictoriaMetrics/lib/encoding/zstd/zstd_pure.go new file mode 100644 index 0000000..3fc666b --- /dev/null +++ b/lib/VictoriaMetrics/lib/encoding/zstd/zstd_pure.go @@ -0,0 +1,81 @@ +//go:build !cgo +// +build !cgo + +package zstd + +import ( + "sync" + "sync/atomic" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/klauspost/compress/zstd" +) + +var ( + decoder *zstd.Decoder + + mu sync.Mutex + av atomic.Value +) + +type registry map[int]*zstd.Encoder + +func init() { + r := make(registry) + av.Store(r) + + var err error + decoder, err = zstd.NewReader(nil) + if err != nil { + logger.Panicf("BUG: failed to create ZSTD reader: %s", err) + } +} + +// Decompress appends decompressed src to dst and returns the result. +func Decompress(dst, src []byte) ([]byte, error) { + return decoder.DecodeAll(src, dst) +} + +// CompressLevel appends compressed src to dst and returns the result. +// +// The given compressionLevel is used for the compression. 
+func CompressLevel(dst, src []byte, compressionLevel int) []byte { + e := getEncoder(compressionLevel) + return e.EncodeAll(src, dst) +} + +func getEncoder(compressionLevel int) *zstd.Encoder { + r := av.Load().(registry) + e := r[compressionLevel] + if e != nil { + return e + } + + mu.Lock() + // Create the encoder under lock in order to prevent from wasted work + // when concurrent goroutines create encoder for the same compressionLevel. + r1 := av.Load().(registry) + if e = r1[compressionLevel]; e == nil { + e = newEncoder(compressionLevel) + r2 := make(registry) + for k, v := range r1 { + r2[k] = v + } + r2[compressionLevel] = e + av.Store(r2) + } + mu.Unlock() + + return e +} + +func newEncoder(compressionLevel int) *zstd.Encoder { + level := zstd.EncoderLevelFromZstd(compressionLevel) + e, err := zstd.NewWriter(nil, + zstd.WithEncoderCRC(false), // Disable CRC for performance reasons. + zstd.WithEncoderLevel(level)) + if err != nil { + logger.Panicf("BUG: failed to create ZSTD writer: %s", err) + } + return e +} diff --git a/lib/VictoriaMetrics/lib/fasttime/fasttime.go b/lib/VictoriaMetrics/lib/fasttime/fasttime.go new file mode 100644 index 0000000..f50a27f --- /dev/null +++ b/lib/VictoriaMetrics/lib/fasttime/fasttime.go @@ -0,0 +1,40 @@ +package fasttime + +import ( + "sync/atomic" + "time" +) + +func init() { + go func() { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for tm := range ticker.C { + t := uint64(tm.Unix()) + atomic.StoreUint64(¤tTimestamp, t) + } + }() +} + +var currentTimestamp = uint64(time.Now().Unix()) + +// UnixTimestamp returns the current unix timestamp in seconds. +// +// It is faster than time.Now().Unix() +func UnixTimestamp() uint64 { + return atomic.LoadUint64(¤tTimestamp) +} + +// UnixDate returns date from the current unix timestamp. 
+// +// The date is calculated by dividing unix timestamp by (24*3600) +func UnixDate() uint64 { + return UnixTimestamp() / (24 * 3600) +} + +// UnixHour returns hour from the current unix timestamp. +// +// The hour is calculated by dividing unix timestamp by 3600 +func UnixHour() uint64 { + return UnixTimestamp() / 3600 +} diff --git a/lib/VictoriaMetrics/lib/flagutil/bytes.go b/lib/VictoriaMetrics/lib/flagutil/bytes.go new file mode 100644 index 0000000..de15229 --- /dev/null +++ b/lib/VictoriaMetrics/lib/flagutil/bytes.go @@ -0,0 +1,102 @@ +package flagutil + +import ( + "flag" + "fmt" + "strconv" + "strings" +) + +// NewBytes returns new `bytes` flag with the given name, defaultValue and description. +func NewBytes(name string, defaultValue int, description string) *Bytes { + description += "\nSupports the following optional suffixes for `size` values: KB, MB, GB, KiB, MiB, GiB" + b := Bytes{ + N: defaultValue, + valueString: fmt.Sprintf("%d", defaultValue), + } + flag.Var(&b, name, description) + return &b +} + +// Bytes is a flag for holding size in bytes. +// +// It supports the following optional suffixes for values: KB, MB, GB, KiB, MiB, GiB. +type Bytes struct { + // N contains parsed value for the given flag. 
+ N int + + valueString string +} + +// String implements flag.Value interface +func (b *Bytes) String() string { + return b.valueString +} + +// Set implements flag.Value interface +func (b *Bytes) Set(value string) error { + value = normalizeBytesString(value) + switch { + case strings.HasSuffix(value, "KB"): + f, err := strconv.ParseFloat(value[:len(value)-2], 64) + if err != nil { + return err + } + b.N = int(f * 1000) + b.valueString = value + return nil + case strings.HasSuffix(value, "MB"): + f, err := strconv.ParseFloat(value[:len(value)-2], 64) + if err != nil { + return err + } + b.N = int(f * 1000 * 1000) + b.valueString = value + return nil + case strings.HasSuffix(value, "GB"): + f, err := strconv.ParseFloat(value[:len(value)-2], 64) + if err != nil { + return err + } + b.N = int(f * 1000 * 1000 * 1000) + b.valueString = value + return nil + case strings.HasSuffix(value, "KiB"): + f, err := strconv.ParseFloat(value[:len(value)-3], 64) + if err != nil { + return err + } + b.N = int(f * 1024) + b.valueString = value + return nil + case strings.HasSuffix(value, "MiB"): + f, err := strconv.ParseFloat(value[:len(value)-3], 64) + if err != nil { + return err + } + b.N = int(f * 1024 * 1024) + b.valueString = value + return nil + case strings.HasSuffix(value, "GiB"): + f, err := strconv.ParseFloat(value[:len(value)-3], 64) + if err != nil { + return err + } + b.N = int(f * 1024 * 1024 * 1024) + b.valueString = value + return nil + default: + f, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + b.N = int(f) + b.valueString = value + return nil + } +} + +func normalizeBytesString(s string) string { + s = strings.ToUpper(s) + return strings.ReplaceAll(s, "I", "i") +} diff --git a/lib/VictoriaMetrics/lib/logger/logger.go b/lib/VictoriaMetrics/lib/logger/logger.go new file mode 100644 index 0000000..50b0785 --- /dev/null +++ b/lib/VictoriaMetrics/lib/logger/logger.go @@ -0,0 +1,303 @@ +package logger + +import ( + "errors" + "flag" + "fmt" 
+ "io" + "log" + "os" + "runtime" + "strings" + "sync" + "time" + + "github.com/VictoriaMetrics/metrics" +) + +var ( + loggerLevel = flag.String("loggerLevel", "INFO", "Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC") + loggerFormat = flag.String("loggerFormat", "default", "Format for logs. Possible values: default, json") + loggerOutput = flag.String("loggerOutput", "stderr", "Output for the logs. Supported values: stderr, stdout") + loggerTimezone = flag.String("loggerTimezone", "UTC", "Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. "+ + "For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local") + disableTimestamps = flag.Bool("loggerDisableTimestamps", false, "Whether to disable writing timestamps in logs") + + errorsPerSecondLimit = flag.Int("loggerErrorsPerSecondLimit", 0, `Per-second limit on the number of ERROR messages. If more than the given number of errors are emitted per second, the remaining errors are suppressed. Zero values disable the rate limit`) + warnsPerSecondLimit = flag.Int("loggerWarnsPerSecondLimit", 0, `Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit`) +) + +// Init initializes the logger. +// +// Init must be called after flag.Parse() +// +// There is no need in calling Init from tests. 
+func Init() {
+	setLoggerOutput()
+	validateLoggerLevel()
+	validateLoggerFormat()
+	initTimezone()
+	go logLimiterCleaner()
+}
+
+func initTimezone() {
+	tz, err := time.LoadLocation(*loggerTimezone)
+	if err != nil {
+		log.Fatalf("cannot load timezone %q: %s", *loggerTimezone, err)
+	}
+	timezone = tz
+}
+
+var timezone = time.UTC
+
+func setLoggerOutput() {
+	switch *loggerOutput {
+	case "stderr":
+		output = os.Stderr
+	case "stdout":
+		output = os.Stdout
+	default:
+		panic(fmt.Errorf("FATAL: unsupported `loggerOutput` value: %q; supported values are: stderr, stdout", *loggerOutput))
+	}
+}
+
+var output io.Writer = os.Stderr
+
+func validateLoggerLevel() {
+	switch *loggerLevel {
+	case "INFO", "WARN", "ERROR", "FATAL", "PANIC":
+	default:
+		// We cannot use logger.Panicf here, since the logger isn't initialized yet.
+		panic(fmt.Errorf("FATAL: unsupported `-loggerLevel` value: %q; supported values are: INFO, WARN, ERROR, FATAL, PANIC", *loggerLevel))
+	}
+}
+
+func validateLoggerFormat() {
+	switch *loggerFormat {
+	case "default", "json":
+	default:
+		// We cannot use logger.Panicf here, since the logger isn't initialized yet.
+		panic(fmt.Errorf("FATAL: unsupported `-loggerFormat` value: %q; supported values are: default, json", *loggerFormat))
+	}
+}
+
+var stdErrorLogger = log.New(&logWriter{}, "", 0)
+
+// StdErrorLogger returns standard error logger.
+func StdErrorLogger() *log.Logger {
+	return stdErrorLogger
+}
+
+// Infof logs info message.
+func Infof(format string, args ...interface{}) {
+	logLevel("INFO", format, args...)
+}
+
+// Warnf logs warn message.
+func Warnf(format string, args ...interface{}) {
+	logLevel("WARN", format, args...)
+}
+
+// Errorf logs error message.
+func Errorf(format string, args ...interface{}) {
+	logLevel("ERROR", format, args...)
+}
+
+// WarnfSkipframes logs warn message and skips the given number of frames for the caller.
+func WarnfSkipframes(skipframes int, format string, args ...interface{}) { + logLevelSkipframes(skipframes, "WARN", format, args...) +} + +// ErrorfSkipframes logs error message and skips the given number of frames for the caller. +func ErrorfSkipframes(skipframes int, format string, args ...interface{}) { + logLevelSkipframes(skipframes, "ERROR", format, args...) +} + +// Fatalf logs fatal message and terminates the app. +func Fatalf(format string, args ...interface{}) { + logLevel("FATAL", format, args...) +} + +// Panicf logs panic message and panics. +func Panicf(format string, args ...interface{}) { + logLevel("PANIC", format, args...) +} + +func logLevel(level, format string, args ...interface{}) { + logLevelSkipframes(1, level, format, args...) +} + +func logLevelSkipframes(skipframes int, level, format string, args ...interface{}) { + if shouldSkipLog(level) { + return + } + msg := fmt.Sprintf(format, args...) + logMessage(level, msg, 3+skipframes) +} + +func logLimiterCleaner() { + for { + time.Sleep(time.Second) + logLimiter.reset() + } +} + +var logLimiter = newLogLimit() + +func newLogLimit() *logLimit { + return &logLimit{ + m: make(map[string]uint64), + } +} + +type logLimit struct { + mu sync.Mutex + m map[string]uint64 +} + +func (ll *logLimit) reset() { + ll.mu.Lock() + ll.m = make(map[string]uint64, len(ll.m)) + ll.mu.Unlock() +} + +// needSuppress checks if the number of calls for the given location exceeds the given limit. +// +// When the number of calls equals limit, log message prefix returned. 
+func (ll *logLimit) needSuppress(location string, limit uint64) (bool, string) { + // fast path + var msg string + if limit == 0 { + return false, msg + } + ll.mu.Lock() + defer ll.mu.Unlock() + + if n, ok := ll.m[location]; ok { + if n >= limit { + switch n { + // report only once + case limit: + msg = fmt.Sprintf("suppressing log message with rate limit=%d: ", limit) + default: + return true, msg + } + } + ll.m[location] = n + 1 + } else { + ll.m[location] = 1 + } + return false, msg +} + +type logWriter struct { +} + +func (lw *logWriter) Write(p []byte) (int, error) { + logLevelSkipframes(2, "ERROR", "%s", p) + return len(p), nil +} + +func logMessage(level, msg string, skipframes int) { + timestamp := "" + if !*disableTimestamps { + timestamp = time.Now().In(timezone).Format("2006-01-02T15:04:05.000Z0700") + } + levelLowercase := strings.ToLower(level) + _, file, line, ok := runtime.Caller(skipframes) + if !ok { + file = "???" + line = 0 + } + if n := strings.Index(file, "/VictoriaMetrics/"); n >= 0 { + // Strip /VictoriaMetrics/ prefix + file = file[n+len("/VictoriaMetrics/"):] + } + location := fmt.Sprintf("%s:%d", file, line) + + // rate limit ERROR and WARN log messages with given limit. 
+ if level == "ERROR" || level == "WARN" { + limit := uint64(*errorsPerSecondLimit) + if level == "WARN" { + limit = uint64(*warnsPerSecondLimit) + } + ok, suppressMessage := logLimiter.needSuppress(location, limit) + if ok { + return + } + if len(suppressMessage) > 0 { + msg = suppressMessage + msg + } + } + + for len(msg) > 0 && msg[len(msg)-1] == '\n' { + msg = msg[:len(msg)-1] + } + var logMsg string + switch *loggerFormat { + case "json": + if *disableTimestamps { + logMsg = fmt.Sprintf(`{"level":%q,"caller":%q,"msg":%q}`+"\n", levelLowercase, location, msg) + } else { + logMsg = fmt.Sprintf(`{"ts":%q,"level":%q,"caller":%q,"msg":%q}`+"\n", timestamp, levelLowercase, location, msg) + } + default: + if *disableTimestamps { + logMsg = fmt.Sprintf("%s\t%s\t%s\n", levelLowercase, location, msg) + } else { + logMsg = fmt.Sprintf("%s\t%s\t%s\t%s\n", timestamp, levelLowercase, location, msg) + } + } + + // Serialize writes to log. + mu.Lock() + fmt.Fprint(output, logMsg) + mu.Unlock() + + // Increment vm_log_messages_total + counterName := fmt.Sprintf(`vm_log_messages_total{app_version="v1.67.0", level=%q, location=%q}`, levelLowercase, location) + metrics.GetOrCreateCounter(counterName).Inc() + + switch level { + case "PANIC": + if *loggerFormat == "json" { + // Do not clutter `json` output with panic stack trace + os.Exit(-1) + } + panic(errors.New(msg)) + case "FATAL": + os.Exit(-1) + } +} + +var mu sync.Mutex + +func shouldSkipLog(level string) bool { + switch *loggerLevel { + case "WARN": + switch level { + case "WARN", "ERROR", "FATAL", "PANIC": + return false + default: + return true + } + case "ERROR": + switch level { + case "ERROR", "FATAL", "PANIC": + return false + default: + return true + } + case "FATAL": + switch level { + case "FATAL", "PANIC": + return false + default: + return true + } + case "PANIC": + return level != "PANIC" + default: + return false + } +} diff --git a/lib/VictoriaMetrics/lib/logger/tzdata.go 
b/lib/VictoriaMetrics/lib/logger/tzdata.go new file mode 100644 index 0000000..5eb23cf --- /dev/null +++ b/lib/VictoriaMetrics/lib/logger/tzdata.go @@ -0,0 +1,8 @@ +//go:build go1.15 +// +build go1.15 + +package logger + +import ( + _ "time/tzdata" +) diff --git a/lib/VictoriaMetrics/lib/lrucache/lrucache.go b/lib/VictoriaMetrics/lib/lrucache/lrucache.go new file mode 100644 index 0000000..9104852 --- /dev/null +++ b/lib/VictoriaMetrics/lib/lrucache/lrucache.go @@ -0,0 +1,326 @@ +package lrucache + +import ( + "container/heap" + "sync" + "sync/atomic" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil" + "github.com/cespare/xxhash/v2" +) + +// Cache caches Entry entries. +// +// Call NewCache() for creating new Cache. +type Cache struct { + shards []*cache + + cleanerMustStopCh chan struct{} + cleanerStoppedCh chan struct{} +} + +// NewCache creates new cache. +// +// Cache size in bytes is limited by the value returned by getMaxSizeBytes() callback. +// Call MustStop() in order to free up resources occupied by Cache. +func NewCache(getMaxSizeBytes func() int) *Cache { + cpusCount := cgroup.AvailableCPUs() + shardsCount := cgroup.AvailableCPUs() + // Increase the number of shards with the increased number of available CPU cores. + // This should reduce contention on per-shard mutexes. 
+ multiplier := cpusCount + if multiplier > 16 { + multiplier = 16 + } + shardsCount *= multiplier + shards := make([]*cache, shardsCount) + getMaxShardBytes := func() int { + n := getMaxSizeBytes() + return n / shardsCount + } + for i := range shards { + shards[i] = newCache(getMaxShardBytes) + } + c := &Cache{ + shards: shards, + cleanerMustStopCh: make(chan struct{}), + cleanerStoppedCh: make(chan struct{}), + } + go c.cleaner() + return c +} + +// MustStop frees up resources occupied by c. +func (c *Cache) MustStop() { + close(c.cleanerMustStopCh) + <-c.cleanerStoppedCh +} + +// GetEntry returns an Entry for the given key k from c. +func (c *Cache) GetEntry(k string) Entry { + idx := uint64(0) + if len(c.shards) > 1 { + h := hashUint64(k) + idx = h % uint64(len(c.shards)) + } + shard := c.shards[idx] + return shard.GetEntry(k) +} + +// PutEntry puts the given Entry e under the given key k into c. +func (c *Cache) PutEntry(k string, e Entry) { + idx := uint64(0) + if len(c.shards) > 1 { + h := hashUint64(k) + idx = h % uint64(len(c.shards)) + } + shard := c.shards[idx] + shard.PutEntry(k, e) +} + +// Len returns the number of blocks in the cache c. +func (c *Cache) Len() int { + n := 0 + for _, shard := range c.shards { + n += shard.Len() + } + return n +} + +// SizeBytes returns an approximate size in bytes of all the blocks stored in the cache c. +func (c *Cache) SizeBytes() int { + n := 0 + for _, shard := range c.shards { + n += shard.SizeBytes() + } + return n +} + +// SizeMaxBytes returns the max allowed size in bytes for c. +func (c *Cache) SizeMaxBytes() int { + n := 0 + for _, shard := range c.shards { + n += shard.SizeMaxBytes() + } + return n +} + +// Requests returns the number of requests served by c. +func (c *Cache) Requests() uint64 { + n := uint64(0) + for _, shard := range c.shards { + n += shard.Requests() + } + return n +} + +// Misses returns the number of cache misses for c. 
+func (c *Cache) Misses() uint64 { + n := uint64(0) + for _, shard := range c.shards { + n += shard.Misses() + } + return n +} + +func (c *Cache) cleaner() { + d := timeutil.AddJitterToDuration(time.Second * 53) + ticker := time.NewTicker(d) + defer ticker.Stop() + for { + select { + case <-c.cleanerMustStopCh: + close(c.cleanerStoppedCh) + return + case <-ticker.C: + c.cleanByTimeout() + } + } +} + +func (c *Cache) cleanByTimeout() { + for _, shard := range c.shards { + shard.cleanByTimeout() + } +} + +type cache struct { + requests atomic.Uint64 + misses atomic.Uint64 + + // sizeBytes contains an approximate size for all the blocks stored in the cache. + sizeBytes atomic.Int64 + + // getMaxSizeBytes() is a callback, which returns the maximum allowed cache size in bytes. + getMaxSizeBytes func() int + + // mu protects all the fields below. + mu sync.Mutex + + // m contains cached entries + m map[string]*cacheEntry + + // The heap for removing the least recently used entries from m. + lah lastAccessHeap +} + +func hashUint64(s string) uint64 { + b := bytesutil.ToUnsafeBytes(s) + return xxhash.Sum64(b) +} + +// Entry is an item, which may be cached in the Cache. +type Entry interface { + // SizeBytes must return the approximate size of the given entry in bytes + SizeBytes() int +} + +type cacheEntry struct { + // The timestamp in seconds for the last access to the given entry. + lastAccessTime uint64 + + // heapIdx is the index for the entry in lastAccessHeap. + heapIdx int + + // k contains the associated key for the given entry. + k string + + // e contains the cached entry. + e Entry +} + +func newCache(getMaxSizeBytes func() int) *cache { + var c cache + c.getMaxSizeBytes = getMaxSizeBytes + c.m = make(map[string]*cacheEntry) + return &c +} + +func (c *cache) updateSizeBytes(n int) { + c.sizeBytes.Add(int64(n)) +} + +func (c *cache) cleanByTimeout() { + // Delete items accessed more than three minutes ago. + // This time should be enough for repeated queries. 
+ lastAccessTime := fasttime.UnixTimestamp() - 3*60 + c.mu.Lock() + defer c.mu.Unlock() + + for len(c.lah) > 0 { + if lastAccessTime < c.lah[0].lastAccessTime { + break + } + c.removeLeastRecentlyAccessedItem() + } +} + +func (c *cache) GetEntry(k string) Entry { + c.requests.Add(1) + c.mu.Lock() + defer c.mu.Unlock() + + ce := c.m[k] + if ce == nil { + c.misses.Add(1) + return nil + } + currentTime := fasttime.UnixTimestamp() + if ce.lastAccessTime != currentTime { + ce.lastAccessTime = currentTime + heap.Fix(&c.lah, ce.heapIdx) + } + return ce.e +} + +func (c *cache) PutEntry(k string, e Entry) { + c.mu.Lock() + defer c.mu.Unlock() + + ce := c.m[k] + if ce != nil { + // The entry has been already registered by concurrent goroutine. + return + } + ce = &cacheEntry{ + lastAccessTime: fasttime.UnixTimestamp(), + k: k, + e: e, + } + heap.Push(&c.lah, ce) + c.m[k] = ce + c.updateSizeBytes(e.SizeBytes()) + maxSizeBytes := c.getMaxSizeBytes() + for c.SizeBytes() > maxSizeBytes && len(c.lah) > 0 { + c.removeLeastRecentlyAccessedItem() + } +} + +func (c *cache) removeLeastRecentlyAccessedItem() { + ce := c.lah[0] + c.updateSizeBytes(-ce.e.SizeBytes()) + delete(c.m, ce.k) + heap.Pop(&c.lah) +} + +func (c *cache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.m) +} + +func (c *cache) SizeBytes() int { + return int(c.sizeBytes.Load()) +} + +func (c *cache) SizeMaxBytes() int { + return c.getMaxSizeBytes() +} + +func (c *cache) Requests() uint64 { + return c.requests.Load() +} + +func (c *cache) Misses() uint64 { + return c.misses.Load() +} + +// lastAccessHeap implements heap.Interface +type lastAccessHeap []*cacheEntry + +func (lah *lastAccessHeap) Len() int { + return len(*lah) +} +func (lah *lastAccessHeap) Swap(i, j int) { + h := *lah + a := h[i] + b := h[j] + a.heapIdx = j + b.heapIdx = i + h[i] = b + h[j] = a +} +func (lah *lastAccessHeap) Less(i, j int) bool { + h := *lah + return h[i].lastAccessTime < h[j].lastAccessTime +} +func (lah *lastAccessHeap) 
Push(x any) { + e := x.(*cacheEntry) + h := *lah + e.heapIdx = len(h) + *lah = append(h, e) +} +func (lah *lastAccessHeap) Pop() any { + h := *lah + e := h[len(h)-1] + + // Remove the reference to deleted entry, so Go GC could free up memory occupied by the deleted entry. + h[len(h)-1] = nil + + *lah = h[:len(h)-1] + return e +} diff --git a/lib/VictoriaMetrics/lib/lrucache/lrucache_test.go b/lib/VictoriaMetrics/lib/lrucache/lrucache_test.go new file mode 100644 index 0000000..a83f35c --- /dev/null +++ b/lib/VictoriaMetrics/lib/lrucache/lrucache_test.go @@ -0,0 +1,126 @@ +package lrucache + +import ( + "fmt" + "sync" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" +) + +func TestCache(t *testing.T) { + sizeMaxBytes := 64 * 1024 + // Multiply sizeMaxBytes by the square of available CPU cores + // in order to get proper distribution of sizes between cache shards. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2204 + cpus := cgroup.AvailableCPUs() + sizeMaxBytes *= cpus * cpus + getMaxSize := func() int { + return sizeMaxBytes + } + c := NewCache(getMaxSize) + defer c.MustStop() + if n := c.SizeBytes(); n != 0 { + t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, 0) + } + if n := c.SizeMaxBytes(); n != sizeMaxBytes { + t.Fatalf("unexpected SizeMaxBytes(); got %d; want %d", n, sizeMaxBytes) + } + k := "foobar" + var e testEntry + entrySize := e.SizeBytes() + // Put a single entry into cache + c.PutEntry(k, &e) + if n := c.Len(); n != 1 { + t.Fatalf("unexpected number of items in the cache; got %d; want %d", n, 1) + } + if n := c.SizeBytes(); n != entrySize { + t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, entrySize) + } + if n := c.Requests(); n != 0 { + t.Fatalf("unexpected number of requests; got %d; want %d", n, 0) + } + if n := c.Misses(); n != 0 { + t.Fatalf("unexpected number of misses; got %d; want %d", n, 0) + } + // Obtain this entry from the cache + if e1 := c.GetEntry(k); e1 != &e { + 
t.Fatalf("unexpected entry obtained; got %v; want %v", e1, &e) + } + if n := c.Requests(); n != 1 { + t.Fatalf("unexpected number of requests; got %d; want %d", n, 1) + } + if n := c.Misses(); n != 0 { + t.Fatalf("unexpected number of misses; got %d; want %d", n, 0) + } + // Obtain non-existing entry from the cache + if e1 := c.GetEntry("non-existing-key"); e1 != nil { + t.Fatalf("unexpected non-nil block obtained for non-existing key: %v", e1) + } + if n := c.Requests(); n != 2 { + t.Fatalf("unexpected number of requests; got %d; want %d", n, 2) + } + if n := c.Misses(); n != 1 { + t.Fatalf("unexpected number of misses; got %d; want %d", n, 1) + } + // Store the entry again. + c.PutEntry(k, &e) + if n := c.SizeBytes(); n != entrySize { + t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, entrySize) + } + if e1 := c.GetEntry(k); e1 != &e { + t.Fatalf("unexpected entry obtained; got %v; want %v", e1, &e) + } + if n := c.Requests(); n != 3 { + t.Fatalf("unexpected number of requests; got %d; want %d", n, 3) + } + if n := c.Misses(); n != 1 { + t.Fatalf("unexpected number of misses; got %d; want %d", n, 1) + } + + // Manually clean the cache. The entry shouldn't be deleted because it was recently accessed. 
+ c.cleanByTimeout() + if n := c.SizeBytes(); n != entrySize { + t.Fatalf("unexpected SizeBytes(); got %d; want %d", n, entrySize) + } +} + +func TestCacheConcurrentAccess(_ *testing.T) { + const sizeMaxBytes = 16 * 1024 * 1024 + getMaxSize := func() int { + return sizeMaxBytes + } + c := NewCache(getMaxSize) + defer c.MustStop() + + workers := 5 + var wg sync.WaitGroup + wg.Add(workers) + for i := 0; i < workers; i++ { + go func(worker int) { + defer wg.Done() + testCacheSetGet(c, worker) + }(i) + } + wg.Wait() +} + +func testCacheSetGet(c *Cache, worker int) { + for i := 0; i < 1000; i++ { + e := testEntry{} + k := fmt.Sprintf("key_%d_%d", worker, i) + c.PutEntry(k, &e) + if e1 := c.GetEntry(k); e1 != &e { + panic(fmt.Errorf("unexpected entry obtained; got %v; want %v", e1, &e)) + } + if e1 := c.GetEntry("non-existing-key"); e1 != nil { + panic(fmt.Errorf("unexpected non-nil entry obtained: %v", e1)) + } + } +} + +type testEntry struct{} + +func (tb *testEntry) SizeBytes() int { + return 42 +} diff --git a/lib/VictoriaMetrics/lib/memory/memory.go b/lib/VictoriaMetrics/lib/memory/memory.go new file mode 100644 index 0000000..04e4ff0 --- /dev/null +++ b/lib/VictoriaMetrics/lib/memory/memory.go @@ -0,0 +1,59 @@ +package memory + +import ( + "flag" + "fmt" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +var ( + allowedPercent = flag.Float64("memory.allowedPercent", 60, `Allowed percent of system memory VictoriaMetrics caches may occupy. See also -memory.allowedBytes. Too low a value may increase cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache which will result in higher disk IO usage`) + allowedBytes = flagutil.NewBytes("memory.allowedBytes", 0, `Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. 
Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage`) +) + +var ( + allowedMemory int + remainingMemory int +) + +var once sync.Once + +func initOnce() { + if !flag.Parsed() { + // Do not use logger.Panicf here, since logger may be uninitialized yet. + panic(fmt.Errorf("BUG: memory.Allowed must be called only after flag.Parse call")) + } + mem := sysTotalMemory() + if allowedBytes.N <= 0 { + if *allowedPercent < 1 || *allowedPercent > 200 { + logger.Panicf("FATAL: -memory.allowedPercent must be in the range [1...200]; got %g", *allowedPercent) + } + percent := *allowedPercent / 100 + allowedMemory = int(float64(mem) * percent) + remainingMemory = mem - allowedMemory + logger.Infof("limiting caches to %d bytes, leaving %d bytes to the OS according to -memory.allowedPercent=%g", allowedMemory, remainingMemory, *allowedPercent) + } else { + allowedMemory = allowedBytes.N + remainingMemory = mem - allowedMemory + logger.Infof("limiting caches to %d bytes, leaving %d bytes to the OS according to -memory.allowedBytes=%s", allowedMemory, remainingMemory, allowedBytes.String()) + } +} + +// Allowed returns the amount of system memory allowed to use by the app. +// +// The function must be called only after flag.Parse is called. +func Allowed() int { + once.Do(initOnce) + return allowedMemory +} + +// Remaining returns the amount of memory remaining to the OS. +// +// This function must be called only after flag.Parse is called. 
+func Remaining() int { + once.Do(initOnce) + return remainingMemory +} diff --git a/lib/VictoriaMetrics/lib/memory/memory_bsd.go b/lib/VictoriaMetrics/lib/memory/memory_bsd.go new file mode 100644 index 0000000..4955ef6 --- /dev/null +++ b/lib/VictoriaMetrics/lib/memory/memory_bsd.go @@ -0,0 +1,18 @@ +//go:build freebsd || openbsd || dragonfly || netbsd +// +build freebsd openbsd dragonfly netbsd + +package memory + +import ( + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// This code has been adopted from https://github.com/pbnjay/memory + +func sysTotalMemory() int { + s, err := sysctlUint64("hw.physmem") + if err != nil { + logger.Panicf("FATAL: cannot determine system memory: %s", err) + } + return int(s) +} diff --git a/lib/VictoriaMetrics/lib/memory/memory_darwin.go b/lib/VictoriaMetrics/lib/memory/memory_darwin.go new file mode 100644 index 0000000..85dffbd --- /dev/null +++ b/lib/VictoriaMetrics/lib/memory/memory_darwin.go @@ -0,0 +1,14 @@ +package memory + +import ( + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// This has been adapted from github.com/pbnjay/memory. 
+func sysTotalMemory() int {
+	s, err := sysctlUint64("hw.memsize")
+	if err != nil {
+		logger.Panicf("FATAL: cannot determine system memory: %s", err)
+	}
+	return int(s)
+}
diff --git a/lib/VictoriaMetrics/lib/memory/memory_linux.go b/lib/VictoriaMetrics/lib/memory/memory_linux.go
new file mode 100644
index 0000000..05b2c19
--- /dev/null
+++ b/lib/VictoriaMetrics/lib/memory/memory_linux.go
@@ -0,0 +1,31 @@
+package memory
+
+import (
+	"syscall"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+func sysTotalMemory() int {
+	var si syscall.Sysinfo_t
+	if err := syscall.Sysinfo(&si); err != nil {
+		logger.Panicf("FATAL: error in syscall.Sysinfo: %s", err)
+	}
+	totalMem := maxInt
+	if uint64(maxInt)/uint64(si.Totalram) > uint64(si.Unit) {
+		totalMem = int(uint64(si.Totalram) * uint64(si.Unit))
+	}
+	mem := cgroup.GetMemoryLimit()
+	if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem {
+		// Try reading hierarchical memory limit.
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/699 + mem = cgroup.GetHierarchicalMemoryLimit() + if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem { + return totalMem + } + } + return int(mem) +} diff --git a/lib/VictoriaMetrics/lib/memory/memory_solaris.go b/lib/VictoriaMetrics/lib/memory/memory_solaris.go new file mode 100644 index 0000000..2e29d9a --- /dev/null +++ b/lib/VictoriaMetrics/lib/memory/memory_solaris.go @@ -0,0 +1,20 @@ +package memory + +import ( + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "golang.org/x/sys/unix" +) + +const PHYS_PAGES = 0x1f4 + +func sysTotalMemory() int { + memPageSize := unix.Getpagesize() + // https://man7.org/linux/man-pages/man3/sysconf.3.html + // _SC_PHYS_PAGES + memPagesCnt, err := unix.Sysconf(PHYS_PAGES) + if err != nil { + logger.Panicf("FATAL: error in unix.Sysconf: %s", err) + } + + return memPageSize * int(memPagesCnt) +} diff --git a/lib/VictoriaMetrics/lib/memory/memory_windows.go b/lib/VictoriaMetrics/lib/memory/memory_windows.go new file mode 100644 index 0000000..6323a62 --- /dev/null +++ b/lib/VictoriaMetrics/lib/memory/memory_windows.go @@ -0,0 +1,40 @@ +package memory + +import ( + "syscall" + "unsafe" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// This has been adapted from https://github.com/pbnjay/memory. 
+ +type memStatusEx struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + unused [6]uint64 +} + +func sysTotalMemory() int { + kernel32, err := syscall.LoadDLL("kernel32.dll") + if err != nil { + logger.Panicf("FATAL: cannot load kernel32.dll: %s", err) + } + globalMemoryStatusEx, err := kernel32.FindProc("GlobalMemoryStatusEx") + if err != nil { + logger.Panicf("FATAL: cannot find GlobalMemoryStatusEx: %s", err) + } + msx := &memStatusEx{ + dwLength: uint32(unsafe.Sizeof(memStatusEx{})), + } + r, _, err := globalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msx))) + if r == 0 { + logger.Panicf("FATAL: error in GlobalMemoryStatusEx: %s", err) + } + n := int(msx.ullTotalPhys) + if uint64(n) != msx.ullTotalPhys { + logger.Panicf("FATAL: int overflow for msx.ullTotalPhys=%d", msx.ullTotalPhys) + } + return n +} diff --git a/lib/VictoriaMetrics/lib/memory/sysctl.go b/lib/VictoriaMetrics/lib/memory/sysctl.go new file mode 100644 index 0000000..1c31e09 --- /dev/null +++ b/lib/VictoriaMetrics/lib/memory/sysctl.go @@ -0,0 +1,23 @@ +//go:build darwin || freebsd || openbsd || dragonfly || netbsd +// +build darwin freebsd openbsd dragonfly netbsd + +package memory + +import ( + "syscall" + "unsafe" +) + +// This has been adapted from github.com/pbnjay/memory. 
+func sysctlUint64(name string) (uint64, error) { + s, err := syscall.Sysctl(name) + if err != nil { + return 0, err + } + // hack because the string conversion above drops a \0 + b := []byte(s) + if len(b) < 8 { + b = append(b, 0) + } + return *(*uint64)(unsafe.Pointer(&b[0])), nil +} diff --git a/lib/VictoriaMetrics/lib/procutil/signal.go b/lib/VictoriaMetrics/lib/procutil/signal.go new file mode 100644 index 0000000..58a8634 --- /dev/null +++ b/lib/VictoriaMetrics/lib/procutil/signal.go @@ -0,0 +1,45 @@ +//go:build !windows +// +build !windows + +package procutil + +import ( + "os" + "os/signal" + "syscall" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// WaitForSigterm waits for either SIGTERM or SIGINT +// +// Returns the caught signal. +// +// It also prevent from program termination on SIGHUP signal, +// since this signal is frequently used for config reloading. +func WaitForSigterm() os.Signal { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP) + for { + sig := <-ch + if sig == syscall.SIGHUP { + // Prevent from the program stop on SIGHUP + continue + } + return sig + } +} + +// SelfSIGHUP sends SIGHUP signal to the current process. +func SelfSIGHUP() { + if err := syscall.Kill(syscall.Getpid(), syscall.SIGHUP); err != nil { + logger.Panicf("FATAL: cannot send SIGHUP to itself: %s", err) + } +} + +// NewSighupChan returns a channel, which is triggered on every SIGHUP. 
+func NewSighupChan() <-chan os.Signal { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGHUP) + return ch +} diff --git a/lib/VictoriaMetrics/lib/procutil/signal_windows.go b/lib/VictoriaMetrics/lib/procutil/signal_windows.go new file mode 100644 index 0000000..1a1d32e --- /dev/null +++ b/lib/VictoriaMetrics/lib/procutil/signal_windows.go @@ -0,0 +1,61 @@ +//go:build windows +// +build windows + +package procutil + +import ( + "os" + "os/signal" + "sync" + "syscall" +) + +// WaitForSigterm waits for either SIGTERM or SIGINT +// +// Returns the caught signal. +// +// Windows dont have SIGHUP syscall. +func WaitForSigterm() os.Signal { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + sig := <-ch + return sig +} + +type sigHUPNotifier struct { + lock sync.Mutex + subscribers []chan<- os.Signal +} + +var notifier sigHUPNotifier + +// https://golang.org/pkg/os/signal/#hdr-Windows +// https://github.com/golang/go/issues/6948 +// SelfSIGHUP sends SIGHUP signal to the subscribed listeners. +func SelfSIGHUP() { + notifier.notify(syscall.SIGHUP) +} + +// NewSighupChan returns a channel, which is triggered on every SelfSIGHUP. 
+func NewSighupChan() <-chan os.Signal { + ch := make(chan os.Signal, 1) + notifier.subscribe(ch) + return ch +} + +func (sn *sigHUPNotifier) subscribe(sub chan<- os.Signal) { + sn.lock.Lock() + defer sn.lock.Unlock() + sn.subscribers = append(sn.subscribers, sub) +} + +func (sn *sigHUPNotifier) notify(sig os.Signal) { + sn.lock.Lock() + defer sn.lock.Unlock() + for _, sub := range sn.subscribers { + select { + case sub <- sig: + default: + } + } +} diff --git a/lib/VictoriaMetrics/lib/prompb/remote.pb.go b/lib/VictoriaMetrics/lib/prompb/remote.pb.go new file mode 100644 index 0000000..e9c53de --- /dev/null +++ b/lib/VictoriaMetrics/lib/prompb/remote.pb.go @@ -0,0 +1,211 @@ +// Code generated from remote.proto + +package prompb + +import ( + "fmt" + "io" +) + +// WriteRequest represents Prometheus remote write API request +type WriteRequest struct { + Timeseries []TimeSeries + + labelsPool []Label + samplesPool []Sample +} + +// Unmarshal unmarshals m from dAtA. +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return errInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if cap(m.Timeseries) > len(m.Timeseries) { + m.Timeseries = m.Timeseries[:len(m.Timeseries)+1] + } else { + m.Timeseries = append(m.Timeseries, TimeSeries{}) + } + ts := &m.Timeseries[len(m.Timeseries)-1] + var err error + m.labelsPool, m.samplesPool, err = ts.Unmarshal(dAtA[iNdEx:postIndex], m.labelsPool, m.samplesPool) + if err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return errInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipRemote(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, errInvalidLengthRemote + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + start := iNdEx + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRemote(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + errInvalidLengthRemote = fmt.Errorf("proto: negative length found during unmarshaling") + errIntOverflowRemote = fmt.Errorf("proto: integer overflow") +) diff --git a/lib/VictoriaMetrics/lib/prompb/types.pb.go b/lib/VictoriaMetrics/lib/prompb/types.pb.go new file mode 100644 index 0000000..c132926 --- /dev/null +++ b/lib/VictoriaMetrics/lib/prompb/types.pb.go @@ -0,0 +1,455 @@ +// Code generated manually from types.proto + +package prompb + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +// Sample is a timeseries sample. +type Sample struct { + Value float64 + Timestamp int64 +} + +// TimeSeries is a timeseries. +type TimeSeries struct { + Labels []Label + Samples []Sample +} + +// Label is a timeseries label +type Label struct { + Name []byte + Value []byte +} + +// Unmarshal unmarshals sample from dAtA. 
+func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return errInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +// Unmarshal unmarshals timeseries from dAtA. 
+func (m *TimeSeries) Unmarshal(dAtA []byte, dstLabels []Label, dstSamples []Sample) ([]Label, []Sample, error) { + labelsStart := len(dstLabels) + samplesStart := len(dstSamples) + + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return dstLabels, dstSamples, errIntOverflowTypes + } + if iNdEx >= l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return dstLabels, dstSamples, fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return dstLabels, dstSamples, fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return dstLabels, dstSamples, fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return dstLabels, dstSamples, errIntOverflowTypes + } + if iNdEx >= l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return dstLabels, dstSamples, errInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + if cap(dstLabels) > len(dstLabels) { + dstLabels = dstLabels[:len(dstLabels)+1] + } else { + dstLabels = append(dstLabels, Label{}) + } + lb := &dstLabels[len(dstLabels)-1] + if err := lb.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return dstLabels, dstSamples, err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return dstLabels, dstSamples, fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return dstLabels, dstSamples, errIntOverflowTypes + } + if iNdEx >= l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return dstLabels, dstSamples, errInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + if cap(dstSamples) > len(dstSamples) { + dstSamples = dstSamples[:len(dstSamples)+1] + } else { + dstSamples = append(dstSamples, Sample{}) + } + s := &dstSamples[len(dstSamples)-1] + if err := s.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return dstLabels, dstSamples, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return dstLabels, dstSamples, err + } + if skippy < 0 { + return dstLabels, dstSamples, errInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return dstLabels, dstSamples, io.ErrUnexpectedEOF + } + + m.Labels = dstLabels[labelsStart:] + m.Samples = dstSamples[samplesStart:] + return dstLabels, dstSamples, nil +} + +// Unmarshal unmarshals Label from dAtA. 
+func (m *Label) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return errInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = dAtA[iNdEx:postIndex] + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return errIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return errInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return errInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, errInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + start := iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, errIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + errInvalidLengthTypes = fmt.Errorf("proto: negative 
length found during unmarshaling") + errIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) diff --git a/lib/VictoriaMetrics/lib/prompb/util.go b/lib/VictoriaMetrics/lib/prompb/util.go new file mode 100644 index 0000000..ffda716 --- /dev/null +++ b/lib/VictoriaMetrics/lib/prompb/util.go @@ -0,0 +1,25 @@ +package prompb + +// Reset resets wr. +func (wr *WriteRequest) Reset() { + for i := range wr.Timeseries { + ts := &wr.Timeseries[i] + ts.Labels = nil + ts.Samples = nil + } + wr.Timeseries = wr.Timeseries[:0] + + for i := range wr.labelsPool { + lb := &wr.labelsPool[i] + lb.Name = nil + lb.Value = nil + } + wr.labelsPool = wr.labelsPool[:0] + + for i := range wr.samplesPool { + s := &wr.samplesPool[i] + s.Value = 0 + s.Timestamp = 0 + } + wr.samplesPool = wr.samplesPool[:0] +} diff --git a/lib/VictoriaMetrics/lib/protoparser/promremotewrite/streamparser.go b/lib/VictoriaMetrics/lib/protoparser/promremotewrite/streamparser.go new file mode 100644 index 0000000..9df2bc9 --- /dev/null +++ b/lib/VictoriaMetrics/lib/protoparser/promremotewrite/streamparser.go @@ -0,0 +1,141 @@ +package promremotewrite + +import ( + "bufio" + "fmt" + "io" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + "github.com/VictoriaMetrics/metrics" + "github.com/golang/snappy" +) + +var maxInsertRequestSize = flagutil.NewBytes("maxInsertRequestSize", 32*1024*1024, "The maximum size in bytes of a single Prometheus remote_write API request") + +// ParseStream parses Prometheus remote_write message from reader and calls callback for the parsed timeseries. +// +// callback shouldn't hold tss after returning. 
+func ParseStream(r io.Reader, callback func(tss []prompb.TimeSeries) error) error { + ctx := getPushCtx(r) + defer putPushCtx(ctx) + if err := ctx.Read(); err != nil { + return err + } + + // Synchronously process the request in order to properly return errors to ParseStream caller, + // so it could properly return HTTP 503 status code in response. + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896 + bb := bodyBufferPool.Get() + defer bodyBufferPool.Put(bb) + var err error + bb.B, err = snappy.Decode(bb.B[:cap(bb.B)], ctx.reqBuf.B) + if err != nil { + return fmt.Errorf("cannot decompress request with length %d: %w", len(ctx.reqBuf.B), err) + } + if len(bb.B) > maxInsertRequestSize.N { + return fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize.N, len(bb.B)) + } + wr := getWriteRequest() + defer putWriteRequest(wr) + if err := wr.Unmarshal(bb.B); err != nil { + unmarshalErrors.Inc() + return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %w", len(bb.B), err) + } + + rows := 0 + tss := wr.Timeseries + for i := range tss { + rows += len(tss[i].Samples) + } + rowsRead.Add(rows) + + if err := callback(tss); err != nil { + return fmt.Errorf("error when processing imported data: %w", err) + } + return nil +} + +var bodyBufferPool bytesutil.ByteBufferPool + +type pushCtx struct { + br *bufio.Reader + reqBuf bytesutil.ByteBuffer +} + +func (ctx *pushCtx) reset() { + ctx.br.Reset(nil) + ctx.reqBuf.Reset() +} + +func (ctx *pushCtx) Read() error { + readCalls.Inc() + lr := io.LimitReader(ctx.br, int64(maxInsertRequestSize.N)+1) + startTime := fasttime.UnixTimestamp() + reqLen, err := ctx.reqBuf.ReadFrom(lr) + if err != nil { + readErrors.Inc() + return fmt.Errorf("cannot read compressed request in %d seconds: %w", fasttime.UnixTimestamp()-startTime, err) + } + if reqLen > int64(maxInsertRequestSize.N) { + readErrors.Inc() + return fmt.Errorf("too big packed 
request; mustn't exceed `-maxInsertRequestSize=%d` bytes", maxInsertRequestSize.N) + } + return nil +} + +var ( + readCalls = metrics.NewCounter(`vm_protoparser_read_calls_total{type="promremotewrite"}`) + readErrors = metrics.NewCounter(`vm_protoparser_read_errors_total{type="promremotewrite"}`) + rowsRead = metrics.NewCounter(`vm_protoparser_rows_read_total{type="promremotewrite"}`) + unmarshalErrors = metrics.NewCounter(`vm_protoparser_unmarshal_errors_total{type="promremotewrite"}`) +) + +func getPushCtx(r io.Reader) *pushCtx { + select { + case ctx := <-pushCtxPoolCh: + ctx.br.Reset(r) + return ctx + default: + if v := pushCtxPool.Get(); v != nil { + ctx := v.(*pushCtx) + ctx.br.Reset(r) + return ctx + } + return &pushCtx{ + br: bufio.NewReaderSize(r, 64*1024), + } + } +} + +func putPushCtx(ctx *pushCtx) { + ctx.reset() + select { + case pushCtxPoolCh <- ctx: + default: + pushCtxPool.Put(ctx) + } +} + +var pushCtxPool sync.Pool + +var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs()) + +func getWriteRequest() *prompb.WriteRequest { + v := writeRequestPool.Get() + if v == nil { + return &prompb.WriteRequest{} + } + return v.(*prompb.WriteRequest) +} + +func putWriteRequest(wr *prompb.WriteRequest) { + wr.Reset() + writeRequestPool.Put(wr) +} + +var writeRequestPool sync.Pool diff --git a/lib/VictoriaMetrics/lib/regexutil/regex.go b/lib/VictoriaMetrics/lib/regexutil/regex.go new file mode 100644 index 0000000..8cdfacb --- /dev/null +++ b/lib/VictoriaMetrics/lib/regexutil/regex.go @@ -0,0 +1,209 @@ +package regexutil + +import ( + "regexp" + "regexp/syntax" + "strings" +) + +// Regex implements an optimized string matching for Go regex. +// +// The following regexs are optimized: +// +// - plain string such as "foobar" +// - alternate strings such as "foo|bar|baz" +// - prefix match such as "foo.*" or "foo.+" +// - substring match such as ".*foo.*" or ".+bar.+" +type Regex struct { + // exprStr is the original expression. 
+ exprStr string + + // prefix contains literal prefix for regex. + // For example, prefix="foo" for regex="foo(a|b)" + prefix string + + // isOnlyPrefix is set to true if the regex contains only the prefix. + isOnlyPrefix bool + + // isSuffixDotStar is set to true if suffix is ".*" + isSuffixDotStar bool + + // isSuffixDotPlus is set to true if suffix is ".+" + isSuffixDotPlus bool + + // substrDotStar contains literal string for regex suffix=".*string.*" + substrDotStar string + + // substrDotPlus contains literal string for regex suffix=".+string.+" + substrDotPlus string + + // orValues contains or values for the suffix regex. + // For example, orValues contain ["foo","bar","baz"] for regex suffix="foo|bar|baz" + orValues []string + + // suffixRe is the regexp for suffix + suffixRe *regexp.Regexp +} + +// NewRegex returns Regex for the given expr. +func NewRegex(expr string) (*Regex, error) { + if _, err := regexp.Compile(expr); err != nil { + return nil, err + } + + prefix, suffix := SimplifyRegex(expr) + sre := mustParseRegexp(suffix) + orValues := getOrValues(sre) + isOnlyPrefix := len(orValues) == 1 && orValues[0] == "" + isSuffixDotStar := isDotOp(sre, syntax.OpStar) + isSuffixDotPlus := isDotOp(sre, syntax.OpPlus) + substrDotStar := getSubstringLiteral(sre, syntax.OpStar) + substrDotPlus := getSubstringLiteral(sre, syntax.OpPlus) + + suffixAnchored := suffix + if len(prefix) > 0 { + suffixAnchored = "^(?:" + suffix + ")" + } + // The suffixAnchored must be properly compiled, since it has been already checked above. + // Otherwise it is a bug, which must be fixed. + suffixRe := regexp.MustCompile(suffixAnchored) + + r := &Regex{ + exprStr: expr, + prefix: prefix, + isOnlyPrefix: isOnlyPrefix, + isSuffixDotStar: isSuffixDotStar, + isSuffixDotPlus: isSuffixDotPlus, + substrDotStar: substrDotStar, + substrDotPlus: substrDotPlus, + orValues: orValues, + suffixRe: suffixRe, + } + return r, nil +} + +// MatchString returns true if s matches r. 
+func (r *Regex) MatchString(s string) bool { + if r.isOnlyPrefix { + return strings.Contains(s, r.prefix) + } + + if len(r.prefix) == 0 { + return r.matchStringNoPrefix(s) + } + return r.matchStringWithPrefix(s) +} + +// GetLiterals returns literals for r. +func (r *Regex) GetLiterals() []string { + sre := mustParseRegexp(r.exprStr) + for sre.Op == syntax.OpCapture { + sre = sre.Sub[0] + } + + v, ok := getLiteral(sre) + if ok { + return []string{v} + } + + if sre.Op != syntax.OpConcat { + return nil + } + + var a []string + for _, sub := range sre.Sub { + v, ok := getLiteral(sub) + if ok { + a = append(a, v) + } + } + return a +} + +// String returns string represetnation for r +func (r *Regex) String() string { + return r.exprStr +} + +func (r *Regex) matchStringNoPrefix(s string) bool { + if r.isSuffixDotStar { + return true + } + if r.isSuffixDotPlus { + return len(s) > 0 + } + if r.substrDotStar != "" { + // Fast path - r contains ".*someText.*" + return strings.Contains(s, r.substrDotStar) + } + if r.substrDotPlus != "" { + // Fast path - r contains ".+someText.+" + n := strings.Index(s, r.substrDotPlus) + return n > 0 && n+len(r.substrDotPlus) < len(s) + } + + if len(r.orValues) == 0 { + // Fall back to slow path by matching the suffix regexp. 
+ return r.suffixRe.MatchString(s) + } + + // Fast path - compare s to r.orValues + for _, v := range r.orValues { + if strings.Contains(s, v) { + return true + } + } + return false +} + +func (r *Regex) matchStringWithPrefix(s string) bool { + n := strings.Index(s, r.prefix) + if n < 0 { + // Fast path - s doesn't contain the needed prefix + return false + } + sNext := s[n+1:] + s = s[n+len(r.prefix):] + + if r.isSuffixDotStar { + return true + } + if r.isSuffixDotPlus { + return len(s) > 0 + } + if r.substrDotStar != "" { + // Fast path - r contains ".*someText.*" + return strings.Contains(s, r.substrDotStar) + } + if r.substrDotPlus != "" { + // Fast path - r contains ".+someText.+" + n := strings.Index(s, r.substrDotPlus) + return n > 0 && n+len(r.substrDotPlus) < len(s) + } + + for { + if len(r.orValues) == 0 { + // Fall back to slow path by matching the suffix regexp. + if r.suffixRe.MatchString(s) { + return true + } + } else { + // Fast path - compare s to r.orValues + for _, v := range r.orValues { + if strings.HasPrefix(s, v) { + return true + } + } + } + + // Mismatch. Try again starting from the next char. 
+ s = sNext + n := strings.Index(s, r.prefix) + if n < 0 { + // Fast path - s doesn't contain the needed prefix + return false + } + sNext = s[n+1:] + s = s[n+len(r.prefix):] + } +} diff --git a/lib/VictoriaMetrics/lib/regexutil/regex_test.go b/lib/VictoriaMetrics/lib/regexutil/regex_test.go new file mode 100644 index 0000000..a7208f9 --- /dev/null +++ b/lib/VictoriaMetrics/lib/regexutil/regex_test.go @@ -0,0 +1,173 @@ +package regexutil + +import ( + "reflect" + "testing" +) + +func TestNewRegexFailure(t *testing.T) { + f := func(expr string) { + t.Helper() + + r, err := NewRegex(expr) + if err == nil { + t.Fatalf("expecting non-nil error when parsing %q; got %q", expr, r) + } + } + + f("[foo") + f("(foo") +} + +func TestRegexMatchString(t *testing.T) { + f := func(expr, s string, resultExpected bool) { + t.Helper() + + r, err := NewRegex(expr) + if err != nil { + t.Fatalf("cannot parse %q: %s", expr, err) + } + exprResult := r.String() + if exprResult != expr { + t.Fatalf("unexpected string representation for %q: %q", expr, exprResult) + } + result := r.MatchString(s) + if result != resultExpected { + t.Fatalf("unexpected result when matching %q against regex=%q; got %v; want %v", s, expr, result, resultExpected) + } + } + + f("", "", true) + f("", "foo", true) + f("foo", "", false) + f(".*", "", true) + f(".*", "foo", true) + f(".+", "", false) + f(".+", "foo", true) + f("foo.*", "bar", false) + f("foo.*", "foo", true) + f("foo.*", "a foo", true) + f("foo.*", "a foo a", true) + f("foo.*", "foobar", true) + f("foo.*", "a foobar", true) + f("foo.+", "bar", false) + f("foo.+", "foo", false) + f("foo.+", "a foo", false) + f("foo.+", "foobar", true) + f("foo.+", "a foobar", true) + f("foo|bar", "", false) + f("foo|bar", "a", false) + f("foo|bar", "foo", true) + f("foo|bar", "a foo", true) + f("foo|bar", "foo a", true) + f("foo|bar", "a foo a", true) + f("foo|bar", "bar", true) + f("foo|bar", "foobar", true) + f("foo(bar|baz)", "a", false) + f("foo(bar|baz)", 
"foobar", true) + f("foo(bar|baz)", "foobaz", true) + f("foo(bar|baz)", "foobaza", true) + f("foo(bar|baz)", "a foobaz a", true) + f("foo(bar|baz)", "foobal", false) + f("^foo|b(ar)$", "foo", true) + f("^foo|b(ar)$", "foo a", true) + f("^foo|b(ar)$", "a foo", false) + f("^foo|b(ar)$", "bar", true) + f("^foo|b(ar)$", "a bar", true) + f("^foo|b(ar)$", "barz", false) + f("^foo|b(ar)$", "ar", false) + f(".*foo.*", "foo", true) + f(".*foo.*", "afoobar", true) + f(".*foo.*", "abc", false) + f("foo.*bar.*", "foobar", true) + f("foo.*bar.*", "foo_bar_", true) + f("foo.*bar.*", "a foo bar baz", true) + f("foo.*bar.*", "foobaz", false) + f("foo.*bar.*", "baz foo", false) + f(".+foo.+", "foo", false) + f(".+foo.+", "afoobar", true) + f(".+foo.+", "afoo", false) + f(".+foo.+", "abc", false) + f("foo.+bar.+", "foobar", false) + f("foo.+bar.+", "foo_bar_", true) + f("foo.+bar.+", "a foo_bar_", true) + f("foo.+bar.+", "foobaz", false) + f("foo.+bar.+", "abc", false) + f(".+foo.*", "foo", false) + f(".+foo.*", "afoo", true) + f(".+foo.*", "afoobar", true) + f(".*(a|b).*", "a", true) + f(".*(a|b).*", "ax", true) + f(".*(a|b).*", "xa", true) + f(".*(a|b).*", "xay", true) + f(".*(a|b).*", "xzy", false) + f("^(?:true)$", "true", true) + f("^(?:true)$", "false", false) + + f(".+;|;.+", ";", false) + f(".+;|;.+", "foo", false) + f(".+;|;.+", "foo;bar", true) + f(".+;|;.+", "foo;", true) + f(".+;|;.+", ";foo", true) + f(".+foo|bar|baz.+", "foo", false) + f(".+foo|bar|baz.+", "afoo", true) + f(".+foo|bar|baz.+", "fooa", false) + f(".+foo|bar|baz.+", "afooa", true) + f(".+foo|bar|baz.+", "bar", true) + f(".+foo|bar|baz.+", "abar", true) + f(".+foo|bar|baz.+", "abara", true) + f(".+foo|bar|baz.+", "bara", true) + f(".+foo|bar|baz.+", "baz", false) + f(".+foo|bar|baz.+", "baza", true) + f(".+foo|bar|baz.+", "abaz", false) + f(".+foo|bar|baz.+", "abaza", true) + f(".+foo|bar|baz.+", "afoo|bar|baza", true) + f(".+(foo|bar|baz).+", "bar", false) + f(".+(foo|bar|baz).+", "bara", false) + 
f(".+(foo|bar|baz).+", "abar", false) + f(".+(foo|bar|baz).+", "abara", true) + f(".+(foo|bar|baz).+", "afooa", true) + f(".+(foo|bar|baz).+", "abaza", true) + + f(".*;|;.*", ";", true) + f(".*;|;.*", "foo", false) + f(".*;|;.*", "foo;bar", true) + f(".*;|;.*", "foo;", true) + f(".*;|;.*", ";foo", true) + + f("^bar", "foobarbaz", false) + f("^foo", "foobarbaz", true) + f("bar$", "foobarbaz", false) + f("baz$", "foobarbaz", true) + f("(bar$|^foo)", "foobarbaz", true) + f("(bar$^boo)", "foobarbaz", false) + f("foo(bar|baz)", "a fooxfoobaz a", true) + f("foo(bar|baz)", "a fooxfooban a", false) + f("foo(bar|baz)", "a fooxfooban foobar a", true) +} + +func TestGetLiterals(t *testing.T) { + f := func(expr string, literalsExpected []string) { + t.Helper() + + r, err := NewRegex(expr) + if err != nil { + t.Fatalf("cannot parse %q: %s", expr, err) + } + literals := r.GetLiterals() + if !reflect.DeepEqual(literals, literalsExpected) { + t.Fatalf("unexpected literals; got %q; want %q", literals, literalsExpected) + } + } + + f("", nil) + f("foo bar baz", []string{"foo bar baz"}) + f("foo.*bar(a|b)baz.+", []string{"foo", "bar", "baz"}) + f("(foo[ab](?:bar))", []string{"foo", "bar"}) + f("foo|bar", nil) + f("(?i)foo", nil) + f("foo((?i)bar)baz", []string{"foo", "baz"}) + f("((foo|bar)baz xxx(?:yzabc))", []string{"baz xxxyzabc"}) + f("((foo|bar)baz xxx(?:yzabc)*)", []string{"baz xxx"}) + f("((foo|bar)baz? 
xxx(?:yzabc)*)", []string{"ba", " xxx"}) +} diff --git a/lib/VictoriaMetrics/lib/regexutil/regex_timing_test.go b/lib/VictoriaMetrics/lib/regexutil/regex_timing_test.go new file mode 100644 index 0000000..7faf8ab --- /dev/null +++ b/lib/VictoriaMetrics/lib/regexutil/regex_timing_test.go @@ -0,0 +1,111 @@ +package regexutil + +import ( + "fmt" + "regexp" + "testing" +) + +func BenchmarkRegexMatchString(b *testing.B) { + b.Run("unpotimized-noprefix-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "xbar.*|baz", "axbarz", true) + }) + b.Run("unpotimized-noprefix-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "xbar.*|baz", "zfoobaxz", false) + }) + b.Run("unpotimized-prefix-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo(bar.*|baz)", "afoobarz", true) + }) + b.Run("unpotimized-prefix-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo(bar.*|baz)", "zfoobaxz", false) + }) + b.Run("dot-star-match", func(b *testing.B) { + benchmarkRegexMatchString(b, ".*", "foo", true) + }) + b.Run("dot-plus-match", func(b *testing.B) { + benchmarkRegexMatchString(b, ".+", "foo", true) + }) + b.Run("dot-plus-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, ".+", "", false) + }) + b.Run("literal-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo", "afoobar", true) + }) + b.Run("literal-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo", "abaraa", false) + }) + b.Run("prefix-dot-star-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo.*", "afoobar", true) + }) + b.Run("prefix-dot-star-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo.*", "axoobar", false) + }) + b.Run("prefix-dot-plus-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo.+", "afoobar", true) + }) + b.Run("prefix-dot-plus-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo.+", "axoobar", false) + }) + b.Run("or-values-match", func(b *testing.B) { + 
benchmarkRegexMatchString(b, "foo|bar|baz", "abaz", true) + }) + b.Run("or-values-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "foo|bar|baz", "axaz", false) + }) + b.Run("prefix-or-values-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "x(foo|bar|baz)", "axbaz", true) + }) + b.Run("prefix-or-values-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "x(foo|bar|baz)", "aabaz", false) + }) + b.Run("substring-dot-star-match", func(b *testing.B) { + benchmarkRegexMatchString(b, ".*foo.*", "afoobar", true) + }) + b.Run("substring-dot-star-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, ".*foo.*", "abarbaz", false) + }) + b.Run("substring-dot-plus-match", func(b *testing.B) { + benchmarkRegexMatchString(b, ".+foo.+", "afoobar", true) + }) + b.Run("substring-dot-plus-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, ".+foo.+", "abarbaz", false) + }) + b.Run("prefix-substring-dot-star-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "a.*foo.*", "bafoobar", true) + }) + b.Run("prefix-substring-dot-star-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "a.*foo.*", "babarbaz", false) + }) + b.Run("prefix-substring-dot-plus-match", func(b *testing.B) { + benchmarkRegexMatchString(b, "a.+foo.+", "babfoobar", true) + }) + b.Run("prefix-substring-dot-plus-mismatch", func(b *testing.B) { + benchmarkRegexMatchString(b, "a.+foo.+", "babarbaz", false) + }) +} + +func benchmarkRegexMatchString(b *testing.B, expr, s string, resultExpected bool) { + r, err := NewRegex(expr) + if err != nil { + panic(fmt.Errorf("unexpected error: %w", err)) + } + re := regexp.MustCompile(expr) + f := func(b *testing.B, matchString func(s string) bool) { + b.SetBytes(1) + b.ReportAllocs() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + result := matchString(s) + if result != resultExpected { + panic(fmt.Errorf("unexpected result when matching %s against %s; got %v; want %v", s, expr, result, 
resultExpected)) + } + } + }) + } + b.Run("Regex", func(b *testing.B) { + f(b, r.MatchString) + }) + b.Run("StandardRegex", func(b *testing.B) { + f(b, re.MatchString) + }) +} diff --git a/lib/VictoriaMetrics/lib/regexutil/regexutil.go b/lib/VictoriaMetrics/lib/regexutil/regexutil.go new file mode 100644 index 0000000..e056db8 --- /dev/null +++ b/lib/VictoriaMetrics/lib/regexutil/regexutil.go @@ -0,0 +1,351 @@ +package regexutil + +import ( + "fmt" + "regexp/syntax" + "sort" + "strings" +) + +// RemoveStartEndAnchors removes '^' at the start of expr and '$' at the end of the expr. +func RemoveStartEndAnchors(expr string) string { + for strings.HasPrefix(expr, "^") { + expr = expr[1:] + } + for strings.HasSuffix(expr, "$") && !strings.HasSuffix(expr, "\\$") { + expr = expr[:len(expr)-1] + } + return expr +} + +// GetOrValuesRegex returns "or" values from the given regexp expr. +// +// It returns ["foo", "bar"] for "foo|bar" regexp. +// It returns ["foo"] for "foo" regexp. +// It returns [""] for "" regexp. +// It returns an empty list if it is impossible to extract "or" values from the regexp. +func GetOrValuesRegex(expr string) []string { + return getOrValuesRegex(expr, true) +} + +// GetOrValuesPromRegex returns "or" values from the given Prometheus-like regexp expr. +// +// It ignores start and end anchors ('^') and ('$') at the start and the end of expr. +// It returns ["foo", "bar"] for "foo|bar" regexp. +// It returns ["foo"] for "foo" regexp. +// It returns [""] for "" regexp. +// It returns an empty list if it is impossible to extract "or" values from the regexp. 
+func GetOrValuesPromRegex(expr string) []string { + expr = RemoveStartEndAnchors(expr) + return getOrValuesRegex(expr, false) +} + +func getOrValuesRegex(expr string, keepAnchors bool) []string { + prefix, tailExpr := simplifyRegex(expr, keepAnchors) + if tailExpr == "" { + return []string{prefix} + } + sre, err := parseRegexp(tailExpr) + if err != nil { + return nil + } + orValues := getOrValues(sre) + + // Sort orValues for faster index seek later + sort.Strings(orValues) + + if len(prefix) > 0 { + // Add prefix to orValues + for i, orValue := range orValues { + orValues[i] = prefix + orValue + } + } + + return orValues +} + +func getOrValues(sre *syntax.Regexp) []string { + switch sre.Op { + case syntax.OpCapture: + return getOrValues(sre.Sub[0]) + case syntax.OpLiteral: + v, ok := getLiteral(sre) + if !ok { + return nil + } + return []string{v} + case syntax.OpEmptyMatch: + return []string{""} + case syntax.OpAlternate: + a := make([]string, 0, len(sre.Sub)) + for _, reSub := range sre.Sub { + ca := getOrValues(reSub) + if len(ca) == 0 { + return nil + } + a = append(a, ca...) + if len(a) > maxOrValues { + // It is cheaper to use regexp here. + return nil + } + } + return a + case syntax.OpCharClass: + a := make([]string, 0, len(sre.Rune)/2) + for i := 0; i < len(sre.Rune); i += 2 { + start := sre.Rune[i] + end := sre.Rune[i+1] + for start <= end { + a = append(a, string(start)) + start++ + if len(a) > maxOrValues { + // It is cheaper to use regexp here. + return nil + } + } + } + return a + case syntax.OpConcat: + if len(sre.Sub) < 1 { + return []string{""} + } + prefixes := getOrValues(sre.Sub[0]) + if len(prefixes) == 0 { + return nil + } + if len(sre.Sub) == 1 { + return prefixes + } + sre.Sub = sre.Sub[1:] + suffixes := getOrValues(sre) + if len(suffixes) == 0 { + return nil + } + if len(prefixes)*len(suffixes) > maxOrValues { + // It is cheaper to use regexp here. 
+ return nil + } + a := make([]string, 0, len(prefixes)*len(suffixes)) + for _, prefix := range prefixes { + for _, suffix := range suffixes { + s := prefix + suffix + a = append(a, s) + } + } + return a + default: + return nil + } +} + +func getLiteral(sre *syntax.Regexp) (string, bool) { + if sre.Op == syntax.OpCapture { + return getLiteral(sre.Sub[0]) + } + if sre.Op == syntax.OpLiteral && sre.Flags&syntax.FoldCase == 0 { + return string(sre.Rune), true + } + return "", false +} + +const maxOrValues = 100 + +// SimplifyRegex simplifies the given regexp expr. +// +// It returns plaintext pefix and the remaining regular expression +// without capturing parens. +func SimplifyRegex(expr string) (string, string) { + prefix, suffix := simplifyRegex(expr, true) + sre := mustParseRegexp(suffix) + + if isDotOp(sre, syntax.OpStar) { + return prefix, "" + } + if sre.Op == syntax.OpConcat { + subs := sre.Sub + if prefix == "" { + // Drop .* at the start + for len(subs) > 0 && isDotOp(subs[0], syntax.OpStar) { + subs = subs[1:] + } + } + + // Drop .* at the end. + for len(subs) > 0 && isDotOp(subs[len(subs)-1], syntax.OpStar) { + subs = subs[:len(subs)-1] + } + + sre.Sub = subs + if len(subs) == 0 { + return prefix, "" + } + suffix = sre.String() + } + return prefix, suffix +} + +// SimplifyPromRegex simplifies the given Prometheus-like expr. +// +// It returns plaintext prefix and the remaining regular expression +// with dropped '^' and '$' anchors at the beginning and at the end +// of the regular expression. +// +// The function removes capturing parens from the expr, +// so it cannot be used when capturing parens are necessary. +func SimplifyPromRegex(expr string) (string, string) { + return simplifyRegex(expr, false) +} + +func simplifyRegex(expr string, keepAnchors bool) (string, string) { + sre, err := parseRegexp(expr) + if err != nil { + // Cannot parse the regexp. Return it all as prefix. 
+ return expr, "" + } + sre = simplifyRegexp(sre, keepAnchors, keepAnchors) + if sre == emptyRegexp { + return "", "" + } + v, ok := getLiteral(sre) + if ok { + return v, "" + } + var prefix string + if sre.Op == syntax.OpConcat { + prefix, ok = getLiteral(sre.Sub[0]) + if ok { + sre.Sub = sre.Sub[1:] + if len(sre.Sub) == 0 { + return prefix, "" + } + sre = simplifyRegexp(sre, true, keepAnchors) + } + } + if _, err := syntax.Compile(sre); err != nil { + // Cannot compile the regexp. Return it all as prefix. + return expr, "" + } + s := sre.String() + s = strings.ReplaceAll(s, "(?:)", "") + s = strings.ReplaceAll(s, "(?s:.)", ".") + s = strings.ReplaceAll(s, "(?m:$)", "$") + return prefix, s +} + +func simplifyRegexp(sre *syntax.Regexp, keepBeginOp, keepEndOp bool) *syntax.Regexp { + s := sre.String() + for { + sre = simplifyRegexpExt(sre, keepBeginOp, keepEndOp) + sre = sre.Simplify() + if !keepBeginOp && sre.Op == syntax.OpBeginText { + sre = emptyRegexp + } else if !keepEndOp && sre.Op == syntax.OpEndText { + sre = emptyRegexp + } + sNew := sre.String() + if sNew == s { + return sre + } + sre = mustParseRegexp(sNew) + s = sNew + } +} + +func simplifyRegexpExt(sre *syntax.Regexp, keepBeginOp, keepEndOp bool) *syntax.Regexp { + switch sre.Op { + case syntax.OpCapture: + // Substitute all the capture regexps with non-capture regexps. + sre.Op = syntax.OpAlternate + sre.Sub[0] = simplifyRegexpExt(sre.Sub[0], keepBeginOp, keepEndOp) + if sre.Sub[0] == emptyRegexp { + return emptyRegexp + } + return sre + case syntax.OpStar, syntax.OpPlus, syntax.OpQuest, syntax.OpRepeat: + sre.Sub[0] = simplifyRegexpExt(sre.Sub[0], keepBeginOp, keepEndOp) + if sre.Sub[0] == emptyRegexp { + return emptyRegexp + } + return sre + case syntax.OpAlternate: + // Do not remove empty captures from OpAlternate, since this may break regexp. 
+ for i, sub := range sre.Sub { + sre.Sub[i] = simplifyRegexpExt(sub, keepBeginOp, keepEndOp) + } + return sre + case syntax.OpConcat: + subs := sre.Sub[:0] + for i, sub := range sre.Sub { + sub = simplifyRegexpExt(sub, keepBeginOp || len(subs) > 0, keepEndOp || i+1 < len(sre.Sub)) + if sub != emptyRegexp { + subs = append(subs, sub) + } + } + sre.Sub = subs + // Remove anchros from the beginning and the end of regexp, since they + // will be added later. + if !keepBeginOp { + for len(sre.Sub) > 0 && sre.Sub[0].Op == syntax.OpBeginText { + sre.Sub = sre.Sub[1:] + } + } + if !keepEndOp { + for len(sre.Sub) > 0 && sre.Sub[len(sre.Sub)-1].Op == syntax.OpEndText { + sre.Sub = sre.Sub[:len(sre.Sub)-1] + } + } + if len(sre.Sub) == 0 { + return emptyRegexp + } + if len(sre.Sub) == 1 { + return sre.Sub[0] + } + return sre + case syntax.OpEmptyMatch: + return emptyRegexp + default: + return sre + } +} + +// getSubstringLiteral returns regex part from sre surrounded by .+ or .* depending on the prefixSuffixOp. +// +// For example, if sre=".+foo.+" and prefixSuffix=syntax.OpPlus, then the function returns "foo". +// +// An empty string is returned if sre doesn't contain the given prefixSuffixOp prefix and suffix. 
+func getSubstringLiteral(sre *syntax.Regexp, prefixSuffixOp syntax.Op) string { + if sre.Op != syntax.OpConcat || len(sre.Sub) != 3 { + return "" + } + if !isDotOp(sre.Sub[0], prefixSuffixOp) || !isDotOp(sre.Sub[2], prefixSuffixOp) { + return "" + } + v, ok := getLiteral(sre.Sub[1]) + if !ok { + return "" + } + return v +} + +func isDotOp(sre *syntax.Regexp, op syntax.Op) bool { + if sre.Op != op { + return false + } + return sre.Sub[0].Op == syntax.OpAnyChar +} + +var emptyRegexp = &syntax.Regexp{ + Op: syntax.OpEmptyMatch, +} + +func parseRegexp(expr string) (*syntax.Regexp, error) { + return syntax.Parse(expr, syntax.Perl|syntax.DotNL) +} + +func mustParseRegexp(expr string) *syntax.Regexp { + sre, err := parseRegexp(expr) + if err != nil { + panic(fmt.Errorf("BUG: cannot parse already verified regexp %q: %w", expr, err)) + } + return sre +} diff --git a/lib/VictoriaMetrics/lib/regexutil/regexutil_test.go b/lib/VictoriaMetrics/lib/regexutil/regexutil_test.go new file mode 100644 index 0000000..b9eb756 --- /dev/null +++ b/lib/VictoriaMetrics/lib/regexutil/regexutil_test.go @@ -0,0 +1,257 @@ +package regexutil + +import ( + "reflect" + "testing" +) + +func TestGetOrValuesRegex(t *testing.T) { + f := func(s string, valuesExpected []string) { + t.Helper() + values := GetOrValuesRegex(s) + if !reflect.DeepEqual(values, valuesExpected) { + t.Fatalf("unexpected values for s=%q; got %q; want %q", s, values, valuesExpected) + } + } + + f("", []string{""}) + f("foo", []string{"foo"}) + f("^foo$", nil) + f("|foo", []string{"", "foo"}) + f("|foo|", []string{"", "", "foo"}) + f("foo.+", nil) + f("foo.*", nil) + f(".*", nil) + f("foo|.*", nil) + f("(fo((o)))|(bar)", []string{"bar", "foo"}) + f("foobar", []string{"foobar"}) + f("z|x|c", []string{"c", "x", "z"}) + f("foo|bar", []string{"bar", "foo"}) + f("(foo|bar)", []string{"bar", "foo"}) + f("(foo|bar)baz", []string{"barbaz", "foobaz"}) + f("[a-z][a-z]", nil) + f("[a-d]", []string{"a", "b", "c", "d"}) + f("x[a-d]we", 
[]string{"xawe", "xbwe", "xcwe", "xdwe"}) + f("foo(bar|baz)", []string{"foobar", "foobaz"}) + f("foo(ba[rz]|(xx|o))", []string{"foobar", "foobaz", "fooo", "fooxx"}) + f("foo(?:bar|baz)x(qwe|rt)", []string{"foobarxqwe", "foobarxrt", "foobazxqwe", "foobazxrt"}) + f("foo(bar||baz)", []string{"foo", "foobar", "foobaz"}) + f("(a|b|c)(d|e|f|0|1|2)(g|h|k|x|y|z)", nil) + f("(?i)foo", nil) + f("(?i)(foo|bar)", nil) + f("^foo|bar$", nil) + f("^(foo|bar)$", nil) + f("^a(foo|b(?:a|r))$", nil) + f("^a(foo$|b(?:a$|r))$", nil) + f("^a(^foo|bar$)z$", nil) +} + +func TestGetOrValuesPromRegex(t *testing.T) { + f := func(s string, valuesExpected []string) { + t.Helper() + values := GetOrValuesPromRegex(s) + if !reflect.DeepEqual(values, valuesExpected) { + t.Fatalf("unexpected values for s=%q; got %q; want %q", s, values, valuesExpected) + } + } + + f("", []string{""}) + f("foo", []string{"foo"}) + f("^foo$", []string{"foo"}) + f("|foo", []string{"", "foo"}) + f("|foo|", []string{"", "", "foo"}) + f("foo.+", nil) + f("foo.*", nil) + f(".*", nil) + f("foo|.*", nil) + f("(fo((o)))|(bar)", []string{"bar", "foo"}) + f("foobar", []string{"foobar"}) + f("z|x|c", []string{"c", "x", "z"}) + f("foo|bar", []string{"bar", "foo"}) + f("(foo|bar)", []string{"bar", "foo"}) + f("(foo|bar)baz", []string{"barbaz", "foobaz"}) + f("[a-z][a-z]", nil) + f("[a-d]", []string{"a", "b", "c", "d"}) + f("x[a-d]we", []string{"xawe", "xbwe", "xcwe", "xdwe"}) + f("foo(bar|baz)", []string{"foobar", "foobaz"}) + f("foo(ba[rz]|(xx|o))", []string{"foobar", "foobaz", "fooo", "fooxx"}) + f("foo(?:bar|baz)x(qwe|rt)", []string{"foobarxqwe", "foobarxrt", "foobazxqwe", "foobazxrt"}) + f("foo(bar||baz)", []string{"foo", "foobar", "foobaz"}) + f("(a|b|c)(d|e|f|0|1|2)(g|h|k|x|y|z)", nil) + f("(?i)foo", nil) + f("(?i)(foo|bar)", nil) + f("^foo|bar$", []string{"bar", "foo"}) + f("^(foo|bar)$", []string{"bar", "foo"}) + f("^a(foo|b(?:a|r))$", []string{"aba", "abr", "afoo"}) + f("^a(foo$|b(?:a$|r))$", []string{"aba", "abr", 
"afoo"}) + f("^a(^foo|bar$)z$", nil) +} + +func TestSimplifyRegex(t *testing.T) { + f := func(s, expectedPrefix, expectedSuffix string) { + t.Helper() + prefix, suffix := SimplifyRegex(s) + if prefix != expectedPrefix { + t.Fatalf("unexpected prefix for s=%q; got %q; want %q", s, prefix, expectedPrefix) + } + if suffix != expectedSuffix { + t.Fatalf("unexpected suffix for s=%q; got %q; want %q", s, suffix, expectedSuffix) + } + } + + f("", "", "") + f(".*", "", "") + f(".*(.*).*", "", "") + f("foo.*", "foo", "") + f(".*foo.*", "", "foo") + f("^", "", "\\A") + f("$", "", "(?-m:$)") + f("^()$", "", "(?-m:\\A$)") + f("^(?:)$", "", "(?-m:\\A$)") + f("^foo|^bar$|baz", "", "(?-m:\\Afoo|\\Abar$|baz)") + f("^(foo$|^bar)$", "", "(?-m:\\A(?:foo$|\\Abar)$)") + f("^a(foo$|bar)$", "", "(?-m:\\Aa(?:foo$|bar)$)") + f("^a(^foo|bar$)z$", "", "(?-m:\\Aa(?:\\Afoo|bar$)z$)") + f("foobar", "foobar", "") + f("foo$|^foobar", "", "(?-m:foo$|\\Afoobar)") + f("^(foo$|^foobar)$", "", "(?-m:\\A(?:foo$|\\Afoobar)$)") + f("foobar|foobaz", "fooba", "[rz]") + f("(fo|(zar|bazz)|x)", "", "fo|zar|bazz|x") + f("(тестЧЧ|тест)", "тест", "ЧЧ|") + f("foo(bar|baz|bana)", "fooba", "[rz]|na") + f("^foobar|foobaz", "", "\\Afoobar|foobaz") + f("^foobar|^foobaz$", "", "(?-m:\\Afoobar|\\Afoobaz$)") + f("foobar|foobaz", "fooba", "[rz]") + f("(?:^foobar|^foobaz)aa.*", "", "(?:\\Afoobar|\\Afoobaz)aa") + f("foo[bar]+", "foo", "[abr]+") + f("foo[a-z]+", "foo", "[a-z]+") + f("foo[bar]*", "foo", "[abr]*") + f("foo[a-z]*", "foo", "[a-z]*") + f("foo[x]+", "foo", "x+") + f("foo[^x]+", "foo", "[^x]+") + f("foo[x]*", "foo", "x*") + f("foo[^x]*", "foo", "[^x]*") + f("foo[x]*bar", "foo", "x*bar") + f("fo\\Bo[x]*bar?", "fo", "\\Box*bar?") + f("foo.+bar", "foo", "(?s:.+bar)") + f("a(b|c.*).+", "a", "(?s:(?:b|c.*).+)") + f("ab|ac", "a", "[bc]") + f("(?i)xyz", "", "(?i:XYZ)") + f("(?i)foo|bar", "", "(?i:FOO|BAR)") + f("(?i)up.+x", "", "(?is:UP.+X)") + f("(?smi)xy.*z$", "", "(?ims:XY.*Z$)") + + // test invalid regexps + f("a(", 
"a(", "") + f("a[", "a[", "") + f("a[]", "a[]", "") + f("a{", "a{", "") + f("a{}", "a{}", "") + f("invalid(regexp", "invalid(regexp", "") + + // The transformed regexp mustn't match aba + f("a?(^ba|c)", "", "a?(?:\\Aba|c)") + + // The transformed regexp mustn't match barx + f("(foo|bar$)x*", "", "(?-m:(?:foo|bar$)x*)") + + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5297 + f(".+;|;.+", "", "(?s:.+;|;.+)") + f("^(.+);|;(.+)$", "", "(?s-m:\\A.+;|;.+$)") + f("^(.+);$|^;(.+)$", "", "(?s-m:\\A.+;$|\\A;.+$)") + f(".*;|;.*", "", "(?s:.*;|;.*)") + f("^(.*);|;(.*)$", "", "(?s-m:\\A.*;|;.*$)") + f("^(.*);$|^;(.*)$", "", "(?s-m:\\A.*;$|\\A;.*$)") +} + +func TestSimplifyPromRegex(t *testing.T) { + f := func(s, expectedPrefix, expectedSuffix string) { + t.Helper() + prefix, suffix := SimplifyPromRegex(s) + if prefix != expectedPrefix { + t.Fatalf("unexpected prefix for s=%q; got %q; want %q", s, prefix, expectedPrefix) + } + if suffix != expectedSuffix { + t.Fatalf("unexpected suffix for s=%q; got %q; want %q", s, suffix, expectedSuffix) + } + } + + f("", "", "") + f("^", "", "") + f("$", "", "") + f("^()$", "", "") + f("^(?:)$", "", "") + f("^foo|^bar$|baz", "", "foo|ba[rz]") + f("^(foo$|^bar)$", "", "foo|bar") + f("^a(foo$|bar)$", "a", "foo|bar") + f("^a(^foo|bar$)z$", "a", "(?-m:(?:\\Afoo|bar$)z)") + f("foobar", "foobar", "") + f("foo$|^foobar", "foo", "|bar") + f("^(foo$|^foobar)$", "foo", "|bar") + f("foobar|foobaz", "fooba", "[rz]") + f("(fo|(zar|bazz)|x)", "", "fo|zar|bazz|x") + f("(тестЧЧ|тест)", "тест", "ЧЧ|") + f("foo(bar|baz|bana)", "fooba", "[rz]|na") + f("^foobar|foobaz", "fooba", "[rz]") + f("^foobar|^foobaz$", "fooba", "[rz]") + f("foobar|foobaz", "fooba", "[rz]") + f("(?:^foobar|^foobaz)aa.*", "fooba", "(?s:[rz]aa.*)") + f("foo[bar]+", "foo", "[abr]+") + f("foo[a-z]+", "foo", "[a-z]+") + f("foo[bar]*", "foo", "[abr]*") + f("foo[a-z]*", "foo", "[a-z]*") + f("foo[x]+", "foo", "x+") + f("foo[^x]+", "foo", "[^x]+") + f("foo[x]*", "foo", "x*") + 
f("foo[^x]*", "foo", "[^x]*") + f("foo[x]*bar", "foo", "x*bar") + f("fo\\Bo[x]*bar?", "fo", "\\Box*bar?") + f("foo.+bar", "foo", "(?s:.+bar)") + f("a(b|c.*).+", "a", "(?s:(?:b|c.*).+)") + f("ab|ac", "a", "[bc]") + f("(?i)xyz", "", "(?i:XYZ)") + f("(?i)foo|bar", "", "(?i:FOO|BAR)") + f("(?i)up.+x", "", "(?is:UP.+X)") + f("(?smi)xy.*z$", "", "(?ims:XY.*Z$)") + + // test invalid regexps + f("a(", "a(", "") + f("a[", "a[", "") + f("a[]", "a[]", "") + f("a{", "a{", "") + f("a{}", "a{}", "") + f("invalid(regexp", "invalid(regexp", "") + + // The transformed regexp mustn't match aba + f("a?(^ba|c)", "", "a?(?:\\Aba|c)") + + // The transformed regexp mustn't match barx + f("(foo|bar$)x*", "", "(?-m:(?:foo|bar$)x*)") + + // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5297 + f(".+;|;.+", "", "(?s:.+;|;.+)") + f("^(.+);|;(.+)$", "", "(?s:.+;|;.+)") + f("^(.+);$|^;(.+)$", "", "(?s:.+;|;.+)") + f(".*;|;.*", "", "(?s:.*;|;.*)") + f("^(.*);|;(.*)$", "", "(?s:.*;|;.*)") + f("^(.*);$|^;(.*)$", "", "(?s:.*;|;.*)") +} + +func TestRemoveStartEndAnchors(t *testing.T) { + f := func(s, resultExpected string) { + t.Helper() + result := RemoveStartEndAnchors(s) + if result != resultExpected { + t.Fatalf("unexpected result for RemoveStartEndAnchors(%q); got %q; want %q", s, result, resultExpected) + } + } + f("", "") + f("a", "a") + f("^^abc", "abc") + f("a^b$c", "a^b$c") + f("$$abc^", "$$abc^") + f("^abc|de$", "abc|de") + f("abc\\$", "abc\\$") + f("^abc\\$$$", "abc\\$") + f("^a\\$b\\$$", "a\\$b\\$") +} diff --git a/lib/VictoriaMetrics/lib/syncwg/syncwg.go b/lib/VictoriaMetrics/lib/syncwg/syncwg.go new file mode 100644 index 0000000..7ed1da9 --- /dev/null +++ b/lib/VictoriaMetrics/lib/syncwg/syncwg.go @@ -0,0 +1,49 @@ +package syncwg + +import ( + "sync" +) + +// WaitGroup wraps sync.WaitGroup and makes safe to call Add/Wait +// from concurrent goroutines. +// +// An additional limitation is that call to Wait prohibits further calls to Add +// until return. 
+type WaitGroup struct { + sync.WaitGroup + mu sync.Mutex +} + +// Add registers n additional workers. Add may be called from concurrent goroutines. +func (wg *WaitGroup) Add(n int) { + wg.mu.Lock() + wg.WaitGroup.Add(n) + wg.mu.Unlock() +} + +// Wait waits until all the goroutines call Done. +// +// Wait may be called from concurrent goroutines. +// +// Further calls to Add are blocked until return from Wait. +func (wg *WaitGroup) Wait() { + wg.mu.Lock() + wg.WaitGroup.Wait() + wg.mu.Unlock() +} + +// WaitAndBlock waits until all the goroutines call Done and then prevents +// from new goroutines calling Add. +// +// Further calls to Add are always blocked. This is useful for graceful shutdown +// when other goroutines calling Add must be stopped. +// +// wg cannot be used after this call. +func (wg *WaitGroup) WaitAndBlock() { + wg.mu.Lock() + wg.WaitGroup.Wait() + + // Do not unlock wg.mu, so other goroutines calling Add are blocked. +} + +// There is no need in wrapping WaitGroup.Done, since it is already goroutine-safe. diff --git a/lib/VictoriaMetrics/lib/timeutil/timeutil.go b/lib/VictoriaMetrics/lib/timeutil/timeutil.go new file mode 100644 index 0000000..919bd0f --- /dev/null +++ b/lib/VictoriaMetrics/lib/timeutil/timeutil.go @@ -0,0 +1,19 @@ +package timeutil + +import ( + "time" + + "github.com/valyala/fastrand" +) + +// AddJitterToDuration adds up to 10% random jitter to d and returns the resulting duration. +// +// The maximum jitter is limited by 10 seconds. 
+func AddJitterToDuration(d time.Duration) time.Duration { + dv := d / 10 + if dv > 10*time.Second { + dv = 10 * time.Second + } + p := float64(fastrand.Uint32()) / (1 << 32) + return d + time.Duration(p*float64(dv)) +} diff --git a/lib/VictoriaMetrics/lib/timeutil/timeutil_test.go b/lib/VictoriaMetrics/lib/timeutil/timeutil_test.go new file mode 100644 index 0000000..55032fb --- /dev/null +++ b/lib/VictoriaMetrics/lib/timeutil/timeutil_test.go @@ -0,0 +1,27 @@ +package timeutil + +import ( + "testing" + "time" +) + +func TestAddJitterToDuration(t *testing.T) { + f := func(d time.Duration) { + t.Helper() + result := AddJitterToDuration(d) + if result < d { + t.Fatalf("unexpected negative jitter") + } + variance := (float64(result) - float64(d)) / float64(d) + if variance > 0.1 { + t.Fatalf("too big variance=%.2f for result=%s, d=%s; mustn't exceed 0.1", variance, result, d) + } + } + + f(time.Nanosecond) + f(time.Microsecond) + f(time.Millisecond) + f(time.Second) + f(time.Hour) + f(24 * time.Hour) +} diff --git a/lib/VictoriaMetrics/lib/timeutil/timezone.go b/lib/VictoriaMetrics/lib/timeutil/timezone.go new file mode 100644 index 0000000..c2f51f7 --- /dev/null +++ b/lib/VictoriaMetrics/lib/timeutil/timezone.go @@ -0,0 +1,30 @@ +package timeutil + +import ( + "sync/atomic" + "time" +) + +// GetLocalTimezoneOffsetNsecs returns local timezone offset in nanoseconds. +func GetLocalTimezoneOffsetNsecs() int64 { + return localTimezoneOffsetNsecs.Load() +} + +var localTimezoneOffsetNsecs atomic.Int64 + +func updateLocalTimezoneOffsetNsecs() { + _, offset := time.Now().Zone() + nsecs := int64(offset) * 1e9 + localTimezoneOffsetNsecs.Store(nsecs) +} + +func init() { + updateLocalTimezoneOffsetNsecs() + // Update local timezone offset in a loop, since it may change over the year due to DST. 
+ go func() { + t := time.NewTicker(5 * time.Second) + for range t.C { + updateLocalTimezoneOffsetNsecs() + } + }() +} diff --git a/lib/VictoriaMetrics/lib/workingsetcache/cache.go b/lib/VictoriaMetrics/lib/workingsetcache/cache.go new file mode 100644 index 0000000..7a96b1f --- /dev/null +++ b/lib/VictoriaMetrics/lib/workingsetcache/cache.go @@ -0,0 +1,354 @@ +package workingsetcache + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" + "github.com/VictoriaMetrics/fastcache" +) + +// Cache modes. +const ( + split = 0 + switching = 1 + whole = 2 +) + +// Cache is a cache for working set entries. +// +// The cache evicts inactive entries after the given expireDuration. +// Recently accessed entries survive expireDuration. +// +// Comparing to fastcache, this cache minimizes the required RAM size +// to values smaller than maxBytes. +type Cache struct { + curr atomic.Value + prev atomic.Value + + // mode indicates whether to use only curr and skip prev. + // + // This flag is set to switching if curr is filled for more than 50% space. + // In this case using prev would result in RAM waste, + // it is better to use only curr cache with doubled size. + // After the process of switching, this flag will be set to whole. + mode uint32 + + // The maximum cache size in bytes. + maxBytes int + + // mu serializes access to curr, prev and mode + // in expirationWatcher and cacheSizeWatcher. + mu sync.Mutex + + wg sync.WaitGroup + stopCh chan struct{} + + // cs holds cache stats + cs fastcache.Stats +} + +// Load loads the cache from filePath and limits its size to maxBytes +// and evicts inactive entires after expireDuration. +// +// Stop must be called on the returned cache when it is no longer needed. 
+func Load(filePath string, maxBytes int, expireDuration time.Duration) *Cache { + curr := fastcache.LoadFromFileOrNew(filePath, maxBytes) + var cs fastcache.Stats + curr.UpdateStats(&cs) + if cs.EntriesCount == 0 { + curr.Reset() + // The cache couldn't be loaded with maxBytes size. + // This may mean that the cache is split into curr and prev caches. + // Try loading it again with maxBytes / 2 size. + curr := fastcache.New(maxBytes / 2) + prev := fastcache.LoadFromFileOrNew(filePath, maxBytes/2) + c := newCacheInternal(curr, prev, maxBytes, split) + c.runWatchers(expireDuration) + return c + } + + // The cache has been successfully loaded in full. + // Set its' mode to `whole`. + // There is no need in runWatchers call. + prev := fastcache.New(1024) + return newCacheInternal(curr, prev, maxBytes, whole) +} + +// New creates new cache with the given maxBytes capcity and the given expireDuration +// for inactive entries. +// +// Stop must be called on the returned cache when it is no longer needed. 
+func New(maxBytes int, expireDuration time.Duration) *Cache { + curr := fastcache.New(maxBytes / 2) + prev := fastcache.New(1024) + c := newCacheInternal(curr, prev, maxBytes, split) + c.runWatchers(expireDuration) + return c +} + +func newCacheInternal(curr, prev *fastcache.Cache, maxBytes, mode int) *Cache { + var c Cache + c.maxBytes = maxBytes + c.curr.Store(curr) + c.prev.Store(prev) + c.stopCh = make(chan struct{}) + c.setMode(mode) + return &c +} + +func (c *Cache) runWatchers(expireDuration time.Duration) { + c.wg.Add(1) + go func() { + defer c.wg.Done() + c.expirationWatcher(expireDuration) + }() + c.wg.Add(1) + go func() { + defer c.wg.Done() + c.cacheSizeWatcher() + }() +} + +func (c *Cache) expirationWatcher(expireDuration time.Duration) { + t := time.NewTicker(expireDuration / 2) + for { + select { + case <-c.stopCh: + t.Stop() + return + case <-t.C: + } + + c.mu.Lock() + if atomic.LoadUint32(&c.mode) != split { + // Stop the expirationWatcher on non-split mode. + c.mu.Unlock() + return + } + // Expire prev cache and create fresh curr cache with c.maxBytes/2 capacity. + // Do not reuse prev cache, since it can have too big capacity. + prev := c.prev.Load().(*fastcache.Cache) + prev.Reset() + curr := c.curr.Load().(*fastcache.Cache) + c.prev.Store(curr) + curr = fastcache.New(c.maxBytes / 2) + c.curr.Store(curr) + c.mu.Unlock() + } +} + +func (c *Cache) cacheSizeWatcher() { + t := time.NewTicker(time.Minute) + defer t.Stop() + + for { + select { + case <-c.stopCh: + return + case <-t.C: + } + var cs fastcache.Stats + curr := c.curr.Load().(*fastcache.Cache) + curr.UpdateStats(&cs) + if cs.BytesSize >= uint64(0.95*float64(c.maxBytes)/2) { + break + } + } + + // curr cache size exceeds 50% of its capacity. It is better + // to double the size of curr cache and stop using prev cache, + // since this will result in higher summary cache capacity. 
+ // + // Do this in the following steps: + // 1) switch to mode=switching + // 2) move curr cache to prev + // 3) create curr with the double size + // 4) wait until curr size exceeds c.maxBytes/2, i.e. it is populated with new data + // 5) switch to mode=whole + // 6) drop prev + + c.mu.Lock() + c.setMode(switching) + prev := c.prev.Load().(*fastcache.Cache) + prev.Reset() + curr := c.curr.Load().(*fastcache.Cache) + c.prev.Store(curr) + c.curr.Store(fastcache.New(c.maxBytes)) + c.mu.Unlock() + + for { + select { + case <-c.stopCh: + return + case <-t.C: + } + var cs fastcache.Stats + curr := c.curr.Load().(*fastcache.Cache) + curr.UpdateStats(&cs) + if cs.BytesSize >= uint64(c.maxBytes)/2 { + break + } + } + + c.mu.Lock() + c.setMode(whole) + prev = c.prev.Load().(*fastcache.Cache) + prev.Reset() + c.prev.Store(fastcache.New(1024)) + c.mu.Unlock() +} + +// Save saves the cache to filePath. +func (c *Cache) Save(filePath string) error { + curr := c.curr.Load().(*fastcache.Cache) + concurrency := cgroup.AvailableCPUs() + return curr.SaveToFileConcurrent(filePath, concurrency) +} + +// Stop stops the cache. +// +// The cache cannot be used after the Stop call. +func (c *Cache) Stop() { + close(c.stopCh) + c.wg.Wait() + + c.Reset() +} + +// Reset resets the cache. +func (c *Cache) Reset() { + prev := c.prev.Load().(*fastcache.Cache) + prev.Reset() + curr := c.curr.Load().(*fastcache.Cache) + curr.Reset() + // Reset the mode to `split` in the hope the working set size becomes smaller after the reset. + c.setMode(split) +} + +func (c *Cache) setMode(mode int) { + atomic.StoreUint32(&c.mode, uint32(mode)) +} + +func (c *Cache) loadMode() int { + return int(atomic.LoadUint32(&c.mode)) +} + +// UpdateStats updates fcs with cache stats. 
+func (c *Cache) UpdateStats(fcs *fastcache.Stats) { + var cs fastcache.Stats + curr := c.curr.Load().(*fastcache.Cache) + curr.UpdateStats(&cs) + fcs.Collisions += cs.Collisions + fcs.Corruptions += cs.Corruptions + fcs.EntriesCount += cs.EntriesCount + fcs.BytesSize += cs.BytesSize + + fcs.GetCalls += atomic.LoadUint64(&c.cs.GetCalls) + fcs.SetCalls += atomic.LoadUint64(&c.cs.SetCalls) + fcs.Misses += atomic.LoadUint64(&c.cs.Misses) + + prev := c.prev.Load().(*fastcache.Cache) + cs.Reset() + prev.UpdateStats(&cs) + fcs.EntriesCount += cs.EntriesCount + fcs.BytesSize += cs.BytesSize +} + +// Get appends the found value for the given key to dst and returns the result. +func (c *Cache) Get(dst, key []byte) []byte { + atomic.AddUint64(&c.cs.GetCalls, 1) + curr := c.curr.Load().(*fastcache.Cache) + result := curr.Get(dst, key) + if len(result) > len(dst) { + // Fast path - the entry is found in the current cache. + return result + } + if c.loadMode() == whole { + // Nothing found. + atomic.AddUint64(&c.cs.Misses, 1) + return result + } + + // Search for the entry in the previous cache. + prev := c.prev.Load().(*fastcache.Cache) + result = prev.Get(dst, key) + if len(result) <= len(dst) { + // Nothing found. + atomic.AddUint64(&c.cs.Misses, 1) + return result + } + // Cache the found entry in the current cache. + curr.Set(key, result[len(dst):]) + return result +} + +// Has verifies whether the cache contains the given key. +func (c *Cache) Has(key []byte) bool { + atomic.AddUint64(&c.cs.GetCalls, 1) + curr := c.curr.Load().(*fastcache.Cache) + if curr.Has(key) { + return true + } + if c.loadMode() == whole { + atomic.AddUint64(&c.cs.Misses, 1) + return false + } + prev := c.prev.Load().(*fastcache.Cache) + if !prev.Has(key) { + atomic.AddUint64(&c.cs.Misses, 1) + return false + } + // Cache the found entry in the current cache. 
+ tmpBuf := tmpBufPool.Get() + tmpBuf.B = prev.Get(tmpBuf.B, key) + curr.Set(key, tmpBuf.B) + tmpBufPool.Put(tmpBuf) + return true +} + +var tmpBufPool bytesutil.ByteBufferPool + +// Set sets the given value for the given key. +func (c *Cache) Set(key, value []byte) { + atomic.AddUint64(&c.cs.SetCalls, 1) + curr := c.curr.Load().(*fastcache.Cache) + curr.Set(key, value) +} + +// GetBig appends the found value for the given key to dst and returns the result. +func (c *Cache) GetBig(dst, key []byte) []byte { + atomic.AddUint64(&c.cs.GetCalls, 1) + curr := c.curr.Load().(*fastcache.Cache) + result := curr.GetBig(dst, key) + if len(result) > len(dst) { + // Fast path - the entry is found in the current cache. + return result + } + if c.loadMode() == whole { + // Nothing found. + atomic.AddUint64(&c.cs.Misses, 1) + return result + } + + // Search for the entry in the previous cache. + prev := c.prev.Load().(*fastcache.Cache) + result = prev.GetBig(dst, key) + if len(result) <= len(dst) { + // Nothing found. + atomic.AddUint64(&c.cs.Misses, 1) + return result + } + // Cache the found entry in the current cache. + curr.SetBig(key, result[len(dst):]) + return result +} + +// SetBig sets the given value for the given key. +func (c *Cache) SetBig(key, value []byte) { + atomic.AddUint64(&c.cs.SetCalls, 1) + curr := c.curr.Load().(*fastcache.Cache) + curr.SetBig(key, value) +} diff --git a/lib/influxdb/LICENSE b/lib/influxdb/LICENSE new file mode 100644 index 0000000..cfd3bfe --- /dev/null +++ b/lib/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2018 InfluxData Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/lib/influxdb/cmd/parse.go b/lib/influxdb/cmd/parse.go new file mode 100644 index 0000000..7b140ed --- /dev/null +++ b/lib/influxdb/cmd/parse.go @@ -0,0 +1,29 @@ +// Package cmd is the root package of the various command-line utilities for InfluxDB. +package cmd + +import "strings" + +// ParseCommandName extracts the command name and args from the args list. +func ParseCommandName(args []string) (string, []string) { + // Retrieve command name as first argument. + var name string + if len(args) > 0 { + if !strings.HasPrefix(args[0], "-") { + name = args[0] + } else if args[0] == "-h" || args[0] == "-help" || args[0] == "--help" { + // Special case -h immediately following binary name + name = "help" + } + } + + // If command is "help" and has an argument then rewrite args to use "-h". 
+ if name == "help" && len(args) > 2 && !strings.HasPrefix(args[1], "-") { + return args[1], []string{"-h"} + } + + // If a named command is specified then return it with its arguments. + if name != "" { + return name, args[1:] + } + return "", args +} diff --git a/lib/influxdb/errors.go b/lib/influxdb/errors.go new file mode 100644 index 0000000..5cbf19f --- /dev/null +++ b/lib/influxdb/errors.go @@ -0,0 +1,31 @@ +package influxdb + +import ( + "errors" + "strings" +) + +// ErrFieldTypeConflict is returned when a new field already exists with a +// different type. +var ErrFieldTypeConflict = errors.New("field type conflict") + +// IsAuthorizationError indicates whether an error is due to an authorization failure +func IsAuthorizationError(err error) bool { + e, ok := err.(interface { + AuthorizationFailed() bool + }) + return ok && e.AuthorizationFailed() +} + +// IsClientError indicates whether an error is a known client error. +func IsClientError(err error) bool { + if err == nil { + return false + } + + if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { + return true + } + + return false +} diff --git a/lib/influxdb/go.mod b/lib/influxdb/go.mod new file mode 100644 index 0000000..69f35c7 --- /dev/null +++ b/lib/influxdb/go.mod @@ -0,0 +1,19 @@ +module github.com/influxdata/influxdb + +go 1.22 + +require ( + github.com/bytedance/sonic v1.11.6 + github.com/cespare/xxhash/v2 v2.3.0 + github.com/gogo/protobuf v1.3.2 + github.com/jsternberg/zap-logfmt v1.2.0 + github.com/mattn/go-isatty v0.0.20 + github.com/prometheus/prometheus v0.50.1 + github.com/xlab/treeprint v1.2.0 + go.uber.org/zap v1.27.0 +) + +require ( + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/sys v0.21.0 // indirect +) diff --git a/lib/influxdb/logger/config.go b/lib/influxdb/logger/config.go new file mode 100644 index 0000000..7282f26 --- /dev/null +++ b/lib/influxdb/logger/config.go @@ -0,0 +1,20 @@ +package logger + +import ( + "go.uber.org/zap/zapcore" +) + +// Config 
represents the configuration for creating a zap.Logger. +type Config struct { + Format string `toml:"format"` + Level zapcore.Level `toml:"level"` + SuppressLogo bool `toml:"suppress-logo"` +} + +// NewConfig returns a new instance of Config with defaults. +func NewConfig() Config { + return Config{ + Format: "auto", + Level: zapcore.InfoLevel, + } +} diff --git a/lib/influxdb/logger/fields.go b/lib/influxdb/logger/fields.go new file mode 100644 index 0000000..ed5be0e --- /dev/null +++ b/lib/influxdb/logger/fields.go @@ -0,0 +1,112 @@ +package logger + +import ( + "time" + + "github.com/influxdata/influxdb/pkg/snowflake" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + // OperationNameKey is the logging context key used for identifying name of an operation. + OperationNameKey = "op_name" + + // OperationEventKey is the logging context key used for identifying a notable + // event during the course of an operation. + OperationEventKey = "op_event" + + // OperationElapsedKey is the logging context key used for identifying time elapsed to finish an operation. + OperationElapsedKey = "op_elapsed" + + // DBInstanceKey is the logging context key used for identifying name of the relevant database. + DBInstanceKey = "db_instance" + + // DBRetentionKey is the logging context key used for identifying name of the relevant retention policy. + DBRetentionKey = "db_rp" + + // DBShardGroupKey is the logging context key used for identifying relevant shard group. + DBShardGroupKey = "db_shard_group" + + // DBShardIDKey is the logging context key used for identifying name of the relevant shard number. + DBShardIDKey = "db_shard_id" + + // TraceIDKey is the logging context key used for identifying unique traces. + TraceIDKey = "trace_id" +) + +const ( + eventStart = "start" + eventEnd = "end" +) + +var ( + gen = snowflake.New(0) +) + +func nextID() string { + return gen.NextString() +} + +// TraceID returns a field for tracking the trace identifier. 
+func TraceID(id string) zapcore.Field { + return zap.String(TraceIDKey, id) +} + +// OperationName returns a field for tracking the name of an operation. +func OperationName(name string) zapcore.Field { + return zap.String(OperationNameKey, name) +} + +// OperationElapsed returns a field for tracking the duration of an operation. +func OperationElapsed(d time.Duration) zapcore.Field { + return zap.Duration(OperationElapsedKey, d) +} + +// OperationEventStart returns a field for tracking the start of an operation. +func OperationEventStart() zapcore.Field { + return zap.String(OperationEventKey, eventStart) +} + +// OperationEventEnd returns a field for tracking the end of an operation. +func OperationEventEnd() zapcore.Field { + return zap.String(OperationEventKey, eventEnd) +} + +// Database returns a field for tracking the name of a database. +func Database(name string) zapcore.Field { + return zap.String(DBInstanceKey, name) +} + +// RetentionPolicy returns the retention policy. +func RetentionPolicy(name string) zapcore.Field { + return zap.String(DBRetentionKey, name) +} + +// ShardGroup returns a field for tracking the shard group identifier. +func ShardGroup(id uint64) zapcore.Field { + return zap.Uint64(DBShardGroupKey, id) +} + +// Shard returns a field for tracking the shard identifier. +func Shard(id uint64) zapcore.Field { + return zap.Uint64(DBShardIDKey, id) +} + +// NewOperation uses the exiting log to create a new logger with context +// containing a trace id and the operation. Prior to returning, a standardized message +// is logged indicating the operation has started. The returned function should be +// called when the operation concludes in order to log a corresponding message which +// includes an elapsed time and that the operation has ended. 
+func NewOperation(log *zap.Logger, msg, name string, fields ...zapcore.Field) (*zap.Logger, func()) { + f := []zapcore.Field{TraceID(nextID()), OperationName(name)} + if len(fields) > 0 { + f = append(f, fields...) + } + + now := time.Now() + log = log.With(f...) + log.Info(msg+" (start)", OperationEventStart()) + + return log, func() { log.Info(msg+" (end)", OperationEventEnd(), OperationElapsed(time.Since(now))) } +} diff --git a/lib/influxdb/logger/logger.go b/lib/influxdb/logger/logger.go new file mode 100644 index 0000000..d2586db --- /dev/null +++ b/lib/influxdb/logger/logger.go @@ -0,0 +1,89 @@ +package logger + +import ( + "fmt" + "io" + "time" + + zaplogfmt "github.com/jsternberg/zap-logfmt" + "github.com/mattn/go-isatty" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// TimeFormat represents the logger time format. +const TimeFormat = "2006-01-02T15:04:05.000000Z07:00" + +// New creates a new zap.Logger. +func New(w io.Writer) *zap.Logger { + config := NewConfig() + l, _ := config.New(w) + return l +} + +// New creates a new zap.Logger from config settings. +func (c *Config) New(defaultOutput io.Writer) (*zap.Logger, error) { + w := defaultOutput + format := c.Format + if format == "console" { + // Disallow the console logger if the output is not a terminal. + return nil, fmt.Errorf("unknown logging format: %s", format) + } + + // If the format is empty or auto, then set the format depending + // on whether or not a terminal is present. 
+ if format == "" || format == "auto" { + if IsTerminal(w) { + format = "console" + } else { + format = "logfmt" + } + } + + encoder, err := newEncoder(format) + if err != nil { + return nil, err + } + return zap.New(zapcore.NewCore( + encoder, + zapcore.Lock(zapcore.AddSync(w)), + c.Level, + ), zap.Fields(zap.String("log_id", nextID()))), nil +} + +func newEncoder(format string) (zapcore.Encoder, error) { + config := newEncoderConfig() + switch format { + case "json": + return zapcore.NewJSONEncoder(config), nil + case "console": + return zapcore.NewConsoleEncoder(config), nil + case "logfmt": + return zaplogfmt.NewEncoder(config), nil + default: + return nil, fmt.Errorf("unknown logging format: %s", format) + } +} + +func newEncoderConfig() zapcore.EncoderConfig { + config := zap.NewProductionEncoderConfig() + config.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { + encoder.AppendString(ts.UTC().Format(TimeFormat)) + } + config.EncodeDuration = func(d time.Duration, encoder zapcore.PrimitiveArrayEncoder) { + val := float64(d) / float64(time.Millisecond) + encoder.AppendString(fmt.Sprintf("%.3fms", val)) + } + config.LevelKey = "lvl" + return config +} + +// IsTerminal checks if w is a file and whether it is an interactive terminal session. +func IsTerminal(w io.Writer) bool { + if f, ok := w.(interface { + Fd() uintptr + }); ok { + return isatty.IsTerminal(f.Fd()) + } + return false +} diff --git a/lib/influxdb/models/inline_fnv.go b/lib/influxdb/models/inline_fnv.go new file mode 100644 index 0000000..eec1ae8 --- /dev/null +++ b/lib/influxdb/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models // import "github.com/influxdata/influxdb/models" + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. 
+type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/lib/influxdb/models/points.go b/lib/influxdb/models/points.go new file mode 100644 index 0000000..28db3ec --- /dev/null +++ b/lib/influxdb/models/points.go @@ -0,0 +1,332 @@ +// Package models implements basic objects used throughout the TICK stack. +package models // import "github.com/influxdata/influxdb/models" + +import ( + "bytes" + "sort" +) + +type escapeSet struct { + k [1]byte + esc [2]byte +} + +var ( + tagEscapeCodes = [...]escapeSet{ + {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, + {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, + {k: [1]byte{'='}, esc: [2]byte{'\\', '='}}, + } +) + +func escapeTag(in []byte) []byte { + for i := range tagEscapeCodes { + c := &tagEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.k[:], c.esc[:], -1) + } + } + return in +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// NewTag returns a new Tag. +func NewTag(key, value []byte) Tag { + return Tag{ + Key: key, + Value: value, + } +} + +// Size returns the size of the key and value. +func (t Tag) Size() int { return len(t.Key) + len(t.Value) } + +// Clone returns a shallow copy of Tag. +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision. 
+func (t Tag) Clone() Tag { + other := Tag{ + Key: make([]byte, len(t.Key)), + Value: make([]byte, len(t.Value)), + } + + copy(other.Key, t.Key) + copy(other.Value, t.Value) + + return other +} + +// String returns the string reprsentation of the tag. +func (t *Tag) String() string { + var buf bytes.Buffer + buf.WriteByte('{') + buf.WriteString(string(t.Key)) + buf.WriteByte(' ') + buf.WriteString(string(t.Value)) + buf.WriteByte('}') + return buf.String() +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, NewTag([]byte(k), []byte(v))) + } + sort.Sort(a) + return a +} + +// Keys returns the list of keys for a tag set. +func (a Tags) Keys() []string { + if len(a) == 0 { + return nil + } + keys := make([]string, len(a)) + for i, tag := range a { + keys[i] = string(tag.Key) + } + return keys +} + +// Values returns the list of values for a tag set. +func (a Tags) Values() []string { + if len(a) == 0 { + return nil + } + values := make([]string, len(a)) + for i, tag := range a { + values[i] = string(tag.Value) + } + return values +} + +// String returns the string representation of the tags. +func (a Tags) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i := range a { + buf.WriteString(a[i].String()) + if i < len(a)-1 { + buf.WriteByte(' ') + } + } + buf.WriteByte(']') + return buf.String() +} + +// Size returns the number of bytes needed to store all tags. Note, this is +// the number of bytes needed to store all keys and values and does not account +// for data structures or delimiters for example. 
+func (a Tags) Size() int { + var total int + for i := range a { + total += a[i].Size() + } + return total +} + +// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (a Tags) Clone() Tags { + if len(a) == 0 { + return nil + } + + others := make(Tags, len(a)) + for i := range a { + others[i] = a[i].Clone() + } + + return others +} + +// sorted returns true if a is sorted and is an optimization +// to avoid an allocation when calling sort.IsSorted, improving +// performance as much as 50%. +func (a Tags) sorted() bool { + for i := len(a) - 1; i > 0; i-- { + if bytes.Compare(a[i].Key, a[i-1].Key) == -1 { + return false + } + } + return true +} + +func (a Tags) Len() int { return len(a) } + +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } + +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Equal returns true if a equals other. +func (a Tags) Equal(other Tags) bool { + if len(a) != len(other) { + return false + } + for i := range a { + if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { + return false + } + } + return true +} + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. + + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. +func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. 
+func (a *Tags) Set(key, value []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + (*a)[i].Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Delete removes a tag by key. +func (a *Tags) Delete(key []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + copy((*a)[i:], (*a)[i+1:]) + (*a)[len(*a)-1] = Tag{} + *a = (*a)[:len(*a)-1] + return + } + } +} + +// Map returns a map representation of the tags. +func (a Tags) Map() map[string]string { + m := make(map[string]string, len(a)) + for _, t := range a { + m[string(t.Key)] = string(t.Value) + } + return m +} + +// Merge merges the tags combining the two. If both define a tag with the +// same key, the merged value overwrites the old value. +// A new map is returned. +func (a Tags) Merge(other map[string]string) Tags { + merged := make(map[string]string, len(a)+len(other)) + for _, t := range a { + merged[string(t.Key)] = string(t.Value) + } + for k, v := range other { + merged[k] = v + } + return NewTags(merged) +} + +// HashKey hashes all of a tag's keys. +func (a Tags) HashKey() []byte { + return a.AppendHashKey(nil) +} + +func (a Tags) needsEscape() bool { + for i := range a { + t := &a[i] + for j := range tagEscapeCodes { + c := &tagEscapeCodes[j] + if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 { + return true + } + } + } + return false +} + +// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer. +func (a Tags) AppendHashKey(dst []byte) []byte { + // Empty maps marshal to empty bytes. 
+ if len(a) == 0 { + return dst + } + + // Type invariant: Tags are sorted + + sz := 0 + var escaped Tags + if a.needsEscape() { + var tmp [20]Tag + if len(a) < len(tmp) { + escaped = tmp[:len(a)] + } else { + escaped = make(Tags, len(a)) + } + + for i := range a { + t := &a[i] + nt := &escaped[i] + nt.Key = escapeTag(t.Key) + nt.Value = escapeTag(t.Value) + sz += len(nt.Key) + len(nt.Value) + } + } else { + sz = a.Size() + escaped = a + } + + sz += len(escaped) + (len(escaped) * 2) // separators + + // Generate marshaled bytes. + if cap(dst)-len(dst) < sz { + nd := make([]byte, len(dst), len(dst)+sz) + copy(nd, dst) + dst = nd + } + buf := dst[len(dst) : len(dst)+sz] + idx := 0 + for i := range escaped { + k := &escaped[i] + if len(k.Value) == 0 { + continue + } + buf[idx] = ',' + idx++ + copy(buf[idx:], k.Key) + idx += len(k.Key) + buf[idx] = '=' + idx++ + copy(buf[idx:], k.Value) + idx += len(k.Value) + } + return dst[:len(dst)+idx] +} diff --git a/lib/influxdb/models/rows.go b/lib/influxdb/models/rows.go new file mode 100644 index 0000000..609152d --- /dev/null +++ b/lib/influxdb/models/rows.go @@ -0,0 +1,123 @@ +package models + +import ( + "math" + "sort" + + "github.com/bytedance/sonic" +) + +// Row represents a single row returned from the execution of a statement. +type Row struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Columns []string `json:"columns,omitempty"` + Values [][]interface{} `json:"values,omitempty"` + Partial bool `json:"partial,omitempty"` +} + +// This function is reserved.In the future, the scenarios of dividing by zero is supported, It can be changed to MarshalJSON. +func (r *Row) marshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. 
+ var o struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Columns []string `json:"columns,omitempty"` + Values [][]interface{} `json:"values,omitempty"` + Partial bool `json:"partial,omitempty"` + } + + // Copy fields to output struct. + o.Name = r.Name + o.Tags = r.Tags + o.Columns = r.Columns + o.Values = r.Values + + for i, value := range o.Values { + for i2 := range value { + switch v := value[i2].(type) { + case float64: + if math.IsNaN(v) { + o.Values[i][i2] = 0 + } + if math.IsInf(v, 1) { + o.Values[i][i2] = 0 + } + if math.IsInf(v, -1) { + o.Values[i][i2] = 0 + } + case float32: + if math.IsNaN(float64(v)) { + o.Values[i][i2] = 0 + } + if math.IsInf(float64(v), 1) { + o.Values[i][i2] = 0 + } + if math.IsInf(float64(v), -1) { + o.Values[i][i2] = 0 + } + } + } + } + o.Partial = r.Partial + return sonic.ConfigStd.Marshal(&o) +} + +// SameSeries returns true if r contains values for the same series as o. +func (r *Row) SameSeries(o *Row) bool { + if r.Name != o.Name { + return false + } + if len(r.Tags) != len(o.Tags) { + return false + } + for k, v1 := range r.Tags { + if v2, ok := o.Tags[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// tagsHash returns a hash of tag key/value pairs. +func (r *Row) tagsHash() uint64 { + h := NewInlineFNV64a() + keys := r.tagsKeys() + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte(r.Tags[k])) + } + return h.Sum64() +} + +// tagKeys returns a sorted list of tag keys. +func (r *Row) tagsKeys() []string { + a := make([]string, 0, len(r.Tags)) + for k := range r.Tags { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// Rows represents a collection of rows. Rows implements sort.Interface. +type Rows []*Row + +// Len implements sort.Interface. +func (p Rows) Len() int { return len(p) } + +// Less implements sort.Interface. +func (p Rows) Less(i, j int) bool { + // Sort by name first. 
+ if p[i].Name != p[j].Name { + return p[i].Name < p[j].Name + } + + // Sort by tag set hash. Tags don't have a meaningful sort order so we + // just compute a hash and sort by that instead. This allows the tests + // to receive rows in a predictable order every time. + return p[i].tagsHash() < p[j].tagsHash() +} + +// Swap implements sort.Interface. +func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/lib/influxdb/models/rows_test.go b/lib/influxdb/models/rows_test.go new file mode 100644 index 0000000..7f624bf --- /dev/null +++ b/lib/influxdb/models/rows_test.go @@ -0,0 +1,14 @@ +package models + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSameSeries(t *testing.T) { + var mst = "aaaaaabbbbbbbbbbccccccccc" + row1 := &Row{Name: mst, Tags: map[string]string{"a": "abb"}} + row2 := &Row{Name: mst, Tags: map[string]string{"aa": "bb"}} + assert.False(t, row1.SameSeries(row2)) +} diff --git a/lib/influxdb/models/time.go b/lib/influxdb/models/time.go new file mode 100644 index 0000000..7e15eb8 --- /dev/null +++ b/lib/influxdb/models/time.go @@ -0,0 +1,50 @@ +package models + +// Helper time methods since parsing time can easily overflow and we only support a +// specific time range. + +import ( + "fmt" + "math" + "time" +) + +const ( + // MinNanoTime is the minimum time that can be represented. + // + // 1677-09-21 00:12:43.145224194 +0000 UTC + // + // The two lowest minimum integers are used as sentinel values. The + // minimum value needs to be used as a value lower than any other value for + // comparisons and another separate value is needed to act as a sentinel + // default value that is unusable by the user, but usable internally. + // Because these two values need to be used for a special purpose, we do + // not allow users to write points at these two times. + MinNanoTime = int64(math.MinInt64) + 2 + + // MaxNanoTime is the maximum time that can be represented. 
+ // + // 2262-04-11 23:47:16.854775806 +0000 UTC + // + // The highest time represented by a nanosecond needs to be used for an + // exclusive range in the shard group, so the maximum time needs to be one + // less than the possible maximum number of nanoseconds representable by an + // int64 so that we don't lose a point at that one time. + MaxNanoTime = int64(math.MaxInt64) - 1 +) + +var ( + minNanoTime = time.Unix(0, MinNanoTime).UTC() + maxNanoTime = time.Unix(0, MaxNanoTime).UTC() + + // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. + ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) +) + +// CheckTime checks that a time is within the safe range. +func CheckTime(t time.Time) error { + if t.Before(minNanoTime) || t.After(maxNanoTime) { + return ErrTimeOutOfRange + } + return nil +} diff --git a/lib/influxdb/monitor/diagnostics/diagnostics.go b/lib/influxdb/monitor/diagnostics/diagnostics.go new file mode 100644 index 0000000..32c5aa4 --- /dev/null +++ b/lib/influxdb/monitor/diagnostics/diagnostics.go @@ -0,0 +1,64 @@ +// Package diagnostics provides the diagnostics type so that +// other packages can provide diagnostics without depending on the monitor package. +package diagnostics // import "github.com/influxdata/influxdb/monitor/diagnostics" + +import "sort" + +// Client is the interface modules implement if they register diagnostics with monitor. +type Client interface { + Diagnostics() (*Diagnostics, error) +} + +// The ClientFunc type is an adapter to allow the use of +// ordinary functions as Diagnostics clients. +type ClientFunc func() (*Diagnostics, error) + +// Diagnostics calls f(). +func (f ClientFunc) Diagnostics() (*Diagnostics, error) { + return f() +} + +// Diagnostics represents a table of diagnostic information. 
The first value +// is the name of the columns, the second is a slice of interface slices containing +// the values for each column, by row. This information is never written to an InfluxDB +// system and is display-only. An example showing, say, connections follows: +// +// source_ip source_port dest_ip dest_port +// 182.1.0.2 2890 127.0.0.1 38901 +// 174.33.1.2 2924 127.0.0.1 38902 +type Diagnostics struct { + Columns []string + Rows [][]interface{} +} + +// NewDiagnostics initialises a new Diagnostics with the specified columns. +func NewDiagnostics(columns []string) *Diagnostics { + return &Diagnostics{ + Columns: columns, + Rows: make([][]interface{}, 0), + } +} + +// AddRow appends the provided row to the Diagnostics' rows. +func (d *Diagnostics) AddRow(r []interface{}) { + d.Rows = append(d.Rows, r) +} + +// RowFromMap returns a new one-row Diagnostics from a map. +func RowFromMap(m map[string]interface{}) *Diagnostics { + // Display columns in deterministic order. + sortedKeys := make([]string, 0, len(m)) + for k := range m { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + d := NewDiagnostics(sortedKeys) + row := make([]interface{}, len(sortedKeys)) + for i, k := range sortedKeys { + row[i] = m[k] + } + d.AddRow(row) + + return d +} diff --git a/lib/influxdb/pkg/bloom/bloom.go b/lib/influxdb/pkg/bloom/bloom.go new file mode 100644 index 0000000..23e2806 --- /dev/null +++ b/lib/influxdb/pkg/bloom/bloom.go @@ -0,0 +1,129 @@ +package bloom + +// NOTE: +// This package implements a limited bloom filter implementation based on +// Will Fitzgerald's bloom & bitset packages. It uses a zero-allocation xxhash +// implementation, rather than murmur3. It's implemented locally to support +// zero-copy memory-mapped slices. +// +// This also optimizes the filter by always using a bitset size with a power of 2. + +import ( + "fmt" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Filter represents a bloom filter. 
+type Filter struct {
+	k    uint64
+	b    []byte
+	mask uint64
+}
+
+// NewFilterBuffer returns a new instance of a filter using a backing buffer.
+// The buffer length MUST be a power of 2.
+func NewFilterBuffer(buf []byte, k uint64) (*Filter, error) {
+	m := pow2(uint64(len(buf)) * 8)
+	if m != uint64(len(buf))*8 {
+		return nil, fmt.Errorf("bloom.Filter: buffer bit count must be a power of two: %d/%d", len(buf)*8, m)
+	}
+	return &Filter{k: k, b: buf, mask: m - 1}, nil
+}
+
+// Len returns the length of the filter's backing buffer, in bytes.
+func (f *Filter) Len() uint { return uint(len(f.b)) }
+
+// K returns the number of hash functions used in the filter.
+func (f *Filter) K() uint64 { return f.k }
+
+// Bytes returns the underlying backing slice.
+func (f *Filter) Bytes() []byte { return f.b }
+
+// Clone returns a copy of f.
+func (f *Filter) Clone() *Filter {
+	other := &Filter{k: f.k, b: make([]byte, len(f.b)), mask: f.mask}
+	copy(other.b, f.b)
+	return other
+}
+
+// Insert inserts data to the filter.
+func (f *Filter) Insert(v []byte) {
+	h := f.hash(v)
+	for i := uint64(0); i < f.k; i++ {
+		loc := f.location(h, i)
+		f.b[loc>>3] |= 1 << (loc & 7)
+	}
+}
+
+// Contains returns true if the filter possibly contains v.
+// Returns false if the filter definitely does not contain v.
+func (f *Filter) Contains(v []byte) bool {
+	h := f.hash(v)
+	for i := uint64(0); i < f.k; i++ {
+		loc := f.location(h, i)
+		if f.b[loc>>3]&(1<<(loc&7)) == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Merge performs an in-place union of other into f.
+// Returns an error if m or k of the filters differs.
+func (f *Filter) Merge(other *Filter) error {
+	if other == nil {
+		return nil
+	}
+
+	// Ensure m & k fields match.
+	if len(f.b) != len(other.b) {
+		return fmt.Errorf("bloom.Filter.Merge(): m mismatch: %d <> %d", len(f.b), len(other.b))
+	} else if f.k != other.k {
+		return fmt.Errorf("bloom.Filter.Merge(): k mismatch: %d <> %d", f.k, other.k)
+	}
+
+	// Perform union of each byte.
+	for i := range f.b {
+		f.b[i] |= other.b[i]
+	}
+
+	return nil
+}
+
+// location returns the ith hashed location using two hash values.
+func (f *Filter) location(h [2]uint64, i uint64) uint {
+	return uint((h[0] + h[1]*i) & f.mask)
+}
+
+// hash returns two 64-bit hashes based on the output of xxhash.
+func (f *Filter) hash(data []byte) [2]uint64 {
+	v1 := xxhash.Sum64(data)
+	var v2 uint64
+	if len(data) > 0 {
+		b := data[len(data)-1] // We'll put the original byte back.
+		data[len(data)-1] = byte(0)
+		v2 = xxhash.Sum64(data)
+		data[len(data)-1] = b
+	}
+	return [2]uint64{v1, v2}
+}
+
+// Estimate returns an estimated bit count and hash count given the element count and false positive rate.
+func Estimate(n uint64, p float64) (m uint64, k uint64) {
+	m = uint64(math.Ceil(-1 * float64(n) * math.Log(p) / math.Pow(math.Log(2), 2)))
+	k = uint64(math.Ceil(math.Log(2) * float64(m) / float64(n)))
+	return m, k
+}
+
+// pow2 returns the number that is the next highest power of 2.
+// Returns v if it is a power of 2.
+func pow2(v uint64) uint64 {
+	for i := uint64(8); i < 1<<62; i *= 2 {
+		if i >= v {
+			return i
+		}
+	}
+	panic("unreachable")
+}
diff --git a/lib/influxdb/pkg/limiter/fixed.go b/lib/influxdb/pkg/limiter/fixed.go
new file mode 100644
index 0000000..19d967d
--- /dev/null
+++ b/lib/influxdb/pkg/limiter/fixed.go
@@ -0,0 +1,46 @@
+// Package limiter provides concurrency limiters.
+package limiter
+
+// Fixed is a simple channel-based concurrency limiter. It uses a fixed
+// size channel to limit callers from proceeding until there is a value available
+// in the channel. If all are in-use, the caller blocks until one is freed.
+type Fixed chan struct{}
+
+func NewFixed(limit int) Fixed {
+	return make(Fixed, limit)
+}
+
+// Idle returns true if all of the limiter's capacity is available.
+func (t Fixed) Idle() bool {
+	return len(t) == 0
+}
+
+// Available returns the number of available tokens that may be taken.
+func (t Fixed) Available() int { + return cap(t) - len(t) +} + +// Capacity returns the number of tokens can be taken. +func (t Fixed) Capacity() int { + return cap(t) +} + +// TryTake attempts to take a token and return true if successful, otherwise returns false. +func (t Fixed) TryTake() bool { + select { + case t <- struct{}{}: + return true + default: + return false + } +} + +// Take attempts to take a token and blocks until one is available. +func (t Fixed) Take() { + t <- struct{}{} +} + +// Release releases a token back to the limiter. +func (t Fixed) Release() { + <-t +} diff --git a/lib/influxdb/pkg/limiter/writer.go b/lib/influxdb/pkg/limiter/writer.go new file mode 100644 index 0000000..d5b168e --- /dev/null +++ b/lib/influxdb/pkg/limiter/writer.go @@ -0,0 +1,63 @@ +package limiter + +import ( + "context" + "io" + "os" +) + +type Writer struct { + w io.WriteCloser + limiter Rate + ctx context.Context +} + +type Rate interface { + WaitN(ctx context.Context, n int) error + Burst() int +} + +// Write writes bytes from b. 
+func (s *Writer) Write(b []byte) (int, error) { + if s.limiter == nil { + return s.w.Write(b) + } + + var n int + for n < len(b) { + wantToWriteN := len(b[n:]) + if wantToWriteN > s.limiter.Burst() { + wantToWriteN = s.limiter.Burst() + } + + wroteN, err := s.w.Write(b[n : n+wantToWriteN]) + if err != nil { + return n, err + } + n += wroteN + + if err := s.limiter.WaitN(s.ctx, wroteN); err != nil { + return n, err + } + } + + return n, nil +} + +func (s *Writer) Sync() error { + if f, ok := s.w.(*os.File); ok { + return f.Sync() + } + return nil +} + +func (s *Writer) Name() string { + if f, ok := s.w.(*os.File); ok { + return f.Name() + } + return "" +} + +func (s *Writer) Close() error { + return s.w.Close() +} diff --git a/lib/influxdb/pkg/snowflake/gen.go b/lib/influxdb/pkg/snowflake/gen.go new file mode 100644 index 0000000..c245f1b --- /dev/null +++ b/lib/influxdb/pkg/snowflake/gen.go @@ -0,0 +1,124 @@ +package snowflake + +import ( + "fmt" + "sync/atomic" + "time" +) + +const ( + epoch = 1491696000000 + serverBits = 10 + sequenceBits = 12 + timeBits = 42 + serverShift = sequenceBits + timeShift = sequenceBits + serverBits + serverMax = ^(-1 << serverBits) + sequenceMask = ^(-1 << sequenceBits) + timeMask = ^(-1 << timeBits) +) + +type Generator struct { + state uint64 + machine uint64 +} + +func New(machineID int) *Generator { + if machineID < 0 || machineID > serverMax { + panic(fmt.Errorf("invalid machine id; must be 0 ≤ id < %d", serverMax)) + } + return &Generator{ + state: 0, + machine: uint64(machineID << serverShift), + } +} + +func (g *Generator) MachineID() int { + return int(g.machine >> serverShift) +} + +func (g *Generator) Next() uint64 { + var state uint64 + + // we attempt 100 times to update the millisecond part of the state + // and increment the sequence atomically. each attempt is approx ~30ns + // so we spend around ~3µs total. 
+ for i := 0; i < 100; i++ { + t := (now() - epoch) & timeMask + current := atomic.LoadUint64(&g.state) + currentTime := current >> timeShift & timeMask + currentSeq := current & sequenceMask + + // this sequence of conditionals ensures a monotonically increasing + // state. + + switch { + // if our time is in the future, use that with a zero sequence number. + case t > currentTime: + state = t << timeShift + + // we now know that our time is at or before the current time. + // if we're at the maximum sequence, bump to the next millisecond + case currentSeq == sequenceMask: + state = (currentTime + 1) << timeShift + + // otherwise, increment the sequence. + default: + state = current + 1 + } + + if atomic.CompareAndSwapUint64(&g.state, current, state) { + break + } + + state = 0 + } + + // since we failed 100 times, there's high contention. bail out of the + // loop to bound the time we'll spend in this method, and just add + // one to the counter. this can cause millisecond drift, but hopefully + // some CAS eventually succeeds and fixes the milliseconds. additionally, + // if the sequence is already at the maximum, adding 1 here can cause + // it to roll over into the machine id. giving the CAS 100 attempts + // helps to avoid these problems. 
+ if state == 0 { + state = atomic.AddUint64(&g.state, 1) + } + + return state | g.machine +} + +func (g *Generator) NextString() string { + var s [11]byte + encode(&s, g.Next()) + return string(s[:]) +} + +func (g *Generator) AppendNext(s *[11]byte) { + encode(s, g.Next()) +} + +func now() uint64 { return uint64(time.Now().UnixNano() / 1e6) } + +var digits = [...]byte{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', '_', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', + 'x', 'y', 'z', '~'} + +func encode(s *[11]byte, n uint64) { + s[10], n = digits[n&0x3f], n>>6 + s[9], n = digits[n&0x3f], n>>6 + s[8], n = digits[n&0x3f], n>>6 + s[7], n = digits[n&0x3f], n>>6 + s[6], n = digits[n&0x3f], n>>6 + s[5], n = digits[n&0x3f], n>>6 + s[4], n = digits[n&0x3f], n>>6 + s[3], n = digits[n&0x3f], n>>6 + s[2], n = digits[n&0x3f], n>>6 + s[1], n = digits[n&0x3f], n>>6 + s[0] = digits[n&0x3f] +} diff --git a/lib/influxdb/pkg/tlsconfig/tls_config.go b/lib/influxdb/pkg/tlsconfig/tls_config.go new file mode 100644 index 0000000..05735c1 --- /dev/null +++ b/lib/influxdb/pkg/tlsconfig/tls_config.go @@ -0,0 +1,133 @@ +package tlsconfig + +import ( + "crypto/tls" + "fmt" + "sort" + "strings" +) + +type Config struct { + Ciphers []string `toml:"ciphers"` + MinVersion string `toml:"min-version"` + MaxVersion string `toml:"max-version"` +} + +func (c Config) Validate() error { + _, err := c.Parse() + return err +} + +func (c Config) Parse() (out *tls.Config, err error) { + if len(c.Ciphers) > 0 { + if out == nil { + out = new(tls.Config) + } + + for _, name := range c.Ciphers { + cipher, ok := ciphersMap[strings.ToUpper(name)] + if !ok { + return nil, unknownCipher(name) + } + out.CipherSuites = append(out.CipherSuites, cipher) + } + } + + if c.MinVersion != "" { + if out == nil { + 
out = new(tls.Config) + } + + version, ok := versionsMap[strings.ToUpper(c.MinVersion)] + if !ok { + return nil, unknownVersion(c.MinVersion) + } + out.MinVersion = version + } + + if c.MaxVersion != "" { + if out == nil { + out = new(tls.Config) + } + + version, ok := versionsMap[strings.ToUpper(c.MaxVersion)] + if !ok { + return nil, unknownVersion(c.MaxVersion) + } + out.MaxVersion = version + } + + return out, nil +} + +var ciphersMap = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + // TLS 1.3 cipher suites. + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, + // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator + // that the client is doing version fallback. See RFC 7507. + "TLS_FALLBACK_SCSV": tls.TLS_FALLBACK_SCSV, +} + +func unknownCipher(name string) error { + available := make([]string, 0, len(ciphersMap)) + for name := range ciphersMap { + available = append(available, name) + } + sort.Strings(available) + + return fmt.Errorf("unknown cipher suite: %q. available ciphers: %s", + name, strings.Join(available, ", ")) +} + +var versionsMap = map[string]uint16{ + "SSL3.0": tls.VersionSSL30, + "TLS1.0": tls.VersionTLS10, + "1.0": tls.VersionTLS10, + "TLS1.1": tls.VersionTLS11, + "1.1": tls.VersionTLS11, + "TLS1.2": tls.VersionTLS12, + "1.2": tls.VersionTLS12, + "TLS1.3": tls.VersionTLS13, + "1.3": tls.VersionTLS13, +} + +func unknownVersion(name string) error { + available := make([]string, 0, len(versionsMap)) + for name := range versionsMap { + // skip the ones that just begin with a number. they may be confusing + // due to the duplication, and just help if the user specifies without + // the TLS part. + if name[0] == '1' { + continue + } + available = append(available, name) + } + sort.Strings(available) + + return fmt.Errorf("unknown tls version: %q. 
available versions: %s",
+		name, strings.Join(available, ", "))
+}
diff --git a/lib/influxdb/pkg/tracing/fields/field.go b/lib/influxdb/pkg/tracing/fields/field.go
new file mode 100644
index 0000000..bc96be1
--- /dev/null
+++ b/lib/influxdb/pkg/tracing/fields/field.go
@@ -0,0 +1,117 @@
+package fields
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+type fieldType int
+
+const (
+	stringType fieldType = iota
+	boolType
+	int64Type
+	uint64Type
+	durationType
+	float64Type
+)
+
+// Field instances are constructed via Bool, String, and so on.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/opentracing/opentracing-go/log
+type Field struct {
+	key        string
+	fieldType  fieldType
+	numericVal int64
+	stringVal  string
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+	return Field{
+		key:       key,
+		fieldType: stringType,
+		stringVal: val,
+	}
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+	var numericVal int64
+	if val {
+		numericVal = 1
+	}
+	return Field{
+		key:        key,
+		fieldType:  boolType,
+		numericVal: numericVal,
+	}
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+	return Field{
+		key:        key,
+		fieldType:  int64Type,
+		numericVal: val,
+	}
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint64Type,
+		numericVal: int64(val),
+	}
+}
+
+// Duration adds a time.Duration-valued key:value pair to a Span.LogFields() record
+func Duration(key string, val time.Duration) Field {
+	return Field{
+		key:        key,
+		fieldType:  durationType,
+		numericVal: int64(val),
+	}
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+	return Field{
+		key:       key,
+		fieldType: 
float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. +func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case int64Type: + return int64(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case durationType: + return time.Duration(lf.numericVal) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ": ", lf.Value()) +} diff --git a/lib/influxdb/pkg/tracing/fields/fields.go b/lib/influxdb/pkg/tracing/fields/fields.go new file mode 100644 index 0000000..825cf25 --- /dev/null +++ b/lib/influxdb/pkg/tracing/fields/fields.go @@ -0,0 +1,61 @@ +package fields + +import "sort" + +type Fields []Field + +// Merge merges other with the current set, replacing any matching keys from other. +func (fs *Fields) Merge(other Fields) { + var list []Field + i, j := 0, 0 + for i < len(*fs) && j < len(other) { + if (*fs)[i].key < other[j].key { + list = append(list, (*fs)[i]) + i++ + } else if (*fs)[i].key > other[j].key { + list = append(list, other[j]) + j++ + } else { + // equal, then "other" replaces existing key + list = append(list, other[j]) + i++ + j++ + } + } + + if i < len(*fs) { + list = append(list, (*fs)[i:]...) + } else if j < len(other) { + list = append(list, other[j:]...) + } + + *fs = list +} + +// New creates a new set of fields, sorted by Key. +// Duplicate keys are removed. 
+func New(args ...Field) Fields { + fields := Fields(args) + sort.Slice(fields, func(i, j int) bool { + return fields[i].key < fields[j].key + }) + + // deduplicate + // loop invariant: fields[:i] has no duplicates + for i := 0; i < len(fields)-1; i++ { + j := i + 1 + // find all duplicate keys + for j < len(fields) && fields[i].key == fields[j].key { + j++ + } + + d := (j - 1) - i // number of duplicate keys + if d > 0 { + // copy over duplicate keys in order to maintain loop invariant + copy(fields[i+1:], fields[j:]) + fields = fields[:len(fields)-d] + } + } + + return fields +} diff --git a/lib/influxdb/pkg/tracing/labels/labels.go b/lib/influxdb/pkg/tracing/labels/labels.go new file mode 100644 index 0000000..90afda7 --- /dev/null +++ b/lib/influxdb/pkg/tracing/labels/labels.go @@ -0,0 +1,74 @@ +package labels + +import "sort" + +type Label struct { + Key, Value string +} + +// The Labels type represents a set of labels, sorted by Key. +type Labels []Label + +// Merge merges other with the current set, replacing any matching keys from other. +func (ls *Labels) Merge(other Labels) { + var list []Label + i, j := 0, 0 + for i < len(*ls) && j < len(other) { + if (*ls)[i].Key < other[j].Key { + list = append(list, (*ls)[i]) + i++ + } else if (*ls)[i].Key > other[j].Key { + list = append(list, other[j]) + j++ + } else { + // equal, then "other" replaces existing key + list = append(list, other[j]) + i++ + j++ + } + } + + if i < len(*ls) { + list = append(list, (*ls)[i:]...) + } else if j < len(other) { + list = append(list, other[j:]...) + } + + *ls = list +} + +// New takes an even number of strings representing key-value pairs +// and creates a new slice of Labels. 
Duplicates are removed, however, +// there is no guarantee which will be removed +func New(args ...string) Labels { + if len(args)%2 != 0 { + panic("uneven number of arguments to label.Labels") + } + var labels Labels + for i := 0; i+1 < len(args); i += 2 { + labels = append(labels, Label{Key: args[i], Value: args[i+1]}) + } + + sort.Slice(labels, func(i, j int) bool { + return labels[i].Key < labels[j].Key + }) + + // deduplicate + // loop invariant: labels[:i] has no duplicates + for i := 0; i < len(labels)-1; i++ { + j := i + 1 + // find all duplicate keys + for j < len(labels) && labels[i].Key == labels[j].Key { + j++ + } + + d := (j - 1) - i // number of duplicate keys + if d > 0 { + // copy over duplicate keys in order to maintain loop invariant + copy(labels[i+1:], labels[j:]) + labels = labels[:len(labels)-d] + } + } + + return labels +} diff --git a/lib/influxdb/pkg/tracing/rawspan.go b/lib/influxdb/pkg/tracing/rawspan.go new file mode 100644 index 0000000..12e37e5 --- /dev/null +++ b/lib/influxdb/pkg/tracing/rawspan.go @@ -0,0 +1,18 @@ +package tracing + +import ( + "time" + + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/pkg/tracing/labels" +) + +// RawSpan represents the data associated with a span. +type RawSpan struct { + Context SpanContext + ParentSpanID uint64 // ParentSpanID identifies the parent of this span or 0 if this is the root span. + Name string // Name is the operation name given to this span. + Start time.Time // Start identifies the start time of the span. + Labels labels.Labels // Labels contains additional metadata about this span. + Fields fields.Fields // Fields contains typed values associated with this span. 
+} diff --git a/lib/influxdb/pkg/tracing/span.go b/lib/influxdb/pkg/tracing/span.go new file mode 100644 index 0000000..c8bcfb4 --- /dev/null +++ b/lib/influxdb/pkg/tracing/span.go @@ -0,0 +1,84 @@ +package tracing + +import ( + "sync" + "time" + + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/pkg/tracing/labels" +) + +// The Span type denotes a specific operation for a Trace. +// A Span may have one or more children, identifying additional +// details about a trace. +type Span struct { + tracer *Trace + mu sync.Mutex + raw RawSpan +} + +type StartSpanOption interface { + applyStart(*Span) +} + +// The StartTime start span option specifies the start time of +// the new span rather than using now. +type StartTime time.Time + +func (t StartTime) applyStart(s *Span) { + s.raw.Start = time.Time(t) +} + +// StartSpan creates a new child span using time.Now as the start time. +func (s *Span) StartSpan(name string, opt ...StartSpanOption) *Span { + return s.tracer.startSpan(name, s.raw.Context, opt) +} + +// Context returns a SpanContext that can be serialized and passed to a remote node to continue a trace. +func (s *Span) Context() SpanContext { + return s.raw.Context +} + +// SetLabels replaces any existing labels for the Span with args. +func (s *Span) SetLabels(args ...string) { + s.mu.Lock() + s.raw.Labels = labels.New(args...) + s.mu.Unlock() +} + +// MergeLabels merges args with any existing labels defined +// for the Span. +func (s *Span) MergeLabels(args ...string) { + ls := labels.New(args...) + s.mu.Lock() + s.raw.Labels.Merge(ls) + s.mu.Unlock() +} + +// SetFields replaces any existing fields for the Span with args. +func (s *Span) SetFields(set fields.Fields) { + s.mu.Lock() + s.raw.Fields = set + s.mu.Unlock() +} + +// MergeFields merges the provides args with any existing fields defined +// for the Span. +func (s *Span) MergeFields(args ...fields.Field) { + set := fields.New(args...) 
+ s.mu.Lock() + s.raw.Fields.Merge(set) + s.mu.Unlock() +} + +// Finish marks the end of the span and records it to the associated Trace. +// If Finish is not called, the span will not appear in the trace. +func (s *Span) Finish() { + s.mu.Lock() + s.tracer.addRawSpan(s.raw) + s.mu.Unlock() +} + +func (s *Span) Tree() *TreeNode { + return s.tracer.TreeFrom(s.raw.Context.SpanID) +} diff --git a/lib/influxdb/pkg/tracing/spancontext.go b/lib/influxdb/pkg/tracing/spancontext.go new file mode 100644 index 0000000..62cf7af --- /dev/null +++ b/lib/influxdb/pkg/tracing/spancontext.go @@ -0,0 +1,27 @@ +package tracing + +import ( + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/pkg/tracing/wire" +) + +// A SpanContext represents the minimal information to identify a span in a trace. +// This is typically serialized to continue a trace on a remote node. +type SpanContext struct { + TraceID uint64 // TraceID is assigned a random number to this trace. + SpanID uint64 // SpanID is assigned a random number to identify this span. +} + +func (s SpanContext) MarshalBinary() ([]byte, error) { + ws := wire.SpanContext(s) + return proto.Marshal(&ws) +} + +func (s *SpanContext) UnmarshalBinary(data []byte) error { + var ws wire.SpanContext + err := proto.Unmarshal(data, &ws) + if err == nil { + *s = SpanContext(ws) + } + return err +} diff --git a/lib/influxdb/pkg/tracing/trace.go b/lib/influxdb/pkg/tracing/trace.go new file mode 100644 index 0000000..35b5e33 --- /dev/null +++ b/lib/influxdb/pkg/tracing/trace.go @@ -0,0 +1,124 @@ +package tracing + +import ( + "sort" + "sync" + "time" +) + +// The Trace type functions as a container for capturing Spans used to +// trace the execution of a request. +type Trace struct { + mu sync.Mutex + spans map[uint64]RawSpan +} + +// NewTrace starts a new trace and returns a root span identified by the provided name. +// +// Additional options may be specified to override the default behavior when creating the span. 
+func NewTrace(name string, opt ...StartSpanOption) (*Trace, *Span) { + t := &Trace{spans: make(map[uint64]RawSpan)} + s := &Span{tracer: t} + s.raw.Name = name + s.raw.Context.TraceID, s.raw.Context.SpanID = randomID2() + setOptions(s, opt) + + return t, s +} + +func (t *Trace) startSpan(name string, sc SpanContext, opt []StartSpanOption) *Span { + s := &Span{tracer: t} + s.raw.Name = name + s.raw.Context.SpanID = randomID() + s.raw.Context.TraceID = sc.TraceID + s.raw.ParentSpanID = sc.SpanID + setOptions(s, opt) + + return s +} + +func setOptions(s *Span, opt []StartSpanOption) { + for _, o := range opt { + o.applyStart(s) + } + + if s.raw.Start.IsZero() { + s.raw.Start = time.Now() + } +} + +func (t *Trace) addRawSpan(raw RawSpan) { + t.mu.Lock() + t.spans[raw.Context.SpanID] = raw + t.mu.Unlock() +} + +// Tree returns a graph of the current trace. +func (t *Trace) Tree() *TreeNode { + t.mu.Lock() + defer t.mu.Unlock() + + for _, s := range t.spans { + if s.ParentSpanID == 0 { + return t.treeFrom(s.Context.SpanID) + } + } + return nil +} + +// Merge combines other with the current trace. This is +// typically necessary when traces are transferred from a remote. 
+func (t *Trace) Merge(other *Trace) { + for k, s := range other.spans { + t.spans[k] = s + } +} + +func (t *Trace) TreeFrom(root uint64) *TreeNode { + t.mu.Lock() + defer t.mu.Unlock() + return t.treeFrom(root) +} + +func (t *Trace) treeFrom(root uint64) *TreeNode { + c := map[uint64]*TreeNode{} + + for k, s := range t.spans { + c[k] = &TreeNode{Raw: s} + } + + if _, ok := c[root]; !ok { + return nil + } + + for _, n := range c { + if n.Raw.ParentSpanID != 0 { + if pn := c[n.Raw.ParentSpanID]; pn != nil { + pn.Children = append(pn.Children, n) + } + } + } + + // sort nodes + var v treeSortVisitor + Walk(&v, c[root]) + + return c[root] +} + +type treeSortVisitor struct{} + +func (v *treeSortVisitor) Visit(node *TreeNode) Visitor { + sort.Slice(node.Children, func(i, j int) bool { + lt, rt := node.Children[i].Raw.Start.UnixNano(), node.Children[j].Raw.Start.UnixNano() + if lt < rt { + return true + } else if lt > rt { + return false + } + + ln, rn := node.Children[i].Raw.Name, node.Children[j].Raw.Name + return ln < rn + }) + return v +} diff --git a/lib/influxdb/pkg/tracing/trace_encoding.go b/lib/influxdb/pkg/tracing/trace_encoding.go new file mode 100644 index 0000000..31c3b33 --- /dev/null +++ b/lib/influxdb/pkg/tracing/trace_encoding.go @@ -0,0 +1,136 @@ +package tracing + +import ( + "math" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/pkg/tracing/fields" + "github.com/influxdata/influxdb/pkg/tracing/labels" + "github.com/influxdata/influxdb/pkg/tracing/wire" +) + +func fieldsToWire(set fields.Fields) []wire.Field { + var r []wire.Field + for _, f := range set { + wf := wire.Field{Key: f.Key()} + switch val := f.Value().(type) { + case string: + wf.FieldType = wire.FieldTypeString + wf.Value = &wire.Field_StringVal{StringVal: val} + + case bool: + var numericVal int64 + if val { + numericVal = 1 + } + wf.FieldType = wire.FieldTypeBool + wf.Value = &wire.Field_NumericVal{NumericVal: numericVal} + + case int64: + wf.FieldType = 
wire.FieldTypeInt64 + wf.Value = &wire.Field_NumericVal{NumericVal: val} + + case uint64: + wf.FieldType = wire.FieldTypeUint64 + wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)} + + case time.Duration: + wf.FieldType = wire.FieldTypeDuration + wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)} + + case float64: + wf.FieldType = wire.FieldTypeFloat64 + wf.Value = &wire.Field_NumericVal{NumericVal: int64(math.Float64bits(val))} + + default: + continue + } + + r = append(r, wf) + } + return r +} + +func labelsToWire(set labels.Labels) []string { + var r []string + for i := range set { + r = append(r, set[i].Key, set[i].Value) + } + return r +} + +func (t *Trace) MarshalBinary() ([]byte, error) { + wt := wire.Trace{} + for _, sp := range t.spans { + wt.Spans = append(wt.Spans, &wire.Span{ + Context: wire.SpanContext{ + TraceID: sp.Context.TraceID, + SpanID: sp.Context.SpanID, + }, + ParentSpanID: sp.ParentSpanID, + Name: sp.Name, + Start: sp.Start, + Labels: labelsToWire(sp.Labels), + Fields: fieldsToWire(sp.Fields), + }) + } + + return proto.Marshal(&wt) +} + +func wireToFields(wfs []wire.Field) fields.Fields { + var fs []fields.Field + for _, wf := range wfs { + switch wf.FieldType { + case wire.FieldTypeString: + fs = append(fs, fields.String(wf.Key, wf.GetStringVal())) + + case wire.FieldTypeBool: + var boolVal bool + if wf.GetNumericVal() != 0 { + boolVal = true + } + fs = append(fs, fields.Bool(wf.Key, boolVal)) + + case wire.FieldTypeInt64: + fs = append(fs, fields.Int64(wf.Key, wf.GetNumericVal())) + + case wire.FieldTypeUint64: + fs = append(fs, fields.Uint64(wf.Key, uint64(wf.GetNumericVal()))) + + case wire.FieldTypeDuration: + fs = append(fs, fields.Duration(wf.Key, time.Duration(wf.GetNumericVal()))) + + case wire.FieldTypeFloat64: + fs = append(fs, fields.Float64(wf.Key, math.Float64frombits(uint64(wf.GetNumericVal())))) + } + } + + return fields.New(fs...) 
+} + +func (t *Trace) UnmarshalBinary(data []byte) error { + var wt wire.Trace + if err := proto.Unmarshal(data, &wt); err != nil { + return err + } + + t.spans = make(map[uint64]RawSpan) + + for _, sp := range wt.Spans { + t.spans[sp.Context.SpanID] = RawSpan{ + Context: SpanContext{ + TraceID: sp.Context.TraceID, + SpanID: sp.Context.SpanID, + }, + ParentSpanID: sp.ParentSpanID, + Name: sp.Name, + Start: sp.Start, + Labels: labels.New(sp.Labels...), + Fields: wireToFields(sp.Fields), + } + } + + return nil +} diff --git a/lib/influxdb/pkg/tracing/tree.go b/lib/influxdb/pkg/tracing/tree.go new file mode 100644 index 0000000..0321be6 --- /dev/null +++ b/lib/influxdb/pkg/tracing/tree.go @@ -0,0 +1,74 @@ +package tracing + +import ( + "github.com/xlab/treeprint" +) + +// A Visitor's Visit method is invoked for each node encountered by Walk. +// If the result of Visit is not nil, Walk visits each of the children. +type Visitor interface { + Visit(*TreeNode) Visitor +} + +// A TreeNode represents a single node in the graph. +type TreeNode struct { + Raw RawSpan + Children []*TreeNode +} + +// String returns the tree as a string. +func (t *TreeNode) String() string { + if t == nil { + return "" + } + tv := newTreeVisitor() + Walk(tv, t) + return tv.root.String() +} + +// Walk traverses the graph in a depth-first order, calling v.Visit +// for each node until completion or v.Visit returns nil. 
+func Walk(v Visitor, node *TreeNode) { + if v = v.Visit(node); v == nil { + return + } + + for _, c := range node.Children { + Walk(v, c) + } +} + +type treeVisitor struct { + root treeprint.Tree + trees []treeprint.Tree +} + +func newTreeVisitor() *treeVisitor { + t := treeprint.New() + return &treeVisitor{root: t, trees: []treeprint.Tree{t}} +} + +func (v *treeVisitor) Visit(n *TreeNode) Visitor { + t := v.trees[len(v.trees)-1].AddBranch(n.Raw.Name) + v.trees = append(v.trees, t) + + if labels := n.Raw.Labels; len(labels) > 0 { + l := t.AddBranch("labels") + for _, ll := range n.Raw.Labels { + l.AddNode(ll.Key + ": " + ll.Value) + } + } + + for _, k := range n.Raw.Fields { + t.AddNode(k.String()) + } + + for _, cn := range n.Children { + Walk(v, cn) + } + + v.trees[len(v.trees)-1] = nil + v.trees = v.trees[:len(v.trees)-1] + + return nil +} diff --git a/lib/influxdb/pkg/tracing/util.go b/lib/influxdb/pkg/tracing/util.go new file mode 100644 index 0000000..f98cc77 --- /dev/null +++ b/lib/influxdb/pkg/tracing/util.go @@ -0,0 +1,26 @@ +package tracing + +import ( + "math/rand" + "sync" + "time" +) + +var ( + seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) + seededIDLock sync.Mutex +) + +func randomID() (n uint64) { + seededIDLock.Lock() + n = uint64(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} + +func randomID2() (n uint64, m uint64) { + seededIDLock.Lock() + n, m = uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) + seededIDLock.Unlock() + return +} diff --git a/lib/influxdb/pkg/tracing/wire/binary.pb.go b/lib/influxdb/pkg/tracing/wire/binary.pb.go new file mode 100644 index 0000000..9d5f0e4 --- /dev/null +++ b/lib/influxdb/pkg/tracing/wire/binary.pb.go @@ -0,0 +1,1369 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: binary.proto + +package wire + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Field_FieldType int32 + +const ( + FieldTypeString Field_FieldType = 0 + FieldTypeBool Field_FieldType = 1 + FieldTypeInt64 Field_FieldType = 2 + FieldTypeUint64 Field_FieldType = 3 + FieldTypeDuration Field_FieldType = 4 + FieldTypeFloat64 Field_FieldType = 6 +) + +var Field_FieldType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT_64", + 3: "UINT_64", + 4: "DURATION", + 6: "FLOAT_64", +} + +var Field_FieldType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT_64": 2, + "UINT_64": 3, + "DURATION": 4, + "FLOAT_64": 6, +} + +func (x Field_FieldType) String() string { + return proto.EnumName(Field_FieldType_name, int32(x)) +} + +func (Field_FieldType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3aeef8c45497084a, []int{3, 0} +} + +type SpanContext struct { + TraceID uint64 `protobuf:"varint,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + SpanID uint64 `protobuf:"varint,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` +} + +func (m *SpanContext) Reset() { *m = SpanContext{} } +func (m *SpanContext) String() string { return proto.CompactTextString(m) } +func 
(*SpanContext) ProtoMessage() {} +func (*SpanContext) Descriptor() ([]byte, []int) { + return fileDescriptor_3aeef8c45497084a, []int{0} +} +func (m *SpanContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SpanContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SpanContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SpanContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SpanContext.Merge(m, src) +} +func (m *SpanContext) XXX_Size() int { + return m.Size() +} +func (m *SpanContext) XXX_DiscardUnknown() { + xxx_messageInfo_SpanContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SpanContext proto.InternalMessageInfo + +func (m *SpanContext) GetTraceID() uint64 { + if m != nil { + return m.TraceID + } + return 0 +} + +func (m *SpanContext) GetSpanID() uint64 { + if m != nil { + return m.SpanID + } + return 0 +} + +type Span struct { + Context SpanContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context"` + ParentSpanID uint64 `protobuf:"varint,2,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Start time.Time `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` + Labels []string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"` + Fields []Field `protobuf:"bytes,6,rep,name=fields,proto3" json:"fields"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_3aeef8c45497084a, []int{1} +} +func (m *Span) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return m.Size() +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetContext() SpanContext { + if m != nil { + return m.Context + } + return SpanContext{} +} + +func (m *Span) GetParentSpanID() uint64 { + if m != nil { + return m.ParentSpanID + } + return 0 +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *Span) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Span) GetFields() []Field { + if m != nil { + return m.Fields + } + return nil +} + +type Trace struct { + Spans []*Span `protobuf:"bytes,1,rep,name=spans,proto3" json:"spans,omitempty"` +} + +func (m *Trace) Reset() { *m = Trace{} } +func (m *Trace) String() string { return proto.CompactTextString(m) } +func (*Trace) ProtoMessage() {} +func (*Trace) Descriptor() ([]byte, []int) { + return fileDescriptor_3aeef8c45497084a, []int{2} +} +func (m *Trace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Trace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Trace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Trace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trace.Merge(m, src) +} +func (m *Trace) XXX_Size() int { + return m.Size() +} +func (m *Trace) 
XXX_DiscardUnknown() { + xxx_messageInfo_Trace.DiscardUnknown(m) +} + +var xxx_messageInfo_Trace proto.InternalMessageInfo + +func (m *Trace) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +type Field struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + FieldType Field_FieldType `protobuf:"varint,2,opt,name=field_type,json=fieldType,proto3,enum=wire.Field_FieldType" json:"field_type,omitempty"` + // Types that are valid to be assigned to Value: + // *Field_NumericVal + // *Field_StringVal + Value isField_Value `protobuf_oneof:"value"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_3aeef8c45497084a, []int{3} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +type isField_Value interface { + isField_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Field_NumericVal struct { + NumericVal int64 `protobuf:"fixed64,3,opt,name=numeric_val,json=numericVal,proto3,oneof" json:"numeric_val,omitempty"` +} +type Field_StringVal struct { + StringVal string `protobuf:"bytes,4,opt,name=string_val,json=stringVal,proto3,oneof" json:"string_val,omitempty"` +} + +func (*Field_NumericVal) isField_Value() {} +func (*Field_StringVal) 
isField_Value() {} + +func (m *Field) GetValue() isField_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Field) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Field) GetFieldType() Field_FieldType { + if m != nil { + return m.FieldType + } + return FieldTypeString +} + +func (m *Field) GetNumericVal() int64 { + if x, ok := m.GetValue().(*Field_NumericVal); ok { + return x.NumericVal + } + return 0 +} + +func (m *Field) GetStringVal() string { + if x, ok := m.GetValue().(*Field_StringVal); ok { + return x.StringVal + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Field) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Field_NumericVal)(nil), + (*Field_StringVal)(nil), + } +} + +func init() { + proto.RegisterEnum("wire.Field_FieldType", Field_FieldType_name, Field_FieldType_value) + proto.RegisterType((*SpanContext)(nil), "wire.SpanContext") + proto.RegisterType((*Span)(nil), "wire.Span") + proto.RegisterType((*Trace)(nil), "wire.Trace") + proto.RegisterType((*Field)(nil), "wire.Field") +} + +func init() { proto.RegisterFile("binary.proto", fileDescriptor_3aeef8c45497084a) } + +var fileDescriptor_3aeef8c45497084a = []byte{ + // 621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x52, 0x3f, 0x6f, 0x9b, 0x5e, + 0x14, 0x85, 0x18, 0xe3, 0x70, 0x9d, 0xf8, 0x87, 0xdf, 0x2f, 0xa9, 0x10, 0x95, 0x00, 0x39, 0x52, + 0xe5, 0x2c, 0x44, 0x49, 0x23, 0xef, 0x21, 0x56, 0x5a, 0xa4, 0xc8, 0xae, 0xb0, 0xd3, 0xa1, 0x8b, + 0xf5, 0x1c, 0xbf, 0x58, 0xa8, 0x18, 0x10, 0x3c, 0xa7, 0xf5, 0x37, 0xa8, 0x3c, 0x65, 0xea, 0xe6, + 0xa9, 0x43, 0xbf, 0x4a, 0xc6, 0x8c, 0x55, 0x07, 0x5a, 0xe1, 0x2f, 0x52, 0xbd, 0x87, 0xff, 0xa4, + 0x5d, 0xd0, 0xbd, 0xf7, 0x9c, 0x7b, 0xce, 0xbb, 0x47, 0xc0, 0xde, 0xd0, 0x0f, 0x71, 0x32, 0xb3, + 0xe3, 0x24, 0xa2, 0x11, 0x92, 0x3e, 0xf9, 0x09, 0xd1, 0x0f, 0xc6, 0xd1, 0x38, 0xe2, 0x83, 0x13, + 
0x56, 0x15, 0x98, 0x6e, 0x8e, 0xa3, 0x68, 0x1c, 0x90, 0x13, 0xde, 0x0d, 0xa7, 0x77, 0x27, 0xd4, + 0x9f, 0x90, 0x94, 0xe2, 0x49, 0x5c, 0x10, 0x1a, 0x1f, 0xa0, 0xda, 0x8b, 0x71, 0x78, 0x19, 0x85, + 0x94, 0x7c, 0xa6, 0xe8, 0x15, 0xec, 0xd2, 0x04, 0xdf, 0x92, 0x81, 0x3f, 0xd2, 0x44, 0x4b, 0x6c, + 0x4a, 0x4e, 0x35, 0xcf, 0xcc, 0x4a, 0x9f, 0xcd, 0xdc, 0xb6, 0x57, 0xe1, 0xa0, 0x3b, 0x42, 0x47, + 0x50, 0x49, 0x63, 0x1c, 0x32, 0xda, 0x0e, 0xa7, 0x41, 0x9e, 0x99, 0x32, 0x53, 0x72, 0xdb, 0x9e, + 0xcc, 0x20, 0x77, 0xd4, 0xf8, 0xba, 0x03, 0x12, 0x1b, 0xa1, 0x53, 0xa8, 0xdc, 0x16, 0x06, 0x5c, + 0xb4, 0x7a, 0x56, 0xb7, 0xd9, 0x9b, 0xed, 0x67, 0xce, 0x8e, 0xf4, 0x98, 0x99, 0x82, 0xb7, 0xe6, + 0xa1, 0x16, 0xd4, 0x62, 0x9c, 0x90, 0x90, 0x0e, 0xfe, 0xf6, 0x51, 0xf3, 0xcc, 0xdc, 0x7b, 0xc7, + 0x91, 0x95, 0xdb, 0x5e, 0xbc, 0xed, 0x46, 0x08, 0x81, 0x14, 0xe2, 0x09, 0xd1, 0x4a, 0x96, 0xd8, + 0x54, 0x3c, 0x5e, 0xa3, 0x6b, 0x80, 0x94, 0xe2, 0x84, 0x0e, 0xd8, 0xf1, 0x9a, 0xc4, 0x5f, 0xa0, + 0xdb, 0x45, 0x32, 0xf6, 0x3a, 0x19, 0xbb, 0xbf, 0x4e, 0xc6, 0xa9, 0xb3, 0xa7, 0xe4, 0x99, 0x59, + 0xee, 0xb1, 0xad, 0x87, 0x5f, 0xa6, 0xe8, 0x29, 0x5c, 0x80, 0x51, 0xd0, 0x0b, 0x90, 0x03, 0x3c, + 0x24, 0x41, 0xaa, 0x95, 0xad, 0x52, 0x53, 0xf1, 0x56, 0x1d, 0x3a, 0x06, 0xf9, 0xce, 0x27, 0xc1, + 0x28, 0xd5, 0x64, 0xab, 0xd4, 0xac, 0x9e, 0x55, 0x8b, 0x1b, 0xaf, 0xd8, 0x6c, 0x75, 0xdd, 0x8a, + 0xd0, 0x38, 0x86, 0x32, 0x4f, 0x14, 0x59, 0x50, 0x66, 0xe7, 0xa5, 0x9a, 0xc8, 0x57, 0x60, 0x1b, + 0x8b, 0x57, 0x00, 0x8d, 0xef, 0x25, 0x28, 0x73, 0x09, 0xa4, 0x42, 0xe9, 0x23, 0x99, 0xf1, 0x00, + 0x15, 0x8f, 0x95, 0xe8, 0x12, 0x80, 0x0b, 0x0e, 0xe8, 0x2c, 0x26, 0x3c, 0x9f, 0xda, 0xd9, 0xe1, + 0x33, 0xd7, 0xe2, 0xdb, 0x9f, 0xc5, 0xc4, 0xd9, 0xcf, 0x33, 0x53, 0xd9, 0xb4, 0x9e, 0x72, 0xb7, + 0x2e, 0xd1, 0x29, 0x54, 0xc3, 0xe9, 0x84, 0x24, 0xfe, 0xed, 0xe0, 0x1e, 0x07, 0x3c, 0x37, 0xd5, + 0xa9, 0xe5, 0x99, 0x09, 0x9d, 0x62, 0xfc, 0x1e, 0x07, 0x6f, 0x05, 0x0f, 0xc2, 0x4d, 0x87, 0x6c, + 0x96, 0x67, 0xe2, 0x87, 0x63, 0xbe, 
0xc1, 0xf2, 0x54, 0x0a, 0x83, 0x1e, 0x9f, 0x16, 0x0b, 0x4a, + 0xba, 0x6e, 0x1a, 0x3f, 0x45, 0xd8, 0x7a, 0x23, 0x13, 0xe4, 0x5e, 0xdf, 0x73, 0x3b, 0x6f, 0x54, + 0x41, 0xff, 0x7f, 0xbe, 0xb0, 0xfe, 0xdb, 0x40, 0xc5, 0x3a, 0x7a, 0x09, 0x92, 0xd3, 0xed, 0x5e, + 0xab, 0xa2, 0x5e, 0x9f, 0x2f, 0xac, 0xfd, 0xed, 0x11, 0x51, 0x14, 0x20, 0x03, 0x64, 0xb7, 0xd3, + 0x1f, 0xb4, 0xce, 0xd5, 0x1d, 0x1d, 0xcd, 0x17, 0x56, 0x6d, 0x03, 0xbb, 0x21, 0x6d, 0x9d, 0x23, + 0x0b, 0x2a, 0x37, 0x2b, 0x42, 0xe9, 0x1f, 0xf9, 0x1b, 0x9f, 0x33, 0x8e, 0x60, 0xb7, 0x7d, 0xe3, + 0x5d, 0xf4, 0xdd, 0x6e, 0x47, 0x95, 0xf4, 0xc3, 0xf9, 0xc2, 0xaa, 0x6f, 0x28, 0xed, 0x69, 0x82, + 0xa9, 0x1f, 0x85, 0xa8, 0x01, 0xbb, 0x57, 0xd7, 0xdd, 0x0b, 0xae, 0x23, 0xeb, 0x07, 0xf3, 0x85, + 0xa5, 0x6e, 0x48, 0x57, 0x41, 0x84, 0x69, 0xeb, 0x5c, 0x97, 0xbe, 0x7c, 0x33, 0x04, 0xa7, 0x02, + 0xe5, 0x7b, 0x1c, 0x4c, 0x89, 0xa3, 0x3d, 0xe6, 0x86, 0xf8, 0x94, 0x1b, 0xe2, 0xef, 0xdc, 0x10, + 0x1f, 0x96, 0x86, 0xf0, 0xb4, 0x34, 0x84, 0x1f, 0x4b, 0x43, 0x18, 0xca, 0xfc, 0x1f, 0x7b, 0xfd, + 0x27, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x66, 0xcd, 0x47, 0xb7, 0x03, 0x00, 0x00, +} + +func (m *SpanContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SpanContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SpanID != 0 { + i = encodeVarintBinary(dAtA, i, uint64(m.SpanID)) + i-- + dAtA[i] = 0x10 + } + if m.TraceID != 0 { + i = encodeVarintBinary(dAtA, i, uint64(m.TraceID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Span) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinary(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Labels[iNdEx]) + copy(dAtA[i:], m.Labels[iNdEx]) + i = encodeVarintBinary(dAtA, i, uint64(len(m.Labels[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintBinary(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x22 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintBinary(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.ParentSpanID != 0 { + i = encodeVarintBinary(dAtA, i, uint64(m.ParentSpanID)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Context.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinary(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Trace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Trace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Trace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.Spans) > 0 { + for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBinary(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.FieldType != 0 { + i = encodeVarintBinary(dAtA, i, uint64(m.FieldType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintBinary(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field_NumericVal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field_NumericVal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.NumericVal)) + i-- + dAtA[i] = 0x19 + return len(dAtA) - i, nil +} +func (m *Field_StringVal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field_StringVal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringVal) + copy(dAtA[i:], m.StringVal) + i = encodeVarintBinary(dAtA, i, uint64(len(m.StringVal))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func encodeVarintBinary(dAtA []byte, offset int, v 
uint64) int { + offset -= sovBinary(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SpanContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceID != 0 { + n += 1 + sovBinary(uint64(m.TraceID)) + } + if m.SpanID != 0 { + n += 1 + sovBinary(uint64(m.SpanID)) + } + return n +} + +func (m *Span) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Context.Size() + n += 1 + l + sovBinary(uint64(l)) + if m.ParentSpanID != 0 { + n += 1 + sovBinary(uint64(m.ParentSpanID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovBinary(uint64(l)) + if len(m.Labels) > 0 { + for _, s := range m.Labels { + l = len(s) + n += 1 + l + sovBinary(uint64(l)) + } + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovBinary(uint64(l)) + } + } + return n +} + +func (m *Trace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.Size() + n += 1 + l + sovBinary(uint64(l)) + } + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovBinary(uint64(l)) + } + if m.FieldType != 0 { + n += 1 + sovBinary(uint64(m.FieldType)) + } + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Field_NumericVal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Field_StringVal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringVal) + n += 1 + l + sovBinary(uint64(l)) + return n +} + +func sovBinary(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBinary(x uint64) (n int) { + return sovBinary(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) +} +func (m *SpanContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanID", wireType) + } + m.ParentSpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentSpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Trace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Trace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Trace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldType", wireType) + } + m.FieldType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.FieldType |= Field_FieldType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumericVal", wireType) + } + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &Field_NumericVal{v} + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBinary + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBinary + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBinary + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Field_StringVal{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBinary(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBinary + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBinary(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBinary + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBinary + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBinary + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBinary + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBinary = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBinary = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBinary = fmt.Errorf("proto: unexpected end of group") +) diff --git a/lib/influxdb/prometheus/converters.go b/lib/influxdb/prometheus/converters.go new file mode 100644 index 0000000..a7873f0 --- /dev/null +++ b/lib/influxdb/prometheus/converters.go @@ -0,0 +1,21 @@ +package prometheus + +import ( + "github.com/influxdata/influxdb/models" + "github.com/prometheus/prometheus/prompb" +) + +// ModelTagsToLabelPairs converts models.Tags to a slice of Prometheus label pairs +func ModelTagsToLabelPairs(tags models.Tags) []prompb.Label { + pairs := make([]prompb.Label, 0, len(tags)) + for _, t := range tags { + if string(t.Value) == "" { + continue + } + pairs = append(pairs, prompb.Label{ + Name: string(t.Key), + Value: string(t.Value), + }) + } + return pairs +} diff --git a/lib/influxdb/query/neldermead/neldermead.go b/lib/influxdb/query/neldermead/neldermead.go new file mode 100644 index 0000000..f2e628d --- /dev/null +++ b/lib/influxdb/query/neldermead/neldermead.go @@ -0,0 +1,239 @@ +// Package 
neldermead is an implementation of the Nelder-Mead optimization method. +// Based on work by Michael F. Hutt: http://www.mikehutt.com/neldermead.html +package neldermead + +import "math" + +const ( + defaultMaxIterations = 1000 + // reflection coefficient + defaultAlpha = 1.0 + // contraction coefficient + defaultBeta = 0.5 + // expansion coefficient + defaultGamma = 2.0 +) + +// Optimizer represents the parameters to the Nelder-Mead simplex method. +type Optimizer struct { + // Maximum number of iterations. + MaxIterations int + // Reflection coefficient. + Alpha, + // Contraction coefficient. + Beta, + // Expansion coefficient. + Gamma float64 +} + +// New returns a new instance of Optimizer with all values set to the defaults. +func New() *Optimizer { + return &Optimizer{ + MaxIterations: defaultMaxIterations, + Alpha: defaultAlpha, + Beta: defaultBeta, + Gamma: defaultGamma, + } +} + +// Optimize applies the Nelder-Mead simplex method with the Optimizer's settings. +func (o *Optimizer) Optimize( + objfunc func([]float64) float64, + start []float64, + epsilon, + scale float64, +) (float64, []float64) { + n := len(start) + + //holds vertices of simplex + v := make([][]float64, n+1) + for i := range v { + v[i] = make([]float64, n) + } + + //value of function at each vertex + f := make([]float64, n+1) + + //reflection - coordinates + vr := make([]float64, n) + + //expansion - coordinates + ve := make([]float64, n) + + //contraction - coordinates + vc := make([]float64, n) + + //centroid - coordinates + vm := make([]float64, n) + + // create the initial simplex + // assume one of the vertices is 0,0 + + pn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2)) + qn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2)) + + for i := 0; i < n; i++ { + v[0][i] = start[i] + } + + for i := 1; i <= n; i++ { + for j := 0; j < n; j++ { + if i-1 == j { + v[i][j] = pn + start[j] + } else { + v[i][j] = qn + start[j] + } + } + } + + 
// find the initial function values + for j := 0; j <= n; j++ { + f[j] = objfunc(v[j]) + } + + // begin the main loop of the minimization + for itr := 1; itr <= o.MaxIterations; itr++ { + + // find the indexes of the largest and smallest values + vg := 0 + vs := 0 + for i := 0; i <= n; i++ { + if f[i] > f[vg] { + vg = i + } + if f[i] < f[vs] { + vs = i + } + } + // find the index of the second largest value + vh := vs + for i := 0; i <= n; i++ { + if f[i] > f[vh] && f[i] < f[vg] { + vh = i + } + } + + // calculate the centroid + for i := 0; i <= n-1; i++ { + cent := 0.0 + for m := 0; m <= n; m++ { + if m != vg { + cent += v[m][i] + } + } + vm[i] = cent / float64(n) + } + + // reflect vg to new vertex vr + for i := 0; i <= n-1; i++ { + vr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i]) + } + + // value of function at reflection point + fr := objfunc(vr) + + if fr < f[vh] && fr >= f[vs] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + + // investigate a step further in this direction + if fr < f[vs] { + for i := 0; i <= n-1; i++ { + ve[i] = vm[i] + o.Gamma*(vr[i]-vm[i]) + } + + // value of function at expansion point + fe := objfunc(ve) + + // by making fe < fr as opposed to fe < f[vs], + // Rosenbrocks function takes 63 iterations as opposed + // to 64 when using double variables. 
+ + if fe < fr { + for i := 0; i <= n-1; i++ { + v[vg][i] = ve[i] + } + f[vg] = fe + } else { + for i := 0; i <= n-1; i++ { + v[vg][i] = vr[i] + } + f[vg] = fr + } + } + + // check to see if a contraction is necessary + if fr >= f[vh] { + if fr < f[vg] && fr >= f[vh] { + // perform outside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] + o.Beta*(vr[i]-vm[i]) + } + } else { + // perform inside contraction + for i := 0; i <= n-1; i++ { + vc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i]) + } + } + + // value of function at contraction point + fc := objfunc(vc) + + if fc < f[vg] { + for i := 0; i <= n-1; i++ { + v[vg][i] = vc[i] + } + f[vg] = fc + } else { + // at this point the contraction is not successful, + // we must halve the distance from vs to all the + // vertices of the simplex and then continue. + + for row := 0; row <= n; row++ { + if row != vs { + for i := 0; i <= n-1; i++ { + v[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0 + } + } + } + f[vg] = objfunc(v[vg]) + f[vh] = objfunc(v[vh]) + } + } + + // test for convergence + fsum := 0.0 + for i := 0; i <= n; i++ { + fsum += f[i] + } + favg := fsum / float64(n+1) + s := 0.0 + for i := 0; i <= n; i++ { + s += math.Pow((f[i]-favg), 2.0) / float64(n) + } + s = math.Sqrt(s) + if s < epsilon { + break + } + } + + // find the index of the smallest value + vs := 0 + for i := 0; i <= n; i++ { + if f[i] < f[vs] { + vs = i + } + } + + parameters := make([]float64, n) + for i := 0; i < n; i++ { + parameters[i] = v[vs][i] + } + + min := objfunc(v[vs]) + + return min, parameters +} diff --git a/lib/influxdb/services/retention/config.go b/lib/influxdb/services/retention/config.go new file mode 100644 index 0000000..d12ad64 --- /dev/null +++ b/lib/influxdb/services/retention/config.go @@ -0,0 +1,49 @@ +package retention + +import ( + "errors" + "time" + + "github.com/influxdata/influxdb/monitor/diagnostics" + "github.com/influxdata/influxdb/toml" +) + +// Config represents the configuration for the retention service. 
+type Config struct { + Enabled bool `toml:"enabled"` + CheckInterval toml.Duration `toml:"check-interval"` +} + +// NewConfig returns an instance of Config with defaults. +func NewConfig() Config { + return Config{Enabled: true, CheckInterval: toml.Duration(30 * time.Minute)} +} + +// Validate returns an error if the Config is invalid. +func (c Config) Validate() error { + if !c.Enabled { + return nil + } + + // TODO: Should we enforce a minimum interval? + // Polling every nanosecond, for instance, will greatly impact performance. + if c.CheckInterval <= 0 { + return errors.New("check-interval must be positive") + } + + return nil +} + +// Diagnostics returns a diagnostics representation of a subset of the Config. +func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { + if !c.Enabled { + return diagnostics.RowFromMap(map[string]interface{}{ + "enabled": false, + }), nil + } + + return diagnostics.RowFromMap(map[string]interface{}{ + "enabled": true, + "check-interval": c.CheckInterval, + }), nil +} diff --git a/lib/influxdb/tcp/mux.go b/lib/influxdb/tcp/mux.go new file mode 100644 index 0000000..75377bd --- /dev/null +++ b/lib/influxdb/tcp/mux.go @@ -0,0 +1,302 @@ +// Package tcp provides a simple multiplexer over TCP. +package tcp // import "github.com/influxdata/influxdb/tcp" + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "sync" + "time" +) + +const ( + // DefaultTimeout is the default length of time to wait for first byte. + DefaultTimeout = 30 * time.Second +) + +// Mux multiplexes a network connection. +type Mux struct { + mu sync.RWMutex + ln net.Listener + m map[byte]*listener + + defaultListener *listener + + wg sync.WaitGroup + + // The amount of time to wait for the first header byte. 
+	Timeout time.Duration
+
+	// Out-of-band error logger
+	Logger *log.Logger
+}
+
+type replayConn struct {
+	net.Conn
+	firstByte     byte
+	readFirstbyte bool
+}
+
+func (rc *replayConn) Read(b []byte) (int, error) {
+	if rc.readFirstbyte {
+		return rc.Conn.Read(b)
+	}
+
+	if len(b) == 0 {
+		return 0, nil
+	}
+
+	b[0] = rc.firstByte
+	rc.readFirstbyte = true
+	return 1, nil
+}
+
+func MuxLogger(w io.Writer) *log.Logger {
+	return log.New(w, "[tcp] ", log.LstdFlags)
+}
+
+// NewMux returns a new instance of Mux.
+func NewMux(log *log.Logger) *Mux {
+	return &Mux{
+		m:       make(map[byte]*listener),
+		Timeout: DefaultTimeout,
+		Logger:  log,
+	}
+}
+
+// Serve handles connections from ln and multiplexes them across registered listeners.
+func (mux *Mux) Serve(ln net.Listener) error {
+	mux.mu.Lock()
+	mux.ln = ln
+	mux.mu.Unlock()
+	for {
+		// Wait for the next connection.
+		// If it returns a temporary error then simply retry.
+		// If it returns any other error then exit immediately.
+		conn, err := ln.Accept()
+		if err, ok := err.(interface {
+			Temporary() bool
+		}); ok && err.Temporary() {
+			continue
+		}
+		if err != nil {
+			mux.Logger.Printf("tcp.Mux: Listener at %s failed to accept a connection, closing all listeners - %s", ln.Addr(), err)
+			// Wait for all connections to be demuxed
+			mux.wg.Wait()
+
+			// Concurrently close all registered listeners.
+			// Because mux.m is keyed by byte, in the worst case we would spawn 256 goroutines here.
+			var wg sync.WaitGroup
+			mux.mu.RLock()
+			for _, ln := range mux.m {
+				wg.Add(1)
+				go func(ln *listener) {
+					defer wg.Done()
+					if err := ln.Close(); err != nil {
+						mux.Logger.Printf("tcp.Mux: Closing the listener at %s failed - %s", ln.Addr().String(), err)
+					}
+				}(ln)
+			}
+			mux.mu.RUnlock()
+			wg.Wait()
+
+			mux.mu.RLock()
+			dl := mux.defaultListener
+			mux.mu.RUnlock()
+			if dl != nil {
+				if closeErr := dl.Close(); closeErr != nil {
+					mux.Logger.Printf("tcp.Mux: Closing the default listener at %s failed - %s", ln.Addr().String(), closeErr)
+				}
+			}
+			return err
+		}
+
+		// Demux in a goroutine to avoid blocking the accept loop.
+		mux.wg.Add(1)
+		go mux.handleConn(conn)
+	}
+}
+
+func (mux *Mux) handleConn(conn net.Conn) {
+	defer mux.wg.Done()
+	// Set a read deadline so connections with no data eventually time out.
+	if err := conn.SetReadDeadline(time.Now().Add(mux.Timeout)); err != nil {
+		conn.Close()
+		mux.Logger.Printf("tcp.Mux: cannot set read deadline: %s", err)
+		return
+	}
+
+	// Read first byte from connection to determine handler.
+	var typ [1]byte
+	if _, err := io.ReadFull(conn, typ[:]); err != nil {
+		conn.Close()
+		mux.Logger.Printf("tcp.Mux: cannot read header byte: %s", err)
+		return
+	}
+
+	// Reset read deadline and let the listener handle that.
+	if err := conn.SetReadDeadline(time.Time{}); err != nil {
+		conn.Close()
+		mux.Logger.Printf("tcp.Mux: cannot reset read deadline: %s", err)
+		return
+	}
+
+	// Retrieve handler based on first byte.
+	mux.mu.RLock()
+	handler := mux.m[typ[0]]
+	mux.mu.RUnlock()
+
+	if handler == nil {
+		if mux.defaultListener == nil {
+			conn.Close()
+			mux.Logger.Printf("tcp.Mux: handler not registered: %d. Connection from %s closed", typ[0], conn.RemoteAddr())
+			return
+		}
+
+		conn = &replayConn{
+			Conn:      conn,
+			firstByte: typ[0],
+		}
+		handler = mux.defaultListener
+	}
+
+	handler.HandleConn(conn, typ[0])
+}
+
+// Listen returns a listener identified by header.
+// Any connection accepted by mux is multiplexed based on the initial header byte.
+func (mux *Mux) Listen(header byte) net.Listener { + mux.mu.Lock() + defer mux.mu.Unlock() + + // Ensure two listeners are not created for the same header byte. + if _, ok := mux.m[header]; ok { + panic(fmt.Sprintf("listener already registered under header byte: %d", header)) + } + + // Create a new listener and assign it. + ln := &listener{ + c: make(chan net.Conn), + done: make(chan struct{}), + mux: mux, + } + mux.m[header] = ln + + return ln +} + +// release removes the listener from the mux. +func (mux *Mux) release(ln *listener) bool { + mux.mu.Lock() + defer mux.mu.Unlock() + + for b, l := range mux.m { + if l == ln { + delete(mux.m, b) + return true + } + } + return false +} + +// DefaultListener will return a net.Listener that will pass-through any +// connections with non-registered values for the first byte of the connection. +// The connections returned from this listener's Accept() method will replay the +// first byte of the connection as a short first Read(). +// +// This can be used to pass to an HTTP server, so long as there are no conflicts +// with registered listener bytes and the first character of the HTTP request: +// 71 ('G') for GET, etc. +func (mux *Mux) DefaultListener() net.Listener { + mux.mu.Lock() + defer mux.mu.Unlock() + if mux.defaultListener == nil { + mux.defaultListener = &listener{ + c: make(chan net.Conn), + done: make(chan struct{}), + mux: mux, + } + } + + return mux.defaultListener +} + +// listener is a receiver for connections received by Mux. +type listener struct { + mux *Mux + + // The done channel is closed before taking a lock on mu to close c. + // That way, anyone holding an RLock can release the lock by receiving from done. + done chan struct{} + + mu sync.RWMutex + c chan net.Conn +} + +// Accept waits for and returns the next connection to the listener. 
+func (ln *listener) Accept() (net.Conn, error) { + ln.mu.RLock() + defer ln.mu.RUnlock() + + select { + case <-ln.done: + return nil, errors.New("network connection closed") + case conn := <-ln.c: + return conn, nil + } +} + +// Close removes this listener from the parent mux and closes the channel. +func (ln *listener) Close() error { + if ok := ln.mux.release(ln); ok { + // Close done to signal to any RLock holders to release their lock. + close(ln.done) + + // Hold a lock while reassigning ln.c to nil + // so that attempted sends or receives will block forever. + ln.mu.Lock() + ln.c = nil + ln.mu.Unlock() + } + return nil +} + +// HandleConn handles the connection, if the listener has not been closed. +func (ln *listener) HandleConn(conn net.Conn, handlerID byte) { + ln.mu.RLock() + defer ln.mu.RUnlock() + + // Send connection to handler. The handler is responsible for closing the connection. + timer := time.NewTimer(ln.mux.Timeout) + defer timer.Stop() + + select { + case <-ln.done: + // Receive will return immediately if ln.Close has been called. + conn.Close() + case ln.c <- conn: + // Send will block forever if ln.Close has been called. + case <-timer.C: + conn.Close() + ln.mux.Logger.Printf("tcp.Mux: handler not ready: %d. Connection from %s closed", handlerID, conn.RemoteAddr()) + return + } +} + +// Addr returns the Addr of the listener +func (ln *listener) Addr() net.Addr { + if ln.mux == nil { + return nil + } + + ln.mux.mu.RLock() + defer ln.mux.mu.RUnlock() + + if ln.mux.ln == nil { + return nil + } + + return ln.mux.ln.Addr() +} diff --git a/lib/influxdb/toml/toml.go b/lib/influxdb/toml/toml.go new file mode 100644 index 0000000..2cacd09 --- /dev/null +++ b/lib/influxdb/toml/toml.go @@ -0,0 +1,283 @@ +// Package toml adds support to marshal and unmarshal types not in the official TOML spec. 
+package toml // import "github.com/influxdata/influxdb/toml" + +import ( + "encoding" + "errors" + "fmt" + "math" + "os" + "os/user" + "reflect" + "strconv" + "strings" + "time" + "unicode" +) + +// Duration is a TOML wrapper type for time.Duration. +type Duration time.Duration + +// String returns the string representation of the duration. +func (d Duration) String() string { + return time.Duration(d).String() +} + +// UnmarshalText parses a TOML value into a duration value. +func (d *Duration) UnmarshalText(text []byte) error { + // Ignore if there is no value set. + if len(text) == 0 { + return nil + } + + // Otherwise parse as a duration formatted string. + duration, err := time.ParseDuration(string(text)) + if err != nil { + return err + } + + // Set duration and return. + *d = Duration(duration) + return nil +} + +// MarshalText converts a duration to a string for decoding toml +func (d Duration) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +// Size represents a TOML parseable file size. +// Users can specify size using "k" or "K" for kibibytes, "m" or "M" for mebibytes, +// and "g" or "G" for gibibytes. If a size suffix isn't specified then bytes are assumed. +type Size uint64 + +// UnmarshalText parses a byte size from text. 
+func (s *Size) UnmarshalText(text []byte) error { + if len(text) == 0 { + return fmt.Errorf("size was empty") + } + + // The multiplier defaults to 1 in case the size has + // no suffix (and is then just raw bytes) + mult := uint64(1) + + // Preserve the original text for error messages + sizeText := text + + // Parse unit of measure + suffix := text[len(sizeText)-1] + if !unicode.IsDigit(rune(suffix)) { + switch suffix { + case 'k', 'K': + mult = 1 << 10 // KiB + case 'm', 'M': + mult = 1 << 20 // MiB + case 'g', 'G': + mult = 1 << 30 // GiB + default: + return fmt.Errorf("unknown size suffix: %c (expected k, m, or g)", suffix) + } + sizeText = sizeText[:len(sizeText)-1] + } + + // Parse numeric portion of value. + size, err := strconv.ParseUint(string(sizeText), 10, 64) + if err != nil { + return fmt.Errorf("invalid size: %s", string(text)) + } + + if math.MaxUint64/mult < size { + return fmt.Errorf("size would overflow the max size (%d) of a uint: %s", uint64(math.MaxUint64), string(text)) + } + + size *= mult + + *s = Size(size) + return nil +} + +type FileMode uint32 + +func (m *FileMode) UnmarshalText(text []byte) error { + // Ignore if there is no value set. 
+ if len(text) == 0 { + return nil + } + + mode, err := strconv.ParseUint(string(text), 8, 32) + if err != nil { + return err + } else if mode == 0 { + return errors.New("file mode cannot be zero") + } + *m = FileMode(mode) + return nil +} + +func (m FileMode) MarshalText() (text []byte, err error) { + if m != 0 { + return []byte(fmt.Sprintf("%04o", m)), nil + } + return nil, nil +} + +type Group int + +func (g *Group) UnmarshalTOML(data interface{}) error { + if grpName, ok := data.(string); ok { + group, err := user.LookupGroup(grpName) + if err != nil { + return err + } + + gid, err := strconv.Atoi(group.Gid) + if err != nil { + return err + } + *g = Group(gid) + return nil + } else if gid, ok := data.(int64); ok { + *g = Group(gid) + return nil + } + return errors.New("group must be a name (string) or id (int)") +} + +func ApplyEnvOverrides(getenv func(string) string, prefix string, val interface{}) error { + if getenv == nil { + getenv = os.Getenv + } + return applyEnvOverrides(getenv, prefix, reflect.ValueOf(val), "") +} + +func applyEnvOverrides(getenv func(string) string, prefix string, spec reflect.Value, structKey string) error { + element := spec + // If spec is a named type and is addressable, + // check the address to see if it implements encoding.TextUnmarshaler. 
+ if spec.Kind() != reflect.Ptr && spec.Type().Name() != "" && spec.CanAddr() { + v := spec.Addr() + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + value := getenv(prefix) + // Skip any fields we don't have a value to set + if len(value) == 0 { + return nil + } + return u.UnmarshalText([]byte(value)) + } + } + // If we have a pointer, dereference it + if spec.Kind() == reflect.Ptr { + element = spec.Elem() + } + + value := getenv(prefix) + + switch element.Kind() { + case reflect.String: + if len(value) == 0 { + return nil + } + element.SetString(value) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + intValue, err := strconv.ParseInt(value, 0, element.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) + } + element.SetInt(intValue) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + intValue, err := strconv.ParseUint(value, 0, element.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) + } + element.SetUint(intValue) + case reflect.Bool: + boolValue, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) + } + element.SetBool(boolValue) + case reflect.Float32, reflect.Float64: + floatValue, err := strconv.ParseFloat(value, element.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) + } + element.SetFloat(floatValue) + case reflect.Slice: + // If the type is s slice, apply to each using the index as a suffix, e.g. 
GRAPHITE_0, GRAPHITE_0_TEMPLATES_0 or GRAPHITE_0_TEMPLATES="item1,item2"
+		for j := 0; j < element.Len(); j++ {
+			f := element.Index(j)
+			if err := applyEnvOverrides(getenv, prefix, f, structKey); err != nil {
+				return err
+			}
+
+			if err := applyEnvOverrides(getenv, fmt.Sprintf("%s_%d", prefix, j), f, structKey); err != nil {
+				return err
+			}
+		}
+
+		// If the type is a slice but the value was not parsed as a slice, e.g. GRAPHITE_0_TEMPLATES="item1,item2"
+		if element.Len() == 0 && len(value) > 0 {
+			rules := strings.Split(value, ",")
+
+			for _, rule := range rules {
+				element.Set(reflect.Append(element, reflect.ValueOf(rule)))
+			}
+		}
+	case reflect.Struct:
+		typeOfSpec := element.Type()
+		for i := 0; i < element.NumField(); i++ {
+			field := element.Field(i)
+
+			// Skip any fields that we cannot set
+			if !field.CanSet() && field.Kind() != reflect.Slice {
+				continue
+			}
+
+			structField := typeOfSpec.Field(i)
+			fieldName := structField.Name
+
+			configName := structField.Tag.Get("toml")
+			if configName == "-" {
+				// Skip fields with tag `toml:"-"`.
+				continue
+			}
+
+			if configName == "" && structField.Anonymous {
+				// Embedded field without a toml tag.
+				// Don't modify prefix.
+				if err := applyEnvOverrides(getenv, prefix, field, fieldName); err != nil {
+					return err
+				}
+				continue
+			}
+
+			// Replace hyphens with underscores to avoid issues with shells
+			configName = strings.Replace(configName, "-", "_", -1)
+
+			envKey := strings.ToUpper(configName)
+			if prefix != "" {
+				envKey = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName))
+			}
+
+			// If it's a sub-config, recursively apply
+			if field.Kind() == reflect.Struct || field.Kind() == reflect.Ptr ||
+				field.Kind() == reflect.Slice || field.Kind() == reflect.Array {
+				if err := applyEnvOverrides(getenv, envKey, field, fieldName); err != nil {
+					return err
+				}
+				continue
+			}
+
+			value := getenv(envKey)
+			// Skip any fields we don't have a value to set
+			if len(value) == 0 {
+				continue
+			}
+
+			if err := applyEnvOverrides(getenv, envKey, field, fieldName); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/lib/influxdb/tsdb/engine/tsm1/batch_float.go b/lib/influxdb/tsdb/engine/tsm1/batch_float.go
new file mode 100644
index 0000000..eb17874
--- /dev/null
+++ b/lib/influxdb/tsdb/engine/tsm1/batch_float.go
@@ -0,0 +1,514 @@
+package tsm1
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"math/bits"
+	"unsafe"
+)
+
+// FloatArrayEncodeAll encodes src into b, returning b and any error encountered.
+// The returned slice may be of a different length and capacity to b.
+//
+// Currently only the float compression scheme used in Facebook's Gorilla is
+// supported, so this method implements a batch oriented version of that.
+func FloatArrayEncodeAll(src []float64, b []byte) ([]byte, error) {
+	if cap(b) < 9 {
+		b = make([]byte, 0, 9) // Enough room for the header and one value.
+	}
+
+	b = b[:1]
+	b[0] = floatCompressedGorilla << 4
+
+	var first float64
+	var finished bool
+	if len(src) > 0 && math.IsNaN(src[0]) {
+		return nil, fmt.Errorf("unsupported value: NaN")
+	} else if len(src) == 0 {
+		first = math.NaN() // Write sentinel value to terminate batch.
+		finished = true
+	} else {
+		first = src[0]
+		src = src[1:]
+	}
+
+	b = b[:9]
+	n := uint64(8 + 64) // Number of bits written.
+	prev := math.Float64bits(first)
+
+	// Write first value.
+	binary.BigEndian.PutUint64(b[1:], prev)
+
+	prevLeading, prevTrailing := ^uint64(0), uint64(0)
+	var leading, trailing uint64
+	var mask uint64
+	var sum float64
+
+	// Encode remaining values.
+	for i := 0; !finished; i++ {
+		var x float64
+		if i < len(src) {
+			x = src[i]
+			sum += x
+		} else {
+			// Encode sentinel value to terminate batch
+			x = math.NaN()
+			finished = true
+		}
+
+		{
+			cur := math.Float64bits(x)
+			vDelta := cur ^ prev
+			if vDelta == 0 {
+				n++ // Write a zero bit. Nothing else to do.
+				prev = cur
+				continue
+			}
+
+			// First the current bit of the current byte is set to indicate we're
+			// writing a delta value to the stream.
+			for n>>3 >= uint64(len(b)) { // Keep growing b until we can fit all bits in.
+				b = append(b, byte(0))
+			}
+
+			// n&7 - current bit in current byte.
+			// n>>3 - the current byte.
+			b[n>>3] |= 128 >> (n & 7) // Sets the current bit of the current byte.
+			n++
+
+			// Write the delta to b.
+
+			// Determine the leading and trailing zeros.
+			leading = uint64(bits.LeadingZeros64(vDelta))
+			trailing = uint64(bits.TrailingZeros64(vDelta))
+
+			// Clamp number of leading zeros to avoid overflow when encoding
+			leading &= 0x1F
+			if leading >= 32 {
+				leading = 31
+			}
+
+			// At least 2 further bits will be required.
+			if (n+2)>>3 >= uint64(len(b)) {
+				b = append(b, byte(0))
+			}
+
+			if prevLeading != ^uint64(0) && leading >= prevLeading && trailing >= prevTrailing {
+				n++ // Write a zero bit.
+
+				// Write the l least significant bits of vDelta to b, most significant
+				// bit first.
+				l := uint64(64 - prevLeading - prevTrailing)
+				for (n+l)>>3 >= uint64(len(b)) { // Keep growing b until we can fit all bits in.
+					b = append(b, byte(0))
+				}
+
+				// Full value to write.
+				v := (vDelta >> prevTrailing) << (64 - l) // l least significant bits of v.
+ + var m = n & 7 // Current bit in current byte. + var written uint64 + if m > 0 { // In this case the current byte is not full. + written = 8 - m + if l < written { + written = l + } + mask = v >> 56 // Move 8 MSB to 8 LSB + b[n>>3] |= byte(mask >> m) + n += written + + if l-written == 0 { + prev = cur + continue + } + } + + vv := v << written // Move written bits out of the way. + + // TODO(edd): Optimise this. It's unlikely we actually have 8 bytes to write. + if (n>>3)+8 >= uint64(len(b)) { + b = append(b, 0, 0, 0, 0, 0, 0, 0, 0) + } + binary.BigEndian.PutUint64(b[n>>3:], vv) + n += (l - written) + } else { + prevLeading, prevTrailing = leading, trailing + + // Set a single bit to indicate a value will follow. + b[n>>3] |= 128 >> (n & 7) // Set current bit on current byte + n++ + + // Write 5 bits of leading. + if (n+5)>>3 >= uint64(len(b)) { + b = append(b, byte(0)) + } + + // Enough room to write the 5 bits in the current byte? + var m = n & 7 + l := uint64(5) + v := leading << 59 // 5 LSB of leading. + mask = v >> 56 // Move 5 MSB to 8 LSB + + if m <= 3 { // 5 bits fit into current byte. + b[n>>3] |= byte(mask >> m) + n += l + } else { // In this case there are fewer than 5 bits available in current byte. + // First step is to fill current byte + written := 8 - m + b[n>>3] |= byte(mask >> m) // Some of mask will get lost. + n += written + + // Second step is to write the lost part of mask into the next byte. + mask = v << written // Move written bits in previous byte out of way. + mask >>= 56 + + m = n & 7 // Recompute current bit. + b[n>>3] |= byte(mask >> m) + n += (l - written) + } + + // Note that if leading == trailing == 0, then sigbits == 64. But that + // value doesn't actually fit into the 6 bits we have. + // Luckily, we never need to encode 0 significant bits, since that would + // put us in the other case (vdelta == 0). So instead we write out a 0 and + // adjust it back to 64 on unpacking. 
+ sigbits := 64 - leading - trailing + + if (n+6)>>3 >= uint64(len(b)) { + b = append(b, byte(0)) + } + + m = n & 7 + l = uint64(6) + v = sigbits << 58 // Move 6 LSB of sigbits to MSB + mask = v >> 56 // Move 6 MSB to 8 LSB + if m <= 2 { + // The 6 bits fit into the current byte. + b[n>>3] |= byte(mask >> m) + n += l + } else { // In this case there are fewer than 6 bits available in current byte. + // First step is to fill the current byte. + written := 8 - m + b[n>>3] |= byte(mask >> m) // Write to the current bit. + n += written + + // Second step is to write the lost part of mask into the next byte. + // Write l remaining bits into current byte. + mask = v << written // Remove bits written in previous byte out of way. + mask >>= 56 + + m = n & 7 // Recompute current bit. + b[n>>3] |= byte(mask >> m) + n += l - written + } + + // Write final value. + m = n & 7 + l = sigbits + v = (vDelta >> trailing) << (64 - l) // Move l LSB into MSB + for (n+l)>>3 >= uint64(len(b)) { // Keep growing b until we can fit all bits in. + b = append(b, byte(0)) + } + + var written uint64 + if m > 0 { // In this case the current byte is not full. + written = 8 - m + if l < written { + written = l + } + mask = v >> 56 // Move 8 MSB to 8 LSB + b[n>>3] |= byte(mask >> m) + n += written + + if l-written == 0 { + prev = cur + continue + } + } + + // Shift remaining bits and write out in one go. + vv := v << written // Remove bits written in previous byte. + // TODO(edd): Optimise this. + if (n>>3)+8 >= uint64(len(b)) { + b = append(b, 0, 0, 0, 0, 0, 0, 0, 0) + } + + binary.BigEndian.PutUint64(b[n>>3:], vv) + n += (l - written) + } + prev = cur + } + } + + if math.IsNaN(sum) { + return nil, fmt.Errorf("unsupported value: NaN") + } + + length := n >> 3 + if n&7 > 0 { + length++ // Add an extra byte to capture overflowing bits. + } + return b[:length], nil +} + +// bitMask contains a lookup table where the index is the number of bits +// and the value is a mask. 
The table is always read by ANDing the index +// with 0x3f, such that if the index is 64, position 0 will be read, which +// is a 0xffffffffffffffff, thus returning all bits. +// +// 00 = 0xffffffffffffffff +// 01 = 0x0000000000000001 +// 02 = 0x0000000000000003 +// 03 = 0x0000000000000007 +// ... +// 62 = 0x3fffffffffffffff +// 63 = 0x7fffffffffffffff +var bitMask [64]uint64 + +func init() { + v := uint64(1) + for i := 1; i <= 64; i++ { + bitMask[i&0x3f] = v + v = v<<1 | 1 + } +} + +func FloatArrayDecodeAll(b []byte, buf []float64) ([]float64, error) { + if len(b) < 9 { + return []float64{}, nil + } + + var ( + val uint64 // current value + trailingN uint8 // trailing zero count + meaningfulN uint8 = 64 // meaningful bit count + ) + + // first byte is the compression type; always Gorilla + b = b[1:] + + val = binary.BigEndian.Uint64(b) + if val == uvnan { + if buf == nil { + var tmp [1]float64 + buf = tmp[:0] + } + // special case: there were no values to decode + return buf[:0], nil + } + + buf = buf[:0] + // convert the []float64 to []uint64 to avoid calling math.Float64Frombits, + // which results in unnecessary moves between Xn registers before moving + // the value into the float64 slice. This change increased performance from + // 320 MB/s to 340 MB/s on an Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz + dst := *(*[]uint64)(unsafe.Pointer(&buf)) + dst = append(dst, val) + + b = b[8:] + + // The bit reader code uses brCachedVal to store up to the next 8 bytes + // of MSB data read from b. brValidBits stores the number of remaining unread + // bits starting from the MSB. Before N bits are read from brCachedVal, + // they are left-rotated N bits, such that they end up in the left-most position. + // Using bits.RotateLeft64 results in a single instruction on many CPU architectures. 
+ // This approach permits simple tests, such as for the two control bits: + // + // brCachedVal&1 > 0 + // + // The alternative was to leave brCachedValue alone and perform shifts and + // masks to read specific bits. The original approach looked like the + // following: + // + // brCachedVal&(1<<(brValidBits&0x3f)) > 0 + // + var ( + brCachedVal = uint64(0) // a buffer of up to the next 8 bytes read from b in MSB order + brValidBits = uint8(0) // the number of unread bits remaining in brCachedVal + ) + + // Refill brCachedVal, reading up to 8 bytes from b + if len(b) >= 8 { + // fast path reads 8 bytes directly + brCachedVal = binary.BigEndian.Uint64(b) + brValidBits = 64 + b = b[8:] + } else if len(b) > 0 { + brCachedVal = 0 + brValidBits = uint8(len(b) * 8) + for i := range b { + brCachedVal = (brCachedVal << 8) | uint64(b[i]) + } + brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) + b = b[:0] + } else { + goto ERROR + } + + // The expected exit condition is for a uvnan to be decoded. + // Any other error (EOF) indicates a truncated stream. + for { + if brValidBits > 0 { + // brValidBits > 0 is impossible to predict, so we place the + // most likely case inside the if and immediately jump, keeping + // the instruction pipeline consistently full. + // This is a similar approach to using the GCC __builtin_expect + // intrinsic, which modifies the order of branches such that the + // likely case follows the conditional jump. 
+ // + // Written as if brValidBits == 0 and placing the Refill brCachedVal + // code inside reduces benchmarks from 318 MB/s to 260 MB/s on an + // Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz + goto READ0 + } + + // Refill brCachedVal, reading up to 8 bytes from b + if len(b) >= 8 { + brCachedVal = binary.BigEndian.Uint64(b) + brValidBits = 64 + b = b[8:] + } else if len(b) > 0 { + brCachedVal = 0 + brValidBits = uint8(len(b) * 8) + for i := range b { + brCachedVal = (brCachedVal << 8) | uint64(b[i]) + } + brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) + b = b[:0] + } else { + goto ERROR + } + + READ0: + // read control bit 0 + brValidBits -= 1 + brCachedVal = bits.RotateLeft64(brCachedVal, 1) + if brCachedVal&1 > 0 { + if brValidBits > 0 { + goto READ1 + } + + // Refill brCachedVal, reading up to 8 bytes from b + if len(b) >= 8 { + brCachedVal = binary.BigEndian.Uint64(b) + brValidBits = 64 + b = b[8:] + } else if len(b) > 0 { + brCachedVal = 0 + brValidBits = uint8(len(b) * 8) + for i := range b { + brCachedVal = (brCachedVal << 8) | uint64(b[i]) + } + brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) + b = b[:0] + } else { + goto ERROR + } + + READ1: + // read control bit 1 + brValidBits -= 1 + brCachedVal = bits.RotateLeft64(brCachedVal, 1) + if brCachedVal&1 > 0 { + // read 5 bits for leading zero count and 6 bits for the meaningful data count + const leadingTrailingBitCount = 11 + var lmBits uint64 // leading + meaningful data counts + if brValidBits >= leadingTrailingBitCount { + // decode 5 bits leading + 6 bits meaningful for a total of 11 bits + brValidBits -= leadingTrailingBitCount + brCachedVal = bits.RotateLeft64(brCachedVal, leadingTrailingBitCount) + lmBits = brCachedVal + } else { + bits01 := uint8(11) + if brValidBits > 0 { + bits01 -= brValidBits + lmBits = bits.RotateLeft64(brCachedVal, 11) + } + + // Refill brCachedVal, reading up to 8 bytes from b + if len(b) >= 8 { + brCachedVal = binary.BigEndian.Uint64(b) + 
brValidBits = 64 + b = b[8:] + } else if len(b) > 0 { + brCachedVal = 0 + brValidBits = uint8(len(b) * 8) + for i := range b { + brCachedVal = (brCachedVal << 8) | uint64(b[i]) + } + brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) + b = b[:0] + } else { + goto ERROR + } + brCachedVal = bits.RotateLeft64(brCachedVal, int(bits01)) + brValidBits -= bits01 + lmBits &^= bitMask[bits01&0x3f] + lmBits |= brCachedVal & bitMask[bits01&0x3f] + } + + lmBits &= 0x7ff + leadingN := uint8((lmBits >> 6) & 0x1f) // 5 bits leading + meaningfulN = uint8(lmBits & 0x3f) // 6 bits meaningful + if meaningfulN > 0 { + trailingN = 64 - leadingN - meaningfulN + } else { + // meaningfulN == 0 is a special case, such that all bits + // are meaningful + trailingN = 0 + meaningfulN = 64 + } + } + + var sBits uint64 // significant bits + if brValidBits >= meaningfulN { + brValidBits -= meaningfulN + brCachedVal = bits.RotateLeft64(brCachedVal, int(meaningfulN)) + sBits = brCachedVal + } else { + mBits := meaningfulN + if brValidBits > 0 { + mBits -= brValidBits + sBits = bits.RotateLeft64(brCachedVal, int(meaningfulN)) + } + + // Refill brCachedVal, reading up to 8 bytes from b + if len(b) >= 8 { + brCachedVal = binary.BigEndian.Uint64(b) + brValidBits = 64 + b = b[8:] + } else if len(b) > 0 { + brCachedVal = 0 + brValidBits = uint8(len(b) * 8) + for i := range b { + brCachedVal = (brCachedVal << 8) | uint64(b[i]) + } + brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) + b = b[:0] + } else { + goto ERROR + } + brCachedVal = bits.RotateLeft64(brCachedVal, int(mBits)) + brValidBits -= mBits + sBits &^= bitMask[mBits&0x3f] + sBits |= brCachedVal & bitMask[mBits&0x3f] + } + sBits &= bitMask[meaningfulN&0x3f] + + val ^= sBits << (trailingN & 0x3f) + if val == uvnan { + // IsNaN, eof + break + } + } + + dst = append(dst, val) + } + + return *(*[]float64)(unsafe.Pointer(&dst)), nil + +ERROR: + return (*(*[]float64)(unsafe.Pointer(&dst)))[:0], io.EOF +} diff --git 
a/lib/influxdb/tsdb/engine/tsm1/float.go b/lib/influxdb/tsdb/engine/tsm1/float.go
new file mode 100644
index 0000000..1cc73aa
--- /dev/null
+++ b/lib/influxdb/tsdb/engine/tsm1/float.go
@@ -0,0 +1,17 @@
+package tsm1
+
+/*
+This code is originally from: https://github.com/dgryski/go-tsz and has been modified to remove
+the timestamp compression functionality.
+
+It implements the float compression as presented in: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
+This implementation uses a sentinel value of NaN which means that float64 NaN cannot be stored using
+this version.
+*/
+
+// Note: an uncompressed format is not yet implemented.
+// floatCompressedGorilla is a compressed format using the gorilla paper encoding
+const floatCompressedGorilla = 1
+
+// uvnan is the constant returned from math.NaN().
+const uvnan = 0x7FF8000000000001
diff --git a/lib/influxdb/uuid/uuid.go b/lib/influxdb/uuid/uuid.go
new file mode 100644
index 0000000..c935998
--- /dev/null
+++ b/lib/influxdb/uuid/uuid.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The gocql Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// The uuid package can be used to generate and parse universally unique +// identifiers, a standardized format in the form of a 128 bit number. +// +// http://tools.ietf.org/html/rfc4122 + +// Package uuid provides functions to create time-based UUIDs. +package uuid // import "github.com/influxdata/influxdb/uuid" + +import ( + "crypto/rand" + "net" + "sync/atomic" + "time" +) + +// UUID - unique identifier type representing a 128 bit number +type UUID [16]byte + +var timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix() + +var hardwareAddr = hwAddrFunc() + +var clockSeq uint32 + +func hwAddrFunc() [6]byte { + u := [6]byte{} + + ifaces, err := net.Interfaces() + if err != nil { + rand.Reader.Read(u[:]) + return u + } + for _, iface := range ifaces { + if len(iface.HardwareAddr) >= 6 { + copy(u[:], iface.HardwareAddr) + return u + } + } + rand.Reader.Read(u[:]) + return u +} + +// TimeUUID generates a new time based UUID (version 1) using the current +// time as the timestamp. +func TimeUUID() UUID { + return FromTime(time.Now()) +} + +// FromTime generates a new time based UUID (version 1) as described in +// RFC 4122. 
This UUID contains the MAC address of the node that generated
+// the UUID, the given timestamp and a sequence number.
+func FromTime(aTime time.Time) UUID {
+	var u UUID
+
+	utcTime := aTime.In(time.UTC)
+	t := uint64(utcTime.Unix()-timeBase)*10000000 + uint64(utcTime.Nanosecond()/100)
+	u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)
+	u[4], u[5] = byte(t>>40), byte(t>>32)
+	u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)
+
+	clock := atomic.AddUint32(&clockSeq, 1)
+	u[8] = byte(clock >> 8)
+	u[9] = byte(clock)
+
+	copy(u[10:], hardwareAddr[:])
+
+	u[6] |= 0x10 // set version to 1 (time based uuid)
+	u[8] &= 0x3F // clear variant
+	u[8] |= 0x80 // set to IETF variant
+
+	return u
+}
+
+// String returns the UUID in its canonical form, a 32 digit hexadecimal
+// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+	var offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}
+	const hexString = "0123456789abcdef"
+	r := make([]byte, 36)
+	for i, b := range u {
+		r[offsets[i]] = hexString[b>>4]
+		r[offsets[i]+1] = hexString[b&0xF]
+	}
+	r[8] = '-'
+	r[13] = '-'
+	r[18] = '-'
+	r[23] = '-'
+	return string(r)
+
+}