From 291c9d8c964d6f4f7a985b04eb8c25dc038a2702 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 23 Oct 2024 14:20:52 +0900 Subject: [PATCH 001/364] Fix unnecessary pointer dereferencing Fix unnecessary pointer dereferencing in list() and listRecursive() functions. --- pkg/runner/ftp.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 8f16a2f..cd5af1f 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -212,11 +212,11 @@ func (fc *FtpClient) isDir(path string) bool { func (fc *FtpClient) list(rootDir string, depth int) (CommandResult, error) { path := fc.parsePath(rootDir) cmdResult, err := fc.listRecursive(path, depth, 0) - return *cmdResult, err + return cmdResult, err } -func (fc *FtpClient) listRecursive(path string, depth, current int) (*CommandResult, error) { - result := &CommandResult{ +func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResult, error) { + result := CommandResult{ Name: filepath.Base(path), Type: "folder", Path: path, @@ -228,7 +228,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (*CommandRes cmd.SysProcAttr = fc.sysProcAttr output, err := cmd.CombinedOutput() if err != nil { - return &CommandResult{ + return CommandResult{ Name: filepath.Base(path), Path: path, Message: string(output), @@ -243,7 +243,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (*CommandRes size, err := fc.size(foundPath) if err != nil { - return &CommandResult{ + return CommandResult{ Name: filepath.Base(path), Path: path, Message: string(output), From 78e72176ab9f409afd0b9c355cfd97686ea828d3 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Thu, 24 Oct 2024 09:41:56 +0900 Subject: [PATCH 002/364] Minor fix --- pkg/runner/ftp.go | 2 +- pkg/runner/pty.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index cd5af1f..2f36d9f 100644 --- a/pkg/runner/ftp.go +++ 
b/pkg/runner/ftp.go @@ -53,6 +53,7 @@ func (fc *FtpClient) RunFtpBackground() { log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s", fc.url) return } + defer fc.close() fc.sysProcAttr, err = demote(fc.username, fc.groupname) if err != nil { @@ -67,7 +68,6 @@ func (fc *FtpClient) RunFtpBackground() { go fc.read(ctx, cancel) <-ctx.Done() - fc.close() } func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 081bc58..c0ac642 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -63,6 +63,7 @@ func (pc *PtyClient) RunPtyBackground() { log.Error().Err(err).Msgf("Failed to connect to pty websocket at %s", pc.url) return } + defer pc.close() pc.cmd = exec.Command("/bin/bash", "-i") @@ -93,7 +94,6 @@ func (pc *PtyClient) RunPtyBackground() { terminals[pc.sessionID] = pc <-ctx.Done() - pc.close() } func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.CancelFunc) { From f5a10a0838c33122cbdbdd94944a6087d1e626fe Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Thu, 24 Oct 2024 11:29:42 +0900 Subject: [PATCH 003/364] Refactor config file loading to handle empty files and improve error handling --- pkg/config/config.go | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 60694dd..8592b10 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -33,20 +33,39 @@ func InitSettings(settings Settings) { func LoadConfig() Settings { var iniData *ini.File var err error + var validConfigFile string + for _, configFile := range configFiles { - iniData, err = ini.Load(configFile) - if err == nil { - break + fileInfo, statErr := os.Stat(configFile) + if statErr == nil { + if fileInfo.Size() == 0 { + log.Debug().Msgf("Config file %s is empty, skipping...", configFile) + continue + } else { + log.Debug().Msgf("Using config file %s", configFile) + validConfigFile = 
configFile + break + } } } + + if validConfigFile == "" { + log.Fatal().Msg("No valid config file found") + } + + iniData, err = ini.Load(validConfigFile) + if err != nil { + log.Fatal().Err(err).Msgf("Failed to load config file %s", validConfigFile) + } + if iniData == nil { - log.Fatal().Err(err).Msg("Failed to load config file") + log.Fatal().Err(err).Msgf("Failed to load config file %s", validConfigFile) } var config Config err = iniData.MapTo(&config) if err != nil { - log.Fatal().Err(err).Msg("Failed to parse config file") + log.Fatal().Err(err).Msgf("Failed to parse config file %s", validConfigFile) } if config.Logging.Debug { From 327036ff901e0ad0aa5e6c07db39e7d62741e95f Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Fri, 25 Oct 2024 20:34:14 +0900 Subject: [PATCH 004/364] =?UTF-8?q?Fix=20infinite=20loop=20when=20?= =?UTF-8?q?=E2=80=98commissioned=E2=80=99=20field=20is=20missing=20in=20Ch?= =?UTF-8?q?eckSession?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/scheduler/session.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 5271398..e90537a 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -78,7 +78,8 @@ func (session *Session) CheckSession() bool { if commissioned, ok := response["commissioned"].(bool); ok { return commissioned } else { - log.Debug().Msg("Commissioned field not found") + log.Error().Msg("Unable to find 'commissioned' field in the response") + return false } } } From 7eef98bedd84242298008a236ff2cf2b7bee081f Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 25 Oct 2024 21:35:32 +0900 Subject: [PATCH 005/364] Delete install_zip_package() Due to dpkg lock error from adding install_zip_package(), roll back postinstall.sh. 
--- scripts/postinstall.sh | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index d359c9a..7def911 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -5,7 +5,6 @@ ALPAMON_BIN="/usr/local/bin/alpamon" main() { check_root_permission check_systemd_status - install_zip_package check_alpamon_binary install_alpamon start_systemd_service @@ -25,31 +24,6 @@ check_systemd_status() { fi } -install_zip_package() { - echo "Checking and installing zip package..." - - if command -v zip &> /dev/null; then - echo "zip package is already installed." - return 0 - fi - - if command -v apt-get &> /dev/null; then - apt-get install -y zip - elif command -v yum &> /dev/null; then - yum install -y zip - else - echo "Error: Could not detect package manager. Please install zip package manually." - exit 1 - fi - - if ! command -v zip &> /dev/null; then - echo "Error: Failed to install zip package." - exit 1 - fi - - echo "zip package installed successfully." -} - check_alpamon_binary() { if [ ! 
-f "$ALPAMON_BIN" ]; then echo "Error: Alpamon binary not found at $ALPAMON_BIN" From d8cb02ab113e33d13ef6d8cb454a988d73cd9579 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 28 Oct 2024 13:27:02 +0900 Subject: [PATCH 006/364] Replace retryablehttp with net/http in session client for correct behavior --- go.mod | 2 -- pkg/scheduler/session.go | 29 ++++++++++------------------- pkg/scheduler/types.go | 4 ++-- 3 files changed, 12 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 721a161..eedd837 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 github.com/gorilla/websocket v1.5.3 - github.com/hashicorp/go-retryablehttp v0.7.7 github.com/knqyf263/go-rpmdb v0.1.1 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v4 v4.24.8 @@ -23,7 +22,6 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index e90537a..2a30bab 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -8,7 +8,6 @@ import ( "fmt" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/hashicorp/go-retryablehttp" "github.com/rs/zerolog/log" "io" "net/http" @@ -26,10 +25,7 @@ func InitSession() *Session { BaseURL: config.GlobalSettings.ServerURL, } - client := retryablehttp.NewClient() - client.RetryMax = 3 - client.RetryWaitMin = 1 * time.Second - client.RetryWaitMax = 3 * time.Second + client := http.Client{} tlsConfig := &tls.Config{} if config.GlobalSettings.CaCert != "" { @@ -43,12 +39,11 @@ func 
InitSession() *Session { } tlsConfig.InsecureSkipVerify = config.GlobalSettings.SSLVerify - - client.HTTPClient.Transport = &http.Transport{ + client.Transport = &http.Transport{ TLSClientConfig: tlsConfig, } - session.Client = client + session.Client = &client session.authorization = fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key) return session @@ -60,6 +55,7 @@ func (session *Session) CheckSession() bool { for { resp, _, err := session.Get(checkSessionURL, 5) if err != nil { + log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds", config.GlobalSettings.ServerURL, int(timeout.Seconds())) time.Sleep(timeout) timeout *= 2 if timeout > config.MaxConnectInterval { @@ -84,7 +80,7 @@ func (session *Session) CheckSession() bool { } } -func (session *Session) newRequest(method, url string, rawBody interface{}) (*retryablehttp.Request, error) { +func (session *Session) newRequest(method, url string, rawBody interface{}) (*http.Request, error) { var body io.Reader if rawBody != nil { switch v := rawBody.(type) { @@ -101,11 +97,11 @@ func (session *Session) newRequest(method, url string, rawBody interface{}) (*re } } - return retryablehttp.NewRequest(method, utils.JoinPath(session.BaseURL, url), body) + return http.NewRequest(method, utils.JoinPath(session.BaseURL, url), body) } -func (session *Session) do(req *retryablehttp.Request, timeout time.Duration) ([]byte, int, error) { - session.Client.HTTPClient.Timeout = timeout * time.Second +func (session *Session) do(req *http.Request, timeout time.Duration) ([]byte, int, error) { + session.Client.Timeout = timeout * time.Second req.Header.Set("Authorization", session.authorization) if req.Method == http.MethodPost || req.Method == http.MethodPut || req.Method == http.MethodPatch { @@ -156,16 +152,11 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT return nil, 0, err } - session.Client.HTTPClient.Timeout = timeout * time.Second + 
session.Client.Timeout = timeout * time.Second req.Header.Set("Authorization", session.authorization) req.Header.Set("Content-Type", contentType) - retryableReq, err := retryablehttp.FromRequest(req) - if err != nil { - return nil, 0, err - } - - resp, err := session.Client.Do(retryableReq) + resp, err := session.Client.Do(req) if err != nil { return nil, 0, err } diff --git a/pkg/scheduler/types.go b/pkg/scheduler/types.go index 666f0ee..e96dc52 100644 --- a/pkg/scheduler/types.go +++ b/pkg/scheduler/types.go @@ -2,14 +2,14 @@ package scheduler import ( "github.com/adrianbrad/queue" - "github.com/hashicorp/go-retryablehttp" + "net/http" "sync" "time" ) type Session struct { BaseURL string - Client *retryablehttp.Client + Client *http.Client authorization string } From 7d6462dbc2e4e2efbe46ed3ce3dd5942dec8245d Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 28 Oct 2024 13:28:17 +0900 Subject: [PATCH 007/364] Add logging to commit package --- pkg/runner/commit.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index c6e0fe5..9bc4462 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -72,10 +72,12 @@ func commitSystemInfo() { "record": "committed", "description": "Committed system information. 
version: %s"}`, version.Version)), 80, time.Time{}) - log.Info().Msg("Finished committing system information.") + log.Info().Msg("Completed committing system information.") } func syncSystemInfo(session *scheduler.Session, keys []string) { + log.Info().Msg("Start system information synchronization.") + syncMutex.Lock() defer syncMutex.Unlock() @@ -170,6 +172,7 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { } else { compareData(entry, currentData.(ComparableData), remoteData.(ComparableData)) } + log.Info().Msgf("Completed system information synchronization for %s.", key) } } From 6887a449977fd7378cb5c1c872c0db0413fd0e7d Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 28 Oct 2024 16:26:35 +0900 Subject: [PATCH 008/364] Refactor logger to send log records and apply minor fixes --- cmd/alpamon/command/root.go | 8 +-- pkg/logger/logger.go | 124 +++++++++++++++++++++++++++++------- pkg/runner/commit.go | 2 +- 3 files changed, 107 insertions(+), 27 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 784f627..d05d5ba 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -39,10 +39,6 @@ func runAgent() { } defer func() { _ = os.Remove(pidFilePath) }() - // Logger - logFile := logger.InitLogger() - defer func() { _ = logFile.Close() }() - // Config & Settings settings := config.LoadConfig() config.InitSettings(settings) @@ -55,6 +51,10 @@ func runAgent() { // Reporter scheduler.StartReporters(session) + // Logger + logFile := logger.InitLogger() + defer func() { _ = logFile.Close() }() + // Commit runner.CommitAsync(session, commissioned) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 199fd04..e870164 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,23 +1,29 @@ package logger import ( + "encoding/json" "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/scheduler" + "github.com/alpacanetworks/alpamon-go/pkg/version" "github.com/rs/zerolog" 
"github.com/rs/zerolog/log" + "io" "os" + "strconv" "strings" "time" ) const ( - logDir = "/var/log/alpamon" - logFile = "alpamon.log" + logDir = "/var/log/alpamon" + logFileName = "alpamon.log" + recordURL = "/api/history/logs/" ) func InitLogger() *os.File { - fileName := fmt.Sprintf("%s/%s", logDir, logFile) + fileName := fmt.Sprintf("%s/%s", logDir, logFileName) if _, err := os.Stat(logDir); os.IsNotExist(err) { - fileName = logFile + fileName = logFileName } logFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) @@ -25,26 +31,100 @@ func InitLogger() *os.File { log.Fatal().Err(err).Msg("Failed to open log file") } - consoleOutput := zerolog.ConsoleWriter{ - Out: os.Stderr, - TimeFormat: time.RFC3339, - TimeLocation: time.Local, - FormatLevel: func(i interface{}) string { - return "[" + strings.ToUpper(i.(string)) + "]" - }, - FormatMessage: func(i interface{}) string { - return " " + i.(string) - }, - FormatFieldName: func(i interface{}) string { - return "(" + i.(string) + ")" - }, - FormatFieldValue: func(i interface{}) string { - return i.(string) - }, + var output io.Writer + + recordWriter := &logRecordWriter{} + + // In development, log to console; in production, log to file + if version.Version == "dev" { + consoleWriter := zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.RFC3339, + TimeLocation: time.Local, + FormatLevel: func(i interface{}) string { + return "[" + strings.ToUpper(i.(string)) + "]" + }, + FormatMessage: func(i interface{}) string { + return " " + i.(string) + }, + FormatFieldName: func(i interface{}) string { + return "(" + i.(string) + ")" + }, + FormatFieldValue: func(i interface{}) string { + return i.(string) + }, + } + output = zerolog.MultiLevelWriter(consoleWriter, recordWriter) + } else { + output = zerolog.MultiLevelWriter(logFile, recordWriter) } - multi := zerolog.MultiLevelWriter(consoleOutput, logFile) - log.Logger = zerolog.New(multi).With().Timestamp().Caller().Logger() + log.Logger = 
zerolog.New(output).With().Timestamp().Caller().Logger() return logFile } + +type logRecord struct { + Date string `json:"date"` + Level int `json:"level"` + Program string `json:"program"` + Path string `json:"path"` + Lineno int `json:"lineno"` + PID int `json:"pid"` + Msg string `json:"msg"` +} + +type logRecordWriter struct{} + +func (w *logRecordWriter) Write(p []byte) (n int, err error) { + var parsedLog map[string]string + err = json.Unmarshal(p, &parsedLog) + if err != nil { + return 0, err + } + + caller := parsedLog["caller"] + if caller == "" { + return len(p), nil + } + + lineno := 0 + if parts := strings.Split(caller, ":"); len(parts) > 1 { + lineno, _ = strconv.Atoi(parts[1]) + } + + record := logRecord{ + Date: time.Now().UTC().Format(time.RFC3339), + Level: convertLevelToNumber(parsedLog["level"]), + Program: "alpamon", + Path: caller, + Lineno: lineno, + PID: os.Getpid(), + Msg: parsedLog["message"], + } + + go func() { + scheduler.Rqueue.Post(recordURL, record, 90, time.Time{}) + }() + + return len(p), nil +} + +// alpacon-server uses Python's logging package, which has different log levels from zerolog. +// This function maps zerolog log levels to Python logging levels. 
+func convertLevelToNumber(level string) int { + switch level { + case "fatal": + return 50 // CRITICAL, FATAL + case "error": + return 40 // ERROR + case "warn", "warning": + return 30 // WARNING + case "info": + return 20 // INFO + case "debug": + return 10 // DEBUG + default: + return 0 // NOT SET + } +} diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 9bc4462..ee362ad 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -172,8 +172,8 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { } else { compareData(entry, currentData.(ComparableData), remoteData.(ComparableData)) } - log.Info().Msgf("Completed system information synchronization for %s.", key) } + log.Info().Msg("Completed system information synchronization") } func compareData(entry commitDef, currentData, remoteData ComparableData) { From f7e05979e0850c46d2664d3afbe1884fdca0d5dd Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 29 Oct 2024 16:18:50 +0900 Subject: [PATCH 009/364] Add ftp alpamon command To execute FtpClient in a process with lowered privileges, add ftp command to alpamon command. It accepts url and home directory as arguments to execute FtpClient. The process loads a config to be used. 
--- cmd/alpamon/command/ftp.go | 29 +++++++++++++++++++++++++++++ cmd/alpamon/command/root.go | 7 ++++--- 2 files changed, 33 insertions(+), 3 deletions(-) create mode 100644 cmd/alpamon/command/ftp.go diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go new file mode 100644 index 0000000..67db134 --- /dev/null +++ b/cmd/alpamon/command/ftp.go @@ -0,0 +1,29 @@ +package command + +import ( + "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/runner" + "github.com/spf13/cobra" +) + +var ftpCmd = &cobra.Command{ + Use: "ftp ", + Short: "Start worker for Web FTP", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + url := args[0] + homeDirectory := args[1] + + settings := config.LoadConfig() + config.InitFtpSettings(settings) + + RunFtpWorker(url, homeDirectory) + + return nil + }, +} + +func RunFtpWorker(url, homeDirectory string) { + ftpClient := runner.NewFtpClient(url, homeDirectory) + ftpClient.RunFtpBackground() +} diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 784f627..10ab770 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -2,6 +2,9 @@ package command import ( "fmt" + "os" + "syscall" + "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/pidfile" @@ -11,8 +14,6 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/version" "github.com/rs/zerolog/log" "github.com/spf13/cobra" - "os" - "syscall" ) var RootCmd = &cobra.Command{ @@ -24,7 +25,7 @@ var RootCmd = &cobra.Command{ } func init() { - RootCmd.AddCommand(installCmd) + RootCmd.AddCommand(installCmd, ftpCmd) } func runAgent() { From 7aec18848d774b772e32ad061943abb3ab50755a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 29 Oct 2024 16:22:48 +0900 Subject: [PATCH 010/364] Refactor Web FTP Command Due to performance issues, refactor the code from using the os/exec 
package to using Go's built-in packages. As a result of changes made to other code to allow FtpClient's RunFtpBackground() to execute in a process with lowered privileges, this code was also updated. --- pkg/runner/ftp.go | 224 +++++++++++++++++++++++++--------------------- 1 file changed, 124 insertions(+), 100 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 2f36d9f..c512d0b 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -4,11 +4,11 @@ import ( "context" "encoding/json" "fmt" + "io" "net/http" - "os/exec" + "os" "path/filepath" "strings" - "syscall" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/gorilla/websocket" @@ -18,29 +18,22 @@ import ( type FtpClient struct { conn *websocket.Conn requestHeader http.Header - sysProcAttr *syscall.SysProcAttr url string - username string - groupname string homeDirectory string workingDirectory string - sessionID string } -func NewFtpClient(data CommandData) *FtpClient { +func NewFtpClient(url, homeDirectory string) *FtpClient { headers := http.Header{ - "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key)}, - "Origin": {config.GlobalSettings.ServerURL}, + "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.FtpSettings.ID, config.FtpSettings.Key)}, + "Origin": {config.FtpSettings.ServerURL}, } return &FtpClient{ requestHeader: headers, - url: strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + data.URL, - username: data.Username, - groupname: data.Groupname, - homeDirectory: data.HomeDirectory, - workingDirectory: data.HomeDirectory, - sessionID: data.SessionID, + url: strings.Replace(config.FtpSettings.ServerURL, "http", "ws", 1) + url, + homeDirectory: homeDirectory, + workingDirectory: homeDirectory, } } @@ -55,7 +48,6 @@ func (fc *FtpClient) RunFtpBackground() { } defer fc.close() - fc.sysProcAttr, err = demote(fc.username, fc.groupname) if err != nil { log.Debug().Err(err).Msg("Failed to demote user.") 
fc.close() @@ -180,35 +172,6 @@ func (fc *FtpClient) parsePath(path string) string { return parsedPath } -func (fc *FtpClient) size(path string) (int64, error) { - cmd := exec.Command("du", "-sk", path) - cmd.SysProcAttr = fc.sysProcAttr - output, err := cmd.Output() - if err != nil { - return 0, err - } - - parts := strings.Fields(string(output)) - if len(parts) < 1 { - return 0, fmt.Errorf("could not retrieve size for path: %s", path) - } - - size := int64(0) - if _, err = fmt.Sscanf(parts[0], "%d", &size); err != nil { - return size, err - } - - return size * 1024, nil -} - -func (fc *FtpClient) isDir(path string) bool { - cmd := exec.Command("sh", "-c", fmt.Sprintf("ls -ld \"%s\" | awk '{print $1}'", path)) - cmd.SysProcAttr = fc.sysProcAttr - output, _ := cmd.Output() - - return strings.HasPrefix(string(output), "d") -} - func (fc *FtpClient) list(rootDir string, depth int) (CommandResult, error) { path := fc.parsePath(rootDir) cmdResult, err := fc.listRecursive(path, depth, 0) @@ -224,45 +187,34 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu Children: []CommandResult{}, } - cmd := exec.Command("find", path, "-mindepth", "1", "-maxdepth", "1") - cmd.SysProcAttr = fc.sysProcAttr - output, err := cmd.CombinedOutput() + entries, err := os.ReadDir(path) if err != nil { return CommandResult{ Name: filepath.Base(path), Path: path, - Message: string(output), + Message: err.Error(), }, nil } - paths := strings.Split(string(output), "\n") - for _, foundPath := range paths { - if foundPath == "" { - continue - } - - size, err := fc.size(foundPath) + for _, entry := range entries { + fullPath := filepath.Join(path, entry.Name()) + info, err := entry.Info() if err != nil { - return CommandResult{ - Name: filepath.Base(path), - Path: path, - Message: string(output), - }, nil + continue } child := CommandResult{ - Name: filepath.Base(foundPath), - Path: foundPath, - Size: size, + Name: entry.Name(), + Path: fullPath, + Size: info.Size(), 
} - if fc.isDir(foundPath) { + if entry.IsDir() { child.Type = "folder" - if current < depth-1 { - childResult, err := fc.listRecursive(foundPath, depth, current+1) + childResult, err := fc.listRecursive(fullPath, depth, current+1) if err != nil { - return result, nil + continue } child.Children = childResult.Children child.Size = childResult.Size @@ -272,7 +224,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu } result.Children = append(result.Children, child) - result.Size += size + result.Size += child.Size } return result, nil @@ -281,12 +233,10 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu func (fc *FtpClient) mkd(path string) (CommandResult, error) { path = fc.parsePath(path) - cmd := exec.Command("mkdir", path) - cmd.SysProcAttr = fc.sysProcAttr - output, err := cmd.CombinedOutput() + err := os.MkdirAll(path, 0755) if err != nil { return CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } @@ -297,15 +247,20 @@ func (fc *FtpClient) mkd(path string) (CommandResult, error) { func (fc *FtpClient) cwd(path string) (CommandResult, error) { path = fc.parsePath(path) - cmd := exec.Command("test", "-r", path, "-a", "-w", path, "-a", "-x", path) - cmd.SysProcAttr = fc.sysProcAttr - output, err := cmd.CombinedOutput() + + info, err := os.Stat(path) if err != nil { return CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } + if !info.IsDir() { + return CommandResult{ + Message: "not a directory", + }, fmt.Errorf("not a directory") + } + fc.workingDirectory = path return CommandResult{ @@ -323,11 +278,10 @@ func (fc *FtpClient) pwd() (CommandResult, error) { func (fc *FtpClient) dele(path string) (CommandResult, error) { path = fc.parsePath(path) - cmd := exec.Command("rm", path) - cmd.SysProcAttr = fc.sysProcAttr - if output, err := cmd.CombinedOutput(); err != nil { + err := os.Remove(path) + if err != nil { return 
CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } @@ -339,17 +293,16 @@ func (fc *FtpClient) dele(path string) (CommandResult, error) { func (fc *FtpClient) rmd(path string, recursive bool) (CommandResult, error) { path = fc.parsePath(path) - var cmd *exec.Cmd + var err error if recursive { - cmd = exec.Command("rm", "-r", path) + err = os.RemoveAll(path) } else { - cmd = exec.Command("rmdir", path) + err = os.Remove(path) } - cmd.SysProcAttr = fc.sysProcAttr - if output, err := cmd.CombinedOutput(); err != nil { + if err != nil { return CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } @@ -362,11 +315,10 @@ func (fc *FtpClient) mv(src, dst string) (CommandResult, error) { src = fc.parsePath(src) dst = filepath.Join(fc.parsePath(dst), filepath.Base(src)) - cmd := exec.Command("mv", src, dst) - cmd.SysProcAttr = fc.sysProcAttr - if output, err := cmd.CombinedOutput(); err != nil { + err := os.Rename(src, dst) + if err != nil { return CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } @@ -379,18 +331,24 @@ func (fc *FtpClient) cp(src, dst string) (CommandResult, error) { src = fc.parsePath(src) dst = filepath.Join(fc.parsePath(dst), filepath.Base(src)) - if fc.isDir(src) { + info, err := os.Stat(src) + if err != nil { + return CommandResult{ + Message: err.Error(), + }, err + } + + if info.IsDir() { return fc.cpDir(src, dst) } return fc.cpFile(src, dst) } func (fc *FtpClient) cpDir(src, dst string) (CommandResult, error) { - cmd := exec.Command("cp", "-r", src, dst) - cmd.SysProcAttr = fc.sysProcAttr - if output, err := cmd.CombinedOutput(); err != nil { + err := copyDir(src, dst) + if err != nil { return CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } @@ -400,11 +358,10 @@ func (fc *FtpClient) cpDir(src, dst string) (CommandResult, error) { } func (fc *FtpClient) cpFile(src, dst string) 
(CommandResult, error) { - cmd := exec.Command("cp", src, dst) - cmd.SysProcAttr = fc.sysProcAttr - if output, err := cmd.CombinedOutput(); err != nil { + err := copyFile(src, dst) + if err != nil { return CommandResult{ - Message: strings.ToLower(string(output)), + Message: err.Error(), }, err } @@ -412,3 +369,70 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { Message: fmt.Sprintf("Copy %s to %s", src, dst), }, nil } + +func copyFile(src, dst string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.Create(dst) + if err != nil { + return err + } + defer dstFile.Close() + + if _, err = io.Copy(dstFile, srcFile); err != nil { + return err + } + + if err = dstFile.Close(); err != nil { + return err + } + + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + if err = os.Chmod(dst, srcInfo.Mode()); err != nil { + return err + } + + return nil +} + +func copyDir(src, dst string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + err = os.MkdirAll(dst, srcInfo.Mode()) + if err != nil { + return err + } + + entries, err := os.ReadDir(src) + if err != nil { + return err + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + if err = copyDir(srcPath, dstPath); err != nil { + return err + } + } else { + if err = copyFile(srcPath, dstPath); err != nil { + return err + } + } + } + + return nil +} From 2df4dbd33be0a5cf4d1e2ca6918ac68a664fcda0 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 29 Oct 2024 16:25:15 +0900 Subject: [PATCH 011/364] Add FtpSettings Add FtpSettings and InitFtpSettings to utilize web FTP in a new process. 
--- pkg/config/config.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 8592b10..70d0fdd 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -2,13 +2,14 @@ package config import ( "crypto/tls" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "gopkg.in/ini.v1" "os" "path/filepath" "strings" "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "gopkg.in/ini.v1" ) var ( @@ -18,6 +19,7 @@ var ( } GlobalSettings Settings + FtpSettings Settings ) const ( @@ -30,6 +32,10 @@ func InitSettings(settings Settings) { GlobalSettings = settings } +func InitFtpSettings(settings Settings) { + FtpSettings = settings +} + func LoadConfig() Settings { var iniData *ini.File var err error From e8fc2b2a01e6b45b8020ddfff89f5a605a0e0f29 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 29 Oct 2024 16:26:44 +0900 Subject: [PATCH 012/364] Update openftp command Update openftp to run FtpClient in a new process. --- pkg/runner/command.go | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 79cd9d9..2dcaaab 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -153,8 +153,35 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("openftp: Not enough information. %s", err.Error()) } - ftpClient := NewFtpClient(cr.data) - go ftpClient.RunFtpBackground() + sysProcAttr, err := demote(data.Username, data.Groupname) + if err != nil { + log.Debug().Err(err).Msg("Failed to get demote permission") + + return 1, fmt.Sprintf("openftp: Failed to get demoted permission. %s", err.Error()) + } + + executable, err := os.Executable() + if err != nil { + log.Debug().Err(err).Msg("Failed to get executable path") + + return 1, fmt.Sprintf("openftp: Failed to get executable path. 
%s", err.Error()) + } + + cmd := exec.Command( + executable, + "ftp", + data.URL, + data.HomeDirectory, + ) + cmd.SysProcAttr = sysProcAttr + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Start(); err != nil { + log.Debug().Err(err).Msg("Failed to start worker process") + + return 1, fmt.Sprintf("openftp: Failed to start worker process. %s", err.Error()) + } return 0, "Spawned a ftp terminal." case "resizepty": From 996cf171ec8ac641351efc334352418a5198be43 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Tue, 29 Oct 2024 17:07:30 +0900 Subject: [PATCH 013/364] Improve logging by excluding unnecessary logs in PTY and FTP --- pkg/runner/ftp.go | 13 +++++++------ pkg/runner/pty.go | 20 ++++++++++++-------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 2f36d9f..01a8f5f 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -81,7 +81,9 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { if ctx.Err() != nil { return } - log.Debug().Err(err).Msg("Failed to read from ftp websocket") + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + log.Debug().Err(err).Msg("Failed to read from ftp websocket") + } cancel() return } @@ -123,7 +125,9 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { if ctx.Err() != nil { return } - log.Debug().Err(err).Msg("Failed to send websocket message") + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + log.Debug().Err(err).Msg("Failed to send websocket message") + } cancel() return } @@ -133,10 +137,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { func (fc *FtpClient) close() { if fc.conn != nil { - err := fc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - log.Debug().Err(err).Msg("Failed to write close message to ftp 
websocket") - } + _ = fc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) _ = fc.conn.Close() } diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index c0ac642..afe7adb 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -2,6 +2,7 @@ package runner import ( "context" + "errors" "fmt" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/creack/pty" @@ -107,7 +108,9 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance if ctx.Err() != nil { return } - log.Debug().Err(err).Msg("Failed to read from pty websocket") + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + log.Debug().Err(err).Msg("Failed to read from pty websocket") + } cancel() return } @@ -116,7 +119,9 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance if ctx.Err() != nil { return } - log.Debug().Err(err).Msg("Failed to write to pty") + if !errors.Is(err, os.ErrClosed) { + log.Debug().Err(err).Msg("Failed to write to pty") + } cancel() return } @@ -140,7 +145,7 @@ func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc) if err == io.EOF { log.Debug().Msg("pty session exited.") } else { - log.Debug().Err(err).Msg("Failed to read from pty") + log.Debug().Err(err).Msg("Failed to read from PTY") } cancel() return @@ -150,7 +155,9 @@ func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc) if ctx.Err() != nil { return } - log.Debug().Err(err).Msg("Failed to write to pty") + if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + log.Debug().Err(err).Msg("Failed to write to pty") + } cancel() return } @@ -186,10 +193,7 @@ func (pc *PtyClient) close() { } if pc.conn != nil { - err := pc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - log.Debug().Err(err).Msg("Failed to write close 
message to pty websocket") - } + _ = pc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) _ = pc.conn.Close() } From 466591f309c61ab27de11d5efdfa2b505e448eba Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 29 Oct 2024 17:08:44 +0900 Subject: [PATCH 014/364] Add logger in ftp command Add logger to unify log formats. --- cmd/alpamon/command/ftp.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index 67db134..3126985 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -2,6 +2,7 @@ package command import ( "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/spf13/cobra" ) @@ -14,6 +15,9 @@ var ftpCmd = &cobra.Command{ url := args[0] homeDirectory := args[1] + logFile := logger.InitLogger() + defer func() { _ = logFile.Close() }() + settings := config.LoadConfig() config.InitFtpSettings(settings) From 3044bdd2e8b785ccdba2a5bea1d5d05d417f3d6b Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 29 Oct 2024 17:09:08 +0900 Subject: [PATCH 015/364] Minor fix Add exception handling. 
--- pkg/runner/ftp.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index c512d0b..e5bd067 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -233,7 +233,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu func (fc *FtpClient) mkd(path string) (CommandResult, error) { path = fc.parsePath(path) - err := os.MkdirAll(path, 0755) + err := os.Mkdir(path, 0755) if err != nil { return CommandResult{ Message: err.Error(), @@ -293,6 +293,12 @@ func (fc *FtpClient) dele(path string) (CommandResult, error) { func (fc *FtpClient) rmd(path string, recursive bool) (CommandResult, error) { path = fc.parsePath(path) + if _, err := os.Stat(path); os.IsNotExist(err) { + return CommandResult{ + Message: err.Error(), + }, err + } + var err error if recursive { err = os.RemoveAll(path) @@ -404,6 +410,10 @@ func copyFile(src, dst string) error { } func copyDir(src, dst string) error { + if strings.HasPrefix(dst, src) { + return fmt.Errorf("%s is inside %s, causing infinite recursion", dst, src) + } + srcInfo, err := os.Stat(src) if err != nil { return err From 515596653a9b2072790f25ad4ac708b92bc61aae Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 30 Oct 2024 12:50:07 +0900 Subject: [PATCH 016/364] Implement log filtering for records and apply minor improvements --- cmd/alpamon/command/root.go | 2 +- pkg/logger/logger.go | 48 ++++++++++++++++++++++++++++++------- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index d05d5ba..2cda208 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -34,7 +34,7 @@ func runAgent() { // Pid pidFilePath, err := pidfile.WritePID() if err != nil { - fmt.Fprintln(os.Stderr, "Failed to create PID file", err.Error()) + _, _ = fmt.Fprintln(os.Stderr, "Failed to create PID file", err.Error()) os.Exit(1) } defer func() { _ = os.Remove(pidFilePath) 
}() diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index e870164..8585de9 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -28,11 +28,11 @@ func InitLogger() *os.File { logFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { - log.Fatal().Err(err).Msg("Failed to open log file") + _, _ = fmt.Fprintf(os.Stderr, "Failed to open log file: %v\n", err) + os.Exit(1) } var output io.Writer - recordWriter := &logRecordWriter{} // In development, log to console; in production, log to file @@ -74,16 +74,31 @@ type logRecord struct { Msg string `json:"msg"` } +type zerologEntry struct { + Level string `json:"level"` + Time string `json:"time"` + Caller string `json:"caller"` + Message string `json:"message"` +} + type logRecordWriter struct{} +// remoteLogThresholds defines log level thresholds for specific callers (files). +// Logs below the specified level for a given file will not be sent to the alpacon-server. +// If a file is not listed, all logs will be sent regardless of level. 
+var remoteLogThresholds = map[string]int{ + "client.go": 30, + "reporter.go": 30, +} + func (w *logRecordWriter) Write(p []byte) (n int, err error) { - var parsedLog map[string]string - err = json.Unmarshal(p, &parsedLog) + var entry zerologEntry + err = json.Unmarshal(p, &entry) if err != nil { return 0, err } - caller := parsedLog["caller"] + caller := entry.Caller if caller == "" { return len(p), nil } @@ -93,14 +108,21 @@ func (w *logRecordWriter) Write(p []byte) (n int, err error) { lineno, _ = strconv.Atoi(parts[1]) } + callerFileName := getCallerFileName(caller) + if levelThreshold, ok := remoteLogThresholds[callerFileName]; ok { + if convertLevelToNumber(entry.Level) < levelThreshold { + return len(p), nil + } + } + record := logRecord{ - Date: time.Now().UTC().Format(time.RFC3339), - Level: convertLevelToNumber(parsedLog["level"]), + Date: entry.Time, + Level: convertLevelToNumber(entry.Level), Program: "alpamon", Path: caller, Lineno: lineno, PID: os.Getpid(), - Msg: parsedLog["message"], + Msg: entry.Message, } go func() { @@ -128,3 +150,13 @@ func convertLevelToNumber(level string) int { return 0 // NOT SET } } + +func getCallerFileName(caller string) string { + parts := strings.Split(caller, "/") + if len(parts) > 0 { + fileWithLine := parts[len(parts)-1] + fileParts := strings.Split(fileWithLine, ":") + return fileParts[0] + } + return "" +} From ab956bc2ce25c7f50df5ed97a91c4f6da4b7db5a Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 30 Oct 2024 13:01:10 +0900 Subject: [PATCH 017/364] Add additional log thresholds for filtering --- pkg/logger/logger.go | 3 +++ pkg/runner/commit.go | 4 ++-- pkg/runner/pty.go | 6 +++--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 8585de9..31fd7d4 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -89,6 +89,9 @@ type logRecordWriter struct{} var remoteLogThresholds = map[string]int{ "client.go": 30, "reporter.go": 30, + 
"command.go": 30, + "pty.go": 30, + "commit.go": 30, } func (w *logRecordWriter) Write(p []byte) (n int, err error) { diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index ee362ad..35f0132 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -149,7 +149,7 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { } remoteData = &[]SystemPackageData{} default: - log.Debug().Msgf("Unknown key: %s", key) + log.Warn().Msgf("Unknown key: %s", key) continue } @@ -157,7 +157,7 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { if statusCode == http.StatusOK { err = json.Unmarshal(resp, &remoteData) if err != nil { - log.Debug().Err(err).Msg("Failed to unmarshal remote data") + log.Error().Err(err).Msg("Failed to unmarshal remote data") continue } } else if statusCode == http.StatusNotFound { diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index afe7adb..4bb918b 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -70,14 +70,14 @@ func (pc *PtyClient) RunPtyBackground() { uid, gid, groupIds, env, err := pc.getPtyUserAndEnv() if err != nil { - log.Debug().Err(err).Msgf("Failed to get pty user and env") + log.Error().Err(err).Msgf("Failed to get pty user and env") return } pc.setPtyCmdSysProcAttrAndEnv(uid, gid, groupIds, env) pc.ptmx, err = pty.Start(pc.cmd) if err != nil { - log.Debug().Err(err).Msg("Failed to start pty") + log.Error().Err(err).Msg("Failed to start pty") pc.close() return } @@ -171,7 +171,7 @@ func (pc *PtyClient) resize(rows, cols uint16) error { Cols: cols, }) if err != nil { - log.Debug().Err(err).Msg("Failed to resize terminal") + log.Warn().Err(err).Msg("Failed to resize terminal") return err } pc.rows = rows From a797466c5c63e82d1f09c511678048d46539eae5 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 30 Oct 2024 13:02:28 +0900 Subject: [PATCH 018/364] Minor fix --- pkg/config/config.go | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git 
a/pkg/config/config.go b/pkg/config/config.go index 8592b10..85016f0 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -37,16 +37,23 @@ func LoadConfig() Settings { for _, configFile := range configFiles { fileInfo, statErr := os.Stat(configFile) - if statErr == nil { - if fileInfo.Size() == 0 { - log.Debug().Msgf("Config file %s is empty, skipping...", configFile) + if statErr != nil { + if os.IsNotExist(statErr) { continue } else { - log.Debug().Msgf("Using config file %s", configFile) - validConfigFile = configFile - break + log.Error().Err(statErr).Msgf("Error accessing config file %s", configFile) + continue } } + + if fileInfo.Size() == 0 { + log.Debug().Msgf("Config file %s is empty, skipping...", configFile) + continue + } + + log.Debug().Msgf("Using config file %s", configFile) + validConfigFile = configFile + break } if validConfigFile == "" { @@ -58,10 +65,6 @@ func LoadConfig() Settings { log.Fatal().Err(err).Msgf("Failed to load config file %s", validConfigFile) } - if iniData == nil { - log.Fatal().Err(err).Msgf("Failed to load config file %s", validConfigFile) - } - var config Config err = iniData.MapTo(&config) if err != nil { @@ -69,7 +72,9 @@ func LoadConfig() Settings { } if config.Logging.Debug { - log.Logger = log.Level(zerolog.DebugLevel) + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } else { + zerolog.SetGlobalLevel(zerolog.InfoLevel) } isValid, settings := validateConfig(config) From 7c9eb662dfd0639b5afbdfff8f601a8fd8504c98 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 30 Oct 2024 13:04:17 +0900 Subject: [PATCH 019/364] Minor fix Delete unused code Commit for test. 
--- cmd/alpamon/command/ftp.go | 60 +++++++++++++++++++++++++++++++++++--- pkg/runner/command.go | 14 +++++---- pkg/runner/ftp.go | 6 ---- 3 files changed, 64 insertions(+), 16 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index 3126985..ee87065 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -1,19 +1,27 @@ package command import ( + "fmt" + "os/user" + "strconv" + "syscall" + "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) var ftpCmd = &cobra.Command{ - Use: "ftp ", + Use: "ftp ", Short: "Start worker for Web FTP", - Args: cobra.ExactArgs(2), + Args: cobra.ExactArgs(4), RunE: func(cmd *cobra.Command, args []string) error { - url := args[0] - homeDirectory := args[1] + username := args[0] + groupname := args[1] + url := args[2] + homeDirectory := args[3] logFile := logger.InitLogger() defer func() { _ = logFile.Close() }() @@ -21,6 +29,50 @@ var ftpCmd = &cobra.Command{ settings := config.LoadConfig() config.InitFtpSettings(settings) + if syscall.Getuid() == 0 { + if username == "" || groupname == "" { + log.Debug().Msg("No username or groupname provided.") + return fmt.Errorf("No username or groupname provided.") + } + + usr, err := user.Lookup(username) + if err != nil { + log.Debug().Msgf("There is no corresponding %s username in this server", username) + return fmt.Errorf("There is no corresponding %s username in this server", username) + } + + group, err := user.LookupGroup(groupname) + if err != nil { + log.Debug().Msgf("There is no corresponding %s groupname in this server", groupname) + return fmt.Errorf("There is no corresponding %s groupname in this server", groupname) + } + + uid, err := strconv.Atoi(usr.Uid) + if err != nil { + return err + } + + gid, err := strconv.Atoi(group.Gid) + if err != nil { + return err + } + + err = 
syscall.Setgroups([]int{}) + if err != nil { + return err + } + + err = syscall.Setuid(uid) + if err != nil { + return err + } + + err = syscall.Setgid(gid) + if err != nil { + return err + } + } + RunFtpWorker(url, homeDirectory) return nil diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 2dcaaab..10e984c 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -153,12 +153,12 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("openftp: Not enough information. %s", err.Error()) } - sysProcAttr, err := demote(data.Username, data.Groupname) - if err != nil { - log.Debug().Err(err).Msg("Failed to get demote permission") + // sysProcAttr, err := demote(data.Username, data.Groupname) + // if err != nil { + // log.Debug().Err(err).Msg("Failed to get demote permission") - return 1, fmt.Sprintf("openftp: Failed to get demoted permission. %s", err.Error()) - } + // return 1, fmt.Sprintf("openftp: Failed to get demoted permission. %s", err.Error()) + // } executable, err := os.Executable() if err != nil { @@ -170,10 +170,12 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { cmd := exec.Command( executable, "ftp", + data.Username, + data.Groupname, data.URL, data.HomeDirectory, ) - cmd.SysProcAttr = sysProcAttr + // cmd.SysProcAttr = sysProcAttr cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index e5bd067..4222787 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -48,12 +48,6 @@ func (fc *FtpClient) RunFtpBackground() { } defer fc.close() - if err != nil { - log.Debug().Err(err).Msg("Failed to demote user.") - fc.close() - return - } - ctx, cancel := context.WithCancel(context.Background()) defer cancel() From 1c2fa0013f1bf0e9738ce25107450536eb31a5cb Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 30 Oct 2024 14:31:08 +0900 Subject: [PATCH 020/364] Minor fix Delete comment. Commit for test. 
--- cmd/alpamon/command/ftp.go | 66 +++++------------------------------- pkg/runner/command.go | 8 ----- pkg/runner/ftp.go | 69 +++++++++++++++++++++++++++++++++++--- 3 files changed, 74 insertions(+), 69 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index ee87065..35847b1 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -1,15 +1,9 @@ package command import ( - "fmt" - "os/user" - "strconv" - "syscall" - "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" - "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -18,10 +12,12 @@ var ftpCmd = &cobra.Command{ Short: "Start worker for Web FTP", Args: cobra.ExactArgs(4), RunE: func(cmd *cobra.Command, args []string) error { - username := args[0] - groupname := args[1] - url := args[2] - homeDirectory := args[3] + data := runner.CommandData{ + Username: args[0], + Groupname: args[1], + URL: args[2], + HomeDirectory: args[3], + } logFile := logger.InitLogger() defer func() { _ = logFile.Close() }() @@ -29,57 +25,13 @@ var ftpCmd = &cobra.Command{ settings := config.LoadConfig() config.InitFtpSettings(settings) - if syscall.Getuid() == 0 { - if username == "" || groupname == "" { - log.Debug().Msg("No username or groupname provided.") - return fmt.Errorf("No username or groupname provided.") - } - - usr, err := user.Lookup(username) - if err != nil { - log.Debug().Msgf("There is no corresponding %s username in this server", username) - return fmt.Errorf("There is no corresponding %s username in this server", username) - } - - group, err := user.LookupGroup(groupname) - if err != nil { - log.Debug().Msgf("There is no corresponding %s groupname in this server", groupname) - return fmt.Errorf("There is no corresponding %s groupname in this server", groupname) - } - - uid, err := strconv.Atoi(usr.Uid) - if err != nil { - return err - } - - gid, err := 
strconv.Atoi(group.Gid) - if err != nil { - return err - } - - err = syscall.Setgroups([]int{}) - if err != nil { - return err - } - - err = syscall.Setuid(uid) - if err != nil { - return err - } - - err = syscall.Setgid(gid) - if err != nil { - return err - } - } - - RunFtpWorker(url, homeDirectory) + RunFtpWorker(data) return nil }, } -func RunFtpWorker(url, homeDirectory string) { - ftpClient := runner.NewFtpClient(url, homeDirectory) +func RunFtpWorker(data runner.CommandData) { + ftpClient := runner.NewFtpClient(data) ftpClient.RunFtpBackground() } diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 10e984c..deca833 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -153,13 +153,6 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("openftp: Not enough information. %s", err.Error()) } - // sysProcAttr, err := demote(data.Username, data.Groupname) - // if err != nil { - // log.Debug().Err(err).Msg("Failed to get demote permission") - - // return 1, fmt.Sprintf("openftp: Failed to get demoted permission. 
%s", err.Error()) - // } - executable, err := os.Executable() if err != nil { log.Debug().Err(err).Msg("Failed to get executable path") @@ -175,7 +168,6 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { data.URL, data.HomeDirectory, ) - // cmd.SysProcAttr = sysProcAttr cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 4222787..37884de 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -7,8 +7,11 @@ import ( "io" "net/http" "os" + "os/user" "path/filepath" + "strconv" "strings" + "syscall" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/gorilla/websocket" @@ -19,11 +22,13 @@ type FtpClient struct { conn *websocket.Conn requestHeader http.Header url string + username string + groupname string homeDirectory string workingDirectory string } -func NewFtpClient(url, homeDirectory string) *FtpClient { +func NewFtpClient(data CommandData) *FtpClient { headers := http.Header{ "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.FtpSettings.ID, config.FtpSettings.Key)}, "Origin": {config.FtpSettings.ServerURL}, @@ -31,16 +36,72 @@ func NewFtpClient(url, homeDirectory string) *FtpClient { return &FtpClient{ requestHeader: headers, - url: strings.Replace(config.FtpSettings.ServerURL, "http", "ws", 1) + url, - homeDirectory: homeDirectory, - workingDirectory: homeDirectory, + url: strings.Replace(config.FtpSettings.ServerURL, "http", "ws", 1) + data.URL, + username: data.Username, + groupname: data.Groupname, + homeDirectory: data.HomeDirectory, + workingDirectory: data.HomeDirectory, } } +func (fc *FtpClient) demote() error { + if syscall.Getuid() == 0 { + if fc.username == "" || fc.groupname == "" { + log.Debug().Msg("No username or groupname provided.") + return fmt.Errorf("no username or groupname provided") + } + + usr, err := user.Lookup(fc.username) + if err != nil { + log.Debug().Msgf("There is no corresponding %s username in this server", fc.username) + return fmt.Errorf("there 
is no corresponding %s username in this server", fc.username) + } + + group, err := user.LookupGroup(fc.groupname) + if err != nil { + log.Debug().Msgf("There is no corresponding %s groupname in this server", fc.groupname) + return fmt.Errorf("there is no corresponding %s groupname in this server", fc.groupname) + } + + uid, err := strconv.Atoi(usr.Uid) + if err != nil { + return err + } + + gid, err := strconv.Atoi(group.Gid) + if err != nil { + return err + } + + err = syscall.Setgroups([]int{}) + if err != nil { + return err + } + + err = syscall.Setuid(uid) + if err != nil { + return err + } + + err = syscall.Setgid(gid) + if err != nil { + return err + } + } + + return nil +} + func (fc *FtpClient) RunFtpBackground() { log.Debug().Msg("Opening websocket for ftp session.") var err error + err = fc.demote() + if err != nil { + log.Debug().Err(err).Msg("Failed to get demote permission") + return + } + fc.conn, _, err = websocket.DefaultDialer.Dial(fc.url, fc.requestHeader) if err != nil { log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s", fc.url) From 261611a3aa6ec52cabf9d3f77ccb83da1c8e83e8 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 30 Oct 2024 15:58:02 +0900 Subject: [PATCH 021/364] Minor fix --- pkg/logger/logger.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 31fd7d4..74a2f11 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -91,7 +91,6 @@ var remoteLogThresholds = map[string]int{ "reporter.go": 30, "command.go": 30, "pty.go": 30, - "commit.go": 30, } func (w *logRecordWriter) Write(p []byte) (n int, err error) { From c2df4eac3a01929fb524a1916d9a413f06575e47 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 30 Oct 2024 16:43:52 +0900 Subject: [PATCH 022/364] Fix agent upgrade command --- pkg/runner/command.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 79cd9d9..4f4bfb5 100644 
--- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -89,10 +89,10 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { switch args[0] { case "upgrade": if utils.PlatformLike == "debian" { - cmd = "curl -s https://packagecloud.io/install/repositories/alpacanetworks/alpamon/script.deb.sh?any=true | sudo bash && " + + cmd = "apt-get upgrate -y && " + "apt-get upgrade -y alpamon" } else if utils.PlatformLike == "rhel" { - cmd = "curl -s https://packagecloud.io/install/repositories/alpacanetworks/alpamon/script.rpm.sh?any=true | sudo bash && " + + cmd = "yum update- y " + "yum update -y alpamon" } else { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) From 08cec8244d7a336ec6ef4b8c327bf781bf1a142a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 30 Oct 2024 22:24:41 +0900 Subject: [PATCH 023/364] Fix ftp alpamon command Update openftp to run FtpClient in a new process with demote permission. --- cmd/alpamon/command/ftp.go | 23 ++++-------- pkg/runner/command.go | 10 ++++- pkg/runner/ftp.go | 75 ++++---------------------------------- 3 files changed, 24 insertions(+), 84 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index 35847b1..a8dd355 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -2,36 +2,29 @@ package command import ( "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/spf13/cobra" ) var ftpCmd = &cobra.Command{ - Use: "ftp ", + Use: "ftp ", Short: "Start worker for Web FTP", - Args: cobra.ExactArgs(4), + Args: cobra.ExactArgs(2), RunE: func(cmd *cobra.Command, args []string) error { - data := runner.CommandData{ - Username: args[0], - Groupname: args[1], - URL: args[2], - HomeDirectory: args[3], - } - - logFile := logger.InitLogger() - defer func() { _ = logFile.Close() }() + url := args[0] + homeDirectory := args[1] + // TODO : Send logs to 
alpamon's Logserver using a Unix domain socket settings := config.LoadConfig() config.InitFtpSettings(settings) - RunFtpWorker(data) + RunFtpWorker(url, homeDirectory) return nil }, } -func RunFtpWorker(data runner.CommandData) { - ftpClient := runner.NewFtpClient(data) +func RunFtpWorker(url, homeDirectory string) { + ftpClient := runner.NewFtpClient(url, homeDirectory) ftpClient.RunFtpBackground() } diff --git a/pkg/runner/command.go b/pkg/runner/command.go index deca833..2dcaaab 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -153,6 +153,13 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("openftp: Not enough information. %s", err.Error()) } + sysProcAttr, err := demote(data.Username, data.Groupname) + if err != nil { + log.Debug().Err(err).Msg("Failed to get demote permission") + + return 1, fmt.Sprintf("openftp: Failed to get demoted permission. %s", err.Error()) + } + executable, err := os.Executable() if err != nil { log.Debug().Err(err).Msg("Failed to get executable path") @@ -163,11 +170,10 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { cmd := exec.Command( executable, "ftp", - data.Username, - data.Groupname, data.URL, data.HomeDirectory, ) + cmd.SysProcAttr = sysProcAttr cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 37884de..dbf2ee0 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -7,11 +7,8 @@ import ( "io" "net/http" "os" - "os/user" "path/filepath" - "strconv" "strings" - "syscall" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/gorilla/websocket" @@ -22,13 +19,11 @@ type FtpClient struct { conn *websocket.Conn requestHeader http.Header url string - username string - groupname string homeDirectory string workingDirectory string } -func NewFtpClient(data CommandData) *FtpClient { +func NewFtpClient(url, homeDirectory string) *FtpClient { headers := http.Header{ "Authorization": {fmt.Sprintf(`id="%s", 
key="%s"`, config.FtpSettings.ID, config.FtpSettings.Key)}, "Origin": {config.FtpSettings.ServerURL}, @@ -36,72 +31,17 @@ func NewFtpClient(data CommandData) *FtpClient { return &FtpClient{ requestHeader: headers, - url: strings.Replace(config.FtpSettings.ServerURL, "http", "ws", 1) + data.URL, - username: data.Username, - groupname: data.Groupname, - homeDirectory: data.HomeDirectory, - workingDirectory: data.HomeDirectory, + url: strings.Replace(config.FtpSettings.ServerURL, "http", "ws", 1) + url, + homeDirectory: homeDirectory, + workingDirectory: homeDirectory, } } -func (fc *FtpClient) demote() error { - if syscall.Getuid() == 0 { - if fc.username == "" || fc.groupname == "" { - log.Debug().Msg("No username or groupname provided.") - return fmt.Errorf("no username or groupname provided") - } - - usr, err := user.Lookup(fc.username) - if err != nil { - log.Debug().Msgf("There is no corresponding %s username in this server", fc.username) - return fmt.Errorf("there is no corresponding %s username in this server", fc.username) - } - - group, err := user.LookupGroup(fc.groupname) - if err != nil { - log.Debug().Msgf("There is no corresponding %s groupname in this server", fc.groupname) - return fmt.Errorf("there is no corresponding %s groupname in this server", fc.groupname) - } - - uid, err := strconv.Atoi(usr.Uid) - if err != nil { - return err - } - - gid, err := strconv.Atoi(group.Gid) - if err != nil { - return err - } - - err = syscall.Setgroups([]int{}) - if err != nil { - return err - } - - err = syscall.Setuid(uid) - if err != nil { - return err - } - - err = syscall.Setgid(gid) - if err != nil { - return err - } - } - - return nil -} - func (fc *FtpClient) RunFtpBackground() { - log.Debug().Msg("Opening websocket for ftp session.") + // TODO : Send logs to alpamon's Logserver using a Unix domain socket + // log.Debug().Msg("Opening websocket for ftp session.") var err error - err = fc.demote() - if err != nil { - log.Debug().Err(err).Msg("Failed to get 
demote permission") - return - } - fc.conn, _, err = websocket.DefaultDialer.Dial(fc.url, fc.requestHeader) if err != nil { log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s", fc.url) @@ -187,7 +127,8 @@ func (fc *FtpClient) close() { _ = fc.conn.Close() } - log.Debug().Msg("Websocket connection for ftp has been closed.") + // TODO : Send logs to alpamon's Logserver using a Unix domain socket + // log.Debug().Msg("Websocket connection for ftp has been closed.") } func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (CommandResult, error) { From bcf5921e78646d92d0311777dc8005c4ad124b95 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Thu, 31 Oct 2024 12:34:04 +0900 Subject: [PATCH 024/364] Add ModTime at CommandResult ModTime has been added to the response of list command in order to display the last modified time. --- pkg/runner/ftp.go | 16 +++++++++++++--- pkg/runner/ftp_types.go | 6 +++++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index dbf2ee0..a056e50 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/gorilla/websocket" @@ -180,6 +181,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu Type: "folder", Path: path, Size: int64(0), + ModTime: time.Time{}, Children: []CommandResult{}, } @@ -200,9 +202,10 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu } child := CommandResult{ - Name: entry.Name(), - Path: fullPath, - Size: info.Size(), + Name: entry.Name(), + Path: fullPath, + Size: info.Size(), + ModTime: info.ModTime(), } if entry.IsDir() { @@ -223,6 +226,13 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu result.Size += child.Size } + dirInfo, err := os.Stat(path) + if err != nil { + result.Message = err.Error() + } else { + result.ModTime 
= dirInfo.ModTime() + } + return result, nil } diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 8be5766..8a85a50 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -1,6 +1,9 @@ package runner -import "strings" +import ( + "strings" + "time" +) type FtpCommand string @@ -49,6 +52,7 @@ type CommandResult struct { Path string `json:"path,omitempty"` Size int64 `json:"size,omitempty"` Children []CommandResult `json:"children,omitempty"` + ModTime time.Time `json:"mod_time,omitempty"` Message string `json:"message,omitempty"` } From b20c558469045a84361d36ef0e811b1f8df0088a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Thu, 31 Oct 2024 15:15:35 +0900 Subject: [PATCH 025/364] Add dependencies at .goreleaser.yaml Add zip package to dependencies so that it will be installed along with alpamon package. --- .goreleaser.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 1f6b75a..7b3580f 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -37,6 +37,8 @@ nfpms: formats: - deb - rpm + dependencies: + - zip bindir: /usr/local/bin/ scripts: From 3841f665eba8ed9c31b835930cb78a2a4c8a165a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Thu, 31 Oct 2024 15:32:33 +0900 Subject: [PATCH 026/364] Add InitFtpLogger() To ensure consistent logging format with the existing logs, InitFtpLogger() was added to a new process executing Web FTP commands. As the new process has reduced permissions on /var/alpamon/alpamon.log, InitLogger alone was insufficient to unify the log structure. To further consolidate logs from the new process into the existing log file, Unix domain sockets should be implemented in the future. 
--- cmd/alpamon/command/ftp.go | 4 +++- pkg/logger/logger.go | 28 ++++++++++++++++++++++++++-- pkg/runner/ftp.go | 6 ++---- 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index a8dd355..8214999 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -2,6 +2,7 @@ package command import ( "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/spf13/cobra" ) @@ -14,7 +15,8 @@ var ftpCmd = &cobra.Command{ url := args[0] homeDirectory := args[1] - // TODO : Send logs to alpamon's Logserver using a Unix domain socket + logger.InitFtpLogger() + settings := config.LoadConfig() config.InitFtpSettings(settings) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 199fd04..30ce33b 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -2,11 +2,12 @@ package logger import ( "fmt" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "os" "strings" "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) const ( @@ -48,3 +49,26 @@ func InitLogger() *os.File { return logFile } + +// TODO : Send logs to alpamon's Logserver using a Unix domain socket +func InitFtpLogger() { + consoleOutput := zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.RFC3339, + TimeLocation: time.Local, + FormatLevel: func(i interface{}) string { + return "[" + strings.ToUpper(i.(string)) + "]" + }, + FormatMessage: func(i interface{}) string { + return " " + i.(string) + }, + FormatFieldName: func(i interface{}) string { + return "(" + i.(string) + ")" + }, + FormatFieldValue: func(i interface{}) string { + return i.(string) + }, + } + + log.Logger = zerolog.New(consoleOutput).With().Timestamp().Caller().Logger() +} diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index a056e50..07e5358 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -39,8 +39,7 @@ func 
NewFtpClient(url, homeDirectory string) *FtpClient { } func (fc *FtpClient) RunFtpBackground() { - // TODO : Send logs to alpamon's Logserver using a Unix domain socket - // log.Debug().Msg("Opening websocket for ftp session.") + log.Debug().Msg("Opening websocket for ftp session.") var err error fc.conn, _, err = websocket.DefaultDialer.Dial(fc.url, fc.requestHeader) @@ -128,8 +127,7 @@ func (fc *FtpClient) close() { _ = fc.conn.Close() } - // TODO : Send logs to alpamon's Logserver using a Unix domain socket - // log.Debug().Msg("Websocket connection for ftp has been closed.") + log.Debug().Msg("Websocket connection for ftp has been closed.") } func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (CommandResult, error) { From 8e895de89c19d2dba27b83a8a797ce9b7f8a89cd Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 1 Nov 2024 16:41:14 +0900 Subject: [PATCH 027/364] Add FtpLogger Add custom FtpLogger component to handle logging for isolated FTP processes. --- pkg/logger/ftp_logger.go | 60 ++++++++++++++++++++++++++++++++++++++++ pkg/logger/logger.go | 23 --------------- 2 files changed, 60 insertions(+), 23 deletions(-) create mode 100644 pkg/logger/ftp_logger.go diff --git a/pkg/logger/ftp_logger.go b/pkg/logger/ftp_logger.go new file mode 100644 index 0000000..744b9c5 --- /dev/null +++ b/pkg/logger/ftp_logger.go @@ -0,0 +1,60 @@ +package logger + +import ( + "os" + "strings" + "time" + + "github.com/rs/zerolog" +) + +type FtpLogger struct { + log zerolog.Logger +} + +// TODO : Send logs to alpamon's Logserver using a Unix domain socket +func NewFtpLogger() FtpLogger { + consoleOutput := zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.RFC3339, + TimeLocation: time.Local, + FormatLevel: func(i interface{}) string { + return "[" + strings.ToUpper(i.(string)) + "]" + }, + FormatMessage: func(i interface{}) string { + return " " + i.(string) + }, + FormatFieldName: func(i interface{}) string { + return "(" + i.(string) + ")" + }, + 
FormatFieldValue: func(i interface{}) string { + return i.(string) + }, + } + + logger := zerolog.New(consoleOutput).With().Timestamp().Caller().Logger() + + return FtpLogger{ + log: logger, + } +} + +func (l *FtpLogger) Debug() *zerolog.Event { + return l.log.Debug() +} + +func (l *FtpLogger) Info() *zerolog.Event { + return l.log.Info() +} + +func (l *FtpLogger) Warn() *zerolog.Event { + return l.log.Warn() +} + +func (l *FtpLogger) Error() *zerolog.Event { + return l.log.Error() +} + +func (l *FtpLogger) Fatal() *zerolog.Event { + return l.log.Fatal() +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 58350c8..5d803ab 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -65,29 +65,6 @@ func InitLogger() *os.File { return logFile } -// TODO : Send logs to alpamon's Logserver using a Unix domain socket -func InitFtpLogger() { - consoleOutput := zerolog.ConsoleWriter{ - Out: os.Stderr, - TimeFormat: time.RFC3339, - TimeLocation: time.Local, - FormatLevel: func(i interface{}) string { - return "[" + strings.ToUpper(i.(string)) + "]" - }, - FormatMessage: func(i interface{}) string { - return " " + i.(string) - }, - FormatFieldName: func(i interface{}) string { - return "(" + i.(string) + ")" - }, - FormatFieldValue: func(i interface{}) string { - return i.(string) - }, - } - - log.Logger = zerolog.New(consoleOutput).With().Timestamp().Caller().Logger() -} - type logRecord struct { Date string `json:"date"` Level int `json:"level"` From 627f2e3b85b969861eb253a532c7b92d488d4563 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 1 Nov 2024 16:44:48 +0900 Subject: [PATCH 028/364] Refactor openftp command To improve readability, encapsulate the logic of spawning a new, less privileged process into openftp(), which has been separated from openftp command. 
--- pkg/runner/command.go | 63 ++++++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 3d3f1f3..a42b7b8 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -153,34 +153,9 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("openftp: Not enough information. %s", err.Error()) } - sysProcAttr, err := demote(data.Username, data.Groupname) + err = cr.openFtp(data) if err != nil { - log.Debug().Err(err).Msg("Failed to get demote permission") - - return 1, fmt.Sprintf("openftp: Failed to get demoted permission. %s", err.Error()) - } - - executable, err := os.Executable() - if err != nil { - log.Debug().Err(err).Msg("Failed to get executable path") - - return 1, fmt.Sprintf("openftp: Failed to get executable path. %s", err.Error()) - } - - cmd := exec.Command( - executable, - "ftp", - data.URL, - data.HomeDirectory, - ) - cmd.SysProcAttr = sysProcAttr - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - if err := cmd.Start(); err != nil { - log.Debug().Err(err).Msg("Failed to start worker process") - - return 1, fmt.Sprintf("openftp: Failed to start worker process. %s", err.Error()) + return 1, fmt.Sprintf("%v", err) } return 0, "Spawned a ftp terminal." @@ -617,6 +592,40 @@ func (cr *CommandRunner) validateData(data interface{}) error { return nil } +func (cr *CommandRunner) openFtp(data openFtpData) error { + sysProcAttr, err := demote(data.Username, data.Groupname) + if err != nil { + log.Debug().Err(err).Msg("Failed to get demote permission") + + return fmt.Errorf("openftp: Failed to get demoted permission. %w", err) + } + + executable, err := os.Executable() + if err != nil { + log.Debug().Err(err).Msg("Failed to get executable path") + + return fmt.Errorf("openftp: Failed to get executable path. 
%w", err) + } + + cmd := exec.Command( + executable, + "ftp", + data.URL, + data.HomeDirectory, + ) + cmd.SysProcAttr = sysProcAttr + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err = cmd.Start(); err != nil { + log.Debug().Err(err).Msg("Failed to start ftp worker process") + + return fmt.Errorf("openftp: Failed to start ftp worker process. %w", err) + } + + return nil +} + func getFileData(data CommandData) ([]byte, error) { var content []byte switch data.Type { From 28ae064d6bacd290868cbf9848c0622f3b56c359 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 1 Nov 2024 16:48:10 +0900 Subject: [PATCH 029/364] Minor fix Apply Go coding conventions. Integrate FtpLogger. --- pkg/runner/ftp.go | 47 +++++++++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 350f4f8..c9fd202 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -12,8 +12,8 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/gorilla/websocket" - "github.com/rs/zerolog/log" ) type FtpClient struct { @@ -22,29 +22,32 @@ type FtpClient struct { url string homeDirectory string workingDirectory string + log logger.FtpLogger } -func NewFtpClient(url, homeDirectory string) *FtpClient { +func NewFtpClient(url, homeDirectory string, ftpLogger logger.FtpLogger) *FtpClient { + settings := config.LoadConfig() headers := http.Header{ - "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.FtpSettings.ID, config.FtpSettings.Key)}, - "Origin": {config.FtpSettings.ServerURL}, + "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, settings.ID, settings.Key)}, + "Origin": {settings.ServerURL}, } return &FtpClient{ requestHeader: headers, - url: strings.Replace(config.FtpSettings.ServerURL, "http", "ws", 1) + url, + url: strings.Replace(settings.ServerURL, "http", "ws", 1) + url, homeDirectory: homeDirectory, workingDirectory: 
homeDirectory, + log: ftpLogger, } } func (fc *FtpClient) RunFtpBackground() { - log.Debug().Msg("Opening websocket for ftp session.") + fc.log.Debug().Msg("Opening websocket for ftp session.") var err error fc.conn, _, err = websocket.DefaultDialer.Dial(fc.url, fc.requestHeader) if err != nil { - log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s", fc.url) + fc.log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s", fc.url) return } defer fc.close() @@ -69,7 +72,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { return } if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - log.Debug().Err(err).Msg("Failed to read from ftp websocket") + fc.log.Debug().Err(err).Msg("Failed to read from ftp websocket") } cancel() return @@ -78,7 +81,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { var content FtpContent err = json.Unmarshal(message, &content) if err != nil { - log.Debug().Err(err).Msg("Failed to unmarshal websocket message") + fc.log.Debug().Err(err).Msg("Failed to unmarshal websocket message") cancel() return } @@ -102,7 +105,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { if ctx.Err() != nil { return } - log.Debug().Err(err).Msg("Failed to marshal response") + fc.log.Debug().Err(err).Msg("Failed to marshal response") cancel() return } @@ -113,7 +116,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { return } if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - log.Debug().Err(err).Msg("Failed to send websocket message") + fc.log.Debug().Err(err).Msg("Failed to send websocket message") } cancel() return @@ -128,7 +131,7 @@ func (fc *FtpClient) close() { _ = fc.conn.Close() } - log.Debug().Msg("Websocket connection for ftp has been closed.") + fc.log.Debug().Msg("Websocket connection for ftp has been closed.") } func (fc *FtpClient) 
handleFtpCommand(command FtpCommand, data FtpData) (CommandResult, error) { @@ -386,19 +389,27 @@ func copyFile(src, dst string) error { if err != nil { return err } - defer srcFile.Close() + + defer func() error { + if err := srcFile.Close(); err != nil { + return err + } + return nil + }() dstFile, err := os.Create(dst) if err != nil { return err } - defer dstFile.Close() - if _, err = io.Copy(dstFile, srcFile); err != nil { - return err - } + defer func() error { + if err := dstFile.Close(); err != nil { + return err + } + return nil + }() - if err = dstFile.Close(); err != nil { + if _, err = io.Copy(dstFile, srcFile); err != nil { return err } From 61ec2f02bbedb41d586d25f21449f6e999c27b95 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 1 Nov 2024 16:52:23 +0900 Subject: [PATCH 030/364] Delete FtpSettings With the FTP process now separated, remove FtpSettings and have NewFtpClient fetch settings directly. --- pkg/config/config.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 98c5b18..3de5c08 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -19,7 +19,6 @@ var ( } GlobalSettings Settings - FtpSettings Settings ) const ( @@ -32,10 +31,6 @@ func InitSettings(settings Settings) { GlobalSettings = settings } -func InitFtpSettings(settings Settings) { - FtpSettings = settings -} - func LoadConfig() Settings { var iniData *ini.File var err error From ff08c329a6b5c9f3d8f038dfaa622889ab5337ce Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 1 Nov 2024 16:53:50 +0900 Subject: [PATCH 031/364] Update ftp command due to refactoring Update related to the addition of FtpLogger and changes to how settings are retrieved. 
--- cmd/alpamon/command/ftp.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index 8214999..9ea9d80 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -1,7 +1,6 @@ package command import ( - "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/spf13/cobra" @@ -11,22 +10,17 @@ var ftpCmd = &cobra.Command{ Use: "ftp ", Short: "Start worker for Web FTP", Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { + Run: func(cmd *cobra.Command, args []string) { url := args[0] homeDirectory := args[1] - logger.InitFtpLogger() + ftpLogger := logger.NewFtpLogger() - settings := config.LoadConfig() - config.InitFtpSettings(settings) - - RunFtpWorker(url, homeDirectory) - - return nil + RunFtpWorker(url, homeDirectory, ftpLogger) }, } -func RunFtpWorker(url, homeDirectory string) { - ftpClient := runner.NewFtpClient(url, homeDirectory) +func RunFtpWorker(url, homeDirectory string, ftpLogger logger.FtpLogger) { + ftpClient := runner.NewFtpClient(url, homeDirectory, ftpLogger) ftpClient.RunFtpBackground() } From 8794b772ca5d8ae147d90543f723f0a4f93d038c Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 1 Nov 2024 17:04:57 +0900 Subject: [PATCH 032/364] Add os.Exit(1) to manage child process To ensure that child process terminates upon the completion of the FtpClient connection, add os.Exit(1). 
--- pkg/runner/ftp.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index c9fd202..dadad7d 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -132,6 +132,7 @@ func (fc *FtpClient) close() { } fc.log.Debug().Msg("Websocket connection for ftp has been closed.") + os.Exit(1) } func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (CommandResult, error) { From be4cc663ec08894d0921d547a83cb43f6176db28 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sat, 2 Nov 2024 17:47:37 +0900 Subject: [PATCH 033/364] Fix config loading error Permissions error prevented new process from loading the config file when running NewFtpClient(). To resolve this, ftp command was refactored to load the config and pass the configuration as FtpConfigData to the newly spawned NewFtpClient process. --- cmd/alpamon/command/ftp.go | 17 ++++++++++------- pkg/runner/ftp.go | 17 +++++++++-------- pkg/runner/ftp_types.go | 10 ++++++++++ 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index 9ea9d80..aa72885 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -1,6 +1,7 @@ package command import ( + "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/spf13/cobra" @@ -11,16 +12,18 @@ var ftpCmd = &cobra.Command{ Short: "Start worker for Web FTP", Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { - url := args[0] - homeDirectory := args[1] + data := runner.FtpConfigData{ + URL: args[0], + HomeDirectory: args[1], + Logger: logger.NewFtpLogger(), + Settings: config.LoadConfig(), + } - ftpLogger := logger.NewFtpLogger() - - RunFtpWorker(url, homeDirectory, ftpLogger) + RunFtpWorker(data) }, } -func RunFtpWorker(url, homeDirectory string, ftpLogger logger.FtpLogger) { - ftpClient := runner.NewFtpClient(url, homeDirectory, 
ftpLogger) +func RunFtpWorker(data runner.FtpConfigData) { + ftpClient := runner.NewFtpClient(data) ftpClient.RunFtpBackground() } diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index dadad7d..68a0b1b 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -23,21 +23,22 @@ type FtpClient struct { homeDirectory string workingDirectory string log logger.FtpLogger + settings config.Settings } -func NewFtpClient(url, homeDirectory string, ftpLogger logger.FtpLogger) *FtpClient { - settings := config.LoadConfig() +func NewFtpClient(data FtpConfigData) *FtpClient { headers := http.Header{ - "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, settings.ID, settings.Key)}, - "Origin": {settings.ServerURL}, + "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, data.Settings.ID, data.Settings.Key)}, + "Origin": {data.Settings.ServerURL}, } return &FtpClient{ requestHeader: headers, - url: strings.Replace(settings.ServerURL, "http", "ws", 1) + url, - homeDirectory: homeDirectory, - workingDirectory: homeDirectory, - log: ftpLogger, + url: strings.Replace(data.Settings.ServerURL, "http", "ws", 1) + data.URL, + homeDirectory: data.HomeDirectory, + workingDirectory: data.HomeDirectory, + log: data.Logger, + settings: data.Settings, } } diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 8a85a50..591c409 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -3,6 +3,9 @@ package runner import ( "strings" "time" + + "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/logger" ) type FtpCommand string @@ -26,6 +29,13 @@ const ( ErrDirectoryNotEmpty = "directory not empty" ) +type FtpConfigData struct { + URL string + HomeDirectory string + Logger logger.FtpLogger + Settings config.Settings +} + type FtpData struct { Path string `json:"path,omitempty"` Depth int `json:"depth,omitempty"` From 96e55a61801e16fd53572294d2478eed094effae Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sat, 2 Nov 2024 18:31:39 +0900 
Subject: [PATCH 034/364] Minor fix Apply golang convention. --- pkg/runner/ftp.go | 25 ++++++++++++------------- pkg/runner/ftp_types.go | 2 +- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 68a0b1b..ae5e37b 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -9,7 +9,6 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" @@ -185,7 +184,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu Type: "folder", Path: path, Size: int64(0), - ModTime: time.Time{}, + ModTime: nil, Children: []CommandResult{}, } @@ -205,11 +204,12 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu continue } + modTime := info.ModTime() child := CommandResult{ Name: entry.Name(), Path: fullPath, Size: info.Size(), - ModTime: info.ModTime(), + ModTime: &modTime, } if entry.IsDir() { @@ -234,7 +234,8 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu if err != nil { result.Message = err.Error() } else { - result.ModTime = dirInfo.ModTime() + modTime := dirInfo.ModTime() + result.ModTime = &modTime } return result, nil @@ -386,17 +387,16 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { }, nil } -func copyFile(src, dst string) error { +func copyFile(src, dst string) (finalErr error) { srcFile, err := os.Open(src) if err != nil { return err } - defer func() error { - if err := srcFile.Close(); err != nil { - return err + defer func() { + if err := srcFile.Close(); err != nil && finalErr == nil { + finalErr = err } - return nil }() dstFile, err := os.Create(dst) @@ -404,11 +404,10 @@ func copyFile(src, dst string) error { return err } - defer func() error { - if err := dstFile.Close(); err != nil { - return err + defer func() { + if err := dstFile.Close(); err != nil && finalErr == nil { + finalErr = err } - return 
nil }() if _, err = io.Copy(dstFile, srcFile); err != nil { diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 591c409..302fb0f 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -62,7 +62,7 @@ type CommandResult struct { Path string `json:"path,omitempty"` Size int64 `json:"size,omitempty"` Children []CommandResult `json:"children,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` + ModTime *time.Time `json:"mod_time,omitempty"` Message string `json:"message,omitempty"` } From 2325836dc90eb686445458a12bfba981565117f7 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 4 Nov 2024 13:02:37 +0900 Subject: [PATCH 035/364] Minor fix --- pkg/runner/command.go | 8 ++++---- pkg/runner/ftp.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index a42b7b8..45a38b0 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -289,7 +289,7 @@ func (cr *CommandRunner) addUser() (exitCode int, result string) { err := cr.validateData(data) if err != nil { - return 1, fmt.Sprintf("adduser: Not enough information. %s", err.Error()) + return 1, fmt.Sprintf("adduser: Not enough information. %s", err) } if utils.PlatformLike == "debian" { @@ -366,7 +366,7 @@ func (cr *CommandRunner) addGroup() (exitCode int, result string) { err := cr.validateData(data) if err != nil { - return 1, fmt.Sprintf("addgroup: Not enough information. %s", err.Error()) + return 1, fmt.Sprintf("addgroup: Not enough information. %s", err) } if utils.PlatformLike == "debian" { @@ -408,7 +408,7 @@ func (cr *CommandRunner) delUser() (exitCode int, result string) { err := cr.validateData(data) if err != nil { - return 1, fmt.Sprintf("deluser: Not enough information. %s", err.Error()) + return 1, fmt.Sprintf("deluser: Not enough information. 
%s", err) } if utils.PlatformLike == "debian" { @@ -448,7 +448,7 @@ func (cr *CommandRunner) delGroup() (exitCode int, result string) { err := cr.validateData(data) if err != nil { - return 1, fmt.Sprintf("delgroup: Not enough information. %s", err.Error()) + return 1, fmt.Sprintf("delgroup: Not enough information. %s", err) } if utils.PlatformLike == "debian" { diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index ae5e37b..f8ee548 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -394,7 +394,7 @@ func copyFile(src, dst string) (finalErr error) { } defer func() { - if err := srcFile.Close(); err != nil && finalErr == nil { + if err = srcFile.Close(); err != nil && finalErr == nil { finalErr = err } }() From 70e99dee9ec003f2373be0800815d17ad9820985 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 4 Nov 2024 14:06:27 +0900 Subject: [PATCH 036/364] Minor fix Set the initial value of finalErr to nil. Fix error handling within the defer() function to populate finalErr with any encountered errors. 
--- pkg/runner/ftp.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index f8ee548..7411d38 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -388,6 +388,7 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { } func copyFile(src, dst string) (finalErr error) { + finalErr = nil srcFile, err := os.Open(src) if err != nil { return err @@ -423,7 +424,7 @@ func copyFile(src, dst string) (finalErr error) { return err } - return nil + return finalErr } func copyDir(src, dst string) error { From 272db726cbbdbacf010b4772cae118b25c476c05 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 4 Nov 2024 14:24:05 +0900 Subject: [PATCH 037/364] Enhance error handling in copyFile function --- pkg/runner/ftp.go | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 7411d38..53340ae 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -387,29 +387,18 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { }, nil } -func copyFile(src, dst string) (finalErr error) { - finalErr = nil +func copyFile(src, dst string) error { srcFile, err := os.Open(src) if err != nil { return err } - - defer func() { - if err = srcFile.Close(); err != nil && finalErr == nil { - finalErr = err - } - }() + defer func() { _ = srcFile.Close() }() dstFile, err := os.Create(dst) if err != nil { return err } - - defer func() { - if err := dstFile.Close(); err != nil && finalErr == nil { - finalErr = err - } - }() + defer func() { _ = dstFile.Close() }() if _, err = io.Copy(dstFile, srcFile); err != nil { return err @@ -424,7 +413,7 @@ func copyFile(src, dst string) (finalErr error) { return err } - return finalErr + return nil } func copyDir(src, dst string) error { From 05357e4aad415cebd880c94eacff8ddfdc82f9a6 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 6 Nov 2024 19:34:21 +0900 Subject: [PATCH 038/364] Add 
sqlite3 driver import to resolve unknown driver error in RPM DB access --- go.mod | 3 ++- go.sum | 10 ++-------- pkg/runner/commit.go | 7 ++++--- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index eedd837..869c5d3 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,10 @@ require ( github.com/adrianbrad/queue v1.3.0 github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/knqyf263/go-rpmdb v0.1.1 + github.com/mattn/go-sqlite3 v1.14.24 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v4 v4.24.8 github.com/spf13/cobra v1.8.1 @@ -21,7 +23,6 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect diff --git a/go.sum b/go.sum index 13dacc3..ab850c0 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/glebarez/go-sqlite v1.20.3 h1:89BkqGOXR9oRmG58ZrzgoY/Fhy5x0M+/WV48U5zVrZ4= github.com/glebarez/go-sqlite v1.20.3/go.mod h1:u3N6D/wftiAzIOJtZl6BmedqxmmkDfH3q+ihjqxC9u0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -28,12 +26,6 @@ github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/knqyf263/go-rpmdb v0.1.1 h1:oh68mTCvp1XzxdU7EfafcWzzfstUZAEa3MW0IJye584= @@ -48,6 +40,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/pkg/runner/commit.go 
b/pkg/runner/commit.go index 35f0132..6b404bd 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -10,6 +10,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/alpacanetworks/alpamon-go/pkg/version" rpmdb "github.com/knqyf263/go-rpmdb/pkg" + _ "github.com/mattn/go-sqlite3" "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/cpu" "github.com/shirou/gopsutil/v4/host" @@ -550,7 +551,7 @@ func getSystemPackages() ([]SystemPackageData, error) { func getDpkgPackage() ([]SystemPackageData, error) { fd, err := os.Open(dpkgDbPath) if err != nil { - log.Debug().Err(err).Str("path", dpkgDbPath).Msg("Failed to open dpkg file") + log.Debug().Err(err).Msgf("Failed to open %s file", dpkgDbPath) return []SystemPackageData{}, err } defer func() { _ = fd.Close() }() @@ -602,7 +603,7 @@ func getDpkgPackage() ([]SystemPackageData, error) { func getRpmPackage(path string) ([]SystemPackageData, error) { db, err := rpmdb.Open(path) if err != nil { - log.Debug().Err(err).Str("path", path).Msg("Failed to open rpm file") + log.Debug().Msgf("Failed to open %s file", path) return []SystemPackageData{}, err } @@ -610,7 +611,7 @@ func getRpmPackage(path string) ([]SystemPackageData, error) { pkgList, err := db.ListPackages() if err != nil { - log.Debug().Err(err).Str("path", path).Msg("Failed to list packages") + log.Debug().Err(err).Msg("Failed to list packages") return []SystemPackageData{}, err } From 31b77b650fc813cb9f92559c301051447e656958 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 6 Nov 2024 19:50:19 +0900 Subject: [PATCH 039/364] Minor fix --- pkg/runner/commit.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 6b404bd..b20971e 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -603,7 +603,7 @@ func getDpkgPackage() ([]SystemPackageData, error) { func getRpmPackage(path string) ([]SystemPackageData, error) { db, err := rpmdb.Open(path) if err != nil 
{ - log.Debug().Msgf("Failed to open %s file", path) + log.Debug().Err(err).Msgf("Failed to open %s file: %v", path, err) return []SystemPackageData{}, err } @@ -611,7 +611,7 @@ func getRpmPackage(path string) ([]SystemPackageData, error) { pkgList, err := db.ListPackages() if err != nil { - log.Debug().Err(err).Msg("Failed to list packages") + log.Debug().Err(err).Msgf("Failed to list packages: %v", err) return []SystemPackageData{}, err } From df42295f6f967a7edcdf1571e56373426115b9ea Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 6 Nov 2024 19:52:11 +0900 Subject: [PATCH 040/364] Use glebarez SQLite driver to fix import issue --- go.mod | 8 +++++++- go.sum | 5 +++-- pkg/runner/commit.go | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 869c5d3..7a04abd 100644 --- a/go.mod +++ b/go.mod @@ -6,10 +6,10 @@ require ( github.com/adrianbrad/queue v1.3.0 github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 + github.com/glebarez/go-sqlite v1.20.3 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/knqyf263/go-rpmdb v0.1.1 - github.com/mattn/go-sqlite3 v1.14.24 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v4 v4.24.8 github.com/spf13/cobra v1.8.1 @@ -20,6 +20,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect @@ -30,6 +31,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect @@ 
-39,4 +41,8 @@ require ( golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.22.2 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.20.3 // indirect ) diff --git a/go.sum b/go.sum index ab850c0..9f410a0 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= @@ -40,13 +42,12 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 h1:VstopitMQi3hZP0fzvnsLmzXZdQGc4bEcgu24cp+d4M= github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index b20971e..02e85e7 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -9,8 +9,8 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/scheduler" "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/alpacanetworks/alpamon-go/pkg/version" + _ "github.com/glebarez/go-sqlite" rpmdb "github.com/knqyf263/go-rpmdb/pkg" - _ "github.com/mattn/go-sqlite3" "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/cpu" "github.com/shirou/gopsutil/v4/host" From 904404628fe622ff16e43689a8dbb51640c59653 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sun, 10 Nov 2024 16:33:25 +0900 Subject: [PATCH 041/364] Add check components to Collector A Check component, which acts as an individual monitoring unit, has been added to Collector to perform checks for specific metrics, such as CPU usage, disk usage, disk I/O, memory, and network traffic.
--- pkg/collector/check/base/base.go | 77 +++++++++++++++++++++ pkg/collector/check/check.go | 44 ++++++++++++ pkg/collector/check/cpu/cpu.go | 64 +++++++++++++++++ pkg/collector/check/disk/io/io.go | 63 +++++++++++++++++ pkg/collector/check/disk/usage/usage.go | 91 +++++++++++++++++++++++++ pkg/collector/check/memory/memory.go | 59 ++++++++++++++++ pkg/collector/check/net/net.go | 67 ++++++++++++++++++ 7 files changed, 465 insertions(+) create mode 100644 pkg/collector/check/base/base.go create mode 100644 pkg/collector/check/check.go create mode 100644 pkg/collector/check/cpu/cpu.go create mode 100644 pkg/collector/check/disk/io/io.go create mode 100644 pkg/collector/check/disk/usage/usage.go create mode 100644 pkg/collector/check/memory/memory.go create mode 100644 pkg/collector/check/net/net.go diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go new file mode 100644 index 0000000..8741676 --- /dev/null +++ b/pkg/collector/check/base/base.go @@ -0,0 +1,77 @@ +package base + +import ( + "time" +) + +const ( + CPU CheckType = "cpu" + MEM CheckType = "memory" + DISK_USAGE CheckType = "disk_usage" + DISK_IO CheckType = "disk_io" + NET CheckType = "net" +) + +type CheckType string + +type CheckResult struct { + Timestamp time.Time `json:"timestamp"` + Usage float64 `json:"usage,omitempty"` + Name string `json:"name,omitempty"` + Device string `json:"device,omitempty"` + Partition string `json:"partition,omitempty"` + Total uint64 `json:"total,omitempty"` + Free uint64 `json:"free,omitempty"` + Used uint64 `json:"used,omitempty"` + WriteBytes uint64 `json:"wrtie_bytes,omitempty"` + ReadBytes uint64 `json:"read_bytes,omitempty"` + InputPkts uint64 `json:"input_pkts,omitempty"` + InputBytes uint64 `json:"input_bytes,omitempty"` + OutputPkts uint64 `json:"output_pkts,omitempty"` + OutputBytes uint64 `json:"output_bytes,omitempty"` +} + +type MetricData struct { + Type CheckType `json:"type"` + Data []CheckResult `json:"data,omitempty"` +} + 
+type BaseCheck struct { + name string + interval time.Duration + buffer *CheckBuffer +} + +type CheckBuffer struct { + SuccessQueue chan MetricData + FailureQueue chan MetricData + Capacity int +} + +func NewBaseCheck(name string, interval time.Duration, buffer *CheckBuffer) BaseCheck { + return BaseCheck{ + name: name, + interval: interval, + buffer: buffer, + } +} + +func NewCheckBuffer(capacity int) *CheckBuffer { + return &CheckBuffer{ + SuccessQueue: make(chan MetricData, capacity), + FailureQueue: make(chan MetricData, capacity), + Capacity: capacity, + } +} + +func (c *BaseCheck) GetName() string { + return c.name +} + +func (c *BaseCheck) GetInterval() time.Duration { + return c.interval +} + +func (c *BaseCheck) GetBuffer() *CheckBuffer { + return c.buffer +} diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go new file mode 100644 index 0000000..ace3121 --- /dev/null +++ b/pkg/collector/check/check.go @@ -0,0 +1,44 @@ +package check + +import ( + "context" + "fmt" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/cpu" + diskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/disk/io" + diskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/disk/usage" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/memory" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/net" +) + +type CheckStrategy interface { + Execute(ctx context.Context) + GetInterval() time.Duration + GetName() string + GetBuffer() *base.CheckBuffer +} + +type CheckFactory interface { + CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer) (CheckStrategy, error) +} + +type DefaultCheckFactory struct{} + +func (f *DefaultCheckFactory) CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer) (CheckStrategy, error) { + switch checkType { + case base.CPU: + return 
cpu.NewCheck(name, interval, buffer), nil + case base.MEM: + return memory.NewCheck(name, interval, buffer), nil + case base.DISK_USAGE: + return diskusage.NewCheck(name, interval, buffer), nil + case base.DISK_IO: + return diskio.NewCheck(name, interval, buffer), nil + case base.NET: + return net.NewCheck(name, interval, buffer), nil + default: + return nil, fmt.Errorf("unknown check type: %s", checkType) + } +} diff --git a/pkg/collector/check/cpu/cpu.go b/pkg/collector/check/cpu/cpu.go new file mode 100644 index 0000000..b365b5e --- /dev/null +++ b/pkg/collector/check/cpu/cpu.go @@ -0,0 +1,64 @@ +package cpu + +import ( + "context" + "fmt" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/cpu" +) + +const ( + checkType = base.CPU +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer), + } +} + +func (c *Check) Execute(ctx context.Context) { + usage, err := c.collectCPUUsage() + + metric := base.MetricData{ + Type: checkType, + Data: []base.CheckResult{}, + } + if err == nil { + data := base.CheckResult{ + Timestamp: time.Now(), + Usage: usage, + } + metric.Data = append(metric.Data, data) + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) collectCPUUsage() (float64, error) { + usage, err := cpu.Percent(0, false) + if err != nil { + return 0, err + } + + if len(usage) == 0 { + return 0, fmt.Errorf("no CPU usage data returned") + } + + return usage[0], nil +} diff --git a/pkg/collector/check/disk/io/io.go b/pkg/collector/check/disk/io/io.go new file mode 100644 index 0000000..6ea0c85 --- /dev/null +++ b/pkg/collector/check/disk/io/io.go @@ -0,0 +1,63 @@ +package diskio + +import ( + "context" + "time" + + 
"github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/disk" +) + +const ( + checkType = base.DISK_IO +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer), + } +} + +func (c *Check) Execute(ctx context.Context) { + ioCounters, err := c.collectDiskIO() + + metric := base.MetricData{ + Type: checkType, + Data: []base.CheckResult{}, + } + if err == nil { + for name, ioCounter := range ioCounters { + data := base.CheckResult{ + Timestamp: time.Now(), + Device: name, + ReadBytes: ioCounter.ReadBytes, + WriteBytes: ioCounter.WriteBytes, + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) collectDiskIO() (map[string]disk.IOCountersStat, error) { + ioCounters, err := disk.IOCounters() + if err != nil { + return nil, err + } + + return ioCounters, nil +} diff --git a/pkg/collector/check/disk/usage/usage.go b/pkg/collector/check/disk/usage/usage.go new file mode 100644 index 0000000..bd2dfa7 --- /dev/null +++ b/pkg/collector/check/disk/usage/usage.go @@ -0,0 +1,91 @@ +package diskusage + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/disk" +) + +const ( + checkType = base.DISK_USAGE +) + +var excludedFileSystems = map[string]bool{ + "tmpfs": true, + "devtmpfs": true, + "proc": true, + "sysfs": true, + "cgroup": true, + "overlay": true, +} + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer), + } +} + +func (c *Check) Execute(ctx context.Context) { + partitions, err 
:= c.collectDiskPartitions() + + metric := base.MetricData{ + Type: checkType, + Data: []base.CheckResult{}, + } + if err == nil { + for _, partition := range partitions { + usage, err := c.collectDiskUsage(partition.Mountpoint) + if err == nil { + data := base.CheckResult{ + Usage: usage.UsedPercent, + Total: usage.Total, + Free: usage.Free, + Used: usage.Used, + } + metric.Data = append(metric.Data, data) + } + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) collectDiskPartitions() ([]disk.PartitionStat, error) { + partitions, err := disk.Partitions(true) + if err != nil { + return nil, err + } + + var filteredPartitions []disk.PartitionStat + for _, partition := range partitions { + if !excludedFileSystems[partition.Fstype] { + filteredPartitions = append(filteredPartitions, partition) + } + } + + return filteredPartitions, nil +} + +func (c *Check) collectDiskUsage(path string) (*disk.UsageStat, error) { + usage, err := disk.Usage(path) + if err != nil { + return nil, err + } + + return usage, nil +} diff --git a/pkg/collector/check/memory/memory.go b/pkg/collector/check/memory/memory.go new file mode 100644 index 0000000..e651d16 --- /dev/null +++ b/pkg/collector/check/memory/memory.go @@ -0,0 +1,59 @@ +package memory + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/mem" +) + +const ( + checkType = base.MEM +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer), + } +} + +func (c *Check) Execute(ctx context.Context) { + usage, err := c.collectMemoryUsage() + + metric := base.MetricData{ + Type: checkType, + Data: []base.CheckResult{}, + } + if err == nil { + data := base.CheckResult{ + 
Timestamp: time.Now(), + Usage: usage, + } + metric.Data = append(metric.Data, data) + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) collectMemoryUsage() (float64, error) { + memory, err := mem.VirtualMemory() + if err != nil { + return 0, err + } + + return memory.UsedPercent, nil +} diff --git a/pkg/collector/check/net/net.go b/pkg/collector/check/net/net.go new file mode 100644 index 0000000..69c2374 --- /dev/null +++ b/pkg/collector/check/net/net.go @@ -0,0 +1,67 @@ +package net + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/net" +) + +const ( + checkType = base.NET +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer), + } +} + +func (c *Check) Execute(ctx context.Context) { + ioCounters, err := c.collectIOCounters() + + var metric base.MetricData + if err != nil { + metric = base.MetricData{ + Type: checkType, + Data: []base.CheckResult{}, + } + } else { + for _, ioCounter := range ioCounters { + data := base.CheckResult{ + Timestamp: time.Now(), + Name: ioCounter.Name, + InputPkts: ioCounter.PacketsRecv, + InputBytes: ioCounter.BytesRecv, + OutputPkts: ioCounter.PacketsSent, + OutputBytes: ioCounter.BytesSent, + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) collectIOCounters() ([]net.IOCountersStat, error) { + ioCounters, err := net.IOCounters(true) + if err != nil { + return nil, err + } + + return ioCounters, nil +} From 9d6087dae9e454f7b84b50802c62d61f6b58e940 Mon Sep 17 00:00:00 2001 From: 
geunwoo Date: Sun, 10 Nov 2024 16:34:55 +0900 Subject: [PATCH 042/364] Add Scheduler to Collector Scheduler component has been added to manage when and how often Checks are executed. --- pkg/collector/scheduler/scheduler.go | 81 ++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 pkg/collector/scheduler/scheduler.go diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go new file mode 100644 index 0000000..54bd18f --- /dev/null +++ b/pkg/collector/scheduler/scheduler.go @@ -0,0 +1,81 @@ +package scheduler + +import ( + "context" + "sync" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check" +) + +type Scheduler struct { + tasks map[string]*ScheduledTask + mu sync.RWMutex + stopChan chan struct{} +} + +type ScheduledTask struct { + check check.CheckStrategy + nextRun time.Time + interval time.Duration + running bool +} + +func NewScheduler() *Scheduler { + return &Scheduler{ + tasks: make(map[string]*ScheduledTask), + stopChan: make(chan struct{}), + } +} + +func (s *Scheduler) AddTask(check check.CheckStrategy) error { + s.mu.Lock() + defer s.mu.Unlock() + + s.tasks[check.GetName()] = &ScheduledTask{ + check: check, + nextRun: time.Now(), + interval: check.GetInterval(), + running: false, + } + return nil +} + +func (s *Scheduler) Start(ctx context.Context) { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-s.stopChan: + return + case <-ticker.C: + s.mu.RLock() + now := time.Now() + for _, task := range s.tasks { + if now.After(task.nextRun) && !task.running { + task.running = true + go s.executeTask(ctx, task) + } + } + s.mu.RUnlock() + } + } +} + +func (s *Scheduler) Stop() { + close(s.stopChan) +} + +func (s *Scheduler) executeTask(ctx context.Context, task *ScheduledTask) { + defer func() { + s.mu.Lock() + task.running = false + task.nextRun = time.Now().Add(task.interval) + s.mu.Unlock() + }() + + 
task.check.Execute(ctx) +} From 37c0b200b5fdb484798e4abab23949d95620dbd0 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sun, 10 Nov 2024 16:38:36 +0900 Subject: [PATCH 043/364] Add transporter to Collector Transporter component has been added to transmit the collected metrics to the alpacon-server. --- pkg/collector/transporter/transporter.go | 67 ++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 pkg/collector/transporter/transporter.go diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go new file mode 100644 index 0000000..df61e57 --- /dev/null +++ b/pkg/collector/transporter/transporter.go @@ -0,0 +1,67 @@ +package transporter + +import ( + "fmt" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/scheduler" +) + +var checkTypeUrlMap = map[base.CheckType]string{ + base.CPU: "/api/metrics/cpu/", + base.MEM: "/api/metrics/memory/", + base.DISK_USAGE: "/api/metrics/disk-usage/", + base.DISK_IO: "/api/metrics/disk-io/", + base.NET: "/api/metrics/traffic/", +} + +type TransportStrategy interface { + Send(data base.MetricData) error +} + +type TransporterFactory interface { + CreateTransporter(session *scheduler.Session) (TransportStrategy, error) +} + +type DefaultTransporterFactory struct{} + +type Transporter struct { + session *scheduler.Session +} + +// TODO: Support for various transporters will be required in the future +func (f *DefaultTransporterFactory) CreateTransporter(session *scheduler.Session) (TransportStrategy, error) { + return NewTransporter(session), nil +} + +func NewTransporter(session *scheduler.Session) *Transporter { + return &Transporter{ + session: session, + } +} + +func (t *Transporter) Send(data base.MetricData) error { + checkType := data.Type + + var err error + switch checkType { + case base.CPU: + _, _, err = t.session.Post(checkTypeUrlMap[base.CPU], data.Data[0], 10) + case base.MEM: + _, _, err = 
t.session.Post(checkTypeUrlMap[base.MEM], data.Data[0], 10) + case base.DISK_USAGE: + _, _, err = t.session.Post(checkTypeUrlMap[base.DISK_USAGE], data.Data, 10) + case base.DISK_IO: + _, _, err = t.session.Post(checkTypeUrlMap[base.DISK_IO], data.Data, 10) + case base.NET: + _, _, err = t.session.Post(checkTypeUrlMap[base.NET], data.Data, 10) + default: + err = fmt.Errorf("unknown check type: %s", checkType) + } + + if err != nil { + return err + } + + return nil +} From febe4a4721080db9fd513e1f85466c6011bb026a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sun, 10 Nov 2024 16:40:54 +0900 Subject: [PATCH 044/364] Add collector for monitoring system Collector component has been added to collect real-time metrics from server and transmit them to alpacon-server. --- pkg/collector/collector.go | 130 +++++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 pkg/collector/collector.go diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go new file mode 100644 index 0000000..5edafdd --- /dev/null +++ b/pkg/collector/collector.go @@ -0,0 +1,130 @@ +package collector + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/collector/scheduler" + "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" + session "github.com/alpacanetworks/alpamon-go/pkg/scheduler" +) + +type Collector struct { + transporter transporter.TransportStrategy + scheduler *scheduler.Scheduler + buffer *base.CheckBuffer + errorChan chan error + wg sync.WaitGroup + stopChan chan struct{} +} + +func NewCollector(session *session.Session, checkFactory check.CheckFactory, transporterFactory transporter.TransporterFactory) (*Collector, error) { + transporter, err := transporterFactory.CreateTransporter(session) + if err != nil { + return nil, err + } + + checkBuffer := 
base.NewCheckBuffer(10) + + collector := &Collector{ + transporter: transporter, + scheduler: scheduler.NewScheduler(), + buffer: checkBuffer, + errorChan: make(chan error, 10), + stopChan: make(chan struct{}), + } + + checkTypes := map[base.CheckType]string{ + base.CPU: "cpu", + base.MEM: "memory", + base.DISK_USAGE: "disk_usage", + base.DISK_IO: "disk_io", + base.NET: "net", + } + for checkType, name := range checkTypes { + check, err := checkFactory.CreateCheck(checkType, name, time.Duration(time.Duration.Seconds(5)), checkBuffer) + if err != nil { + return nil, err + } + if err := collector.scheduler.AddTask(check); err != nil { + return nil, err + } + } + + return collector, nil +} + +func (c *Collector) Start(ctx context.Context) error { + go c.scheduler.Start(ctx) + + for i := 0; i < 5; i++ { + c.wg.Add(1) + go c.successQueueWorker(ctx) + } + + c.wg.Add(1) + go c.failureQueueWorker(ctx) + + return nil +} + +func (c *Collector) successQueueWorker(ctx context.Context) { + defer c.wg.Done() + + for { + select { + case <-ctx.Done(): + return + case <-c.stopChan: + return + case metric := <-c.buffer.SuccessQueue: + if err := c.transporter.Send(metric); err != nil { + select { + case c.buffer.FailureQueue <- metric: + default: + c.errorChan <- fmt.Errorf("failed to move metric to failure queue: %v", err) + } + } + } + } + +} + +func (c *Collector) failureQueueWorker(ctx context.Context) { + defer c.wg.Done() + + retryTicker := time.NewTicker(5 * time.Second) + defer retryTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-c.stopChan: + return + case <-retryTicker.C: + metric := <-c.buffer.FailureQueue + if err := c.transporter.Send(metric); err != nil { + c.buffer.FailureQueue <- metric + } + } + } +} + +func (c *Collector) Stop() { + close(c.stopChan) + c.scheduler.Stop() + c.wg.Wait() + + close(c.buffer.SuccessQueue) + close(c.buffer.FailureQueue) + close(c.errorChan) +} + +func (c *Collector) Errors() <-chan error { + return c.errorChan +} 
From af6c88204bd31854130e3ee38363b5e15cf1dd21 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sun, 10 Nov 2024 16:42:24 +0900 Subject: [PATCH 045/364] Add Post() for post request Add Post() to make POST requests using Session. --- pkg/scheduler/session.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 2a30bab..03ffd2e 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -6,14 +6,15 @@ import ( "crypto/x509" "encoding/json" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" "io" "net/http" "os" "strings" "time" + + "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" ) const ( @@ -170,3 +171,12 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT return responseBody, resp.StatusCode, nil } + +func (session *Session) Post(url string, rawBody interface{}, timeout time.Duration) ([]byte, int, error) { + req, err := session.newRequest(http.MethodPost, url, rawBody) + if err != nil { + return nil, 0, err + } + + return session.do(req, timeout) +} From 74559eef87e88d1ddea92fe112ed8098b3b4758f Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sun, 10 Nov 2024 16:43:22 +0900 Subject: [PATCH 046/364] Add logic to start collector Add logic to start collector. 
--- cmd/alpamon/command/root.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/cmd/alpamon/command/root.go index 01a6294..83ac068 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -1,10 +1,14 @@ package command import ( + "context" "fmt" "os" "syscall" + "github.com/alpacanetworks/alpamon-go/pkg/collector" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check" + "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/pidfile" @@ -59,6 +63,29 @@ func runAgent() { // Commit runner.CommitAsync(session, commissioned) + checkFactory := &check.DefaultCheckFactory{} + transporterFactory := &transporter.DefaultTransporterFactory{} + + collector, err := collector.NewCollector(session, checkFactory, transporterFactory) + if err != nil { + log.Error().Err(err).Msg("Failed to create collector") + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := collector.Start(ctx); err != nil { + log.Error().Err(err).Msg("Failed to start collector") + return + } + + go func() { + for err := range collector.Errors() { + log.Error().Err(err).Msg("Collector error") + } + }() + // Websocket Client wsClient := runner.NewWebsocketClient(session) wsClient.RunForever() From 1983fcb29a0663652d436af119659979d4ceacc7 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Sun, 10 Nov 2024 18:52:34 +0900 Subject: [PATCH 047/364] Fix Web FTP related command Due to insufficient permissions, the process was unable to access the alpamon.conf file. As a workaround, fix the arguments so that ServerURL is passed directly to the worker.
--- cmd/alpamon/command/ftp.go | 7 +++---- pkg/runner/command.go | 1 + pkg/runner/ftp.go | 8 ++------ pkg/runner/ftp_types.go | 3 +-- 4 files changed, 7 insertions(+), 12 deletions(-) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp.go index aa72885..e74dede 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp.go @@ -1,7 +1,6 @@ package command import ( - "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/spf13/cobra" @@ -10,13 +9,13 @@ import ( var ftpCmd = &cobra.Command{ Use: "ftp ", Short: "Start worker for Web FTP", - Args: cobra.ExactArgs(2), + Args: cobra.ExactArgs(3), Run: func(cmd *cobra.Command, args []string) { data := runner.FtpConfigData{ URL: args[0], - HomeDirectory: args[1], + ServerURL: args[1], + HomeDirectory: args[2], Logger: logger.NewFtpLogger(), - Settings: config.LoadConfig(), } RunFtpWorker(data) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 45a38b0..0a7f9b9 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -611,6 +611,7 @@ func (cr *CommandRunner) openFtp(data openFtpData) error { executable, "ftp", data.URL, + config.GlobalSettings.ServerURL, data.HomeDirectory, ) cmd.SysProcAttr = sysProcAttr diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 53340ae..2dfc9be 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" - "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/gorilla/websocket" ) @@ -22,22 +21,19 @@ type FtpClient struct { homeDirectory string workingDirectory string log logger.FtpLogger - settings config.Settings } func NewFtpClient(data FtpConfigData) *FtpClient { headers := http.Header{ - "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, data.Settings.ID, data.Settings.Key)}, - "Origin": {data.Settings.ServerURL}, + "Origin": 
{data.ServerURL}, } return &FtpClient{ requestHeader: headers, - url: strings.Replace(data.Settings.ServerURL, "http", "ws", 1) + data.URL, + url: strings.Replace(data.ServerURL, "http", "ws", 1) + data.URL, homeDirectory: data.HomeDirectory, workingDirectory: data.HomeDirectory, log: data.Logger, - settings: data.Settings, } } diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 302fb0f..ad2986f 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -4,7 +4,6 @@ import ( "strings" "time" - "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/logger" ) @@ -31,9 +30,9 @@ const ( type FtpConfigData struct { URL string + ServerURL string HomeDirectory string Logger logger.FtpLogger - Settings config.Settings } type FtpData struct { From d5cd38cfeef68dbcf53420b3a31c6073b44679cd Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 11 Nov 2024 11:14:38 +0900 Subject: [PATCH 048/364] Fix makeArchive() During single file uploads, a panic was triggered when cmd.Run() was unexpectedly invoked, causing the application to crash. To address this, fix the logic to ensure cmd.Run() is only called when bulk or recursive flags are explicitly set to true. 
--- pkg/runner/command.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 0a7f9b9..f34eb6d 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -728,9 +728,11 @@ func makeArchive(paths []string, bulk, recursive bool, sysProcAttr *syscall.SysP } } - err := cmd.Run() - if err != nil { - return "", err + if bulk || recursive { + err := cmd.Run() + if err != nil { + return "", err + } } return archiveName, nil From 32341e5086abe29cb57064bd7a101859e1182b72 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 11 Nov 2024 13:12:32 +0900 Subject: [PATCH 049/364] Fix unzip linux command The process was failing because unzip command was being executed prematurely, before the zip file was created. Fix this by changing | to &&, ensuring that unzip command only runs after the zip file has been successfully generated. --- pkg/runner/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index f34eb6d..109652a 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -747,7 +747,7 @@ func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode isZip := isZipFile(content) if isZip { - command := fmt.Sprintf("tee -a %s > /dev/null | unzip -n %s -d %s; rm %s", + command := fmt.Sprintf("tee -a %s > /dev/null && unzip -n %s -d %s; rm %s", strings.ReplaceAll(data.Path, " ", "\\ "), strings.ReplaceAll(data.Path, " ", "\\ "), strings.ReplaceAll(filepath.Dir(data.Path), " ", "\\ "), From a9a12f0f6523f3f8fd970b81769b8361eb74ead8 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 11 Nov 2024 13:22:10 +0900 Subject: [PATCH 050/364] Minor fix Set working directory for web ftp process. 
--- pkg/runner/command.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 109652a..e44ee2a 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -615,6 +615,7 @@ func (cr *CommandRunner) openFtp(data openFtpData) error { data.HomeDirectory, ) cmd.SysProcAttr = sysProcAttr + cmd.Dir = data.HomeDirectory cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr From 837456528beee5970f380d54dde2785b25cb5fdd Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 11 Nov 2024 17:20:42 +0900 Subject: [PATCH 051/364] Minor fix Roll back. --- pkg/runner/command.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index e44ee2a..109652a 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -615,7 +615,6 @@ func (cr *CommandRunner) openFtp(data openFtpData) error { data.HomeDirectory, ) cmd.SysProcAttr = sysProcAttr - cmd.Dir = data.HomeDirectory cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr From f92f880d29442f744383e0ab2e9738441a67ee75 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Thu, 14 Nov 2024 11:49:33 +0900 Subject: [PATCH 052/364] Add disk usage related fields to CheckResult Fields have been added to CheckResult due to model changes in alpacon-server. 
--- pkg/collector/check/base/base.go | 4 ++-- pkg/collector/check/disk/usage/usage.go | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index 8741676..e7048b2 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -19,11 +19,11 @@ type CheckResult struct { Usage float64 `json:"usage,omitempty"` Name string `json:"name,omitempty"` Device string `json:"device,omitempty"` - Partition string `json:"partition,omitempty"` + MountPoint string `json:"mount_point,omitempty"` Total uint64 `json:"total,omitempty"` Free uint64 `json:"free,omitempty"` Used uint64 `json:"used,omitempty"` - WriteBytes uint64 `json:"wrtie_bytes,omitempty"` + WriteBytes uint64 `json:"write_bytes,omitempty"` ReadBytes uint64 `json:"read_bytes,omitempty"` InputPkts uint64 `json:"input_pkts,omitempty"` InputBytes uint64 `json:"input_bytes,omitempty"` diff --git a/pkg/collector/check/disk/usage/usage.go b/pkg/collector/check/disk/usage/usage.go index bd2dfa7..ab9cc5b 100644 --- a/pkg/collector/check/disk/usage/usage.go +++ b/pkg/collector/check/disk/usage/usage.go @@ -40,13 +40,16 @@ func (c *Check) Execute(ctx context.Context) { } if err == nil { for _, partition := range partitions { - usage, err := c.collectDiskUsage(partition.Mountpoint) - if err == nil { + usage, usageErr := c.collectDiskUsage(partition.Mountpoint) + if usageErr == nil { data := base.CheckResult{ - Usage: usage.UsedPercent, - Total: usage.Total, - Free: usage.Free, - Used: usage.Used, + Timestamp: time.Now(), + Device: partition.Device, + MountPoint: partition.Mountpoint, + Usage: usage.UsedPercent, + Total: usage.Total, + Free: usage.Free, + Used: usage.Used, } metric.Data = append(metric.Data, data) } From b04a1198ca39fb40b7b89912fbb2e6a2d930aa77 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Thu, 14 Nov 2024 11:55:33 +0900 Subject: [PATCH 053/364] Refactor to collect only physical interfaces 
Refactored to collect only physical interfaces by checking for MAC addresses. --- pkg/collector/check/net/net.go | 49 +++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/pkg/collector/check/net/net.go b/pkg/collector/check/net/net.go index 69c2374..5a672f8 100644 --- a/pkg/collector/check/net/net.go +++ b/pkg/collector/check/net/net.go @@ -24,24 +24,25 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Ch func (c *Check) Execute(ctx context.Context) { ioCounters, err := c.collectIOCounters() + interfaces, _ := c.collectInterfaces() - var metric base.MetricData - if err != nil { - metric = base.MetricData{ - Type: checkType, - Data: []base.CheckResult{}, - } - } else { + metric := base.MetricData{ + Type: checkType, + Data: []base.CheckResult{}, + } + if err == nil { for _, ioCounter := range ioCounters { - data := base.CheckResult{ - Timestamp: time.Now(), - Name: ioCounter.Name, - InputPkts: ioCounter.PacketsRecv, - InputBytes: ioCounter.BytesRecv, - OutputPkts: ioCounter.PacketsSent, - OutputBytes: ioCounter.BytesSent, + if _, ok := interfaces[ioCounter.Name]; ok { + data := base.CheckResult{ + Timestamp: time.Now(), + Name: ioCounter.Name, + InputPkts: ioCounter.PacketsRecv, + InputBytes: ioCounter.BytesRecv, + OutputPkts: ioCounter.PacketsSent, + OutputBytes: ioCounter.BytesSent, + } + metric.Data = append(metric.Data, data) } - metric.Data = append(metric.Data, data) } } @@ -57,6 +58,24 @@ func (c *Check) Execute(ctx context.Context) { } } +func (c *Check) collectInterfaces() (map[string]net.InterfaceStat, error) { + ifaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + interfaces := map[string]net.InterfaceStat{} + for _, iface := range ifaces { + mac := iface.HardwareAddr + if mac == "" { + continue + } + interfaces[iface.Name] = iface + } + + return interfaces, nil +} + func (c *Check) collectIOCounters() ([]net.IOCountersStat, error) { ioCounters, err := 
net.IOCounters(true) if err != nil { From 85cd56fe5c339455a6866da49729a67b51349a8b Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 15 Nov 2024 16:22:24 +0900 Subject: [PATCH 054/364] Add exception handling to list() To prevent stack overflows caused by excessively large depth values in list command, add exception handling. If depth exceeds 3, an error response is returned. To avoid errors caused by recursive behavior of listRecursive() on symbolic links, add exception handling specifically for symbolic links. --- pkg/runner/ftp.go | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 2dfc9be..482acf2 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -175,6 +175,13 @@ func (fc *FtpClient) list(rootDir string, depth int) (CommandResult, error) { } func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResult, error) { + if depth > 3 { + return CommandResult{ + Code: 550, + Message: "The depth has reached its limit. 
Please try a lower depth.", + }, nil + } + result := CommandResult{ Name: filepath.Base(path), Type: "folder", @@ -186,17 +193,32 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu entries, err := os.ReadDir(path) if err != nil { - return CommandResult{ + errResult := CommandResult{ Name: filepath.Base(path), Path: path, Message: err.Error(), - }, nil + } + _, errResult.Code = GetFtpErrorCode(List, errResult) + + return errResult, nil } for _, entry := range entries { fullPath := filepath.Join(path, entry.Name()) - info, err := entry.Info() + info, err := os.Lstat(fullPath) if err != nil { + errChild := CommandResult{ + Name: entry.Name(), + Path: fullPath, + Message: err.Error(), + } + _, errChild.Code = GetFtpErrorCode(List, errChild) + result.Children = append(result.Children, errChild) + + continue + } + + if info.Mode()&os.ModeSymlink != 0 { continue } @@ -204,6 +226,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu child := CommandResult{ Name: entry.Name(), Path: fullPath, + Code: returnCodes[List].Success, Size: info.Size(), ModTime: &modTime, } @@ -213,13 +236,14 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu if current < depth-1 { childResult, err := fc.listRecursive(fullPath, depth, current+1) if err != nil { + result.Children = append(result.Children, childResult) continue } - child.Children = childResult.Children - child.Size = childResult.Size + child = childResult } } else { child.Type = "file" + child.Code = returnCodes[List].Success } result.Children = append(result.Children, child) @@ -229,9 +253,11 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu dirInfo, err := os.Stat(path) if err != nil { result.Message = err.Error() + _, result.Code = GetFtpErrorCode(List, result) } else { modTime := dirInfo.ModTime() result.ModTime = &modTime + result.Code = returnCodes[List].Success } return result, nil From 
eb9a5542590d6fc72f52f0796f56496a5433a9ec Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 15 Nov 2024 16:46:33 +0900 Subject: [PATCH 055/364] Add Error Types and fields To accommodate the front-end team's requests, add new error types and fields. --- pkg/runner/ftp.go | 8 +++++--- pkg/runner/ftp_types.go | 18 +++++++++++++++++- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 482acf2..50c9153 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -177,9 +177,8 @@ func (fc *FtpClient) list(rootDir string, depth int) (CommandResult, error) { func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResult, error) { if depth > 3 { return CommandResult{ - Code: 550, - Message: "The depth has reached its limit. Please try a lower depth.", - }, nil + Message: ErrTooLargeDepth, + }, fmt.Errorf(ErrTooLargeDepth) } result := CommandResult{ @@ -362,6 +361,7 @@ func (fc *FtpClient) mv(src, dst string) (CommandResult, error) { } return CommandResult{ + Dst: dst, Message: fmt.Sprintf("Move %s to %s", src, dst), }, nil } @@ -392,6 +392,7 @@ func (fc *FtpClient) cpDir(src, dst string) (CommandResult, error) { } return CommandResult{ + Dst: dst, Message: fmt.Sprintf("Copy %s to %s", src, dst), }, nil } @@ -405,6 +406,7 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { } return CommandResult{ + Dst: dst, Message: fmt.Sprintf("Copy %s to %s", src, dst), }, nil } diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index ad2986f..503c9e0 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -22,6 +22,8 @@ const ( const ( ErrPermissionDenied = "permission denied" + ErrOperationNotPermitted = "operation not permitted" + ErrTooLargeDepth = "depth has reached its limit. 
please try a lower depth" ErrInvalidArgument = "invalid argument" ErrNoSuchFileOrDirectory = "no such file or directory" ErrFileExists = "file exists" @@ -59,6 +61,8 @@ type CommandResult struct { Name string `json:"name,omitempty"` Type string `json:"type,omitempty"` Path string `json:"path,omitempty"` + Dst string `json:"dst,omitempty"` + Code int `json:"code,omitempty"` Size int64 `json:"size,omitempty"` Children []CommandResult `json:"children,omitempty"` ModTime *time.Time `json:"mod_time,omitempty"` @@ -73,12 +77,18 @@ type returnCode struct { var returnCodes = map[FtpCommand]returnCode{ List: { Success: 250, - Error: map[string]int{}, + Error: map[string]int{ + ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, + ErrTooLargeDepth: 452, + ErrNoSuchFileOrDirectory: 550, + }, }, Mkd: { Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, ErrInvalidArgument: 452, ErrNoSuchFileOrDirectory: 550, ErrFileExists: 552, @@ -88,6 +98,7 @@ var returnCodes = map[FtpCommand]returnCode{ Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, ErrNoSuchFileOrDirectory: 550, }, }, @@ -95,6 +106,7 @@ var returnCodes = map[FtpCommand]returnCode{ Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, ErrNoSuchFileOrDirectory: 550, }, }, @@ -102,6 +114,7 @@ var returnCodes = map[FtpCommand]returnCode{ Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, ErrInvalidArgument: 452, ErrNoSuchFileOrDirectory: 550, }, @@ -110,6 +123,7 @@ var returnCodes = map[FtpCommand]returnCode{ Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, ErrInvalidArgument: 452, ErrNoSuchFileOrDirectory: 550, ErrDirectoryNotEmpty: 552, @@ -119,6 +133,7 @@ var returnCodes = map[FtpCommand]returnCode{ Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, 
ErrInvalidArgument: 452, ErrNoSuchFileOrDirectory: 550, ErrFileExists: 552, @@ -128,6 +143,7 @@ var returnCodes = map[FtpCommand]returnCode{ Success: 250, Error: map[string]int{ ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, ErrInvalidArgument: 452, ErrNoSuchFileOrDirectory: 550, ErrFileExists: 552, From aceee344df046a034fd89490ae6424b659c14a1a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 15 Nov 2024 17:22:18 +0900 Subject: [PATCH 056/364] Minor fix Apply golint --- pkg/runner/ftp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 50c9153..4e8ffab 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -178,7 +178,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu if depth > 3 { return CommandResult{ Message: ErrTooLargeDepth, - }, fmt.Errorf(ErrTooLargeDepth) + }, fmt.Errorf("%s", ErrTooLargeDepth) } result := CommandResult{ From b80e2f5ed8b8526382f118e2c5bc563399acd132 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 18 Nov 2024 11:03:17 +0900 Subject: [PATCH 057/364] Fix upgrade command --- pkg/runner/command.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 109652a..5ccb6d5 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -89,11 +89,11 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { switch args[0] { case "upgrade": if utils.PlatformLike == "debian" { - cmd = "apt-get upgrate -y && " + + cmd = "apt-get update -y && " + "apt-get upgrade -y alpamon" } else if utils.PlatformLike == "rhel" { - cmd = "yum update- y " + - "yum update -y alpamon" + cmd = "yum update- y &&" + + "yum upgrade -y alpamon" } else { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) } From 6aad49e03883c44968e5439c0b24667f5262f86b Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 18 Nov 2024 14:05:43 +0900 Subject: [PATCH 058/364] 
Refine URL comparison logic to improve security --- pkg/runner/command.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 5ccb6d5..706dee4 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -9,6 +9,7 @@ import ( "io" "mime/multipart" "net/http" + "net/url" "os" "os/exec" "os/user" @@ -631,15 +632,24 @@ func getFileData(data CommandData) ([]byte, error) { var content []byte switch data.Type { case "url": - url := data.Content + parsedRequestURL, err := url.Parse(data.Content) + if err != nil { + return nil, fmt.Errorf("failed to parse URL '%s': %w", data.Content, err) + } - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest("GET", parsedRequestURL.String(), nil) if err != nil { return nil, fmt.Errorf("failed to create request: %w", err) } - if strings.HasPrefix(url, config.GlobalSettings.ServerURL) { - req.Header.Set("Authorization", fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key)) + parsedServerURL, err := url.Parse(config.GlobalSettings.ServerURL) + if err != nil { + return nil, fmt.Errorf("failed to parse url: %w", err) + } + + if parsedRequestURL.Host == parsedServerURL.Host && parsedRequestURL.Scheme == parsedServerURL.Scheme { + req.Header.Set("Authorization", fmt.Sprintf(`id="%s", key="%s"`, + config.GlobalSettings.ID, config.GlobalSettings.Key)) } client := http.Client{} @@ -650,7 +660,7 @@ func getFileData(data CommandData) ([]byte, error) { defer func() { _ = resp.Body.Close() }() if (resp.StatusCode / 100) != 2 { - log.Error().Msgf("Failed to download content from URL: %d %s", resp.StatusCode, url) + log.Error().Msgf("Failed to download content from URL: %d %s", resp.StatusCode, parsedRequestURL) return nil, errors.New("downloading content failed") } content, err = io.ReadAll(resp.Body) From 61cae3a0e817113edb533cacfd1128a97c0eed5d Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 
2024 15:50:07 +0900 Subject: [PATCH 059/364] Add schemas Decide to use SQLite to store real-time metrics temporarily for batch processing. Define schemas and add corresponding ent code. Add CPU, Memory, Traffic, DiskUsage, DiskIO, and per-hour metrics for CPUPerHour, MemoryPerHour, TrafficPerHour, DiskUsagePerHour, and DiskIOPerHour. --- pkg/db/client.go | 34 +++++++++++++++++++++++++++ pkg/db/ent/generate.go | 3 +++ pkg/db/migrate.go | 23 ++++++++++++++++++ pkg/db/schema/cpu.go | 28 ++++++++++++++++++++++ pkg/db/schema/cpu_per_hour.go | 29 +++++++++++++++++++++++ pkg/db/schema/diskio.go | 30 ++++++++++++++++++++++++ pkg/db/schema/diskio_per_hour.go | 32 +++++++++++++++++++++++++ pkg/db/schema/diskusage.go | 33 ++++++++++++++++++++++++++ pkg/db/schema/diskusage_per_hour.go | 31 +++++++++++++++++++++++++ pkg/db/schema/memory.go | 28 ++++++++++++++++++++++ pkg/db/schema/memory_per_hour.go | 29 +++++++++++++++++++++++ pkg/db/schema/traffic.go | 32 +++++++++++++++++++++++++ pkg/db/schema/traffic_per_hour.go | 36 +++++++++++++++++++++++++++++ 13 files changed, 368 insertions(+) create mode 100644 pkg/db/client.go create mode 100644 pkg/db/ent/generate.go create mode 100644 pkg/db/migrate.go create mode 100644 pkg/db/schema/cpu.go create mode 100644 pkg/db/schema/cpu_per_hour.go create mode 100644 pkg/db/schema/diskio.go create mode 100644 pkg/db/schema/diskio_per_hour.go create mode 100644 pkg/db/schema/diskusage.go create mode 100644 pkg/db/schema/diskusage_per_hour.go create mode 100644 pkg/db/schema/memory.go create mode 100644 pkg/db/schema/memory_per_hour.go create mode 100644 pkg/db/schema/traffic.go create mode 100644 pkg/db/schema/traffic_per_hour.go diff --git a/pkg/db/client.go b/pkg/db/client.go new file mode 100644 index 0000000..d18fd9e --- /dev/null +++ b/pkg/db/client.go @@ -0,0 +1,34 @@ +package db + +import ( + "sync" + + "entgo.io/ent/dialect" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + _ "github.com/mattn/go-sqlite3" +) + +var ( + client 
*ent.Client + once sync.Once + initErr error +) + +func GetClient() (*ent.Client, error) { + once.Do(func() { + var err error + client, err = ent.Open(dialect.SQLite, "file:./metrics.db?cache=shared&_fk=1") + if err != nil { + initErr = err + client = nil + } + }) + return client, initErr +} + +func Close() error { + if client != nil { + return client.Close() + } + return nil +} diff --git a/pkg/db/ent/generate.go b/pkg/db/ent/generate.go new file mode 100644 index 0000000..8d3fdfd --- /dev/null +++ b/pkg/db/ent/generate.go @@ -0,0 +1,3 @@ +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go new file mode 100644 index 0000000..db6f84c --- /dev/null +++ b/pkg/db/migrate.go @@ -0,0 +1,23 @@ +package db + +import ( + "context" + + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/migrate" +) + +// TODO: Apply Versioned Migrations +func RunMigration(ctx context.Context, client *ent.Client) error { + err := client.Schema.Create( + ctx, + migrate.WithDropIndex(true), + migrate.WithDropColumn(true), + ) + + if err != nil { + return err + } + + return nil +} diff --git a/pkg/db/schema/cpu.go b/pkg/db/schema/cpu.go new file mode 100644 index 0000000..ca78baf --- /dev/null +++ b/pkg/db/schema/cpu.go @@ -0,0 +1,28 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// CPU holds the schema definition for the CPU entity. +type CPU struct { + ent.Schema +} + +// Fields of the CPU. 
+func (CPU) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.Float("usage"), + } +} + +func (CPU) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/cpu_per_hour.go b/pkg/db/schema/cpu_per_hour.go new file mode 100644 index 0000000..3f4ec0d --- /dev/null +++ b/pkg/db/schema/cpu_per_hour.go @@ -0,0 +1,29 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// CPUPerHour holds the schema definition for the CPUPerHour entity. +type CPUPerHour struct { + ent.Schema +} + +// Fields of the CPUPerHour. +func (CPUPerHour) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.Float("peak_usage"), + field.Float("avg_usage"), + } +} + +func (CPUPerHour) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/diskio.go b/pkg/db/schema/diskio.go new file mode 100644 index 0000000..02fc945 --- /dev/null +++ b/pkg/db/schema/diskio.go @@ -0,0 +1,30 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// DiskIO holds the schema definition for the DiskIO entity. +type DiskIO struct { + ent.Schema +} + +// Fields of the DiskIO. 
+func (DiskIO) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.String("device"), + field.Int64("read_bytes"), + field.Int64("write_bytes"), + } +} + +func (DiskIO) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/diskio_per_hour.go b/pkg/db/schema/diskio_per_hour.go new file mode 100644 index 0000000..a426367 --- /dev/null +++ b/pkg/db/schema/diskio_per_hour.go @@ -0,0 +1,32 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// DiskIOPerHour holds the schema definition for the DiskIOPerHour entity. +type DiskIOPerHour struct { + ent.Schema +} + +// Fields of the DiskIOPerHour. +func (DiskIOPerHour) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.String("device"), + field.Int64("peak_read_bytes"), + field.Int64("peak_write_bytes"), + field.Int64("avg_read_bytes"), + field.Int64("avg_write_bytes"), + } +} + +func (DiskIOPerHour) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/diskusage.go b/pkg/db/schema/diskusage.go new file mode 100644 index 0000000..5d26a66 --- /dev/null +++ b/pkg/db/schema/diskusage.go @@ -0,0 +1,33 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// DiskUsage holds the schema definition for the DiskUsage entity. +type DiskUsage struct { + ent.Schema +} + +// Fields of the DiskUsage. 
+func (DiskUsage) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.String("device"), + field.String("mount_point"), + field.Float("usage"), + field.Int64("total"), + field.Int64("free"), + field.Int64("used"), + } +} + +func (DiskUsage) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/diskusage_per_hour.go b/pkg/db/schema/diskusage_per_hour.go new file mode 100644 index 0000000..f8e2daa --- /dev/null +++ b/pkg/db/schema/diskusage_per_hour.go @@ -0,0 +1,31 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// DiskUsagePerHour holds the schema definition for the DiskUsagePerHour entity. +type DiskUsagePerHour struct { + ent.Schema +} + +// Fields of the DiskUsagePerHour. +func (DiskUsagePerHour) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.String("device"), + field.String("mount_point"), + field.Float("peak_usage"), + field.Float("avg_usage"), + } +} + +func (DiskUsagePerHour) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/memory.go b/pkg/db/schema/memory.go new file mode 100644 index 0000000..82542ae --- /dev/null +++ b/pkg/db/schema/memory.go @@ -0,0 +1,28 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Memory holds the schema definition for the Memory entity. +type Memory struct { + ent.Schema +} + +// Fields of the Memory. 
+func (Memory) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.Float("usage"), + } +} + +func (Memory) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/memory_per_hour.go b/pkg/db/schema/memory_per_hour.go new file mode 100644 index 0000000..87c0550 --- /dev/null +++ b/pkg/db/schema/memory_per_hour.go @@ -0,0 +1,29 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// MemoryPerHour holds the schema definition for theMemoryPerHour entity. +type MemoryPerHour struct { + ent.Schema +} + +// Fields of the MemoryPerHour. +func (MemoryPerHour) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.Float("peak_usage"), + field.Float("avg_usage"), + } +} + +func (MemoryPerHour) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/traffic.go b/pkg/db/schema/traffic.go new file mode 100644 index 0000000..42dc9cc --- /dev/null +++ b/pkg/db/schema/traffic.go @@ -0,0 +1,32 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Traffic holds the schema definition for the Traffic entity. +type Traffic struct { + ent.Schema +} + +// Fields of the Traffic. 
+func (Traffic) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.String("name"), + field.Int64("input_pkts"), + field.Int64("input_bytes"), + field.Int64("output_pkts"), + field.Int64("output_bytes"), + } +} + +func (Traffic) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} diff --git a/pkg/db/schema/traffic_per_hour.go b/pkg/db/schema/traffic_per_hour.go new file mode 100644 index 0000000..7d620ea --- /dev/null +++ b/pkg/db/schema/traffic_per_hour.go @@ -0,0 +1,36 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// TrafficPerHour holds the schema definition for the TrafficPerHour entity. +type TrafficPerHour struct { + ent.Schema +} + +// Fields of the TrafficPerHour. +func (TrafficPerHour) Fields() []ent.Field { + return []ent.Field{ + field.Time("timestamp").Default(time.Now()), + field.String("name"), + field.Int64("peak_input_pkts"), + field.Int64("peak_input_bytes"), + field.Int64("peak_output_pkts"), + field.Int64("peak_output_bytes"), + field.Int64("avg_input_pkts"), + field.Int64("avg_input_bytes"), + field.Int64("avg_output_pkts"), + field.Int64("avg_output_bytes"), + } +} + +func (TrafficPerHour) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("timestamp"), + } +} From a534434d8210fc0525b4ab1b10e681c7799aa905 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 15:52:23 +0900 Subject: [PATCH 060/364] Add ent package Add ent package for querying data with ORM, ensuring type safety, and supporting versioned migrations.
--- go.mod | 12 ++++++++++++ go.sum | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 7a04abd..a7c69ca 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/alpacanetworks/alpamon-go go 1.22.5 require ( + entgo.io/ent v0.14.0 github.com/adrianbrad/queue v1.3.0 github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 @@ -10,6 +11,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/knqyf263/go-rpmdb v0.1.1 + github.com/mattn/go-sqlite3 v1.14.16 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v4 v4.24.8 github.com/spf13/cobra v1.8.1 @@ -19,16 +21,23 @@ require ( ) require ( + ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect + github.com/agext/levenshtein v1.2.1 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 // indirect @@ -37,7 +46,10 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect 
github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zclconf/go-cty v1.8.0 // indirect + golang.org/x/mod v0.15.0 // indirect golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 9f410a0..7de8d1c 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,15 @@ +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +entgo.io/ent v0.14.0 h1:EO3Z9aZ5bXJatJeGqu/EVdnNr6K4mRq3rWe5owt0MC4= +entgo.io/ent v0.14.0/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/adrianbrad/queue v1.3.0 h1:8FH1N+93HXbqta5+URa1AL+diV7MP3VDXAEnP+DNp48= github.com/adrianbrad/queue v1.3.0/go.mod h1:wYiPC/3MPbyT45QHLrPR4zcqJWPePubM1oEP/xTwhUs= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -14,11 +24,18 @@ github.com/glebarez/go-sqlite v1.20.3 
h1:89BkqGOXR9oRmG58ZrzgoY/Fhy5x0M+/WV48U5z github.com/glebarez/go-sqlite v1.20.3/go.mod h1:u3N6D/wftiAzIOJtZl6BmedqxmmkDfH3q+ihjqxC9u0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -28,10 +45,20 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/knqyf263/go-rpmdb v0.1.1 h1:oh68mTCvp1XzxdU7EfafcWzzfstUZAEa3MW0IJye584= github.com/knqyf263/go-rpmdb v0.1.1/go.mod h1:9LQcoMCMQ9vrF7HcDtXfvqGO4+ddxFQ8+YF/0CVGDww= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -42,6 +69,14 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= 
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -54,6 +89,8 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil/v4 v4.24.8 h1:pVQjIenQkIhqO81mwTaXjTzOMT7d3TZkf43PlVFHENI= github.com/shirou/gopsutil/v4 v4.24.8/go.mod h1:wE0OrJtj4dG+hYkxqDH3QiBICdKSf04/npcvLLc/oRg= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -70,8 +107,20 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 
h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -81,11 +130,21 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= 
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= From f08bd294550d958fe1e131881642b45003f3270d Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 16:00:35 +0900 Subject: 
[PATCH 061/364] Refactor Check Add functions to store data in SQLite database for batch processing. Moved existing checks to realtime folder to separate batch checks. --- pkg/collector/check/{ => realtime}/cpu/cpu.go | 35 ++++++++++++---- .../check/{ => realtime}/disk/io/io.go | 39 +++++++++++++---- .../check/{ => realtime}/disk/usage/usage.go | 42 +++++++++++++++---- .../check/{ => realtime}/memory/memory.go | 40 ++++++++++++++---- pkg/collector/check/{ => realtime}/net/net.go | 41 ++++++++++++++---- 5 files changed, 152 insertions(+), 45 deletions(-) rename pkg/collector/check/{ => realtime}/cpu/cpu.go (57%) rename pkg/collector/check/{ => realtime}/disk/io/io.go (54%) rename pkg/collector/check/{ => realtime}/disk/usage/usage.go (65%) rename pkg/collector/check/{ => realtime}/memory/memory.go (53%) rename pkg/collector/check/{ => realtime}/net/net.go (62%) diff --git a/pkg/collector/check/cpu/cpu.go b/pkg/collector/check/realtime/cpu/cpu.go similarity index 57% rename from pkg/collector/check/cpu/cpu.go rename to pkg/collector/check/realtime/cpu/cpu.go index b365b5e..9b5c372 100644 --- a/pkg/collector/check/cpu/cpu.go +++ b/pkg/collector/check/realtime/cpu/cpu.go @@ -6,36 +6,42 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/cpu" ) -const ( - checkType = base.CPU -) - type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer), + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), } } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + usage, err := c.collectCPUUsage() + if err != nil { + checkError.CollectError = err + } metric := base.MetricData{ - Type: checkType, + 
Type: base.CPU, Data: []base.CheckResult{}, } - if err == nil { + if checkError.CollectError == nil { data := base.CheckResult{ Timestamp: time.Now(), Usage: usage, } metric.Data = append(metric.Data, data) + + if err := c.saveCPUUsage(ctx, data); err != nil { + checkError.QueryError = err + } } if ctx.Err() != nil { @@ -43,7 +49,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.QueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -62,3 +68,14 @@ func (c *Check) collectCPUUsage() (float64, error) { return usage[0], nil } + +func (c *Check) saveCPUUsage(ctx context.Context, data base.CheckResult) error { + client := c.GetClient() + if err := client.CPU.Create(). + SetTimestamp(data.Timestamp). + SetUsage(data.Usage).Exec(ctx); err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go similarity index 54% rename from pkg/collector/check/disk/io/io.go rename to pkg/collector/check/realtime/disk/io/io.go index 6ea0c85..44abd95 100644 --- a/pkg/collector/check/disk/io/io.go +++ b/pkg/collector/check/realtime/disk/io/io.go @@ -5,31 +5,33 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/disk" ) -const ( - checkType = base.DISK_IO -) - type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer), + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), } } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + ioCounters, err := c.collectDiskIO() + if err != nil { + 
checkError.CollectError = err + } metric := base.MetricData{ - Type: checkType, + Type: base.DISK_IO, Data: []base.CheckResult{}, } - if err == nil { + if checkError.CollectError == nil { for name, ioCounter := range ioCounters { data := base.CheckResult{ Timestamp: time.Now(), @@ -39,6 +41,10 @@ func (c *Check) Execute(ctx context.Context) { } metric.Data = append(metric.Data, data) } + + if err := c.saveDiskIO(ctx, metric.Data); err != nil { + checkError.QueryError = err + } } if ctx.Err() != nil { @@ -46,7 +52,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.QueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -61,3 +67,18 @@ func (c *Check) collectDiskIO() (map[string]disk.IOCountersStat, error) { return ioCounters, nil } + +func (c *Check) saveDiskIO(ctx context.Context, data []base.CheckResult) error { + client := c.GetClient() + err := client.DiskIO.MapCreateBulk(data, func(q *ent.DiskIOCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetDevice(data[i].Device). + SetReadBytes(int64(data[i].ReadBytes)). 
+ SetWriteBytes(int64(data[i].WriteBytes)) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go similarity index 65% rename from pkg/collector/check/disk/usage/usage.go rename to pkg/collector/check/realtime/disk/usage/usage.go index ab9cc5b..3fc5631 100644 --- a/pkg/collector/check/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -5,13 +5,10 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/disk" ) -const ( - checkType = base.DISK_USAGE -) - var excludedFileSystems = map[string]bool{ "tmpfs": true, "devtmpfs": true, @@ -25,20 +22,25 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer), + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), } } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + partitions, err := c.collectDiskPartitions() + if err != nil { + checkError.CollectError = err + } metric := base.MetricData{ - Type: checkType, + Type: base.DISK_USAGE, Data: []base.CheckResult{}, } - if err == nil { + if checkError.CollectError == nil { for _, partition := range partitions { usage, usageErr := c.collectDiskUsage(partition.Mountpoint) if usageErr == nil { @@ -54,6 +56,10 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = append(metric.Data, data) } } + + if err := c.saveDiskUsage(ctx, metric.Data); err != nil { + checkError.QueryError = err + } } if ctx.Err() != nil { @@ -61,7 +67,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if 
checkError.CollectError != nil || checkError.QueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -92,3 +98,21 @@ func (c *Check) collectDiskUsage(path string) (*disk.UsageStat, error) { return usage, nil } + +func (c *Check) saveDiskUsage(ctx context.Context, data []base.CheckResult) error { + client := c.GetClient() + err := client.DiskUsage.MapCreateBulk(data, func(q *ent.DiskUsageCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetDevice(data[i].Device). + SetMountPoint(data[i].MountPoint). + SetUsage(data[i].Usage). + SetTotal(int64(data[i].Total)). + SetFree(int64(data[i].Free)). + SetUsed(int64(data[i].Used)) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/memory/memory.go b/pkg/collector/check/realtime/memory/memory.go similarity index 53% rename from pkg/collector/check/memory/memory.go rename to pkg/collector/check/realtime/memory/memory.go index e651d16..96a6f44 100644 --- a/pkg/collector/check/memory/memory.go +++ b/pkg/collector/check/realtime/memory/memory.go @@ -5,36 +5,47 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/mem" ) -const ( - checkType = base.MEM -) - type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { +type MemoryCheckError struct { + CollectError error + QueryError error +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer), + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), } } func (c *Check) Execute(ctx context.Context) { + var checkError MemoryCheckError + usage, err := c.collectMemoryUsage() + if err != nil { + checkError.CollectError = err + } metric := base.MetricData{ - Type: checkType, + Type: base.MEM, 
Data: []base.CheckResult{}, } - if err == nil { + if checkError.CollectError == nil { data := base.CheckResult{ Timestamp: time.Now(), Usage: usage, } metric.Data = append(metric.Data, data) + + if err := c.saveMemoryUsage(ctx, data); err != nil { + checkError.QueryError = err + } } if ctx.Err() != nil { @@ -42,7 +53,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.QueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -57,3 +68,14 @@ func (c *Check) collectMemoryUsage() (float64, error) { return memory.UsedPercent, nil } + +func (c *Check) saveMemoryUsage(ctx context.Context, data base.CheckResult) error { + client := c.GetClient() + if err := client.Memory.Create(). + SetTimestamp(data.Timestamp). + SetUsage(data.Usage).Exec(ctx); err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/net/net.go b/pkg/collector/check/realtime/net/net.go similarity index 62% rename from pkg/collector/check/net/net.go rename to pkg/collector/check/realtime/net/net.go index 5a672f8..5c1d60e 100644 --- a/pkg/collector/check/net/net.go +++ b/pkg/collector/check/realtime/net/net.go @@ -5,32 +5,34 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/net" ) -const ( - checkType = base.NET -) - type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer) *Check { +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer), + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), } } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + ioCounters, err := c.collectIOCounters() interfaces, _ := c.collectInterfaces() + if 
err != nil { + checkError.CollectError = err + } metric := base.MetricData{ - Type: checkType, + Type: base.NET, Data: []base.CheckResult{}, } - if err == nil { + if checkError.CollectError == nil { for _, ioCounter := range ioCounters { if _, ok := interfaces[ioCounter.Name]; ok { data := base.CheckResult{ @@ -44,6 +46,10 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = append(metric.Data, data) } } + + if err := c.saveTraffic(ctx, metric.Data); err != nil { + checkError.QueryError = err + } } if ctx.Err() != nil { @@ -51,7 +57,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.QueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -84,3 +90,20 @@ func (c *Check) collectIOCounters() ([]net.IOCountersStat, error) { return ioCounters, nil } + +func (c *Check) saveTraffic(ctx context.Context, data []base.CheckResult) error { + client := c.GetClient() + err := client.Traffic.MapCreateBulk(data, func(q *ent.TrafficCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetName(data[i].Name). + SetInputPkts(int64(data[i].InputPkts)). + SetInputBytes(int64(data[i].InputBytes)). + SetOutputPkts(int64(data[i].OutputPkts)). + SetOutputBytes(int64(data[i].OutputBytes)) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} From 9ababc2bebe5f973d16f9f2d607387df30bb97e1 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 16:04:31 +0900 Subject: [PATCH 062/364] Add hourly batch check Add hourly batch checks for metrics. 
--- pkg/collector/check/base/base.go | 77 +++++++++++---- pkg/collector/check/batch/hourly/cpu/cpu.go | 75 ++++++++++++++ .../check/batch/hourly/disk/io/io.go | 86 ++++++++++++++++ .../check/batch/hourly/disk/usage/usage.go | 82 ++++++++++++++++ .../check/batch/hourly/memory/memory.go | 75 ++++++++++++++ pkg/collector/check/batch/hourly/net/net.go | 98 +++++++++++++++++++ 6 files changed, 473 insertions(+), 20 deletions(-) create mode 100644 pkg/collector/check/batch/hourly/cpu/cpu.go create mode 100644 pkg/collector/check/batch/hourly/disk/io/io.go create mode 100644 pkg/collector/check/batch/hourly/disk/usage/usage.go create mode 100644 pkg/collector/check/batch/hourly/memory/memory.go create mode 100644 pkg/collector/check/batch/hourly/net/net.go diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index e7048b2..62d4e2e 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -2,33 +2,64 @@ package base import ( "time" + + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" ) const ( - CPU CheckType = "cpu" - MEM CheckType = "memory" - DISK_USAGE CheckType = "disk_usage" - DISK_IO CheckType = "disk_io" - NET CheckType = "net" + CPU CheckType = "cpu" + CPU_PER_HOUR CheckType = "cpu_per_hour" + CPU_PER_DAY CheckType = "cpu_per_day" + MEM CheckType = "memory" + MEM_PER_HOUR CheckType = "memory_per_hour" + MEM_PER_DAY CheckType = "memory_per_day" + DISK_USAGE CheckType = "disk_usage" + DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" + DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" + DISK_IO CheckType = "disk_io" + DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" + DISK_IO_PER_DAY CheckType = "disk_io_per_day" + NET CheckType = "net" + NET_PER_HOUR CheckType = "net_per_hour" + NET_PER_DAY CheckType = "net_per_day" ) type CheckType string type CheckResult struct { - Timestamp time.Time `json:"timestamp"` - Usage float64 `json:"usage,omitempty"` - Name string `json:"name,omitempty"` - Device string 
`json:"device,omitempty"` - MountPoint string `json:"mount_point,omitempty"` - Total uint64 `json:"total,omitempty"` - Free uint64 `json:"free,omitempty"` - Used uint64 `json:"used,omitempty"` - WriteBytes uint64 `json:"write_bytes,omitempty"` - ReadBytes uint64 `json:"read_bytes,omitempty"` - InputPkts uint64 `json:"input_pkts,omitempty"` - InputBytes uint64 `json:"input_bytes,omitempty"` - OutputPkts uint64 `json:"output_pkts,omitempty"` - OutputBytes uint64 `json:"output_bytes,omitempty"` + Timestamp time.Time `json:"timestamp"` + Usage float64 `json:"usage,omitempty"` + Name string `json:"name,omitempty"` + Device string `json:"device,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + Total uint64 `json:"total,omitempty"` + Free uint64 `json:"free,omitempty"` + Used uint64 `json:"used,omitempty"` + WriteBytes uint64 `json:"write_bytes,omitempty"` + ReadBytes uint64 `json:"read_bytes,omitempty"` + InputPkts uint64 `json:"input_pkts,omitempty"` + InputBytes uint64 `json:"input_bytes,omitempty"` + OutputPkts uint64 `json:"output_pkts,omitempty"` + OutputBytes uint64 `json:"output_bytes,omitempty"` + PeakUsage float64 `json:"peak_usage,omitempty"` + AvgUsage float64 `json:"avg_usage,omitempty"` + PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` + PeakReadBytes uint64 `json:"peak_read_bytes,omitempty"` + AvgWriteBytes uint64 `json:"avg_write_bytes,omitempty"` + AvgReadBytes uint64 `json:"avg_read_bytes,omitempty"` + PeakInputPkts uint64 `json:"peak_input_pkts,omitempty"` + PeakInputBytes uint64 `json:"peak_input_bytes,omitempty"` + PeakOutputPkts uint64 `json:"peak_output_pkts,omitempty"` + PeakOutputBytes uint64 `json:"peak_output_bytes,omitempty"` + AvgInputPkts uint64 `json:"avg_input_pkts,omitempty"` + AvgInputBytes uint64 `json:"avg_input_bytes,omitempty"` + AvgOutputPkts uint64 `json:"avg_output_pkts,omitempty"` + AvgOutputBytes uint64 `json:"avg_output_bytes,omitempty"` +} + +type CheckError struct { + CollectError error + QueryError 
error } type MetricData struct { @@ -40,6 +71,7 @@ type BaseCheck struct { name string interval time.Duration buffer *CheckBuffer + client *ent.Client } type CheckBuffer struct { @@ -48,11 +80,12 @@ type CheckBuffer struct { Capacity int } -func NewBaseCheck(name string, interval time.Duration, buffer *CheckBuffer) BaseCheck { +func NewBaseCheck(name string, interval time.Duration, buffer *CheckBuffer, client *ent.Client) BaseCheck { return BaseCheck{ name: name, interval: interval, buffer: buffer, + client: client, } } @@ -75,3 +108,7 @@ func (c *BaseCheck) GetInterval() time.Duration { func (c *BaseCheck) GetBuffer() *CheckBuffer { return c.buffer } + +func (c *BaseCheck) GetClient() *ent.Client { + return c.client +} diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go new file mode 100644 index 0000000..65b735a --- /dev/null +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -0,0 +1,75 @@ +package cpu + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +type cpuQuerySet struct { + Max float64 + AVG float64 +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + queryset, err := c.getCPUPeakAndAvg(ctx) + metric := base.MetricData{ + Type: base.CPU_PER_HOUR, + Data: []base.CheckResult{}, + } + + if err == nil { + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } + metric.Data = append(metric.Data, data) + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } 
else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getCPUPeakAndAvg(ctx context.Context) ([]cpuQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + var queryset []cpuQuerySet + err := client.CPU.Query(). + Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)). + Aggregate( + ent.Max(cpu.FieldUsage), + ent.Mean(cpu.FieldUsage), + ). + Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go new file mode 100644 index 0000000..eab1d31 --- /dev/null +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -0,0 +1,86 @@ +package io + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +type diskIOQuerySet struct { + Device string `json:"device" db:"device"` + PeakReadBytes float64 `json:"peak_read_bytes"` + PeakWriteBytes float64 `json:"peak_write_bytes"` + AvgReadBytes float64 `json:"avg_read_bytes"` + AvgWriteBytes float64 `json:"avg_write_bytes"` +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + queryset, err := c.getDiskIOPeakAndAvg(ctx) + metric := base.MetricData{ + Type: base.DISK_IO_PER_HOUR, + Data: []base.CheckResult{}, + } + + if err == nil { + for _, row := range queryset { + data := base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + PeakWriteBytes: uint64(row.PeakWriteBytes), + PeakReadBytes: uint64(row.PeakReadBytes), + AvgWriteBytes: uint64(row.AvgWriteBytes), + 
AvgReadBytes: uint64(row.AvgReadBytes), + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getDiskIOPeakAndAvg(ctx context.Context) ([]diskIOQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + var queryset []diskIOQuerySet + err := client.DiskIO.Query(). + Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). + GroupBy(diskio.FieldDevice). + Aggregate( + ent.As(ent.Max(diskio.FieldReadBytes), "peak_read_bytes"), + ent.As(ent.Max(diskio.FieldWriteBytes), "peak_write_bytes"), + ent.As(ent.Mean(diskio.FieldReadBytes), "avg_read_bytes"), + ent.As(ent.Mean(diskio.FieldWriteBytes), "avg_write_bytes"), + ). + Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go new file mode 100644 index 0000000..d553c4e --- /dev/null +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -0,0 +1,82 @@ +package usage + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +type diskUsageQuerySet struct { + Device string `json:"device"` + MountPoint string `json:"mount_point"` + Max float64 `json:"max"` + AVG float64 `json:"avg"` +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + queryset, err := 
c.getDiskUsagePeakAndAvg(ctx) + metric := base.MetricData{ + Type: base.DISK_USAGE_PER_HOUR, + Data: []base.CheckResult{}, + } + + if err == nil { + for _, row := range queryset { + data := base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + MountPoint: row.MountPoint, + PeakUsage: row.Max, + AvgUsage: row.AVG, + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getDiskUsagePeakAndAvg(ctx context.Context) ([]diskUsageQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + var queryset []diskUsageQuerySet + err := client.DiskUsage.Query(). + Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)). + GroupBy(diskusage.FieldDevice, diskusage.FieldMountPoint). + Aggregate( + ent.Max(diskusage.FieldUsage), + ent.Mean(diskusage.FieldUsage), + ). 
+ Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go new file mode 100644 index 0000000..dff3255 --- /dev/null +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -0,0 +1,75 @@ +package memory + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +type memoryQuerySet struct { + Max float64 + AVG float64 +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + queryset, err := c.getMemoryPeakAndAvg(ctx) + metric := base.MetricData{ + Type: base.MEM_PER_HOUR, + Data: []base.CheckResult{}, + } + + if err == nil { + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } + metric.Data = append(metric.Data, data) + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getMemoryPeakAndAvg(ctx context.Context) ([]memoryQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + var queryset []memoryQuerySet + err := client.Memory.Query(). + Where(memory.TimestampGTE(from), memory.TimestampLTE(now)). + Aggregate( + ent.Max(memory.FieldUsage), + ent.Mean(memory.FieldUsage), + ). 
+ Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go new file mode 100644 index 0000000..5e4e357 --- /dev/null +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -0,0 +1,98 @@ +package net + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +type trafficQuerySet struct { + Name string `json:"name"` + PeakInputPkts float64 `json:"peak_input_pkts"` + PeakInputBytes float64 `json:"peak_input_bytes"` + PeakOutputPkts float64 `json:"peak_output_pkts"` + PeakOutputBytes float64 `json:"peak_output_bytes"` + AvgInputPkts float64 `json:"avg_input_pkts"` + AvgInputBytes float64 `json:"avg_input_bytes"` + AvgOutputPkts float64 `json:"avg_output_pkts"` + AvgOutputBytes float64 `json:"avg_output_bytes"` +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + queryset, err := c.getTrafficPeakAndAvg(ctx) + metric := base.MetricData{ + Type: base.NET_PER_HOUR, + Data: []base.CheckResult{}, + } + + if err == nil { + for _, row := range queryset { + data := base.CheckResult{ + Timestamp: time.Now(), + Name: row.Name, + PeakInputPkts: uint64(row.PeakInputPkts), + PeakInputBytes: uint64(row.PeakInputBytes), + PeakOutputPkts: uint64(row.PeakOutputPkts), + PeakOutputBytes: uint64(row.PeakOutputBytes), + AvgInputPkts: uint64(row.AvgInputPkts), + AvgInputBytes: uint64(row.AvgInputBytes), + AvgOutputPkts: uint64(row.AvgOutputPkts), + AvgOutputBytes: uint64(row.AvgOutputBytes), + } + 
metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if err != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getTrafficPeakAndAvg(ctx context.Context) ([]trafficQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + var queryset []trafficQuerySet + err := client.Traffic.Query(). + Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)). + GroupBy(traffic.FieldName). + Aggregate( + ent.As(ent.Max(traffic.FieldInputPkts), "peak_input_pkts"), + ent.As(ent.Max(traffic.FieldInputBytes), "peak_input_bytes"), + ent.As(ent.Max(traffic.FieldOutputPkts), "peak_output_pkts"), + ent.As(ent.Max(traffic.FieldOutputBytes), "peak_output_bytes"), + ent.As(ent.Mean(traffic.FieldInputPkts), "avg_input_pkts"), + ent.As(ent.Mean(traffic.FieldInputBytes), "avg_input_bytes"), + ent.As(ent.Mean(traffic.FieldOutputPkts), "avg_output_pkts"), + ent.As(ent.Mean(traffic.FieldOutputBytes), "avg_output_bytes"), + ). + Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} From 50ffa3fc04f26fc728e76d6a6f9c3662f449e196 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 16:06:48 +0900 Subject: [PATCH 063/364] Add DB client connection logic. Add DB client creation logic upon alpamon startup. 
--- cmd/alpamon/command/root.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 83ac068..a0a5531 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -10,6 +10,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check" "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/db" "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/pidfile" "github.com/alpacanetworks/alpamon-go/pkg/runner" @@ -63,18 +64,30 @@ func runAgent() { // Commit runner.CommitAsync(session, commissioned) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client, err := db.GetClient() + if err != nil { + log.Error().Err(err).Msg("Failed to open database client") + return + } + defer db.Close() + + if err := db.RunMigration(ctx, client); err != nil { + log.Error().Err(err).Msg("Failed to migrate schema") + return + } + checkFactory := &check.DefaultCheckFactory{} transporterFactory := &transporter.DefaultTransporterFactory{} - collector, err := collector.NewCollector(session, checkFactory, transporterFactory) + collector, err := collector.NewCollector(session, client, checkFactory, transporterFactory) if err != nil { log.Error().Err(err).Msg("Failed to create collector") return } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - if err := collector.Start(ctx); err != nil { log.Error().Err(err).Msg("Failed to start collector") return From 2b14e5fee7a0ace40dd1f1d791ae7107661ebedf Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 16:10:33 +0900 Subject: [PATCH 064/364] Update according to changes Update to incorporate ent package and additional batch checks. 
--- pkg/collector/check/check.go | 41 +++++++++++++++++------- pkg/collector/collector.go | 20 ++++++++---- pkg/collector/transporter/transporter.go | 30 ++++++++--------- 3 files changed, 57 insertions(+), 34 deletions(-) diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index ace3121..a5d52eb 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -6,11 +6,17 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/cpu" - diskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/disk/io" - diskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/disk/usage" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/memory" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/net" + cpuhourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/cpu" + diskiohourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/io" + diskusagehourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/usage" + memoryhourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/memory" + nethourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/net" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/cpu" + diskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/io" + diskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/usage" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/memory" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/net" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" ) type CheckStrategy interface { @@ -18,26 +24,37 @@ type CheckStrategy interface { GetInterval() time.Duration GetName() string GetBuffer() *base.CheckBuffer + GetClient() *ent.Client } type CheckFactory interface { - 
CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer) (CheckStrategy, error) + CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) (CheckStrategy, error) } type DefaultCheckFactory struct{} -func (f *DefaultCheckFactory) CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer) (CheckStrategy, error) { +func (f *DefaultCheckFactory) CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) (CheckStrategy, error) { switch checkType { case base.CPU: - return cpu.NewCheck(name, interval, buffer), nil + return cpu.NewCheck(name, interval, buffer, client), nil + case base.CPU_PER_HOUR: + return cpuhourly.NewCheck(name, interval, buffer, client), nil case base.MEM: - return memory.NewCheck(name, interval, buffer), nil + return memory.NewCheck(name, interval, buffer, client), nil + case base.MEM_PER_HOUR: + return memoryhourly.NewCheck(name, interval, buffer, client), nil case base.DISK_USAGE: - return diskusage.NewCheck(name, interval, buffer), nil + return diskusage.NewCheck(name, interval, buffer, client), nil + case base.DISK_USAGE_PER_HOUR: + return diskusagehourly.NewCheck(name, interval, buffer, client), nil case base.DISK_IO: - return diskio.NewCheck(name, interval, buffer), nil + return diskio.NewCheck(name, interval, buffer, client), nil + case base.DISK_IO_PER_HOUR: + return diskiohourly.NewCheck(name, interval, buffer, client), nil case base.NET: - return net.NewCheck(name, interval, buffer), nil + return net.NewCheck(name, interval, buffer, client), nil + case base.NET_PER_HOUR: + return nethourly.NewCheck(name, interval, buffer, client), nil default: return nil, fmt.Errorf("unknown check type: %s", checkType) } diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 5edafdd..12be488 100644 --- a/pkg/collector/collector.go 
+++ b/pkg/collector/collector.go @@ -10,6 +10,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/collector/scheduler" "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" session "github.com/alpacanetworks/alpamon-go/pkg/scheduler" ) @@ -22,7 +23,7 @@ type Collector struct { stopChan chan struct{} } -func NewCollector(session *session.Session, checkFactory check.CheckFactory, transporterFactory transporter.TransporterFactory) (*Collector, error) { +func NewCollector(session *session.Session, client *ent.Client, checkFactory check.CheckFactory, transporterFactory transporter.TransporterFactory) (*Collector, error) { transporter, err := transporterFactory.CreateTransporter(session) if err != nil { return nil, err @@ -39,14 +40,19 @@ func NewCollector(session *session.Session, checkFactory check.CheckFactory, tra } checkTypes := map[base.CheckType]string{ - base.CPU: "cpu", - base.MEM: "memory", - base.DISK_USAGE: "disk_usage", - base.DISK_IO: "disk_io", - base.NET: "net", + base.CPU: "cpu", + base.MEM: "memory", + base.DISK_USAGE: "disk_usage", + base.DISK_IO: "disk_io", + base.NET: "net", + base.CPU_PER_HOUR: "cpu_per_hour", + base.MEM_PER_HOUR: "memory_per_hour", + base.DISK_USAGE_PER_HOUR: "disk_usage_per_hour", + base.DISK_IO_PER_HOUR: "disk_io_per_hour", + base.NET_PER_HOUR: "net_per_hour", } for checkType, name := range checkTypes { - check, err := checkFactory.CreateCheck(checkType, name, time.Duration(time.Duration.Seconds(5)), checkBuffer) + check, err := checkFactory.CreateCheck(checkType, name, time.Duration(time.Duration.Seconds(5)), checkBuffer, client) if err != nil { return nil, err } diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index df61e57..873abbd 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -8,11 +8,16 @@ 
import ( ) var checkTypeUrlMap = map[base.CheckType]string{ - base.CPU: "/api/metrics/cpu/", - base.MEM: "/api/metrics/memory/", - base.DISK_USAGE: "/api/metrics/disk-usage/", - base.DISK_IO: "/api/metrics/disk-io/", - base.NET: "/api/metrics/traffic/", + base.CPU: "/api/metrics/realtime/cpu/", + base.CPU_PER_HOUR: "/api/metrics/hourly/cpu/", + base.MEM: "/api/metrics/realtime/memory/", + base.MEM_PER_HOUR: "/api/metrics/hourly/memory/", + base.DISK_USAGE: "/api/metrics/realtime/disk-usage/", + base.DISK_USAGE_PER_HOUR: "/api/metrics/hourly/disk-usage/", + base.DISK_IO: "/api/metrics/realtime/disk-io/", + base.DISK_IO_PER_HOUR: "/api/metrics/hourly/disk-io/", + base.NET: "/api/metrics/realtime/traffic/", + base.NET_PER_HOUR: "/api/metrics/hourly/traffic/", } type TransportStrategy interface { @@ -45,16 +50,11 @@ func (t *Transporter) Send(data base.MetricData) error { var err error switch checkType { - case base.CPU: - _, _, err = t.session.Post(checkTypeUrlMap[base.CPU], data.Data[0], 10) - case base.MEM: - _, _, err = t.session.Post(checkTypeUrlMap[base.MEM], data.Data[0], 10) - case base.DISK_USAGE: - _, _, err = t.session.Post(checkTypeUrlMap[base.DISK_USAGE], data.Data, 10) - case base.DISK_IO: - _, _, err = t.session.Post(checkTypeUrlMap[base.DISK_IO], data.Data, 10) - case base.NET: - _, _, err = t.session.Post(checkTypeUrlMap[base.NET], data.Data, 10) + case base.CPU, base.CPU_PER_HOUR, base.MEM, base.MEM_PER_HOUR: + _, _, err = t.session.Post(checkTypeUrlMap[checkType], data.Data[0], 10) + case base.DISK_USAGE, base.DISK_USAGE_PER_HOUR, base.DISK_IO, + base.DISK_IO_PER_HOUR, base.NET, base.NET_PER_HOUR: + _, _, err = t.session.Post(checkTypeUrlMap[checkType], data.Data, 10) default: err = fmt.Errorf("unknown check type: %s", checkType) } From 340d8830fcc4964ea197039c4716937860f968ce Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 16:12:12 +0900 Subject: [PATCH 065/364] Add .gitignore Add .gitignore to prevent pushing code generated by ent 
package. --- .gitignore | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e34047a --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +/pkg/db/ent/* +!/pkg/db/ent/generate.go +alpamon.log +metrics.db +.DS_Store \ No newline at end of file From e61e1286d6206ab01b06ca6ab32d73a8a260aaf9 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 22 Nov 2024 16:14:31 +0900 Subject: [PATCH 066/364] Update .goreleaser.yaml Add a command to before.hooks to generate ent-related code before building. --- .goreleaser.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 7b3580f..8737e97 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -4,6 +4,7 @@ before: hooks: - go mod tidy - go generate ./... + - go run -mod=mod entgo.io/ent/cmd/ent generate --target ./pkg/db/ent ./pkg/db/schema builds: - main: ./cmd/alpamon From 7518641204eed04feaa045a54c1fa9d8cda3153a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 25 Nov 2024 10:55:26 +0900 Subject: [PATCH 067/364] Add save function to save hourly batch metric Add save functions to store hourly batched metrics for each metric. 
--- pkg/collector/check/batch/hourly/cpu/cpu.go | 27 ++++++++++++++-- .../check/batch/hourly/disk/io/io.go | 32 +++++++++++++++++-- .../check/batch/hourly/disk/usage/usage.go | 31 ++++++++++++++++-- .../check/batch/hourly/memory/memory.go | 27 ++++++++++++++-- 4 files changed, 105 insertions(+), 12 deletions(-) diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index 65b735a..ad9cfcf 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -26,19 +26,28 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + queryset, err := c.getCPUPeakAndAvg(ctx) + if err != nil { + checkError.GetQueryError = err + } + metric := base.MetricData{ Type: base.CPU_PER_HOUR, Data: []base.CheckResult{}, } - - if err == nil { + if checkError.GetQueryError == nil { data := base.CheckResult{ Timestamp: time.Now(), PeakUsage: queryset[0].Max, AvgUsage: queryset[0].AVG, } metric.Data = append(metric.Data, data) + + if err := c.saveCPUPeakAndAvg(ctx, data); err != nil { + checkError.SaveQueryError = err + } } if ctx.Err() != nil { @@ -46,7 +55,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -73,3 +82,15 @@ func (c *Check) getCPUPeakAndAvg(ctx context.Context) ([]cpuQuerySet, error) { return queryset, nil } + +func (c *Check) saveCPUPeakAndAvg(ctx context.Context, data base.CheckResult) error { + client := c.GetClient() + if err := client.CPUPerHour.Create(). + SetTimestamp(data.Timestamp). + SetPeakUsage(data.PeakUsage). 
+ SetAvgUsage(data.AvgUsage).Exec(ctx); err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index eab1d31..34b3df5 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -29,13 +29,18 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + queryset, err := c.getDiskIOPeakAndAvg(ctx) + if err != nil { + checkError.GetQueryError = err + } + metric := base.MetricData{ Type: base.DISK_IO_PER_HOUR, Data: []base.CheckResult{}, } - - if err == nil { + if checkError.GetQueryError == nil { for _, row := range queryset { data := base.CheckResult{ Timestamp: time.Now(), @@ -47,6 +52,10 @@ func (c *Check) Execute(ctx context.Context) { } metric.Data = append(metric.Data, data) } + + if err := c.saveDiskIOPeakAndAvg(ctx, metric.Data); err != nil { + checkError.SaveQueryError = err + } } if ctx.Err() != nil { @@ -54,7 +63,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -84,3 +93,20 @@ func (c *Check) getDiskIOPeakAndAvg(ctx context.Context) ([]diskIOQuerySet, erro return queryset, nil } + +func (c *Check) saveDiskIOPeakAndAvg(ctx context.Context, data []base.CheckResult) error { + client := c.GetClient() + err := client.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetDevice(data[i].Device). + SetPeakReadBytes(int64(data[i].PeakReadBytes)). + SetPeakWriteBytes(int64(data[i].PeakWriteBytes)). + SetAvgReadBytes(int64(data[i].AvgReadBytes)). 
+ SetAvgWriteBytes(int64(data[i].AvgWriteBytes)) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index d553c4e..8a4d2a1 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -28,13 +28,18 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + queryset, err := c.getDiskUsagePeakAndAvg(ctx) + if err != nil { + checkError.GetQueryError = err + } + metric := base.MetricData{ Type: base.DISK_USAGE_PER_HOUR, Data: []base.CheckResult{}, } - - if err == nil { + if checkError.GetQueryError == nil { for _, row := range queryset { data := base.CheckResult{ Timestamp: time.Now(), @@ -45,6 +50,10 @@ func (c *Check) Execute(ctx context.Context) { } metric.Data = append(metric.Data, data) } + + if err := c.saveDiskUsagePeakAndAvg(ctx, metric.Data); err != nil { + checkError.SaveQueryError = err + } } if ctx.Err() != nil { @@ -52,7 +61,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -80,3 +89,19 @@ func (c *Check) getDiskUsagePeakAndAvg(ctx context.Context) ([]diskUsageQuerySet return queryset, nil } + +func (c *Check) saveDiskUsagePeakAndAvg(ctx context.Context, data []base.CheckResult) error { + client := c.GetClient() + err := client.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetDevice(data[i].Device). + SetMountPoint(data[i].MountPoint). + SetPeakUsage(data[i].PeakUsage). 
+ SetAvgUsage(data[i].AvgUsage) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index dff3255..332b2ca 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -26,19 +26,28 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + queryset, err := c.getMemoryPeakAndAvg(ctx) + if err != nil { + checkError.GetQueryError = err + } + metric := base.MetricData{ Type: base.MEM_PER_HOUR, Data: []base.CheckResult{}, } - - if err == nil { + if checkError.GetQueryError == nil { data := base.CheckResult{ Timestamp: time.Now(), PeakUsage: queryset[0].Max, AvgUsage: queryset[0].AVG, } metric.Data = append(metric.Data, data) + + if err := c.saveMemoryPeakAndAvg(ctx, data); err != nil { + checkError.SaveQueryError = err + } } if ctx.Err() != nil { @@ -46,7 +55,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -73,3 +82,15 @@ func (c *Check) getMemoryPeakAndAvg(ctx context.Context) ([]memoryQuerySet, erro return queryset, nil } + +func (c *Check) saveMemoryPeakAndAvg(ctx context.Context, data base.CheckResult) error { + client := c.GetClient() + if err := client.MemoryPerHour.Create(). + SetTimestamp(data.Timestamp). + SetPeakUsage(data.PeakUsage). 
+ SetAvgUsage(data.AvgUsage).Exec(ctx); err != nil { + return err + } + + return nil +} From 56aa822373635ee00814b7f869cdd3a2bf3cbb51 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 25 Nov 2024 11:05:32 +0900 Subject: [PATCH 068/364] Refactor CheckError Refactor CheckError to handle errors for each query type as the number of query types increased. Update the existing realtime checks accordingly. --- pkg/collector/check/base/base.go | 6 ++-- pkg/collector/check/batch/hourly/net/net.go | 36 +++++++++++++++++-- pkg/collector/check/realtime/cpu/cpu.go | 4 +-- pkg/collector/check/realtime/disk/io/io.go | 4 +-- .../check/realtime/disk/usage/usage.go | 4 +-- pkg/collector/check/realtime/memory/memory.go | 11 ++---- pkg/collector/check/realtime/net/net.go | 4 +-- 7 files changed, 48 insertions(+), 21 deletions(-) diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index 62d4e2e..7f145f5 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -58,8 +58,10 @@ type CheckResult struct { } type CheckError struct { - CollectError error - QueryError error + CollectError error + GetQueryError error + SaveQueryError error + DeleteQueryError error } type MetricData struct { diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 5e4e357..faae984 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -33,13 +33,18 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli } func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + queryset, err := c.getTrafficPeakAndAvg(ctx) + if err != nil { + checkError.GetQueryError = err + } + metric := base.MetricData{ Type: base.NET_PER_HOUR, Data: []base.CheckResult{}, } - - if err == nil { + if checkError.GetQueryError == nil { for _, row := range queryset { data := base.CheckResult{ Timestamp: time.Now(), @@ -55,6 +60,10 @@ 
func (c *Check) Execute(ctx context.Context) { } metric.Data = append(metric.Data, data) } + + if err := c.saveTrafficPeakAndAvg(ctx, metric.Data); err != nil { + checkError.SaveQueryError = err + } } if ctx.Err() != nil { @@ -62,7 +71,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if err != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -96,3 +105,24 @@ func (c *Check) getTrafficPeakAndAvg(ctx context.Context) ([]trafficQuerySet, er return queryset, nil } + +func (c *Check) saveTrafficPeakAndAvg(ctx context.Context, data []base.CheckResult) error { + client := c.GetClient() + err := client.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetName(data[i].Name). + SetPeakInputPkts(int64(data[i].PeakInputPkts)). + SetPeakInputBytes(int64(data[i].PeakInputBytes)). + SetPeakOutputPkts(int64(data[i].PeakOutputPkts)). + SetPeakOutputBytes(int64(data[i].PeakOutputBytes)). + SetAvgInputPkts(int64(data[i].AvgInputPkts)). + SetAvgInputBytes(int64(data[i].AvgInputBytes)). + SetAvgOutputPkts(int64(data[i].AvgOutputPkts)). 
+ SetAvgOutputBytes(int64(data[i].AvgOutputBytes)) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/realtime/cpu/cpu.go b/pkg/collector/check/realtime/cpu/cpu.go index 9b5c372..6942211 100644 --- a/pkg/collector/check/realtime/cpu/cpu.go +++ b/pkg/collector/check/realtime/cpu/cpu.go @@ -40,7 +40,7 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = append(metric.Data, data) if err := c.saveCPUUsage(ctx, data); err != nil { - checkError.QueryError = err + checkError.SaveQueryError = err } } @@ -49,7 +49,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.QueryError != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric diff --git a/pkg/collector/check/realtime/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go index 44abd95..5f6be9a 100644 --- a/pkg/collector/check/realtime/disk/io/io.go +++ b/pkg/collector/check/realtime/disk/io/io.go @@ -43,7 +43,7 @@ func (c *Check) Execute(ctx context.Context) { } if err := c.saveDiskIO(ctx, metric.Data); err != nil { - checkError.QueryError = err + checkError.SaveQueryError = err } } @@ -52,7 +52,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.QueryError != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index 3fc5631..ded1a48 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -58,7 +58,7 @@ func (c *Check) Execute(ctx context.Context) { } if err := c.saveDiskUsage(ctx, metric.Data); err != nil { - checkError.QueryError = err + 
checkError.SaveQueryError = err } } @@ -67,7 +67,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.QueryError != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric diff --git a/pkg/collector/check/realtime/memory/memory.go b/pkg/collector/check/realtime/memory/memory.go index 96a6f44..51f4d73 100644 --- a/pkg/collector/check/realtime/memory/memory.go +++ b/pkg/collector/check/realtime/memory/memory.go @@ -13,11 +13,6 @@ type Check struct { base.BaseCheck } -type MemoryCheckError struct { - CollectError error - QueryError error -} - func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ BaseCheck: base.NewBaseCheck(name, interval, buffer, client), @@ -25,7 +20,7 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli } func (c *Check) Execute(ctx context.Context) { - var checkError MemoryCheckError + var checkError base.CheckError usage, err := c.collectMemoryUsage() if err != nil { @@ -44,7 +39,7 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = append(metric.Data, data) if err := c.saveMemoryUsage(ctx, data); err != nil { - checkError.QueryError = err + checkError.SaveQueryError = err } } @@ -53,7 +48,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.QueryError != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric diff --git a/pkg/collector/check/realtime/net/net.go b/pkg/collector/check/realtime/net/net.go index 5c1d60e..bfa74a1 100644 --- a/pkg/collector/check/realtime/net/net.go +++ b/pkg/collector/check/realtime/net/net.go @@ -48,7 +48,7 @@ func (c *Check) Execute(ctx context.Context) { } if err := 
c.saveTraffic(ctx, metric.Data); err != nil { - checkError.QueryError = err + checkError.SaveQueryError = err } } @@ -57,7 +57,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.QueryError != nil { + if checkError.CollectError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric From f5f10eab005890b848199aca083b970b0f6336b7 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 25 Nov 2024 14:25:40 +0900 Subject: [PATCH 069/364] Add daily batch checks Add checks for batching each metric's data daily. --- pkg/collector/check/batch/daily/cpu/cpu.go | 75 +++++++++++++++ pkg/collector/check/batch/daily/disk/io/io.go | 83 +++++++++++++++++ .../check/batch/daily/disk/usage/usage.go | 80 ++++++++++++++++ .../check/batch/daily/memory/memory.go | 75 +++++++++++++++ pkg/collector/check/batch/daily/net/net.go | 91 +++++++++++++++++++ pkg/collector/check/check.go | 15 +++ pkg/collector/collector.go | 5 + pkg/collector/transporter/transporter.go | 13 ++- 8 files changed, 434 insertions(+), 3 deletions(-) create mode 100644 pkg/collector/check/batch/daily/cpu/cpu.go create mode 100644 pkg/collector/check/batch/daily/disk/io/io.go create mode 100644 pkg/collector/check/batch/daily/disk/usage/usage.go create mode 100644 pkg/collector/check/batch/daily/memory/memory.go create mode 100644 pkg/collector/check/batch/daily/net/net.go diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go new file mode 100644 index 0000000..514cb64 --- /dev/null +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -0,0 +1,75 @@ +package cpu + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpuperhour" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + 
+func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + + queryset, err := c.getCPUPerHour(ctx) + if err != nil { + checkError.GetQueryError = err + } + + metric := base.MetricData{ + Type: base.CPU_PER_DAY, + Data: []base.CheckResult{}, + } + if checkError.GetQueryError == nil { + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } + metric.Data = append(metric.Data, data) + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if checkError.GetQueryError != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + var queryset []base.CPUQuerySet + err := client.CPUPerHour.Query(). + Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)). + Aggregate( + ent.Max(cpuperhour.FieldPeakUsage), + ent.Mean(cpuperhour.FieldAvgUsage), + ). 
+ Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go new file mode 100644 index 0000000..48e235b --- /dev/null +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -0,0 +1,83 @@ +package io + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskioperhour" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + + queryset, err := c.getDiskIOPerHour(ctx) + if err != nil { + checkError.GetQueryError = err + } + + metric := base.MetricData{ + Type: base.DISK_IO_PER_DAY, + Data: []base.CheckResult{}, + } + if checkError.GetQueryError == nil { + for _, row := range queryset { + data := base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + PeakWriteBytes: uint64(row.PeakWriteBytes), + PeakReadBytes: uint64(row.PeakReadBytes), + AvgWriteBytes: uint64(row.AvgWriteBytes), + AvgReadBytes: uint64(row.AvgReadBytes), + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if checkError.GetQueryError != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + var queryset []base.DiskIOQuerySet + err := client.DiskIOPerHour.Query(). 
+ Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)). + GroupBy(diskioperhour.FieldDevice). + Aggregate( + ent.As(ent.Max(diskioperhour.FieldPeakReadBytes), "peak_read_bytes"), + ent.As(ent.Max(diskioperhour.FieldPeakWriteBytes), "peak_write_bytes"), + ent.As(ent.Mean(diskioperhour.FieldAvgReadBytes), "avg_read_bytes"), + ent.As(ent.Mean(diskioperhour.FieldAvgWriteBytes), "avg_write_bytes"), + ). + Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go new file mode 100644 index 0000000..83574e5 --- /dev/null +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -0,0 +1,80 @@ +package usage + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusageperhour" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + + queryset, err := c.getDiskUsagePerHour(ctx) + if err != nil { + checkError.GetQueryError = err + } + + metric := base.MetricData{ + Type: base.DISK_USAGE_PER_DAY, + Data: []base.CheckResult{}, + } + if checkError.GetQueryError == nil { + for _, row := range queryset { + data := base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + MountPoint: row.MountPoint, + PeakUsage: row.Max, + AvgUsage: row.AVG, + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if checkError.GetQueryError != nil { + buffer.FailureQueue <- 
metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + var queryset []base.DiskUsageQuerySet + err := client.DiskUsagePerHour.Query(). + Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). + GroupBy(diskusageperhour.FieldDevice, diskusageperhour.FieldMountPoint). + Aggregate( + ent.Max(diskusageperhour.FieldPeakUsage), + ent.Mean(diskusageperhour.FieldAvgUsage), + ). + Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go new file mode 100644 index 0000000..95279a6 --- /dev/null +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -0,0 +1,75 @@ +package memory + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memoryperhour" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + + queryset, err := c.getMemoryPerHour(ctx) + if err != nil { + checkError.GetQueryError = err + } + + metric := base.MetricData{ + Type: base.MEM_PER_DAY, + Data: []base.CheckResult{}, + } + if checkError.GetQueryError == nil { + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } + metric.Data = append(metric.Data, data) + } + + if ctx.Err() != nil { + return + } + + buffer := 
c.GetBuffer() + if checkError.GetQueryError != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + var queryset []base.MemoryQuerySet + err := client.MemoryPerHour.Query(). + Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)). + Aggregate( + ent.Max(memoryperhour.FieldPeakUsage), + ent.Mean(memoryperhour.FieldAvgUsage), + ). + Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go new file mode 100644 index 0000000..10836e4 --- /dev/null +++ b/pkg/collector/check/batch/daily/net/net.go @@ -0,0 +1,91 @@ +package net + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/trafficperhour" + "github.com/rs/zerolog/log" +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { + return &Check{ + BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + } +} + +func (c *Check) Execute(ctx context.Context) { + var checkError base.CheckError + + queryset, err := c.getTrafficPerHour(ctx) + if err != nil { + checkError.GetQueryError = err + } + + metric := base.MetricData{ + Type: base.NET_PER_DAY, + Data: []base.CheckResult{}, + } + if checkError.GetQueryError == nil { + for _, row := range queryset { + data := base.CheckResult{ + Timestamp: time.Now(), + Name: row.Name, + PeakInputPkts: uint64(row.PeakInputPkts), + PeakInputBytes: uint64(row.PeakInputBytes), + PeakOutputPkts: uint64(row.PeakOutputPkts), + PeakOutputBytes: 
uint64(row.PeakOutputBytes), + AvgInputPkts: uint64(row.AvgInputPkts), + AvgInputBytes: uint64(row.AvgInputBytes), + AvgOutputPkts: uint64(row.AvgOutputPkts), + AvgOutputBytes: uint64(row.AvgOutputBytes), + } + metric.Data = append(metric.Data, data) + } + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + if checkError.GetQueryError != nil { + buffer.FailureQueue <- metric + } else { + buffer.SuccessQueue <- metric + } +} + +func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, error) { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + var queryset []base.TrafficQuerySet + err := client.TrafficPerHour.Query(). + Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)). + GroupBy(trafficperhour.FieldName). + Aggregate( + ent.As(ent.Max(trafficperhour.FieldPeakInputPkts), "peak_input_pkts"), + ent.As(ent.Max(trafficperhour.FieldPeakInputBytes), "peak_input_bytes"), + ent.As(ent.Max(trafficperhour.FieldPeakOutputPkts), "peak_output_pkts"), + ent.As(ent.Max(trafficperhour.FieldPeakOutputBytes), "peak_output_bytes"), + ent.As(ent.Mean(trafficperhour.FieldAvgInputPkts), "avg_input_pkts"), + ent.As(ent.Mean(trafficperhour.FieldAvgInputBytes), "avg_input_bytes"), + ent.As(ent.Mean(trafficperhour.FieldAvgOutputPkts), "avg_output_pkts"), + ent.As(ent.Mean(trafficperhour.FieldAvgOutputBytes), "avg_output_bytes"), + ). 
+ Scan(ctx, &queryset) + if err != nil { + log.Debug().Msg(err.Error()) + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index a5d52eb..0e47c1f 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -6,6 +6,11 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + cpudaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cpu" + diskiodaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/io" + diskusagedaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/usage" + memorydaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/memory" + netdaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/net" cpuhourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/cpu" diskiohourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/io" diskusagehourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/usage" @@ -39,22 +44,32 @@ func (f *DefaultCheckFactory) CreateCheck(checkType base.CheckType, name string, return cpu.NewCheck(name, interval, buffer, client), nil case base.CPU_PER_HOUR: return cpuhourly.NewCheck(name, interval, buffer, client), nil + case base.CPU_PER_DAY: + return cpudaily.NewCheck(name, interval, buffer, client), nil case base.MEM: return memory.NewCheck(name, interval, buffer, client), nil case base.MEM_PER_HOUR: return memoryhourly.NewCheck(name, interval, buffer, client), nil + case base.MEM_PER_DAY: + return memorydaily.NewCheck(name, interval, buffer, client), nil case base.DISK_USAGE: return diskusage.NewCheck(name, interval, buffer, client), nil case base.DISK_USAGE_PER_HOUR: return diskusagehourly.NewCheck(name, interval, buffer, client), nil + case base.DISK_USAGE_PER_DAY: + return diskusagedaily.NewCheck(name, interval, buffer, 
client), nil case base.DISK_IO: return diskio.NewCheck(name, interval, buffer, client), nil case base.DISK_IO_PER_HOUR: return diskiohourly.NewCheck(name, interval, buffer, client), nil + case base.DISK_IO_PER_DAY: + return diskiodaily.NewCheck(name, interval, buffer, client), nil case base.NET: return net.NewCheck(name, interval, buffer, client), nil case base.NET_PER_HOUR: return nethourly.NewCheck(name, interval, buffer, client), nil + case base.NET_PER_DAY: + return netdaily.NewCheck(name, interval, buffer, client), nil default: return nil, fmt.Errorf("unknown check type: %s", checkType) } diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 12be488..52dd98c 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -50,6 +50,11 @@ func NewCollector(session *session.Session, client *ent.Client, checkFactory che base.DISK_USAGE_PER_HOUR: "disk_usage_per_hour", base.DISK_IO_PER_HOUR: "disk_io_per_hour", base.NET_PER_HOUR: "net_per_hour", + base.CPU_PER_DAY: "cpu_per_day", + base.MEM_PER_DAY: "memory_per_day", + base.DISK_USAGE_PER_DAY: "disk_usage_per_day", + base.DISK_IO_PER_DAY: "disk_io_per_day", + base.NET_PER_DAY: "net_per_day", } for checkType, name := range checkTypes { check, err := checkFactory.CreateCheck(checkType, name, time.Duration(time.Duration.Seconds(5)), checkBuffer, client) diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index 873abbd..c524764 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -10,14 +10,19 @@ import ( var checkTypeUrlMap = map[base.CheckType]string{ base.CPU: "/api/metrics/realtime/cpu/", base.CPU_PER_HOUR: "/api/metrics/hourly/cpu/", + base.CPU_PER_DAY: "/api/metrics/daily/cpu/", base.MEM: "/api/metrics/realtime/memory/", base.MEM_PER_HOUR: "/api/metrics/hourly/memory/", + base.MEM_PER_DAY: "/api/metrics/daily/memory/", base.DISK_USAGE: "/api/metrics/realtime/disk-usage/", 
base.DISK_USAGE_PER_HOUR: "/api/metrics/hourly/disk-usage/", + base.DISK_USAGE_PER_DAY: "/api/metrics/daily/disk-usage/", base.DISK_IO: "/api/metrics/realtime/disk-io/", base.DISK_IO_PER_HOUR: "/api/metrics/hourly/disk-io/", + base.DISK_IO_PER_DAY: "/api/metrics/daily/disk-io/", base.NET: "/api/metrics/realtime/traffic/", base.NET_PER_HOUR: "/api/metrics/hourly/traffic/", + base.NET_PER_DAY: "/api/metrics/daily/traffic/", } type TransportStrategy interface { @@ -50,10 +55,12 @@ func (t *Transporter) Send(data base.MetricData) error { var err error switch checkType { - case base.CPU, base.CPU_PER_HOUR, base.MEM, base.MEM_PER_HOUR: + case base.CPU, base.CPU_PER_HOUR, base.CPU_PER_DAY, + base.MEM, base.MEM_PER_HOUR, base.MEM_PER_DAY: _, _, err = t.session.Post(checkTypeUrlMap[checkType], data.Data[0], 10) - case base.DISK_USAGE, base.DISK_USAGE_PER_HOUR, base.DISK_IO, - base.DISK_IO_PER_HOUR, base.NET, base.NET_PER_HOUR: + case base.DISK_USAGE, base.DISK_USAGE_PER_HOUR, base.DISK_USAGE_PER_DAY, + base.DISK_IO, base.DISK_IO_PER_HOUR, base.DISK_IO_PER_DAY, + base.NET, base.NET_PER_HOUR, base.NET_PER_DAY: _, _, err = t.session.Post(checkTypeUrlMap[checkType], data.Data, 10) default: err = fmt.Errorf("unknown check type: %s", checkType) From bce09d6c292210585778997c93c71e39a2be683e Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 25 Nov 2024 14:34:13 +0900 Subject: [PATCH 070/364] Refactor QuerySet To share QuerySet structs defined for fetching data from database in both daily and hourly batch jobs, move them to base package. The hourly batch job code was modified accordingly. 
--- pkg/collector/check/base/base.go | 51 ++++++++++++++++--- pkg/collector/check/batch/hourly/cpu/cpu.go | 15 ++---- .../check/batch/hourly/disk/io/io.go | 20 +++----- .../check/batch/hourly/disk/usage/usage.go | 19 +++---- .../check/batch/hourly/memory/memory.go | 15 ++---- pkg/collector/check/batch/hourly/net/net.go | 24 +++------ 6 files changed, 72 insertions(+), 72 deletions(-) diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index 7f145f5..75fe442 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -26,6 +26,50 @@ const ( type CheckType string +type CheckError struct { + CollectError error + GetQueryError error + SaveQueryError error + DeleteQueryError error +} + +type CPUQuerySet struct { + Max float64 + AVG float64 +} + +type MemoryQuerySet struct { + Max float64 + AVG float64 +} + +type DiskIOQuerySet struct { + Device string `json:"device" db:"device"` + PeakReadBytes float64 `json:"peak_read_bytes"` + PeakWriteBytes float64 `json:"peak_write_bytes"` + AvgReadBytes float64 `json:"avg_read_bytes"` + AvgWriteBytes float64 `json:"avg_write_bytes"` +} + +type DiskUsageQuerySet struct { + Device string `json:"device"` + MountPoint string `json:"mount_point"` + Max float64 `json:"max"` + AVG float64 `json:"avg"` +} + +type TrafficQuerySet struct { + Name string `json:"name"` + PeakInputPkts float64 `json:"peak_input_pkts"` + PeakInputBytes float64 `json:"peak_input_bytes"` + PeakOutputPkts float64 `json:"peak_output_pkts"` + PeakOutputBytes float64 `json:"peak_output_bytes"` + AvgInputPkts float64 `json:"avg_input_pkts"` + AvgInputBytes float64 `json:"avg_input_bytes"` + AvgOutputPkts float64 `json:"avg_output_pkts"` + AvgOutputBytes float64 `json:"avg_output_bytes"` +} + type CheckResult struct { Timestamp time.Time `json:"timestamp"` Usage float64 `json:"usage,omitempty"` @@ -57,13 +101,6 @@ type CheckResult struct { AvgOutputBytes uint64 `json:"avg_output_bytes,omitempty"` } -type 
CheckError struct { - CollectError error - GetQueryError error - SaveQueryError error - DeleteQueryError error -} - type MetricData struct { Type CheckType `json:"type"` Data []CheckResult `json:"data,omitempty"` diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index ad9cfcf..1aa71c1 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -14,11 +14,6 @@ type Check struct { base.BaseCheck } -type cpuQuerySet struct { - Max float64 - AVG float64 -} - func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ BaseCheck: base.NewBaseCheck(name, interval, buffer, client), @@ -28,7 +23,7 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli func (c *Check) Execute(ctx context.Context) { var checkError base.CheckError - queryset, err := c.getCPUPeakAndAvg(ctx) + queryset, err := c.getCPU(ctx) if err != nil { checkError.GetQueryError = err } @@ -45,7 +40,7 @@ func (c *Check) Execute(ctx context.Context) { } metric.Data = append(metric.Data, data) - if err := c.saveCPUPeakAndAvg(ctx, data); err != nil { + if err := c.saveCPUPerHour(ctx, data); err != nil { checkError.SaveQueryError = err } } @@ -62,12 +57,12 @@ func (c *Check) Execute(ctx context.Context) { } } -func (c *Check) getCPUPeakAndAvg(ctx context.Context) ([]cpuQuerySet, error) { +func (c *Check) getCPU(ctx context.Context) ([]base.CPUQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []cpuQuerySet + var queryset []base.CPUQuerySet err := client.CPU.Query(). Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)). 
Aggregate( @@ -83,7 +78,7 @@ func (c *Check) getCPUPeakAndAvg(ctx context.Context) ([]cpuQuerySet, error) { return queryset, nil } -func (c *Check) saveCPUPeakAndAvg(ctx context.Context, data base.CheckResult) error { +func (c *Check) saveCPUPerHour(ctx context.Context, data base.CheckResult) error { client := c.GetClient() if err := client.CPUPerHour.Create(). SetTimestamp(data.Timestamp). diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index 34b3df5..434d84f 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -14,14 +14,6 @@ type Check struct { base.BaseCheck } -type diskIOQuerySet struct { - Device string `json:"device" db:"device"` - PeakReadBytes float64 `json:"peak_read_bytes"` - PeakWriteBytes float64 `json:"peak_write_bytes"` - AvgReadBytes float64 `json:"avg_read_bytes"` - AvgWriteBytes float64 `json:"avg_write_bytes"` -} - func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ BaseCheck: base.NewBaseCheck(name, interval, buffer, client), @@ -31,7 +23,7 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli func (c *Check) Execute(ctx context.Context) { var checkError base.CheckError - queryset, err := c.getDiskIOPeakAndAvg(ctx) + queryset, err := c.getDiskIO(ctx) if err != nil { checkError.GetQueryError = err } @@ -53,7 +45,7 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = append(metric.Data, data) } - if err := c.saveDiskIOPeakAndAvg(ctx, metric.Data); err != nil { + if err := c.saveDiskIOPerHour(ctx, metric.Data); err != nil { checkError.SaveQueryError = err } } @@ -63,19 +55,19 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { + if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- 
metric } else { buffer.SuccessQueue <- metric } } -func (c *Check) getDiskIOPeakAndAvg(ctx context.Context) ([]diskIOQuerySet, error) { +func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []diskIOQuerySet + var queryset []base.DiskIOQuerySet err := client.DiskIO.Query(). Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). GroupBy(diskio.FieldDevice). @@ -94,7 +86,7 @@ func (c *Check) getDiskIOPeakAndAvg(ctx context.Context) ([]diskIOQuerySet, erro return queryset, nil } -func (c *Check) saveDiskIOPeakAndAvg(ctx context.Context, data []base.CheckResult) error { +func (c *Check) saveDiskIOPerHour(ctx context.Context, data []base.CheckResult) error { client := c.GetClient() err := client.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index 8a4d2a1..9223f36 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -14,13 +14,6 @@ type Check struct { base.BaseCheck } -type diskUsageQuerySet struct { - Device string `json:"device"` - MountPoint string `json:"mount_point"` - Max float64 `json:"max"` - AVG float64 `json:"avg"` -} - func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ BaseCheck: base.NewBaseCheck(name, interval, buffer, client), @@ -30,7 +23,7 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli func (c *Check) Execute(ctx context.Context) { var checkError base.CheckError - queryset, err := c.getDiskUsagePeakAndAvg(ctx) + queryset, err := c.getDiskUsage(ctx) if err != nil { checkError.GetQueryError = err } @@ -51,7 +44,7 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = 
append(metric.Data, data) } - if err := c.saveDiskUsagePeakAndAvg(ctx, metric.Data); err != nil { + if err := c.saveDiskUsagePerHour(ctx, metric.Data); err != nil { checkError.SaveQueryError = err } } @@ -61,19 +54,19 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { + if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric } } -func (c *Check) getDiskUsagePeakAndAvg(ctx context.Context) ([]diskUsageQuerySet, error) { +func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []diskUsageQuerySet + var queryset []base.DiskUsageQuerySet err := client.DiskUsage.Query(). Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)). GroupBy(diskusage.FieldDevice, diskusage.FieldMountPoint). @@ -90,7 +83,7 @@ func (c *Check) getDiskUsagePeakAndAvg(ctx context.Context) ([]diskUsageQuerySet return queryset, nil } -func (c *Check) saveDiskUsagePeakAndAvg(ctx context.Context, data []base.CheckResult) error { +func (c *Check) saveDiskUsagePerHour(ctx context.Context, data []base.CheckResult) error { client := c.GetClient() err := client.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). 
diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 332b2ca..9dcae78 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -14,11 +14,6 @@ type Check struct { base.BaseCheck } -type memoryQuerySet struct { - Max float64 - AVG float64 -} - func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ BaseCheck: base.NewBaseCheck(name, interval, buffer, client), @@ -28,7 +23,7 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli func (c *Check) Execute(ctx context.Context) { var checkError base.CheckError - queryset, err := c.getMemoryPeakAndAvg(ctx) + queryset, err := c.getMemory(ctx) if err != nil { checkError.GetQueryError = err } @@ -45,7 +40,7 @@ func (c *Check) Execute(ctx context.Context) { } metric.Data = append(metric.Data, data) - if err := c.saveMemoryPeakAndAvg(ctx, data); err != nil { + if err := c.saveMemoryPerHour(ctx, data); err != nil { checkError.SaveQueryError = err } } @@ -62,12 +57,12 @@ func (c *Check) Execute(ctx context.Context) { } } -func (c *Check) getMemoryPeakAndAvg(ctx context.Context) ([]memoryQuerySet, error) { +func (c *Check) getMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []memoryQuerySet + var queryset []base.MemoryQuerySet err := client.Memory.Query(). Where(memory.TimestampGTE(from), memory.TimestampLTE(now)). Aggregate( @@ -83,7 +78,7 @@ func (c *Check) getMemoryPeakAndAvg(ctx context.Context) ([]memoryQuerySet, erro return queryset, nil } -func (c *Check) saveMemoryPeakAndAvg(ctx context.Context, data base.CheckResult) error { +func (c *Check) saveMemoryPerHour(ctx context.Context, data base.CheckResult) error { client := c.GetClient() if err := client.MemoryPerHour.Create(). 
SetTimestamp(data.Timestamp). diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index faae984..7b2ee36 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -14,18 +14,6 @@ type Check struct { base.BaseCheck } -type trafficQuerySet struct { - Name string `json:"name"` - PeakInputPkts float64 `json:"peak_input_pkts"` - PeakInputBytes float64 `json:"peak_input_bytes"` - PeakOutputPkts float64 `json:"peak_output_pkts"` - PeakOutputBytes float64 `json:"peak_output_bytes"` - AvgInputPkts float64 `json:"avg_input_pkts"` - AvgInputBytes float64 `json:"avg_input_bytes"` - AvgOutputPkts float64 `json:"avg_output_pkts"` - AvgOutputBytes float64 `json:"avg_output_bytes"` -} - func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { return &Check{ BaseCheck: base.NewBaseCheck(name, interval, buffer, client), @@ -35,7 +23,7 @@ func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, cli func (c *Check) Execute(ctx context.Context) { var checkError base.CheckError - queryset, err := c.getTrafficPeakAndAvg(ctx) + queryset, err := c.getTraffic(ctx) if err != nil { checkError.GetQueryError = err } @@ -61,7 +49,7 @@ func (c *Check) Execute(ctx context.Context) { metric.Data = append(metric.Data, data) } - if err := c.saveTrafficPeakAndAvg(ctx, metric.Data); err != nil { + if err := c.saveTrafficPerHour(ctx, metric.Data); err != nil { checkError.SaveQueryError = err } } @@ -71,19 +59,19 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { + if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric } } -func (c *Check) getTrafficPeakAndAvg(ctx context.Context) ([]trafficQuerySet, error) { +func (c *Check) getTraffic(ctx 
context.Context) ([]base.TrafficQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []trafficQuerySet + var queryset []base.TrafficQuerySet err := client.Traffic.Query(). Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)). GroupBy(traffic.FieldName). @@ -106,7 +94,7 @@ func (c *Check) getTrafficPeakAndAvg(ctx context.Context) ([]trafficQuerySet, er return queryset, nil } -func (c *Check) saveTrafficPeakAndAvg(ctx context.Context, data []base.CheckResult) error { +func (c *Check) saveTrafficPerHour(ctx context.Context, data []base.CheckResult) error { client := c.GetClient() err := client.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). From 7aaa540437985a36ef0250ce59c4f3c7d1dfc3e4 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 25 Nov 2024 14:34:25 +0900 Subject: [PATCH 071/364] Minor fix Fix typo --- pkg/db/schema/memory_per_hour.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/db/schema/memory_per_hour.go b/pkg/db/schema/memory_per_hour.go index 87c0550..9ead465 100644 --- a/pkg/db/schema/memory_per_hour.go +++ b/pkg/db/schema/memory_per_hour.go @@ -8,7 +8,7 @@ import ( "entgo.io/ent/schema/index" ) -// MemoryPerHour holds the schema definition for theMemoryPerHour entity. +// MemoryPerHour holds the schema definition for the MemoryPerHour entity. type MemoryPerHour struct { ent.Schema } From d382572e4c95a17fbe03e1a57764238ebd42e2f4 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Mon, 25 Nov 2024 15:27:34 +0900 Subject: [PATCH 072/364] Add delete query for old metrics Add functions to each metric that execute delete queries to remove old data after batch processing. Daily batch jobs delete data from the previous day, while hourly batch jobs remove data from the previous hour. 
--- pkg/collector/check/batch/daily/cpu/cpu.go | 21 +++++++++++++++- pkg/collector/check/batch/daily/disk/io/io.go | 21 +++++++++++++++- .../check/batch/daily/disk/usage/usage.go | 21 +++++++++++++++- .../check/batch/daily/memory/memory.go | 21 +++++++++++++++- pkg/collector/check/batch/daily/net/net.go | 21 +++++++++++++++- pkg/collector/check/batch/hourly/cpu/cpu.go | 24 ++++++++++++++++++- .../check/batch/hourly/disk/io/io.go | 24 ++++++++++++++++++- .../check/batch/hourly/disk/usage/usage.go | 24 ++++++++++++++++++- .../check/batch/hourly/memory/memory.go | 24 ++++++++++++++++++- pkg/collector/check/batch/hourly/net/net.go | 24 ++++++++++++++++++- 10 files changed, 215 insertions(+), 10 deletions(-) diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index 514cb64..8366338 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -39,6 +39,10 @@ func (c *Check) Execute(ctx context.Context) { AvgUsage: queryset[0].AVG, } metric.Data = append(metric.Data, data) + + if err := c.deleteCPUPerHour(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -46,7 +50,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil { + if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -73,3 +77,18 @@ func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { return queryset, nil } + +func (c *Check) deleteCPUPerHour(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + _, err := client.CPUPerHour.Delete(). + Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index 48e235b..ca93d7f 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -43,6 +43,10 @@ func (c *Check) Execute(ctx context.Context) { AvgReadBytes: uint64(row.AvgReadBytes), } metric.Data = append(metric.Data, data) + + if err := c.deleteDiskIOPerHour(ctx); err != nil { + checkError.DeleteQueryError = err + } } } @@ -51,7 +55,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil { + if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -81,3 +85,18 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er return queryset, nil } + +func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + _, err := client.DiskIOPerHour.Delete(). + Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index 83574e5..85654a5 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -42,6 +42,10 @@ func (c *Check) Execute(ctx context.Context) { AvgUsage: row.AVG, } metric.Data = append(metric.Data, data) + + if err := c.deleteDiskUsagePerHour(ctx); err != nil { + checkError.DeleteQueryError = err + } } } @@ -50,7 +54,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil { + if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -78,3 +82,18 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS return queryset, nil } + +func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + _, err := client.DiskUsagePerHour.Delete(). + Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index 95279a6..7e665d4 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -39,6 +39,10 @@ func (c *Check) Execute(ctx context.Context) { AvgUsage: queryset[0].AVG, } metric.Data = append(metric.Data, data) + + if err := c.deleteMemoryPerHour(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -46,7 +50,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil { + if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -73,3 +77,18 @@ func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, er return queryset, nil } + +func (c *Check) deleteMemoryPerHour(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + _, err := client.MemoryPerHour.Delete(). + Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 10836e4..46f9fab 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -47,6 +47,10 @@ func (c *Check) Execute(ctx context.Context) { AvgOutputBytes: uint64(row.AvgOutputBytes), } metric.Data = append(metric.Data, data) + + if err := c.deleteTrafficPerHour(ctx); err != nil { + checkError.DeleteQueryError = err + } } } @@ -55,7 +59,7 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil { + if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -89,3 +93,18 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, return queryset, nil } + +func (c *Check) deleteTrafficPerHour(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-24 * time.Hour) + + _, err := client.TrafficPerHour.Delete(). + Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index 1aa71c1..5833c38 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -43,6 +43,10 @@ func (c *Check) Execute(ctx context.Context) { if err := c.saveCPUPerHour(ctx, data); err != nil { checkError.SaveQueryError = err } + + if err := c.deleteCPU(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -50,7 +54,10 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { + isFailed := checkError.GetQueryError != nil || + checkError.SaveQueryError != nil || + checkError.DeleteQueryError != nil + if isFailed { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -89,3 +96,18 @@ func (c *Check) saveCPUPerHour(ctx context.Context, data base.CheckResult) error return nil } + +func (c *Check) deleteCPU(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + _, err := client.CPU.Delete(). + Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index 434d84f..b0d6097 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -48,6 +48,10 @@ func (c *Check) Execute(ctx context.Context) { if err := c.saveDiskIOPerHour(ctx, metric.Data); err != nil { checkError.SaveQueryError = err } + + if err := c.deleteDiskIO(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -55,7 +59,10 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { + isFailed := checkError.GetQueryError != nil || + checkError.SaveQueryError != nil || + checkError.DeleteQueryError != nil + if isFailed { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -102,3 +109,18 @@ func (c *Check) saveDiskIOPerHour(ctx context.Context, data []base.CheckResult) return nil } + +func (c *Check) deleteDiskIO(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + _, err := client.DiskIO.Delete(). + Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index 9223f36..6f0da37 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -47,6 +47,10 @@ func (c *Check) Execute(ctx context.Context) { if err := c.saveDiskUsagePerHour(ctx, metric.Data); err != nil { checkError.SaveQueryError = err } + + if err := c.deleteDiskUsage(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -54,7 +58,10 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { + isFailed := checkError.GetQueryError != nil || + checkError.SaveQueryError != nil || + checkError.DeleteQueryError != nil + if isFailed { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -98,3 +105,18 @@ func (c *Check) saveDiskUsagePerHour(ctx context.Context, data []base.CheckResul return nil } + +func (c *Check) deleteDiskUsage(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + _, err := client.DiskUsage.Delete(). + Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 9dcae78..f866d38 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -43,6 +43,10 @@ func (c *Check) Execute(ctx context.Context) { if err := c.saveMemoryPerHour(ctx, data); err != nil { checkError.SaveQueryError = err } + + if err := c.deleteMemory(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -50,7 +54,10 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { + isFailed := checkError.GetQueryError != nil || + checkError.SaveQueryError != nil || + checkError.DeleteQueryError != nil + if isFailed { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -89,3 +96,18 @@ func (c *Check) saveMemoryPerHour(ctx context.Context, data base.CheckResult) er return nil } + +func (c *Check) deleteMemory(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + _, err := client.Memory.Delete(). + Where(memory.TimestampGTE(from), memory.TimestampLTE(now)). 
+ Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 7b2ee36..b116dc7 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -52,6 +52,10 @@ func (c *Check) Execute(ctx context.Context) { if err := c.saveTrafficPerHour(ctx, metric.Data); err != nil { checkError.SaveQueryError = err } + + if err := c.deleteTraffic(ctx); err != nil { + checkError.DeleteQueryError = err + } } if ctx.Err() != nil { @@ -59,7 +63,10 @@ func (c *Check) Execute(ctx context.Context) { } buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.SaveQueryError != nil { + isFailed := checkError.GetQueryError != nil || + checkError.SaveQueryError != nil || + checkError.DeleteQueryError != nil + if isFailed { buffer.FailureQueue <- metric } else { buffer.SuccessQueue <- metric @@ -114,3 +121,18 @@ func (c *Check) saveTrafficPerHour(ctx context.Context, data []base.CheckResult) return nil } + +func (c *Check) deleteTraffic(ctx context.Context) error { + client := c.GetClient() + now := time.Now() + from := now.Add(-1 * time.Hour) + + _, err := client.Traffic.Delete(). + Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)). + Exec(ctx) + if err != nil { + return err + } + + return nil +} From cbe828bd97109565125d68fd2e25d66c63f587d4 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 27 Nov 2024 16:26:08 +0900 Subject: [PATCH 073/364] Add collectorArgs and collectConf type Add collectConf type to facilitate the retrieval of metric collection configuration from alpacon-server. Add collectorArgs type to encapsulate the parameters required for collector instantiation. The initialization process for the collector has been streamlined into the InitCollector() function. Inside InitCollector(), fetch the metric collection configuration and construct the object to be passed as an argument. 
--- pkg/collector/collector.go | 91 +++++++++++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 21 deletions(-) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 52dd98c..f5681e7 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -2,7 +2,10 @@ package collector import ( "context" + "encoding/json" "fmt" + "net/http" + "os" "sync" "time" @@ -12,6 +15,12 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" session "github.com/alpacanetworks/alpamon-go/pkg/scheduler" + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +var ( + confURL = "/api/metrics/config/" ) type Collector struct { @@ -23,8 +32,55 @@ type Collector struct { stopChan chan struct{} } -func NewCollector(session *session.Session, client *ent.Client, checkFactory check.CheckFactory, transporterFactory transporter.TransporterFactory) (*Collector, error) { - transporter, err := transporterFactory.CreateTransporter(session) +type collectorArgs struct { + session *session.Session + client *ent.Client + conf []collectConf + checkFactory check.CheckFactory + transportFactory transporter.TransporterFactory +} + +type collectConf struct { + Type base.CheckType + Interval int +} + +func InitCollector(session *session.Session, client *ent.Client) *Collector { + checkFactory := &check.DefaultCheckFactory{} + transporterFactory := &transporter.DefaultTransporterFactory{} + + var conf []collectConf + resp, statusCode, err := session.Get(confURL, 10) + if statusCode == http.StatusOK { + err = json.Unmarshal(resp, &conf) + if err != nil { + log.Error().Err(err).Msg("Failed to unmarshal collection config") + os.Exit(1) + } + } else { + log.Error().Err(err).Msgf("HTTP %d: Failed to get collection config", statusCode) + os.Exit(1) + } + + args := collectorArgs{ + session: session, + client: client, + conf: conf, + checkFactory: checkFactory, + transportFactory: 
transporterFactory, + } + + collector, err := NewCollector(args) + if err != nil { + log.Error().Err(err).Msg("Failed to create collector") + os.Exit(1) + } + + return collector +} + +func NewCollector(args collectorArgs) (*Collector, error) { + transporter, err := args.transportFactory.CreateTransporter(args.session) if err != nil { return nil, err } @@ -39,25 +95,18 @@ func NewCollector(session *session.Session, client *ent.Client, checkFactory che stopChan: make(chan struct{}), } - checkTypes := map[base.CheckType]string{ - base.CPU: "cpu", - base.MEM: "memory", - base.DISK_USAGE: "disk_usage", - base.DISK_IO: "disk_io", - base.NET: "net", - base.CPU_PER_HOUR: "cpu_per_hour", - base.MEM_PER_HOUR: "memory_per_hour", - base.DISK_USAGE_PER_HOUR: "disk_usage_per_hour", - base.DISK_IO_PER_HOUR: "disk_io_per_hour", - base.NET_PER_HOUR: "net_per_hour", - base.CPU_PER_DAY: "cpu_per_day", - base.MEM_PER_DAY: "memory_per_day", - base.DISK_USAGE_PER_DAY: "disk_usage_per_day", - base.DISK_IO_PER_DAY: "disk_io_per_day", - base.NET_PER_DAY: "net_per_day", - } - for checkType, name := range checkTypes { - check, err := checkFactory.CreateCheck(checkType, name, time.Duration(time.Duration.Seconds(5)), checkBuffer, client) + for _, entry := range args.conf { + duration := time.Duration(entry.Interval) * time.Minute + name := string(entry.Type) + "_" + uuid.NewString() + checkArgs := base.CheckArgs{ + Type: entry.Type, + Name: name, + Interval: time.Duration(duration.Minutes() * float64(time.Minute)), + Buffer: checkBuffer, + Client: args.client, + } + + check, err := args.checkFactory.CreateCheck(&checkArgs) if err != nil { return nil, err } From 39489a38a517d6dcc055e8d3d923b6b5da6c192f Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 27 Nov 2024 16:44:23 +0900 Subject: [PATCH 074/364] Add CheckArgs and CheckStrategy type To promote code maintainability, add CheckArgs type to hold the arguments passed to Check constructor. 
As part of the refactoring process, move CheckStrategy interface from the check package to this package to improve cohesion and reduce dependencies within CheckFactory. --- pkg/collector/check/base/base.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index 75fe442..c789fde 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -1,6 +1,7 @@ package base import ( + "context" "time" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" @@ -22,10 +23,19 @@ const ( NET CheckType = "net" NET_PER_HOUR CheckType = "net_per_hour" NET_PER_DAY CheckType = "net_per_day" + CLEANUP CheckType = "cleanup" ) type CheckType string +type CheckArgs struct { + Type CheckType + Name string + Interval time.Duration + Buffer *CheckBuffer + Client *ent.Client +} + type CheckError struct { CollectError error GetQueryError error @@ -106,6 +116,14 @@ type MetricData struct { Data []CheckResult `json:"data,omitempty"` } +type CheckStrategy interface { + Execute(ctx context.Context) + GetInterval() time.Duration + GetName() string + GetBuffer() *CheckBuffer + GetClient() *ent.Client +} + type BaseCheck struct { name string interval time.Duration @@ -119,12 +137,12 @@ type CheckBuffer struct { Capacity int } -func NewBaseCheck(name string, interval time.Duration, buffer *CheckBuffer, client *ent.Client) BaseCheck { +func NewBaseCheck(args *CheckArgs) BaseCheck { return BaseCheck{ - name: name, - interval: interval, - buffer: buffer, - client: client, + name: args.Name, + interval: args.Interval, + buffer: args.Buffer, + client: args.Client, } } From 1d627693f8d8f9cb163c9f4eb10cf9a55f45a5ab Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 27 Nov 2024 16:53:02 +0900 Subject: [PATCH 075/364] Refactor CreateCheck() Initial approach involved using switch statement to invoke different NewCheck() depending on CheckType, leading to a less maintainable 
codebase due to code redundancy. To improve code organization and maintainability, standardize the creation of Check by adding newCheck() function type and refactoring the various package-specific NewCheck() functions to conform to this type. This unification has enabled to simplify the logic within CreateCheck() of CheckFactory by establishing a clear mapping between CheckType and the corresponding newCheck() function. --- pkg/collector/check/check.go | 69 ++++++++++++++---------------------- 1 file changed, 27 insertions(+), 42 deletions(-) diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 0e47c1f..5a26f5a 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -3,7 +3,6 @@ package check import ( "context" "fmt" - "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" cpudaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cpu" @@ -21,56 +20,42 @@ import ( diskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/usage" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/memory" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/net" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" ) -type CheckStrategy interface { +var checkFactories = map[base.CheckType]newCheck{ + base.CPU: cpu.NewCheck, + base.CPU_PER_HOUR: cpuhourly.NewCheck, + base.CPU_PER_DAY: cpudaily.NewCheck, + base.MEM: memory.NewCheck, + base.MEM_PER_HOUR: memoryhourly.NewCheck, + base.MEM_PER_DAY: memorydaily.NewCheck, + base.DISK_USAGE: diskusage.NewCheck, + base.DISK_USAGE_PER_HOUR: diskusagehourly.NewCheck, + base.DISK_USAGE_PER_DAY: diskusagedaily.NewCheck, + base.DISK_IO: diskio.NewCheck, + base.DISK_IO_PER_HOUR: diskiohourly.NewCheck, + base.DISK_IO_PER_DAY: diskiodaily.NewCheck, + base.NET: net.NewCheck, + base.NET_PER_HOUR: nethourly.NewCheck, + base.NET_PER_DAY: netdaily.NewCheck, +} + +type Check interface { Execute(ctx context.Context) - 
GetInterval() time.Duration - GetName() string - GetBuffer() *base.CheckBuffer - GetClient() *ent.Client } type CheckFactory interface { - CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) (CheckStrategy, error) + CreateCheck(args *base.CheckArgs) (base.CheckStrategy, error) } +type newCheck func(args *base.CheckArgs) base.CheckStrategy + type DefaultCheckFactory struct{} -func (f *DefaultCheckFactory) CreateCheck(checkType base.CheckType, name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) (CheckStrategy, error) { - switch checkType { - case base.CPU: - return cpu.NewCheck(name, interval, buffer, client), nil - case base.CPU_PER_HOUR: - return cpuhourly.NewCheck(name, interval, buffer, client), nil - case base.CPU_PER_DAY: - return cpudaily.NewCheck(name, interval, buffer, client), nil - case base.MEM: - return memory.NewCheck(name, interval, buffer, client), nil - case base.MEM_PER_HOUR: - return memoryhourly.NewCheck(name, interval, buffer, client), nil - case base.MEM_PER_DAY: - return memorydaily.NewCheck(name, interval, buffer, client), nil - case base.DISK_USAGE: - return diskusage.NewCheck(name, interval, buffer, client), nil - case base.DISK_USAGE_PER_HOUR: - return diskusagehourly.NewCheck(name, interval, buffer, client), nil - case base.DISK_USAGE_PER_DAY: - return diskusagedaily.NewCheck(name, interval, buffer, client), nil - case base.DISK_IO: - return diskio.NewCheck(name, interval, buffer, client), nil - case base.DISK_IO_PER_HOUR: - return diskiohourly.NewCheck(name, interval, buffer, client), nil - case base.DISK_IO_PER_DAY: - return diskiodaily.NewCheck(name, interval, buffer, client), nil - case base.NET: - return net.NewCheck(name, interval, buffer, client), nil - case base.NET_PER_HOUR: - return nethourly.NewCheck(name, interval, buffer, client), nil - case base.NET_PER_DAY: - return netdaily.NewCheck(name, interval, buffer, client), nil - 
default: - return nil, fmt.Errorf("unknown check type: %s", checkType) +func (f *DefaultCheckFactory) CreateCheck(args *base.CheckArgs) (base.CheckStrategy, error) { + if factory, exists := checkFactories[args.Type]; exists { + return factory(args), nil } + + return nil, fmt.Errorf("unknown check type: %s", args.Type) } From 0ad6fe562c4a6f5a637e6d07b0c1cf0f36d89781 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Wed, 27 Nov 2024 16:55:09 +0900 Subject: [PATCH 076/364] Update due to apply changes Update code to reflect changes mentioned in the previous commit. --- cmd/alpamon/command/root.go | 12 +----------- pkg/collector/check/batch/daily/cpu/cpu.go | 4 ++-- pkg/collector/check/batch/daily/disk/io/io.go | 4 ++-- pkg/collector/check/batch/daily/disk/usage/usage.go | 4 ++-- pkg/collector/check/batch/daily/memory/memory.go | 4 ++-- pkg/collector/check/batch/daily/net/net.go | 4 ++-- pkg/collector/check/batch/hourly/cpu/cpu.go | 4 ++-- pkg/collector/check/batch/hourly/disk/io/io.go | 4 ++-- pkg/collector/check/batch/hourly/disk/usage/usage.go | 4 ++-- pkg/collector/check/batch/hourly/memory/memory.go | 4 ++-- pkg/collector/check/batch/hourly/net/net.go | 4 ++-- pkg/collector/check/realtime/cpu/cpu.go | 5 ++--- pkg/collector/check/realtime/disk/io/io.go | 4 ++-- pkg/collector/check/realtime/disk/usage/usage.go | 4 ++-- pkg/collector/check/realtime/memory/memory.go | 5 ++--- pkg/collector/check/realtime/net/net.go | 4 ++-- pkg/collector/scheduler/scheduler.go | 11 ++++++----- 17 files changed, 37 insertions(+), 48 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index a0a5531..7cab8fb 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -7,8 +7,6 @@ import ( "syscall" "github.com/alpacanetworks/alpamon-go/pkg/collector" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check" - "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" "github.com/alpacanetworks/alpamon-go/pkg/config" 
"github.com/alpacanetworks/alpamon-go/pkg/db" "github.com/alpacanetworks/alpamon-go/pkg/logger" @@ -79,15 +77,7 @@ func runAgent() { return } - checkFactory := &check.DefaultCheckFactory{} - transporterFactory := &transporter.DefaultTransporterFactory{} - - collector, err := collector.NewCollector(session, client, checkFactory, transporterFactory) - if err != nil { - log.Error().Err(err).Msg("Failed to create collector") - return - } - + collector := collector.InitCollector(session, client) if err := collector.Start(ctx); err != nil { log.Error().Err(err).Msg("Failed to start collector") return diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index 8366338..c84f532 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index ca93d7f..7a25b61 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index 85654a5..e91e9a3 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ 
b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index 7e665d4..7b50ca3 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 46f9fab..148ed39 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index 5833c38..fa25602 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) 
base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index b0d6097..fa419f2 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index 6f0da37..f865e95 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index f866d38..032eec0 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/batch/hourly/net/net.go 
b/pkg/collector/check/batch/hourly/net/net.go index b116dc7..6d9e56a 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -14,9 +14,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/realtime/cpu/cpu.go b/pkg/collector/check/realtime/cpu/cpu.go index 6942211..681e844 100644 --- a/pkg/collector/check/realtime/cpu/cpu.go +++ b/pkg/collector/check/realtime/cpu/cpu.go @@ -6,7 +6,6 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/cpu" ) @@ -14,9 +13,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/realtime/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go index 5f6be9a..e6ab611 100644 --- a/pkg/collector/check/realtime/disk/io/io.go +++ b/pkg/collector/check/realtime/disk/io/io.go @@ -13,9 +13,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index ded1a48..8bc6847 100644 --- 
a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -22,9 +22,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/realtime/memory/memory.go b/pkg/collector/check/realtime/memory/memory.go index 51f4d73..6ed219c 100644 --- a/pkg/collector/check/realtime/memory/memory.go +++ b/pkg/collector/check/realtime/memory/memory.go @@ -5,7 +5,6 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/shirou/gopsutil/v4/mem" ) @@ -13,9 +12,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/check/realtime/net/net.go b/pkg/collector/check/realtime/net/net.go index bfa74a1..828678f 100644 --- a/pkg/collector/check/realtime/net/net.go +++ b/pkg/collector/check/realtime/net/net.go @@ -13,9 +13,9 @@ type Check struct { base.BaseCheck } -func NewCheck(name string, interval time.Duration, buffer *base.CheckBuffer, client *ent.Client) *Check { +func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(name, interval, buffer, client), + BaseCheck: base.NewBaseCheck(args), } } diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index 54bd18f..806f044 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -5,7 +5,7 @@ 
import ( "sync" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check" + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" ) type Scheduler struct { @@ -15,7 +15,7 @@ type Scheduler struct { } type ScheduledTask struct { - check check.CheckStrategy + check base.CheckStrategy nextRun time.Time interval time.Duration running bool @@ -28,14 +28,15 @@ func NewScheduler() *Scheduler { } } -func (s *Scheduler) AddTask(check check.CheckStrategy) error { +func (s *Scheduler) AddTask(check base.CheckStrategy) error { s.mu.Lock() defer s.mu.Unlock() + interval := check.GetInterval() s.tasks[check.GetName()] = &ScheduledTask{ check: check, - nextRun: time.Now(), - interval: check.GetInterval(), + nextRun: time.Now().Add(interval), + interval: interval, running: false, } return nil From 54665b837ab931ec4d86969c3477a32b22e5c31e Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 2 Dec 2024 13:54:00 +0900 Subject: [PATCH 077/364] Add comments to explain double-checking ctx.Err() --- pkg/runner/pty.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 4bb918b..0e8e558 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -105,6 +105,7 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance default: _, message, err := pc.conn.ReadMessage() if err != nil { + // Double-check ctx.Err() to handle cancellation during blocking read if ctx.Err() != nil { return } @@ -116,6 +117,7 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance } _, err = pc.ptmx.Write(message) if err != nil { + // Double-check ctx.Err() to handle cancellation during blocking write if ctx.Err() != nil { return } From d7d44f54943e00980c96037092f9ef2f8768fa75 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 3 Dec 2024 11:51:42 +0900 Subject: [PATCH 078/364] Add cleanup check Add cleanup check to delete all data in the database daily. 
--- .../check/batch/daily/cleanup/cleanup.go | 184 ++++++++++++++++++ pkg/collector/check/check.go | 2 + 2 files changed, 186 insertions(+) create mode 100644 pkg/collector/check/batch/daily/cleanup/cleanup.go diff --git a/pkg/collector/check/batch/daily/cleanup/cleanup.go b/pkg/collector/check/batch/daily/cleanup/cleanup.go new file mode 100644 index 0000000..9dc29c9 --- /dev/null +++ b/pkg/collector/check/batch/daily/cleanup/cleanup.go @@ -0,0 +1,184 @@ +package cleanup + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/utils" +) + +var ( + tables = []base.CheckType{ + base.CPU, + base.CPU_PER_HOUR, + base.MEM, + base.MEM_PER_HOUR, + base.DISK_USAGE, + base.DISK_USAGE_PER_HOUR, + base.DISK_IO, + base.DISK_IO_PER_HOUR, + base.NET, + base.NET_PER_HOUR, + } + deleteQueryMap = map[base.CheckType]deleteQuery{ + base.CPU: deleteAllCPU, + base.CPU_PER_HOUR: deleteAllCPUPerHour, + base.MEM: deleteAllMemory, + base.MEM_PER_HOUR: deleteAllMemoryPerHour, + base.DISK_USAGE: deleteAllDiskUsage, + base.DISK_USAGE_PER_HOUR: deleteAllDiskUsagePerHour, + base.DISK_IO: deleteAllDiskIO, + base.DISK_IO_PER_HOUR: deleteAllDiskIOPerHour, + base.NET: deleteAllTraffic, + base.NET_PER_HOUR: deleteAllTrafficPerHour, + } +) + +type deleteQuery func(context.Context, *ent.Client) error + +type Check struct { + base.BaseCheck + retryCount base.RetryCount +} + +func NewCheck(args *base.CheckArgs) base.CheckStrategy { + return &Check{ + BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxDeleteRetries: base.MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, + } +} + +func (c *Check) Execute(ctx context.Context) { + start := time.Now() + + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + if err := 
c.deleteAllMetric(ctx); err != nil { + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + continue + case <-ctx.Done(): + return + } + } + } + break + } + + if ctx.Err() != nil { + return + } +} + +func (c *Check) deleteAllMetric(ctx context.Context) error { + for _, table := range tables { + if query, exist := deleteQueryMap[table]; exist { + if err := query(ctx, c.GetClient()); err != nil { + return err + } + } + } + + return nil +} + +func deleteAllCPU(ctx context.Context, client *ent.Client) error { + _, err := client.CPU.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllCPUPerHour(ctx context.Context, client *ent.Client) error { + _, err := client.CPUPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllMemory(ctx context.Context, client *ent.Client) error { + _, err := client.Memory.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client) error { + _, err := client.MemoryPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllDiskUsage(ctx context.Context, client *ent.Client) error { + _, err := client.DiskUsage.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client) error { + _, err := client.DiskUsagePerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllDiskIO(ctx context.Context, client *ent.Client) error { + _, err := client.DiskIO.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client) error { + _, err := client.DiskIOPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllTraffic(ctx 
context.Context, client *ent.Client) error { + _, err := client.Traffic.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} + +func deleteAllTrafficPerHour(ctx context.Context, client *ent.Client) error { + _, err := client.TrafficPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 5a26f5a..2c75d05 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + cleanup "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cleanup" cpudaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cpu" diskiodaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/io" diskusagedaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/usage" @@ -38,6 +39,7 @@ var checkFactories = map[base.CheckType]newCheck{ base.NET: net.NewCheck, base.NET_PER_HOUR: nethourly.NewCheck, base.NET_PER_DAY: netdaily.NewCheck, + base.CLEANUP: cleanup.NewCheck, } type Check interface { From efc5b169d840770e98714c11e2a1683ca8cfa810 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 3 Dec 2024 11:55:12 +0900 Subject: [PATCH 079/364] Adjust buffer size and waitgroup number Adjust buffer size and waitgroup count to match the number of checks received from alpacon-server. 
--- pkg/collector/collector.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index f5681e7..de1658d 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -85,7 +85,7 @@ func NewCollector(args collectorArgs) (*Collector, error) { return nil, err } - checkBuffer := base.NewCheckBuffer(10) + checkBuffer := base.NewCheckBuffer(len(args.conf) * 2) collector := &Collector{ transporter: transporter, @@ -121,7 +121,7 @@ func NewCollector(args collectorArgs) (*Collector, error) { func (c *Collector) Start(ctx context.Context) error { go c.scheduler.Start(ctx) - for i := 0; i < 5; i++ { + for i := 0; i < c.buffer.Capacity; i++ { c.wg.Add(1) go c.successQueueWorker(ctx) } @@ -151,7 +151,6 @@ func (c *Collector) successQueueWorker(ctx context.Context) { } } } - } func (c *Collector) failureQueueWorker(ctx context.Context) { From 2538342f054598c9fb469119c7ea26ac32342a24 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 3 Dec 2024 11:57:46 +0900 Subject: [PATCH 080/364] Add types.go in base package To improve code modularity and maintainability, move all check-related type definitions from base.go to a new file, types.go. 
--- pkg/collector/check/base/base.go | 115 -------------------------- pkg/collector/check/base/types.go | 131 ++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 115 deletions(-) create mode 100644 pkg/collector/check/base/types.go diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index c789fde..af38662 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -7,115 +7,6 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/db/ent" ) -const ( - CPU CheckType = "cpu" - CPU_PER_HOUR CheckType = "cpu_per_hour" - CPU_PER_DAY CheckType = "cpu_per_day" - MEM CheckType = "memory" - MEM_PER_HOUR CheckType = "memory_per_hour" - MEM_PER_DAY CheckType = "memory_per_day" - DISK_USAGE CheckType = "disk_usage" - DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" - DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" - DISK_IO CheckType = "disk_io" - DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" - DISK_IO_PER_DAY CheckType = "disk_io_per_day" - NET CheckType = "net" - NET_PER_HOUR CheckType = "net_per_hour" - NET_PER_DAY CheckType = "net_per_day" - CLEANUP CheckType = "cleanup" -) - -type CheckType string - -type CheckArgs struct { - Type CheckType - Name string - Interval time.Duration - Buffer *CheckBuffer - Client *ent.Client -} - -type CheckError struct { - CollectError error - GetQueryError error - SaveQueryError error - DeleteQueryError error -} - -type CPUQuerySet struct { - Max float64 - AVG float64 -} - -type MemoryQuerySet struct { - Max float64 - AVG float64 -} - -type DiskIOQuerySet struct { - Device string `json:"device" db:"device"` - PeakReadBytes float64 `json:"peak_read_bytes"` - PeakWriteBytes float64 `json:"peak_write_bytes"` - AvgReadBytes float64 `json:"avg_read_bytes"` - AvgWriteBytes float64 `json:"avg_write_bytes"` -} - -type DiskUsageQuerySet struct { - Device string `json:"device"` - MountPoint string `json:"mount_point"` - Max float64 `json:"max"` - AVG float64 `json:"avg"` 
-} - -type TrafficQuerySet struct { - Name string `json:"name"` - PeakInputPkts float64 `json:"peak_input_pkts"` - PeakInputBytes float64 `json:"peak_input_bytes"` - PeakOutputPkts float64 `json:"peak_output_pkts"` - PeakOutputBytes float64 `json:"peak_output_bytes"` - AvgInputPkts float64 `json:"avg_input_pkts"` - AvgInputBytes float64 `json:"avg_input_bytes"` - AvgOutputPkts float64 `json:"avg_output_pkts"` - AvgOutputBytes float64 `json:"avg_output_bytes"` -} - -type CheckResult struct { - Timestamp time.Time `json:"timestamp"` - Usage float64 `json:"usage,omitempty"` - Name string `json:"name,omitempty"` - Device string `json:"device,omitempty"` - MountPoint string `json:"mount_point,omitempty"` - Total uint64 `json:"total,omitempty"` - Free uint64 `json:"free,omitempty"` - Used uint64 `json:"used,omitempty"` - WriteBytes uint64 `json:"write_bytes,omitempty"` - ReadBytes uint64 `json:"read_bytes,omitempty"` - InputPkts uint64 `json:"input_pkts,omitempty"` - InputBytes uint64 `json:"input_bytes,omitempty"` - OutputPkts uint64 `json:"output_pkts,omitempty"` - OutputBytes uint64 `json:"output_bytes,omitempty"` - PeakUsage float64 `json:"peak_usage,omitempty"` - AvgUsage float64 `json:"avg_usage,omitempty"` - PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` - PeakReadBytes uint64 `json:"peak_read_bytes,omitempty"` - AvgWriteBytes uint64 `json:"avg_write_bytes,omitempty"` - AvgReadBytes uint64 `json:"avg_read_bytes,omitempty"` - PeakInputPkts uint64 `json:"peak_input_pkts,omitempty"` - PeakInputBytes uint64 `json:"peak_input_bytes,omitempty"` - PeakOutputPkts uint64 `json:"peak_output_pkts,omitempty"` - PeakOutputBytes uint64 `json:"peak_output_bytes,omitempty"` - AvgInputPkts uint64 `json:"avg_input_pkts,omitempty"` - AvgInputBytes uint64 `json:"avg_input_bytes,omitempty"` - AvgOutputPkts uint64 `json:"avg_output_pkts,omitempty"` - AvgOutputBytes uint64 `json:"avg_output_bytes,omitempty"` -} - -type MetricData struct { - Type CheckType `json:"type"` - Data 
[]CheckResult `json:"data,omitempty"` -} - type CheckStrategy interface { Execute(ctx context.Context) GetInterval() time.Duration @@ -131,12 +22,6 @@ type BaseCheck struct { client *ent.Client } -type CheckBuffer struct { - SuccessQueue chan MetricData - FailureQueue chan MetricData - Capacity int -} - func NewBaseCheck(args *CheckArgs) BaseCheck { return BaseCheck{ name: args.Name, diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go new file mode 100644 index 0000000..3507dce --- /dev/null +++ b/pkg/collector/check/base/types.go @@ -0,0 +1,131 @@ +package base + +import ( + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" +) + +const ( + CPU CheckType = "cpu" + CPU_PER_HOUR CheckType = "cpu_per_hour" + CPU_PER_DAY CheckType = "cpu_per_day" + MEM CheckType = "memory" + MEM_PER_HOUR CheckType = "memory_per_hour" + MEM_PER_DAY CheckType = "memory_per_day" + DISK_USAGE CheckType = "disk_usage" + DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" + DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" + DISK_IO CheckType = "disk_io" + DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" + DISK_IO_PER_DAY CheckType = "disk_io_per_day" + NET CheckType = "net" + NET_PER_HOUR CheckType = "net_per_hour" + NET_PER_DAY CheckType = "net_per_day" + CLEANUP CheckType = "cleanup" + MAX_RETRIES int = 5 + MAX_RETRY_TIMES time.Duration = 1 * time.Minute + COLLECT_MAX_RETRIES int = 3 + GET_MAX_RETRIES int = 2 + SAVE_MAX_RETRIES int = 2 + DELETE_MAX_RETRIES int = 1 + DEFAULT_DELAY time.Duration = 1 * time.Second +) + +type CheckType string + +type CheckArgs struct { + Type CheckType + Name string + Interval time.Duration + Buffer *CheckBuffer + Client *ent.Client +} + +type RetryCount struct { + MaxCollectRetries int + MaxGetRetries int + MaxSaveRetries int + MaxDeleteRetries int + MaxRetryTime time.Duration + Delay time.Duration +} + +type CPUQuerySet struct { + Max float64 + AVG float64 +} + +type MemoryQuerySet struct { + Max float64 + AVG 
float64 +} + +type DiskIOQuerySet struct { + Device string `json:"device" db:"device"` + PeakReadBytes float64 `json:"peak_read_bytes"` + PeakWriteBytes float64 `json:"peak_write_bytes"` + AvgReadBytes float64 `json:"avg_read_bytes"` + AvgWriteBytes float64 `json:"avg_write_bytes"` +} + +type DiskUsageQuerySet struct { + Device string `json:"device"` + MountPoint string `json:"mount_point"` + Max float64 `json:"max"` + AVG float64 `json:"avg"` +} + +type TrafficQuerySet struct { + Name string `json:"name"` + PeakInputPkts float64 `json:"peak_input_pkts"` + PeakInputBytes float64 `json:"peak_input_bytes"` + PeakOutputPkts float64 `json:"peak_output_pkts"` + PeakOutputBytes float64 `json:"peak_output_bytes"` + AvgInputPkts float64 `json:"avg_input_pkts"` + AvgInputBytes float64 `json:"avg_input_bytes"` + AvgOutputPkts float64 `json:"avg_output_pkts"` + AvgOutputBytes float64 `json:"avg_output_bytes"` +} + +type CheckResult struct { + Timestamp time.Time `json:"timestamp"` + Usage float64 `json:"usage,omitempty"` + Name string `json:"name,omitempty"` + Device string `json:"device,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + Total uint64 `json:"total,omitempty"` + Free uint64 `json:"free,omitempty"` + Used uint64 `json:"used,omitempty"` + WriteBytes uint64 `json:"write_bytes,omitempty"` + ReadBytes uint64 `json:"read_bytes,omitempty"` + InputPkts uint64 `json:"input_pkts,omitempty"` + InputBytes uint64 `json:"input_bytes,omitempty"` + OutputPkts uint64 `json:"output_pkts,omitempty"` + OutputBytes uint64 `json:"output_bytes,omitempty"` + PeakUsage float64 `json:"peak_usage,omitempty"` + AvgUsage float64 `json:"avg_usage,omitempty"` + PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` + PeakReadBytes uint64 `json:"peak_read_bytes,omitempty"` + AvgWriteBytes uint64 `json:"avg_write_bytes,omitempty"` + AvgReadBytes uint64 `json:"avg_read_bytes,omitempty"` + PeakInputPkts uint64 `json:"peak_input_pkts,omitempty"` + PeakInputBytes uint64 
`json:"peak_input_bytes,omitempty"` + PeakOutputPkts uint64 `json:"peak_output_pkts,omitempty"` + PeakOutputBytes uint64 `json:"peak_output_bytes,omitempty"` + AvgInputPkts uint64 `json:"avg_input_pkts,omitempty"` + AvgInputBytes uint64 `json:"avg_input_bytes,omitempty"` + AvgOutputPkts uint64 `json:"avg_output_pkts,omitempty"` + AvgOutputBytes uint64 `json:"avg_output_bytes,omitempty"` +} + +type MetricData struct { + Type CheckType `json:"type"` + Data []CheckResult `json:"data,omitempty"` +} + +type CheckBuffer struct { + SuccessQueue chan MetricData + FailureQueue chan MetricData + Capacity int +} From 11792b554c650d416fe6c5978f442a68020ddaf1 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 3 Dec 2024 12:01:39 +0900 Subject: [PATCH 081/364] Delete running flag in scheduler Due to the idempotency of task execution, delete the unnecessary running flag. --- pkg/collector/scheduler/scheduler.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index 806f044..4b858cd 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -18,7 +18,6 @@ type ScheduledTask struct { check base.CheckStrategy nextRun time.Time interval time.Duration - running bool } func NewScheduler() *Scheduler { @@ -37,7 +36,6 @@ func (s *Scheduler) AddTask(check base.CheckStrategy) error { check: check, nextRun: time.Now().Add(interval), interval: interval, - running: false, } return nil } @@ -56,8 +54,7 @@ func (s *Scheduler) Start(ctx context.Context) { s.mu.RLock() now := time.Now() for _, task := range s.tasks { - if now.After(task.nextRun) && !task.running { - task.running = true + if now.After(task.nextRun) { go s.executeTask(ctx, task) } } @@ -73,7 +70,6 @@ func (s *Scheduler) Stop() { func (s *Scheduler) executeTask(ctx context.Context, task *ScheduledTask) { defer func() { s.mu.Lock() - task.running = false task.nextRun = time.Now().Add(task.interval) 
s.mu.Unlock() }() From 8e0e9c3f42442ae388ad6386307ba0536769de58 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Tue, 3 Dec 2024 12:04:31 +0900 Subject: [PATCH 082/364] Add retry logic to checks Add retry mechanism for checks that fail. Use exponential backoff for retrying failed checks. --- pkg/collector/check/batch/daily/cpu/cpu.go | 104 +++++++++--- pkg/collector/check/batch/daily/disk/io/io.go | 115 ++++++++++--- .../check/batch/daily/disk/usage/usage.go | 113 ++++++++++--- .../check/batch/daily/memory/memory.go | 104 +++++++++--- pkg/collector/check/batch/daily/net/net.go | 123 ++++++++++---- pkg/collector/check/batch/hourly/cpu/cpu.go | 138 ++++++++++++--- .../check/batch/hourly/disk/io/io.go | 149 +++++++++++++---- .../check/batch/hourly/disk/usage/usage.go | 147 ++++++++++++---- .../check/batch/hourly/memory/memory.go | 138 ++++++++++++--- pkg/collector/check/batch/hourly/net/net.go | 157 ++++++++++++++---- pkg/collector/check/realtime/cpu/cpu.go | 105 +++++++++--- pkg/collector/check/realtime/disk/io/io.go | 129 +++++++++++--- .../check/realtime/disk/usage/usage.go | 127 ++++++++++---- pkg/collector/check/realtime/memory/memory.go | 103 +++++++++--- pkg/collector/check/realtime/net/net.go | 142 +++++++++++++--- pkg/utils/utils.go | 10 ++ 16 files changed, 1532 insertions(+), 372 deletions(-) diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index c84f532..fc57aae 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -2,59 +2,123 @@ package cpu import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpuperhour" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return 
&Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: 3, + MaxDeleteRetries: 2, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryCPUPerHour(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } - queryset, err := c.getCPUPerHour(ctx) + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetCPUPerHour(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } metric := base.MetricData{ Type: base.CPU_PER_DAY, - Data: []base.CheckResult{}, + Data: []base.CheckResult{data}, } - if checkError.GetQueryError == nil { - data := base.CheckResult{ - Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + + err = c.retryDeleteCPUPerHour(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - metric.Data = append(metric.Data, data) - if err := c.deleteCPUPerHour(ctx); err != nil { - checkError.DeleteQueryError = err + queryset, err := c.getCPUPerHour(ctx) + if err == nil { + return queryset, nil } - } - if ctx.Err() != nil { - return + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get cpu usage per hour queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } + } } - 
buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return nil, fmt.Errorf("failed to get cpu usage per hour queryset") +} + +func (c *Check) retryDeleteCPUPerHour(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteCPUPerHour(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete cpu usage per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete cpu usage per hour") } func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index 7a25b61..fbf7224 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -2,64 +2,129 @@ package io import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskioperhour" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: 3, + MaxDeleteRetries: 2, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryDiskIOPerHour(ctx) 
+ if err != nil { + return + } - queryset, err := c.getDiskIOPerHour(ctx) + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetDiskIOPerHour(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + PeakWriteBytes: uint64(row.PeakWriteBytes), + PeakReadBytes: uint64(row.PeakReadBytes), + AvgWriteBytes: uint64(row.AvgWriteBytes), + AvgReadBytes: uint64(row.AvgReadBytes), + }) + } metric := base.MetricData{ Type: base.DISK_IO_PER_DAY, - Data: []base.CheckResult{}, + Data: data, } - if checkError.GetQueryError == nil { - for _, row := range queryset { - data := base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - PeakWriteBytes: uint64(row.PeakWriteBytes), - PeakReadBytes: uint64(row.PeakReadBytes), - AvgWriteBytes: uint64(row.AvgWriteBytes), - AvgReadBytes: uint64(row.AvgReadBytes), - } - metric.Data = append(metric.Data, data) - if err := c.deleteDiskIOPerHour(ctx); err != nil { - checkError.DeleteQueryError = err + err = c.retryDeleteDiskIOPerHour(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + queryset, err := c.getDiskIOPerHour(ctx) + if err == nil { + return queryset, nil + } + + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get disk io per hour queryset: %d attempt", attempt) + continue + 
case <-ctx.Done(): + return nil, ctx.Err() } } } - if ctx.Err() != nil { - return - } + return nil, fmt.Errorf("failed to get disk io per hour queryset") +} - buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric +func (c *Check) retryDeleteDiskIOPerHour(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteDiskIOPerHour(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete disk io per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete disk io per hour") } func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, error) { diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index e91e9a3..83d2836 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -2,63 +2,128 @@ package usage import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusageperhour" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: 3, + MaxDeleteRetries: 2, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } 
func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryDiskUsagePerHour(ctx) + if err != nil { + return + } - queryset, err := c.getDiskUsagePerHour(ctx) + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetDiskUsagePerHour(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + MountPoint: row.MountPoint, + PeakUsage: row.Max, + AvgUsage: row.AVG, + }) + } metric := base.MetricData{ Type: base.DISK_USAGE_PER_DAY, - Data: []base.CheckResult{}, + Data: data, } - if checkError.GetQueryError == nil { - for _, row := range queryset { - data := base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - MountPoint: row.MountPoint, - PeakUsage: row.Max, - AvgUsage: row.AVG, - } - metric.Data = append(metric.Data, data) - if err := c.deleteDiskUsagePerHour(ctx); err != nil { - checkError.DeleteQueryError = err + err = c.retryDeleteDiskUsagePerHour(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + queryset, err := c.getDiskUsagePerHour(ctx) + if err == nil { + return queryset, nil + } + + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get disk usage per hour queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() } } } - if 
ctx.Err() != nil { - return - } + return nil, fmt.Errorf("failed to get disk usage per hour queryset") +} - buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric +func (c *Check) retryDeleteDiskUsagePerHour(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteDiskUsagePerHour(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete disk usage per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete disk usage per hour") } func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQuerySet, error) { diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index 7b50ca3..c6cf035 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -2,59 +2,123 @@ package memory import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memoryperhour" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: 3, + MaxDeleteRetries: 2, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { 
- var checkError base.CheckError + metric, err := c.queryMemoryPerHour(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } - queryset, err := c.getMemoryPerHour(ctx) + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetMemoryPerHour(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } metric := base.MetricData{ Type: base.MEM_PER_DAY, - Data: []base.CheckResult{}, + Data: []base.CheckResult{data}, } - if checkError.GetQueryError == nil { - data := base.CheckResult{ - Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + + err = c.retryDeleteMemoryPerHour(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - metric.Data = append(metric.Data, data) - if err := c.deleteMemoryPerHour(ctx); err != nil { - checkError.DeleteQueryError = err + queryset, err := c.getMemoryPerHour(ctx) + if err == nil { + return queryset, nil } - } - if ctx.Err() != nil { - return + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get memory usage per hour queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } + } } - buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return nil, fmt.Errorf("failed to get 
memory usage per hour queryset") +} + +func (c *Check) retryDeleteMemoryPerHour(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteMemoryPerHour(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete memory usage per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete memory usage per hour") } func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, error) { diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 148ed39..64f78e6 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -2,68 +2,133 @@ package net import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/trafficperhour" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: 3, + MaxDeleteRetries: 2, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryTrafficPerHour(ctx) + if err != nil { + return + } - queryset, err := c.getTrafficPerHour(ctx) + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) 
queryTrafficPerHour(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetTrafficPerHour(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Name: row.Name, + PeakInputPkts: uint64(row.PeakInputPkts), + PeakInputBytes: uint64(row.PeakInputBytes), + PeakOutputPkts: uint64(row.PeakOutputPkts), + PeakOutputBytes: uint64(row.PeakOutputBytes), + AvgInputPkts: uint64(row.AvgInputPkts), + AvgInputBytes: uint64(row.AvgInputBytes), + AvgOutputPkts: uint64(row.AvgOutputPkts), + AvgOutputBytes: uint64(row.AvgOutputBytes), + }) + } metric := base.MetricData{ Type: base.NET_PER_DAY, - Data: []base.CheckResult{}, + Data: data, } - if checkError.GetQueryError == nil { - for _, row := range queryset { - data := base.CheckResult{ - Timestamp: time.Now(), - Name: row.Name, - PeakInputPkts: uint64(row.PeakInputPkts), - PeakInputBytes: uint64(row.PeakInputBytes), - PeakOutputPkts: uint64(row.PeakOutputPkts), - PeakOutputBytes: uint64(row.PeakOutputBytes), - AvgInputPkts: uint64(row.AvgInputPkts), - AvgInputBytes: uint64(row.AvgInputBytes), - AvgOutputPkts: uint64(row.AvgOutputPkts), - AvgOutputBytes: uint64(row.AvgOutputBytes), - } - metric.Data = append(metric.Data, data) - if err := c.deleteTrafficPerHour(ctx); err != nil { - checkError.DeleteQueryError = err + err = c.retryDeleteTrafficPerHour(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + queryset, err := c.getTrafficPerHour(ctx) + if err == nil { + return queryset, nil + } + + if attempt < c.retryCount.MaxGetRetries { + backoff := 
utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get traffic per hour queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() } } } - if ctx.Err() != nil { - return - } + return nil, fmt.Errorf("failed to get traffic per hour queryset") +} - buffer := c.GetBuffer() - if checkError.GetQueryError != nil || checkError.DeleteQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric +func (c *Check) retryDeleteTrafficPerHour(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteTrafficPerHour(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete traffic per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete traffic per hour") } func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, error) { diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index fa25602..e75ddf0 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -2,66 +2,156 @@ package cpu import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + 
retryCount: base.RetryCount{ + MaxGetRetries: base.GET_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxDeleteRetries: base.DELETE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryCPUUsage(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} - queryset, err := c.getCPU(ctx) +func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetCPU(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } metric := base.MetricData{ Type: base.CPU_PER_HOUR, - Data: []base.CheckResult{}, + Data: []base.CheckResult{data}, } - if checkError.GetQueryError == nil { - data := base.CheckResult{ - Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + + err = c.retrySaveCPUPerHour(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + err = c.retryDeleteCPU(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetCPU(ctx context.Context) ([]base.CPUQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - metric.Data = append(metric.Data, data) - if err := c.saveCPUPerHour(ctx, data); err != nil { - checkError.SaveQueryError = err + queryset, err := c.getCPU(ctx) + if err == nil { + return queryset, nil } - if err := c.deleteCPU(ctx); err != nil { - checkError.DeleteQueryError = err + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + 
log.Debug().Msgf("Retry to get cpu queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to get cpu queryset") +} + +func (c *Check) retrySaveCPUPerHour(ctx context.Context, data base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveCPUPerHour(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save cpu usage per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } - buffer := c.GetBuffer() - isFailed := checkError.GetQueryError != nil || - checkError.SaveQueryError != nil || - checkError.DeleteQueryError != nil - if isFailed { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return fmt.Errorf("failed to save cpu usage per hour") +} + +func (c *Check) retryDeleteCPU(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteCPU(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete cpu usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete cpu usage") } func (c *Check) getCPU(ctx context.Context) ([]base.CPUQuerySet, error) { diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index fa419f2..8840ee7 100644 --- 
a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -2,71 +2,162 @@ package io import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: base.GET_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxDeleteRetries: base.DELETE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryDiskIO(ctx) + if err != nil { + return + } - queryset, err := c.getDiskIO(ctx) + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryDiskIO(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetDiskIO(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + PeakWriteBytes: uint64(row.PeakWriteBytes), + PeakReadBytes: uint64(row.PeakReadBytes), + AvgWriteBytes: uint64(row.AvgWriteBytes), + AvgReadBytes: uint64(row.AvgReadBytes), + }) + } metric := base.MetricData{ Type: base.DISK_IO_PER_HOUR, - Data: []base.CheckResult{}, + Data: data, } - if checkError.GetQueryError == nil { - for _, row := range queryset { - data := base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - PeakWriteBytes: uint64(row.PeakWriteBytes), - PeakReadBytes: uint64(row.PeakReadBytes), - 
AvgWriteBytes: uint64(row.AvgWriteBytes), - AvgReadBytes: uint64(row.AvgReadBytes), - } - metric.Data = append(metric.Data, data) + + err = c.retrySaveDiskIOPerHour(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + err = c.retryDeleteDiskIO(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - if err := c.saveDiskIOPerHour(ctx, metric.Data); err != nil { - checkError.SaveQueryError = err + queryset, err := c.getDiskIO(ctx) + if err == nil { + return queryset, nil } - if err := c.deleteDiskIO(ctx); err != nil { - checkError.DeleteQueryError = err + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get disk io queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to get disk io queryset") +} + +func (c *Check) retrySaveDiskIOPerHour(ctx context.Context, data []base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveDiskIOPerHour(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save disk io per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } - buffer := c.GetBuffer() - isFailed := checkError.GetQueryError != nil || - checkError.SaveQueryError != nil || - checkError.DeleteQueryError != 
nil - if isFailed { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return fmt.Errorf("failed to save disk io per hour") +} + +func (c *Check) retryDeleteDiskIO(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteDiskIO(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete disk io: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete disk io") } func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index f865e95..d56b08a 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -2,70 +2,161 @@ package usage import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: base.GET_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxDeleteRetries: base.DELETE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryDiskUsage(ctx) + if err != nil { + 
return + } - queryset, err := c.getDiskUsage(ctx) + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetDiskUsage(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: row.Device, + MountPoint: row.MountPoint, + PeakUsage: row.Max, + AvgUsage: row.AVG, + }) + } metric := base.MetricData{ Type: base.DISK_USAGE_PER_HOUR, - Data: []base.CheckResult{}, + Data: data, } - if checkError.GetQueryError == nil { - for _, row := range queryset { - data := base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - MountPoint: row.MountPoint, - PeakUsage: row.Max, - AvgUsage: row.AVG, - } - metric.Data = append(metric.Data, data) + + err = c.retrySaveDiskUsagePerHour(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + err = c.retryDeleteDiskUsage(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - if err := c.saveDiskUsagePerHour(ctx, metric.Data); err != nil { - checkError.SaveQueryError = err + queryset, err := c.getDiskUsage(ctx) + if err == nil { + return queryset, nil } - if err := c.deleteDiskUsage(ctx); err != nil { - checkError.DeleteQueryError = err + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get disk usage queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if 
ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to get disk usage queryset") +} + +func (c *Check) retrySaveDiskUsagePerHour(ctx context.Context, data []base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveDiskUsagePerHour(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save disk usage per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } - buffer := c.GetBuffer() - isFailed := checkError.GetQueryError != nil || - checkError.SaveQueryError != nil || - checkError.DeleteQueryError != nil - if isFailed { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return fmt.Errorf("failed to save disk usage per hour") +} + +func (c *Check) retryDeleteDiskUsage(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteDiskUsage(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete disk usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete disk usage") } func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 032eec0..3454f8f 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ 
b/pkg/collector/check/batch/hourly/memory/memory.go @@ -2,66 +2,156 @@ package memory import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: base.GET_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxDeleteRetries: base.DELETE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryMemoryUsage(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} - queryset, err := c.getMemory(ctx) +func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetMemory(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + data := base.CheckResult{ + Timestamp: time.Now(), + PeakUsage: queryset[0].Max, + AvgUsage: queryset[0].AVG, + } metric := base.MetricData{ Type: base.MEM_PER_HOUR, - Data: []base.CheckResult{}, + Data: []base.CheckResult{data}, } - if checkError.GetQueryError == nil { - data := base.CheckResult{ - Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + + err = c.retrySaveMemoryPerHour(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + err = c.retryDeleteMemory(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { + start := time.Now() + for attempt 
:= 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - metric.Data = append(metric.Data, data) - if err := c.saveMemoryPerHour(ctx, data); err != nil { - checkError.SaveQueryError = err + queryset, err := c.getMemory(ctx) + if err == nil { + return queryset, nil } - if err := c.deleteMemory(ctx); err != nil { - checkError.DeleteQueryError = err + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get memory queryset: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to get memory queryset") +} + +func (c *Check) retrySaveMemoryPerHour(ctx context.Context, data base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveMemoryPerHour(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save memory usage per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } - buffer := c.GetBuffer() - isFailed := checkError.GetQueryError != nil || - checkError.SaveQueryError != nil || - checkError.DeleteQueryError != nil - if isFailed { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return fmt.Errorf("failed to save memory usage per hour") +} + +func (c *Check) retryDeleteMemory(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteMemory(ctx) + if err == nil { 
+ return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete memory usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete memory usage") } func (c *Check) getMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 6d9e56a..ca00e81 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -2,75 +2,166 @@ package net import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxGetRetries: base.GET_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxDeleteRetries: base.DELETE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.queryTraffic(ctx) + if err != nil { + return + } - queryset, err := c.getTraffic(ctx) + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) queryTraffic(ctx context.Context) (base.MetricData, error) { + queryset, err := c.retryGetTraffic(ctx) if err != nil { - checkError.GetQueryError = err + return base.MetricData{}, err } + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + 
Timestamp: time.Now(), + Name: row.Name, + PeakInputPkts: uint64(row.PeakInputPkts), + PeakInputBytes: uint64(row.PeakInputBytes), + PeakOutputPkts: uint64(row.PeakOutputPkts), + PeakOutputBytes: uint64(row.PeakOutputBytes), + AvgInputPkts: uint64(row.AvgInputPkts), + AvgInputBytes: uint64(row.AvgInputBytes), + AvgOutputPkts: uint64(row.AvgOutputPkts), + AvgOutputBytes: uint64(row.AvgOutputBytes), + }) + } metric := base.MetricData{ Type: base.NET_PER_HOUR, - Data: []base.CheckResult{}, + Data: data, } - if checkError.GetQueryError == nil { - for _, row := range queryset { - data := base.CheckResult{ - Timestamp: time.Now(), - Name: row.Name, - PeakInputPkts: uint64(row.PeakInputPkts), - PeakInputBytes: uint64(row.PeakInputBytes), - PeakOutputPkts: uint64(row.PeakOutputPkts), - PeakOutputBytes: uint64(row.PeakOutputBytes), - AvgInputPkts: uint64(row.AvgInputPkts), - AvgInputBytes: uint64(row.AvgInputBytes), - AvgOutputPkts: uint64(row.AvgOutputPkts), - AvgOutputBytes: uint64(row.AvgOutputBytes), - } - metric.Data = append(metric.Data, data) + + err = c.retrySaveTrafficPerHour(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + err = c.retryDeleteTraffic(ctx) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryGetTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - if err := c.saveTrafficPerHour(ctx, metric.Data); err != nil { - checkError.SaveQueryError = err + queryset, err := c.getTraffic(ctx) + if err == nil { + return queryset, nil } - if err := c.deleteTraffic(ctx); err != nil { - checkError.DeleteQueryError = err + if attempt < c.retryCount.MaxGetRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to get traffic queryset: %d attempt", 
attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to get traffic queryset") +} + +func (c *Check) retrySaveTrafficPerHour(ctx context.Context, data []base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveTrafficPerHour(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save traffic per hour: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } - buffer := c.GetBuffer() - isFailed := checkError.GetQueryError != nil || - checkError.SaveQueryError != nil || - checkError.DeleteQueryError != nil - if isFailed { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return fmt.Errorf("failed to save traffic per hour") +} + +func (c *Check) retryDeleteTraffic(ctx context.Context) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.deleteTraffic(ctx) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxDeleteRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to delete traffic: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to delete traffic") } func (c *Check) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) { diff --git a/pkg/collector/check/realtime/cpu/cpu.go b/pkg/collector/check/realtime/cpu/cpu.go index 681e844..b0f0c72 100644 --- a/pkg/collector/check/realtime/cpu/cpu.go +++ 
b/pkg/collector/check/realtime/cpu/cpu.go @@ -6,53 +6,118 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/cpu" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxCollectRetries: base.COLLECT_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.collectAndSaveCPUUsage(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } - usage, err := c.collectCPUUsage() + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) collectAndSaveCPUUsage(ctx context.Context) (base.MetricData, error) { + usage, err := c.retryCollectCPUUsage(ctx) if err != nil { - checkError.CollectError = err + return base.MetricData{}, err } + data := base.CheckResult{ + Timestamp: time.Now(), + Usage: usage, + } metric := base.MetricData{ Type: base.CPU, - Data: []base.CheckResult{}, + Data: []base.CheckResult{data}, } - if checkError.CollectError == nil { - data := base.CheckResult{ - Timestamp: time.Now(), - Usage: usage, + + err = c.retrySaveCPUUsage(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryCollectCPUUsage(ctx context.Context) (float64, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - metric.Data = append(metric.Data, data) - if err := c.saveCPUUsage(ctx, data); err != nil { - checkError.SaveQueryError = err + usage, err := c.collectCPUUsage() + if err == nil { + return usage, nil } - } - 
if ctx.Err() != nil { - return + if attempt < c.retryCount.MaxCollectRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to collect cpu usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return 0, ctx.Err() + } + } } - buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return 0, fmt.Errorf("failed to collect cpu usage") +} + +func (c *Check) retrySaveCPUUsage(ctx context.Context, data base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + log.Debug().Msg("max retry time exceeded to save cpu usage") + break + } + + err := c.saveCPUUsage(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save cpu usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to save cpu usage") } func (c *Check) collectCPUUsage() (float64, error) { @@ -62,7 +127,7 @@ func (c *Check) collectCPUUsage() (float64, error) { } if len(usage) == 0 { - return 0, fmt.Errorf("no CPU usage data returned") + return 0, fmt.Errorf("no cpu usage data returned") } return usage[0], nil diff --git a/pkg/collector/check/realtime/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go index e6ab611..06782c0 100644 --- a/pkg/collector/check/realtime/disk/io/io.go +++ b/pkg/collector/check/realtime/disk/io/io.go @@ -2,61 +2,146 @@ package diskio import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" 
"github.com/shirou/gopsutil/v4/disk" ) type Check struct { base.BaseCheck + retryCount base.RetryCount + lastMetric map[string]disk.IOCountersStat } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxCollectRetries: base.COLLECT_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, + lastMetric: make(map[string]disk.IOCountersStat), } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.collectAndSaveDiskIO(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} - ioCounters, err := c.collectDiskIO() +func (c *Check) collectAndSaveDiskIO(ctx context.Context) (base.MetricData, error) { + ioCounters, err := c.retryCollectDiskIO(ctx) if err != nil { - checkError.CollectError = err + return base.MetricData{}, err } metric := base.MetricData{ Type: base.DISK_IO, - Data: []base.CheckResult{}, + Data: c.parseDiskIO(ioCounters), } - if checkError.CollectError == nil { - for name, ioCounter := range ioCounters { - data := base.CheckResult{ - Timestamp: time.Now(), - Device: name, - ReadBytes: ioCounter.ReadBytes, - WriteBytes: ioCounter.WriteBytes, - } - metric.Data = append(metric.Data, data) + + err = c.retrySaveDiskIO(ctx, metric.Data) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryCollectDiskIO(ctx context.Context) (map[string]disk.IOCountersStat, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + ioCounters, err := c.collectDiskIO() + + if err == nil { + return ioCounters, nil } - if err := c.saveDiskIO(ctx, metric.Data); err != nil { - checkError.SaveQueryError = err + if attempt < 
c.retryCount.MaxCollectRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to collect disk io: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to collect disk io") +} + +func (c *Check) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.CheckResult { + var data []base.CheckResult + for name, ioCounter := range ioCounters { + var readBytes, writeBytes uint64 + + if lastCounter, exist := c.lastMetric[name]; exist { + readBytes = ioCounter.ReadBytes - lastCounter.ReadBytes + writeBytes = ioCounter.WriteBytes - lastCounter.WriteBytes + } else { + readBytes = ioCounter.ReadBytes + writeBytes = ioCounter.WriteBytes + } + + c.lastMetric[name] = ioCounter + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: name, + ReadBytes: readBytes, + WriteBytes: writeBytes, + }) } - buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return data +} + +func (c *Check) retrySaveDiskIO(ctx context.Context, data []base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveDiskIO(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save disk io: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to save disk io") } func (c *Check) collectDiskIO() (map[string]disk.IOCountersStat, error) { diff --git a/pkg/collector/check/realtime/disk/usage/usage.go 
b/pkg/collector/check/realtime/disk/usage/usage.go index 8bc6847..ef67175 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -2,10 +2,13 @@ package diskusage import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/disk" ) @@ -20,58 +23,126 @@ var excludedFileSystems = map[string]bool{ type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxCollectRetries: base.COLLECT_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.collectAndSaveDiskUsage(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} - partitions, err := c.collectDiskPartitions() +func (c *Check) collectAndSaveDiskUsage(ctx context.Context) (base.MetricData, error) { + partitions, err := c.retryCollectDiskPartitions(ctx) if err != nil { - checkError.CollectError = err + return base.MetricData{}, err } metric := base.MetricData{ Type: base.DISK_USAGE, - Data: []base.CheckResult{}, + Data: c.parseDiskUsage(partitions), } - if checkError.CollectError == nil { - for _, partition := range partitions { - usage, usageErr := c.collectDiskUsage(partition.Mountpoint) - if usageErr == nil { - data := base.CheckResult{ - Timestamp: time.Now(), - Device: partition.Device, - MountPoint: partition.Mountpoint, - Usage: usage.UsedPercent, - Total: usage.Total, - Free: usage.Free, - Used: usage.Used, - } - metric.Data = append(metric.Data, data) 
- } + + err = c.retrySaveDiskUsage(ctx, metric.Data) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryCollectDiskPartitions(ctx context.Context) ([]disk.PartitionStat, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + partitions, err := c.collectDiskPartitions() + if err == nil && len(partitions) > 0 { + return partitions, nil } - if err := c.saveDiskUsage(ctx, metric.Data); err != nil { - checkError.SaveQueryError = err + if attempt < c.retryCount.MaxCollectRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to collect disk partitions: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, fmt.Errorf("failed to collect disk partitions") +} + +func (c *Check) parseDiskUsage(partitions []disk.PartitionStat) []base.CheckResult { + var data []base.CheckResult + for _, partition := range partitions { + usage, err := c.collectDiskUsage(partition.Mountpoint) + if err == nil { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: partition.Device, + MountPoint: partition.Mountpoint, + Usage: usage.UsedPercent, + Total: usage.Total, + Free: usage.Free, + Used: usage.Used, + }) + } } - buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return data +} + +func (c *Check) retrySaveDiskUsage(ctx context.Context, data []base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveDiskUsage(ctx, data) + if err == nil { + return nil + } + + if attempt < 
c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save disk usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to save disk usage") } func (c *Check) collectDiskPartitions() ([]disk.PartitionStat, error) { diff --git a/pkg/collector/check/realtime/memory/memory.go b/pkg/collector/check/realtime/memory/memory.go index 6ed219c..3bd1254 100644 --- a/pkg/collector/check/realtime/memory/memory.go +++ b/pkg/collector/check/realtime/memory/memory.go @@ -2,56 +2,121 @@ package memory import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/mem" ) type Check struct { base.BaseCheck + retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxCollectRetries: base.COLLECT_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.collectAndSaveMemoryUsage(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } - usage, err := c.collectMemoryUsage() + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} + +func (c *Check) collectAndSaveMemoryUsage(ctx context.Context) (base.MetricData, error) { + usage, err := c.retryCollectMemoryUsage(ctx) if err != nil { - checkError.CollectError = err + return base.MetricData{}, err } + data := base.CheckResult{ + Timestamp: time.Now(), + Usage: usage, + } metric := base.MetricData{ Type: base.MEM, - Data: []base.CheckResult{}, + Data: []base.CheckResult{data}, } - if checkError.CollectError == nil { - data 
:= base.CheckResult{ - Timestamp: time.Now(), - Usage: usage, + + err = c.retrySaveMemoryUsage(ctx, data) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryCollectMemoryUsage(ctx context.Context) (float64, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - metric.Data = append(metric.Data, data) - if err := c.saveMemoryUsage(ctx, data); err != nil { - checkError.SaveQueryError = err + usage, err := c.collectMemoryUsage() + if err == nil { + return usage, nil } - } - if ctx.Err() != nil { - return + if attempt < c.retryCount.MaxCollectRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to collect memory usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return 0, ctx.Err() + } + } } - buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric + return 0, fmt.Errorf("failed to collect memory usage") +} + +func (c *Check) retrySaveMemoryUsage(ctx context.Context, data base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveMemoryUsage(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save memory usage: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to save memory usage") } func (c *Check) collectMemoryUsage() (float64, error) { diff --git a/pkg/collector/check/realtime/net/net.go 
b/pkg/collector/check/realtime/net/net.go index 828678f..0dc9040 100644 --- a/pkg/collector/check/realtime/net/net.go +++ b/pkg/collector/check/realtime/net/net.go @@ -2,66 +2,154 @@ package net import ( "context" + "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/net" ) type Check struct { base.BaseCheck + retryCount base.RetryCount + lastMetric map[string]net.IOCountersStat } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), + retryCount: base.RetryCount{ + MaxCollectRetries: base.COLLECT_MAX_RETRIES, + MaxSaveRetries: base.SAVE_MAX_RETRIES, + MaxRetryTime: base.MAX_RETRY_TIMES, + Delay: base.DEFAULT_DELAY, + }, + lastMetric: make(map[string]net.IOCountersStat), } } func (c *Check) Execute(ctx context.Context) { - var checkError base.CheckError + metric, err := c.collectAndSaveTraffic(ctx) + if err != nil { + return + } + + if ctx.Err() != nil { + return + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric +} - ioCounters, err := c.collectIOCounters() - interfaces, _ := c.collectInterfaces() +func (c *Check) collectAndSaveTraffic(ctx context.Context) (base.MetricData, error) { + ioCounters, interfaces, err := c.retryCollectTraffic(ctx) if err != nil { - checkError.CollectError = err + return base.MetricData{}, err } metric := base.MetricData{ Type: base.NET, - Data: []base.CheckResult{}, + Data: c.parseTraffic(ioCounters, interfaces), } - if checkError.CollectError == nil { - for _, ioCounter := range ioCounters { - if _, ok := interfaces[ioCounter.Name]; ok { - data := base.CheckResult{ - Timestamp: time.Now(), - Name: ioCounter.Name, - InputPkts: ioCounter.PacketsRecv, - InputBytes: ioCounter.BytesRecv, - OutputPkts: ioCounter.PacketsSent, - OutputBytes: ioCounter.BytesSent, - } - metric.Data = 
append(metric.Data, data) - } + + err = c.retrySaveTraffic(ctx, metric.Data) + if err != nil { + return base.MetricData{}, err + } + + return metric, nil +} + +func (c *Check) retryCollectTraffic(ctx context.Context) ([]net.IOCountersStat, map[string]net.InterfaceStat, error) { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break } - if err := c.saveTraffic(ctx, metric.Data); err != nil { - checkError.SaveQueryError = err + ioCounters, ioErr := c.collectIOCounters() + interfaces, ifaceErr := c.collectInterfaces() + + if ioErr == nil && ifaceErr == nil { + return ioCounters, interfaces, nil + } + + if attempt < c.retryCount.MaxCollectRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to collect traffic: %d attempt", attempt) + continue + case <-ctx.Done(): + return nil, nil, ctx.Err() + } } } - if ctx.Err() != nil { - return + return nil, nil, fmt.Errorf("failed to collect traffic") +} + +func (c *Check) parseTraffic(ioCOunters []net.IOCountersStat, interfaces map[string]net.InterfaceStat) []base.CheckResult { + var data []base.CheckResult + for _, ioCounter := range ioCOunters { + if _, ok := interfaces[ioCounter.Name]; ok { + var inputPkts, inputBytes, outputPkts, outputBytes uint64 + + if lastCounter, exists := c.lastMetric[ioCounter.Name]; exists { + inputPkts = ioCounter.PacketsRecv - lastCounter.PacketsRecv + inputBytes = ioCounter.BytesRecv - lastCounter.BytesRecv + outputPkts = ioCounter.PacketsSent - lastCounter.PacketsSent + outputBytes = ioCounter.BytesSent - lastCounter.BytesSent + } else { + inputPkts = ioCounter.PacketsRecv + inputBytes = ioCounter.BytesRecv + outputPkts = ioCounter.PacketsSent + outputBytes = ioCounter.BytesSent + } + + c.lastMetric[ioCounter.Name] = ioCounter + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Name: 
ioCounter.Name, + InputPkts: inputPkts, + InputBytes: inputBytes, + OutputPkts: outputPkts, + OutputBytes: outputBytes, + }) + } } + return data +} - buffer := c.GetBuffer() - if checkError.CollectError != nil || checkError.SaveQueryError != nil { - buffer.FailureQueue <- metric - } else { - buffer.SuccessQueue <- metric +func (c *Check) retrySaveTraffic(ctx context.Context, data []base.CheckResult) error { + start := time.Now() + for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { + if time.Since(start) >= c.retryCount.MaxRetryTime { + break + } + + err := c.saveTraffic(ctx, data) + if err == nil { + return nil + } + + if attempt < c.retryCount.MaxSaveRetries { + backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) + select { + case <-time.After(backoff): + log.Debug().Msgf("Retry to save traffic: %d attempt", attempt) + continue + case <-ctx.Done(): + return ctx.Err() + } + } } + + return fmt.Errorf("failed to save traffic") } func (c *Check) collectInterfaces() (map[string]net.InterfaceStat, error) { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index b7b2621..2c7e672 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -3,11 +3,14 @@ package utils import ( "bytes" "fmt" + "math" + "math/rand" "net/url" "os" "runtime" "strconv" "strings" + "time" "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/host" @@ -106,3 +109,10 @@ func ConvertGroupIds(groupIds []string) []uint32 { } return gids } + +func CalculateBackOff(delay time.Duration, attempt int) time.Duration { + backoff := delay * time.Duration(math.Pow(2, float64(attempt))) + jitter := time.Duration(rand.Float64() * float64(backoff) * 0.2) + + return backoff + jitter +} From 8065f0d12ed4e0907141c5e3ee76eb4505486427 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Fri, 6 Dec 2024 15:26:19 +0900 Subject: [PATCH 083/364] Add remote log threshold for commit.go --- pkg/logger/logger.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/logger/logger.go 
b/pkg/logger/logger.go index 5d803ab..4cd9d09 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -91,6 +91,7 @@ var remoteLogThresholds = map[string]int{ "client.go": 30, "reporter.go": 30, "command.go": 30, + "commit.go": 30, "pty.go": 30, } From 7ae737ae28c330217a4a46217200677d66f6870c Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 6 Dec 2024 16:43:08 +0900 Subject: [PATCH 084/364] Update due to changes in alpacon-server Due to changes in Traffic-related models in alpacon-server, update the system to receive input/output data in bps and pps format, rather than raw bytes and packets. --- pkg/collector/check/base/types.go | 74 ++++++++++----------- pkg/collector/check/batch/daily/net/net.go | 39 ++++++----- pkg/collector/check/batch/hourly/net/net.go | 55 ++++++++------- pkg/collector/check/realtime/net/net.go | 36 +++++----- pkg/db/schema/traffic.go | 8 +-- pkg/db/schema/traffic_per_hour.go | 16 ++--- 6 files changed, 112 insertions(+), 116 deletions(-) diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 3507dce..c4813eb 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -77,46 +77,46 @@ type DiskUsageQuerySet struct { } type TrafficQuerySet struct { - Name string `json:"name"` - PeakInputPkts float64 `json:"peak_input_pkts"` - PeakInputBytes float64 `json:"peak_input_bytes"` - PeakOutputPkts float64 `json:"peak_output_pkts"` - PeakOutputBytes float64 `json:"peak_output_bytes"` - AvgInputPkts float64 `json:"avg_input_pkts"` - AvgInputBytes float64 `json:"avg_input_bytes"` - AvgOutputPkts float64 `json:"avg_output_pkts"` - AvgOutputBytes float64 `json:"avg_output_bytes"` + Name string `json:"name"` + PeakInputPps float64 `json:"peak_input_pps"` + PeakInputBps float64 `json:"peak_input_bps"` + PeakOutputPps float64 `json:"peak_output_pps"` + PeakOutputBps float64 `json:"peak_output_bps"` + AvgInputPps float64 `json:"avg_input_pps"` + AvgInputBps float64 `json:"avg_input_bps"` 
+ AvgOutputPps float64 `json:"avg_output_pps"` + AvgOutputBps float64 `json:"avg_output_bps"` } type CheckResult struct { - Timestamp time.Time `json:"timestamp"` - Usage float64 `json:"usage,omitempty"` - Name string `json:"name,omitempty"` - Device string `json:"device,omitempty"` - MountPoint string `json:"mount_point,omitempty"` - Total uint64 `json:"total,omitempty"` - Free uint64 `json:"free,omitempty"` - Used uint64 `json:"used,omitempty"` - WriteBytes uint64 `json:"write_bytes,omitempty"` - ReadBytes uint64 `json:"read_bytes,omitempty"` - InputPkts uint64 `json:"input_pkts,omitempty"` - InputBytes uint64 `json:"input_bytes,omitempty"` - OutputPkts uint64 `json:"output_pkts,omitempty"` - OutputBytes uint64 `json:"output_bytes,omitempty"` - PeakUsage float64 `json:"peak_usage,omitempty"` - AvgUsage float64 `json:"avg_usage,omitempty"` - PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` - PeakReadBytes uint64 `json:"peak_read_bytes,omitempty"` - AvgWriteBytes uint64 `json:"avg_write_bytes,omitempty"` - AvgReadBytes uint64 `json:"avg_read_bytes,omitempty"` - PeakInputPkts uint64 `json:"peak_input_pkts,omitempty"` - PeakInputBytes uint64 `json:"peak_input_bytes,omitempty"` - PeakOutputPkts uint64 `json:"peak_output_pkts,omitempty"` - PeakOutputBytes uint64 `json:"peak_output_bytes,omitempty"` - AvgInputPkts uint64 `json:"avg_input_pkts,omitempty"` - AvgInputBytes uint64 `json:"avg_input_bytes,omitempty"` - AvgOutputPkts uint64 `json:"avg_output_pkts,omitempty"` - AvgOutputBytes uint64 `json:"avg_output_bytes,omitempty"` + Timestamp time.Time `json:"timestamp"` + Usage float64 `json:"usage,omitempty"` + Name string `json:"name,omitempty"` + Device string `json:"device,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + Total uint64 `json:"total,omitempty"` + Free uint64 `json:"free,omitempty"` + Used uint64 `json:"used,omitempty"` + WriteBytes uint64 `json:"write_bytes,omitempty"` + ReadBytes uint64 `json:"read_bytes,omitempty"` + InputPps 
float64 `json:"input_pps,omitempty"` + InputBps float64 `json:"input_bps,omitempty"` + OutputPps float64 `json:"output_pps,omitempty"` + OutputBps float64 `json:"output_bps,omitempty"` + PeakUsage float64 `json:"peak_usage,omitempty"` + AvgUsage float64 `json:"avg_usage,omitempty"` + PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` + PeakReadBytes uint64 `json:"peak_read_bytes,omitempty"` + AvgWriteBytes uint64 `json:"avg_write_bytes,omitempty"` + AvgReadBytes uint64 `json:"avg_read_bytes,omitempty"` + PeakInputPps float64 `json:"peak_input_pps,omitempty"` + PeakInputBps float64 `json:"peak_input_bps,omitempty"` + PeakOutputPps float64 `json:"peak_output_pps,omitempty"` + PeakOutputBps float64 `json:"peak_output_bps,omitempty"` + AvgInputPps float64 `json:"avg_input_pps,omitempty"` + AvgInputBps float64 `json:"avg_input_bps,omitempty"` + AvgOutputPps float64 `json:"avg_output_pps,omitempty"` + AvgOutputBps float64 `json:"avg_output_bps,omitempty"` } type MetricData struct { diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 64f78e6..9722e9a 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -52,16 +52,16 @@ func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error var data []base.CheckResult for _, row := range queryset { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Name: row.Name, - PeakInputPkts: uint64(row.PeakInputPkts), - PeakInputBytes: uint64(row.PeakInputBytes), - PeakOutputPkts: uint64(row.PeakOutputPkts), - PeakOutputBytes: uint64(row.PeakOutputBytes), - AvgInputPkts: uint64(row.AvgInputPkts), - AvgInputBytes: uint64(row.AvgInputBytes), - AvgOutputPkts: uint64(row.AvgOutputPkts), - AvgOutputBytes: uint64(row.AvgOutputBytes), + Timestamp: time.Now(), + Name: row.Name, + PeakInputPps: row.PeakInputPps, + PeakInputBps: row.PeakInputBps, + PeakOutputPps: row.PeakOutputPps, + PeakOutputBps: 
row.PeakOutputBps, + AvgInputPps: row.AvgInputPps, + AvgInputBps: row.AvgInputBps, + AvgOutputPps: row.AvgOutputPps, + AvgOutputBps: row.AvgOutputBps, }) } metric := base.MetricData{ @@ -141,16 +141,15 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)). GroupBy(trafficperhour.FieldName). Aggregate( - ent.As(ent.Max(trafficperhour.FieldPeakInputPkts), "peak_input_pkts"), - ent.As(ent.Max(trafficperhour.FieldPeakInputBytes), "peak_input_bytes"), - ent.As(ent.Max(trafficperhour.FieldPeakOutputPkts), "peak_output_pkts"), - ent.As(ent.Max(trafficperhour.FieldPeakOutputBytes), "peak_output_bytes"), - ent.As(ent.Mean(trafficperhour.FieldAvgInputPkts), "avg_input_pkts"), - ent.As(ent.Mean(trafficperhour.FieldAvgInputBytes), "avg_input_bytes"), - ent.As(ent.Mean(trafficperhour.FieldAvgOutputPkts), "avg_output_pkts"), - ent.As(ent.Mean(trafficperhour.FieldAvgOutputBytes), "avg_output_bytes"), - ). 
- Scan(ctx, &queryset) + ent.As(ent.Max(trafficperhour.FieldPeakInputPps), "peak_input_pps"), + ent.As(ent.Max(trafficperhour.FieldPeakInputBps), "peak_input_bps"), + ent.As(ent.Max(trafficperhour.FieldPeakOutputPps), "peak_output_pps"), + ent.As(ent.Max(trafficperhour.FieldPeakOutputBps), "peak_output_bps"), + ent.As(ent.Mean(trafficperhour.FieldAvgInputPps), "avg_input_pps"), + ent.As(ent.Mean(trafficperhour.FieldAvgInputBps), "avg_input_bps"), + ent.As(ent.Mean(trafficperhour.FieldAvgOutputPps), "avg_output_pps"), + ent.As(ent.Mean(trafficperhour.FieldAvgOutputBps), "avg_output_bps"), + ).Scan(ctx, &queryset) if err != nil { log.Debug().Msg(err.Error()) return queryset, err diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index ca00e81..8f16118 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -53,16 +53,16 @@ func (c *Check) queryTraffic(ctx context.Context) (base.MetricData, error) { var data []base.CheckResult for _, row := range queryset { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Name: row.Name, - PeakInputPkts: uint64(row.PeakInputPkts), - PeakInputBytes: uint64(row.PeakInputBytes), - PeakOutputPkts: uint64(row.PeakOutputPkts), - PeakOutputBytes: uint64(row.PeakOutputBytes), - AvgInputPkts: uint64(row.AvgInputPkts), - AvgInputBytes: uint64(row.AvgInputBytes), - AvgOutputPkts: uint64(row.AvgOutputPkts), - AvgOutputBytes: uint64(row.AvgOutputBytes), + Timestamp: time.Now(), + Name: row.Name, + PeakInputPps: row.PeakInputPps, + PeakInputBps: row.PeakInputBps, + PeakOutputPps: row.PeakOutputPps, + PeakOutputBps: row.PeakOutputBps, + AvgInputPps: row.AvgInputPps, + AvgInputBps: row.AvgInputBps, + AvgOutputPps: row.AvgOutputPps, + AvgOutputBps: row.AvgOutputBps, }) } metric := base.MetricData{ @@ -174,16 +174,15 @@ func (c *Check) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) Where(traffic.TimestampGTE(from), 
traffic.TimestampLTE(now)). GroupBy(traffic.FieldName). Aggregate( - ent.As(ent.Max(traffic.FieldInputPkts), "peak_input_pkts"), - ent.As(ent.Max(traffic.FieldInputBytes), "peak_input_bytes"), - ent.As(ent.Max(traffic.FieldOutputPkts), "peak_output_pkts"), - ent.As(ent.Max(traffic.FieldOutputBytes), "peak_output_bytes"), - ent.As(ent.Mean(traffic.FieldInputPkts), "avg_input_pkts"), - ent.As(ent.Mean(traffic.FieldInputBytes), "avg_input_bytes"), - ent.As(ent.Mean(traffic.FieldOutputPkts), "avg_output_pkts"), - ent.As(ent.Mean(traffic.FieldOutputBytes), "avg_output_bytes"), - ). - Scan(ctx, &queryset) + ent.As(ent.Max(traffic.FieldInputPps), "peak_input_pps"), + ent.As(ent.Max(traffic.FieldInputBps), "peak_input_bps"), + ent.As(ent.Max(traffic.FieldOutputPps), "peak_output_pps"), + ent.As(ent.Max(traffic.FieldOutputBps), "peak_output_bps"), + ent.As(ent.Mean(traffic.FieldInputPps), "avg_input_pps"), + ent.As(ent.Mean(traffic.FieldInputBps), "avg_input_bps"), + ent.As(ent.Mean(traffic.FieldOutputPps), "avg_output_pps"), + ent.As(ent.Mean(traffic.FieldOutputBps), "avg_output_bps"), + ).Scan(ctx, &queryset) if err != nil { log.Debug().Msg(err.Error()) return queryset, err @@ -197,14 +196,14 @@ func (c *Check) saveTrafficPerHour(ctx context.Context, data []base.CheckResult) err := client.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetName(data[i].Name). - SetPeakInputPkts(int64(data[i].PeakInputPkts)). - SetPeakInputBytes(int64(data[i].PeakInputBytes)). - SetPeakOutputPkts(int64(data[i].PeakOutputPkts)). - SetPeakOutputBytes(int64(data[i].PeakOutputBytes)). - SetAvgInputPkts(int64(data[i].AvgInputPkts)). - SetAvgInputBytes(int64(data[i].AvgInputBytes)). - SetAvgOutputPkts(int64(data[i].AvgOutputPkts)). - SetAvgOutputBytes(int64(data[i].AvgOutputBytes)) + SetPeakInputPps(data[i].PeakInputPps). + SetPeakInputBps(data[i].PeakInputBps). + SetPeakOutputPps(data[i].PeakOutputPps). 
+ SetPeakOutputBps(data[i].PeakOutputBps). + SetAvgInputPps(data[i].AvgInputPps). + SetAvgInputBps(data[i].AvgInputBps). + SetAvgOutputPps(data[i].AvgOutputPps). + SetAvgOutputBps(data[i].AvgOutputBps) }).Exec(ctx) if err != nil { return err diff --git a/pkg/collector/check/realtime/net/net.go b/pkg/collector/check/realtime/net/net.go index 0dc9040..d34e3ca 100644 --- a/pkg/collector/check/realtime/net/net.go +++ b/pkg/collector/check/realtime/net/net.go @@ -97,28 +97,26 @@ func (c *Check) parseTraffic(ioCOunters []net.IOCountersStat, interfaces map[str var data []base.CheckResult for _, ioCounter := range ioCOunters { if _, ok := interfaces[ioCounter.Name]; ok { - var inputPkts, inputBytes, outputPkts, outputBytes uint64 + var inputPps, inputBps, outputPps, outputBps float64 if lastCounter, exists := c.lastMetric[ioCounter.Name]; exists { - inputPkts = ioCounter.PacketsRecv - lastCounter.PacketsRecv - inputBytes = ioCounter.BytesRecv - lastCounter.BytesRecv - outputPkts = ioCounter.PacketsSent - lastCounter.PacketsSent - outputBytes = ioCounter.BytesSent - lastCounter.BytesSent + inputPps, outputPps = utils.CalculatePps(ioCounter, lastCounter, c.GetInterval()) + inputBps, outputBps = utils.CalculateBps(ioCounter, lastCounter, c.GetInterval()) } else { - inputPkts = ioCounter.PacketsRecv - inputBytes = ioCounter.BytesRecv - outputPkts = ioCounter.PacketsSent - outputBytes = ioCounter.BytesSent + inputPps = 0 + inputBps = 0 + outputPps = 0 + outputBps = 0 } c.lastMetric[ioCounter.Name] = ioCounter data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Name: ioCounter.Name, - InputPkts: inputPkts, - InputBytes: inputBytes, - OutputPkts: outputPkts, - OutputBytes: outputBytes, + Timestamp: time.Now(), + Name: ioCounter.Name, + InputPps: inputPps, + InputBps: inputBps, + OutputPps: outputPps, + OutputBps: outputBps, }) } } @@ -184,10 +182,10 @@ func (c *Check) saveTraffic(ctx context.Context, data []base.CheckResult) error err := 
client.Traffic.MapCreateBulk(data, func(q *ent.TrafficCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetName(data[i].Name). - SetInputPkts(int64(data[i].InputPkts)). - SetInputBytes(int64(data[i].InputBytes)). - SetOutputPkts(int64(data[i].OutputPkts)). - SetOutputBytes(int64(data[i].OutputBytes)) + SetInputPps(data[i].InputPps). + SetInputBps(data[i].InputBps). + SetOutputPps(data[i].OutputPps). + SetOutputBps(data[i].OutputBps) }).Exec(ctx) if err != nil { return err diff --git a/pkg/db/schema/traffic.go b/pkg/db/schema/traffic.go index 42dc9cc..3eb8aaa 100644 --- a/pkg/db/schema/traffic.go +++ b/pkg/db/schema/traffic.go @@ -18,10 +18,10 @@ func (Traffic) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("name"), - field.Int64("input_pkts"), - field.Int64("input_bytes"), - field.Int64("output_pkts"), - field.Int64("output_bytes"), + field.Float("input_pps"), + field.Float("input_bps"), + field.Float("output_pps"), + field.Float("output_bps"), } } diff --git a/pkg/db/schema/traffic_per_hour.go b/pkg/db/schema/traffic_per_hour.go index 7d620ea..d0700d6 100644 --- a/pkg/db/schema/traffic_per_hour.go +++ b/pkg/db/schema/traffic_per_hour.go @@ -18,14 +18,14 @@ func (TrafficPerHour) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("name"), - field.Int64("peak_input_pkts"), - field.Int64("peak_input_bytes"), - field.Int64("peak_output_pkts"), - field.Int64("peak_output_bytes"), - field.Int64("avg_input_pkts"), - field.Int64("avg_input_bytes"), - field.Int64("avg_output_pkts"), - field.Int64("avg_output_bytes"), + field.Float("peak_input_pps"), + field.Float("peak_input_bps"), + field.Float("peak_output_pps"), + field.Float("peak_output_bps"), + field.Float("avg_input_pps"), + field.Float("avg_input_bps"), + field.Float("avg_output_pps"), + field.Float("avg_output_bps"), } } From 5a9a619281f46fd67a237931ae7b9315f60d80a5 Mon Sep 17 00:00:00 2001 From: geunwoo Date: 
Fri, 6 Dec 2024 16:44:38 +0900 Subject: [PATCH 085/364] Add functions to calculate bps/pps Add utility functions to calculate bps and pps. --- pkg/utils/utils.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 2c7e672..7fe349a 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -14,6 +14,7 @@ import ( "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/host" + "github.com/shirou/gopsutil/v4/net" ) var ( @@ -116,3 +117,33 @@ func CalculateBackOff(delay time.Duration, attempt int) time.Duration { return backoff * jitter } + +func CalculateBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { + if interval == 0 { + return 0, 0 + } + + inputBytesDiff := float64(current.BytesRecv - last.BytesRecv) + outputBytesDiff := float64(current.BytesSent - last.BytesSent) + seconds := interval.Seconds() + + inputBps = (inputBytesDiff * 8) / seconds + outputBps = (outputBytesDiff * 8) / seconds + + return inputBps, outputBps +} + +func CalculatePps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputPps float64, outputPps float64) { + if interval == 0 { + return 0, 0 + } + + inputPktsDiff := float64(current.PacketsRecv - last.PacketsRecv) + outputPktsDiff := float64(current.PacketsSent - last.PacketsSent) + seconds := interval.Seconds() + + inputPps = inputPktsDiff / seconds + outputPps = outputPktsDiff / seconds + + return inputPps, outputPps +} From d97975eadd65a32a5ee312953dfb13b5dc01ef55 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 6 Dec 2024 16:48:05 +0900 Subject: [PATCH 086/364] Minor fix To omit unnecessary file systems from the collection process on macOS, fix excludedFileSystems. 
Upon its initial data collection, alpamon is now configured to set the initial disk read and write bytes to 0 --- pkg/collector/check/realtime/disk/io/io.go | 4 ++-- pkg/collector/check/realtime/disk/usage/usage.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/collector/check/realtime/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go index 06782c0..dc11cbf 100644 --- a/pkg/collector/check/realtime/disk/io/io.go +++ b/pkg/collector/check/realtime/disk/io/io.go @@ -101,8 +101,8 @@ func (c *Check) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.Ch readBytes = ioCounter.ReadBytes - lastCounter.ReadBytes writeBytes = ioCounter.WriteBytes - lastCounter.WriteBytes } else { - readBytes = ioCounter.ReadBytes - writeBytes = ioCounter.WriteBytes + readBytes = 0 + writeBytes = 0 } c.lastMetric[name] = ioCounter diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index ef67175..94b23dc 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -19,6 +19,7 @@ var excludedFileSystems = map[string]bool{ "sysfs": true, "cgroup": true, "overlay": true, + "autofs": true, } type Check struct { From 01f062fcee799f3ff00923e049026320bb7ca2f7 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Sun, 8 Dec 2024 22:36:18 +0900 Subject: [PATCH 087/364] Fix backoff calculation by resetting entry.due based on current time --- pkg/scheduler/reporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/scheduler/reporter.go b/pkg/scheduler/reporter.go index 12db88e..ca13dc3 100644 --- a/pkg/scheduler/reporter.go +++ b/pkg/scheduler/reporter.go @@ -86,7 +86,7 @@ func (r *Reporter) query(entry PriorityEntry) { r.counters.failure++ if entry.retry > 0 { backoff := time.Duration(math.Pow(2, float64(RetryLimit-entry.retry))) * time.Second - entry.due = entry.due.Add(backoff) + entry.due = time.Now().Add(backoff) 
entry.retry-- err = Rqueue.queue.Offer(entry) if err != nil { From c4d32c0436d17542da6bcde2e2424ee637fb599a Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Sun, 8 Dec 2024 22:44:03 +0900 Subject: [PATCH 088/364] Increase log level threshold for reporter.go to reduce unnecessary logs --- pkg/logger/logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 4cd9d09..23f1e6f 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -89,7 +89,7 @@ type logRecordWriter struct{} // If a file is not listed, all logs will be sent regardless of level. var remoteLogThresholds = map[string]int{ "client.go": 30, - "reporter.go": 30, + "reporter.go": 40, "command.go": 30, "commit.go": 30, "pty.go": 30, From 0ca563e316e132dca5cef9a71ce782e7df169c6d Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 9 Dec 2024 00:05:50 +0900 Subject: [PATCH 089/364] Increase MaxConnectInterval to 300s --- pkg/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 3de5c08..afd3954 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -24,7 +24,7 @@ var ( const ( wsPath = "/ws/servers/backhaul/" MinConnectInterval = 5 * time.Second - MaxConnectInterval = 60 * time.Second + MaxConnectInterval = 300 * time.Second ) func InitSettings(settings Settings) { From e8967a9c7cfe1011b76d46e79ca19b01523b20ab Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 9 Dec 2024 11:25:57 +0900 Subject: [PATCH 090/364] Add error handling to skip reporter execution if queue is full --- pkg/scheduler/queue.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/scheduler/queue.go b/pkg/scheduler/queue.go index 44f8688..9aaf562 100644 --- a/pkg/scheduler/queue.go +++ b/pkg/scheduler/queue.go @@ -52,9 +52,11 @@ func (rq *RequestQueue) request(method, url string, data interface{}, priority i retry: RetryLimit, } + // Do not wake 
reporter goroutine if the queue is full or uninitialized. err := rq.queue.Offer(entry) if err != nil { - log.Error().Err(err).Msg("Error offering priority entry") + log.Error().Err(err).Msgf("Queue is full or uninitialized, dropping entry: %s", entry.url) + return } rq.cond.Signal() From 76871aefe446a526271da322c245b12a7a886306 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 9 Dec 2024 11:38:40 +0900 Subject: [PATCH 091/364] Fix retry on invalid CheckSession response instead of returning false --- pkg/scheduler/session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 2a30bab..f2ea087 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -75,7 +75,7 @@ func (session *Session) CheckSession() bool { return commissioned } else { log.Error().Msg("Unable to find 'commissioned' field in the response") - return false + continue } } } From 321e58b7ebe9e3fc0f7bd8ea76f57274dd7ec670 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 9 Dec 2024 13:36:13 +0900 Subject: [PATCH 092/364] Refactor enhance file-based log filtering with logRecordFileHandlers --- pkg/logger/logger.go | 34 +++++++++++++++++++--------------- pkg/runner/shell.go | 4 ++-- pkg/scheduler/reporter.go | 5 ++--- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 23f1e6f..79ab45b 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -84,27 +84,27 @@ type zerologEntry struct { type logRecordWriter struct{} -// remoteLogThresholds defines log level thresholds for specific callers (files). -// Logs below the specified level for a given file will not be sent to the alpacon-server. -// If a file is not listed, all logs will be sent regardless of level. 
-var remoteLogThresholds = map[string]int{ - "client.go": 30, - "reporter.go": 40, - "command.go": 30, - "commit.go": 30, - "pty.go": 30, +// logRecordFileHandlers defines log level thresholds for specific files. +// Only files listed here will have their logs sent to the remote server. +// Logs from files not listed will be ignored entirely. +// Logs below the specified level for a listed file will also be ignored. +var logRecordFileHandlers = map[string]int{ + "command.go": 30, + "commit.go": 10, + "pty.go": 30, + "shell.go": 20, } func (w *logRecordWriter) Write(p []byte) (n int, err error) { var entry zerologEntry err = json.Unmarshal(p, &entry) if err != nil { - return 0, err + return n, err } caller := entry.Caller if caller == "" { - return len(p), nil + return n, err } lineno := 0 @@ -113,10 +113,14 @@ func (w *logRecordWriter) Write(p []byte) (n int, err error) { } callerFileName := getCallerFileName(caller) - if levelThreshold, ok := remoteLogThresholds[callerFileName]; ok { - if convertLevelToNumber(entry.Level) < levelThreshold { - return len(p), nil - } + + levelThreshold, exists := logRecordFileHandlers[callerFileName] + if !exists { + return len(p), nil + } + + if convertLevelToNumber(entry.Level) < levelThreshold { + return len(p), nil } record := logRecord{ diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index c3369ef..ad97558 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -28,12 +28,12 @@ func demote(username, groupname string) (*syscall.SysProcAttr, error) { usr, err := user.Lookup(username) if err != nil { - return nil, fmt.Errorf("There is no corresponding %s username in this server", username) + return nil, fmt.Errorf("there is no corresponding %s username in this server", username) } group, err := user.LookupGroup(groupname) if err != nil { - return nil, fmt.Errorf("There is no corresponding %s groupname in this server", groupname) + return nil, fmt.Errorf("there is no corresponding %s groupname in this server", 
groupname) } uid, err := strconv.Atoi(usr.Uid) diff --git a/pkg/scheduler/reporter.go b/pkg/scheduler/reporter.go index ca13dc3..04d06d7 100644 --- a/pkg/scheduler/reporter.go +++ b/pkg/scheduler/reporter.go @@ -86,12 +86,11 @@ func (r *Reporter) query(entry PriorityEntry) { r.counters.failure++ if entry.retry > 0 { backoff := time.Duration(math.Pow(2, float64(RetryLimit-entry.retry))) * time.Second - entry.due = time.Now().Add(backoff) + entry.due = entry.due.Add(backoff) entry.retry-- err = Rqueue.queue.Offer(entry) if err != nil { r.counters.ignored++ - time.Sleep(1 * time.Second) } } else { r.counters.ignored++ @@ -117,8 +116,8 @@ func (r *Reporter) Run() { err = Rqueue.queue.Offer(entry) if err != nil { r.counters.ignored++ - time.Sleep(1 * time.Second) } + time.Sleep(1 * time.Second) } else { r.query(entry) } From 7779f636f92fd201646d80c2d54fc5dc952145fc Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 9 Dec 2024 13:38:19 +0900 Subject: [PATCH 093/364] Minor fix --- pkg/logger/logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 79ab45b..611de0f 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -104,7 +104,7 @@ func (w *logRecordWriter) Write(p []byte) (n int, err error) { caller := entry.Caller if caller == "" { - return n, err + return len(p), nil } lineno := 0 From 363785e189e20662f8ba815bc4903a54bdd104e5 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 9 Dec 2024 13:51:50 +0900 Subject: [PATCH 094/364] Add support for setting initial pty size before command execution --- pkg/runner/pty.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 0e8e558..ba4dd60 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -75,7 +75,13 @@ func (pc *PtyClient) RunPtyBackground() { } pc.setPtyCmdSysProcAttrAndEnv(uid, gid, groupIds, env) - pc.ptmx, err = pty.Start(pc.cmd) + + initialSize := &pty.Winsize{ 
+ Rows: pc.rows, + Cols: pc.cols, + } + + pc.ptmx, err = pty.StartWithSize(pc.cmd, initialSize) if err != nil { log.Error().Err(err).Msg("Failed to start pty") pc.close() From e4108d2530932bf64c7637c322f843480bb133c8 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Tue, 10 Dec 2024 19:51:17 +0900 Subject: [PATCH 095/364] Remove packages from committed data --- pkg/runner/commit.go | 16 ++++++++-------- pkg/runner/commit_types.go | 20 ++++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 02e85e7..2f8957d 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -144,11 +144,11 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { log.Debug().Err(err).Msg("Failed to retrieve network addresses") } remoteData = &[]Address{} - case "packages": - if currentData, err = getSystemPackages(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve system packages") - } - remoteData = &[]SystemPackageData{} + //case "packages": + // if currentData, err = getSystemPackages(); err != nil { + // log.Debug().Err(err).Msg("Failed to retrieve system packages") + // } + // remoteData = &[]SystemPackageData{} default: log.Warn().Msgf("Unknown key: %s", key) continue @@ -250,9 +250,9 @@ func collectData() *commitData { if data.Addresses, err = getNetworkAddresses(); err != nil { log.Debug().Err(err).Msg("Failed to retrieve network addresses") } - if data.Packages, err = getSystemPackages(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve system packages") - } + //if data.Packages, err = getSystemPackages(); err != nil { + // log.Debug().Err(err).Msg("Failed to retrieve system packages") + //} return data } diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index 02c4308..ccb7dbc 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -137,16 +137,16 @@ type Address struct { } type commitData struct { - Version string 
`json:"version"` - Load float64 `json:"load"` - Info SystemData `json:"info"` - OS OSData `json:"os"` - Time TimeData `json:"time"` - Users []UserData `json:"users"` - Groups []GroupData `json:"groups"` - Interfaces []Interface `json:"interfaces"` - Addresses []Address `json:"addresses"` - Packages []SystemPackageData `json:"packages"` + Version string `json:"version"` + Load float64 `json:"load"` + Info SystemData `json:"info"` + OS OSData `json:"os"` + Time TimeData `json:"time"` + Users []UserData `json:"users"` + Groups []GroupData `json:"groups"` + Interfaces []Interface `json:"interfaces"` + Addresses []Address `json:"addresses"` + // Packages []SystemPackageData `json:"packages"` } // Defines the ComparableData interface for comparing different types. From 3a7b12577c479005ec369ead09163f0e623c7138 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 11 Dec 2024 09:03:54 +0900 Subject: [PATCH 096/364] Increase log level threshold for commit to 20 --- pkg/logger/logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 611de0f..6c91b32 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -90,7 +90,7 @@ type logRecordWriter struct{} // Logs below the specified level for a listed file will also be ignored. 
var logRecordFileHandlers = map[string]int{ "command.go": 30, - "commit.go": 10, + "commit.go": 20, "pty.go": 30, "shell.go": 20, } From 27c6e86256913054c2a7f95a9b35e5ca064e2c9d Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 11 Dec 2024 09:04:40 +0900 Subject: [PATCH 097/364] Add websocket session check for alpacon --- pkg/runner/client.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index fe58582..4080475 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -144,6 +144,10 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { } } + // Sends "hello" for Alpacon to verify WebSocket session status without error handling. + helloQuery := map[string]string{"query": "hello"} + _ = wc.writeJSON(helloQuery) + switch content.Query { case "command": scheduler.Rqueue.Post(fmt.Sprintf(eventCommandAckURL, content.Command.ID), @@ -163,3 +167,12 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { log.Warn().Msgf("Not implemented query: %s", content.Query) } } + +func (wc *WebsocketClient) writeJSON(data interface{}) error { + err := wc.conn.WriteJSON(data) + if err != nil { + log.Debug().Err(err).Msgf("Failed to write json data to websocket") + return err + } + return nil +} From dc8fdb990fd1564a65aafa38dc26837a81757498 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Wed, 11 Dec 2024 09:30:14 +0900 Subject: [PATCH 098/364] Increase log level threshold for commit,shell to 30 --- pkg/logger/logger.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 6c91b32..6910804 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -90,9 +90,9 @@ type logRecordWriter struct{} // Logs below the specified level for a listed file will also be ignored. 
var logRecordFileHandlers = map[string]int{ "command.go": 30, - "commit.go": 20, + "commit.go": 30, "pty.go": 30, - "shell.go": 20, + "shell.go": 30, } func (w *logRecordWriter) Write(p []byte) (n int, err error) { From ce646678ddecb2dbd492a411e3502900559db96d Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Thu, 12 Dec 2024 16:52:08 +0900 Subject: [PATCH 099/364] Decrease log level threshold for commits --- pkg/logger/logger.go | 2 +- pkg/runner/commit.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 6910804..2cf5ebc 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -90,7 +90,7 @@ type logRecordWriter struct{} // Logs below the specified level for a listed file will also be ignored. var logRecordFileHandlers = map[string]int{ "command.go": 30, - "commit.go": 30, + "commit.go": 20, "pty.go": 30, "shell.go": 30, } diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 2f8957d..5df95cd 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -63,7 +63,7 @@ func CommitAsync(session *scheduler.Session, commissioned bool) { } func commitSystemInfo() { - log.Info().Msg("Start committing system information.") + log.Debug().Msg("Start committing system information.") data := collectData() @@ -77,7 +77,7 @@ func commitSystemInfo() { } func syncSystemInfo(session *scheduler.Session, keys []string) { - log.Info().Msg("Start system information synchronization.") + log.Debug().Msg("Start system information synchronization.") syncMutex.Lock() defer syncMutex.Unlock() From 70b520ec74d2dfbbfa071e2fe4cda4639f007f7a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 13 Dec 2024 11:10:23 +0900 Subject: [PATCH 100/364] Refactor Scheduler Add retry logic to Scheduler to handle errors that may occur during check execution. To improve performance, add worker pool and task queue for parallel task execution. 
To optimize performance and simplify concurrent access, replace the existing data structure with sync.Map for task registration and locking. --- pkg/collector/scheduler/scheduler.go | 98 +++++++++++++++++++++------- 1 file changed, 76 insertions(+), 22 deletions(-) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index 4b858cd..eba74da 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -6,12 +6,20 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/utils" +) + +const ( + MAX_RETRIES int = 5 + MAX_RETRY_TIMES time.Duration = 1 * time.Minute + DEFAULT_DELAY time.Duration = 1 * time.Second ) type Scheduler struct { - tasks map[string]*ScheduledTask - mu sync.RWMutex - stopChan chan struct{} + tasks sync.Map + retryConf RetryConf + taskQueue chan *ScheduledTask + stopChan chan struct{} } type ScheduledTask struct { @@ -20,27 +28,48 @@ type ScheduledTask struct { interval time.Duration } +type RetryConf struct { + MaxRetries int + MaxRetryTime time.Duration + Delay time.Duration +} + func NewScheduler() *Scheduler { return &Scheduler{ - tasks: make(map[string]*ScheduledTask), - stopChan: make(chan struct{}), + retryConf: RetryConf{ + MaxRetries: MAX_RETRIES, + MaxRetryTime: MAX_RETRY_TIMES, + Delay: DEFAULT_DELAY, + }, + taskQueue: make(chan *ScheduledTask), + stopChan: make(chan struct{}), } } -func (s *Scheduler) AddTask(check base.CheckStrategy) error { - s.mu.Lock() - defer s.mu.Unlock() - +func (s *Scheduler) AddTask(check base.CheckStrategy) { interval := check.GetInterval() - s.tasks[check.GetName()] = &ScheduledTask{ + task := &ScheduledTask{ check: check, nextRun: time.Now().Add(interval), interval: interval, } - return nil + s.tasks.Store(check.GetName(), task) } -func (s *Scheduler) Start(ctx context.Context) { +func (s *Scheduler) Start(ctx context.Context, workerCount int) { + for i := 0; i < 
workerCount; i++ { + go s.worker(ctx) + } + + go s.dispatcher(ctx) +} + +func (s *Scheduler) Stop() { + close(s.stopChan) + close(s.taskQueue) +} + +func (s *Scheduler) dispatcher(ctx context.Context) { ticker := time.NewTicker(time.Second) defer ticker.Stop() @@ -51,28 +80,53 @@ func (s *Scheduler) Start(ctx context.Context) { case <-s.stopChan: return case <-ticker.C: - s.mu.RLock() now := time.Now() - for _, task := range s.tasks { + s.tasks.Range(func(key, value interface{}) bool { + task, ok := value.(*ScheduledTask) + if !ok { + return true + } + if now.After(task.nextRun) { - go s.executeTask(ctx, task) + s.taskQueue <- task } - } - s.mu.RUnlock() + return true + }) } } } -func (s *Scheduler) Stop() { - close(s.stopChan) +func (s *Scheduler) worker(ctx context.Context) { + for task := range s.taskQueue { + select { + case <-ctx.Done(): + return + default: + s.executeTask(ctx, task) + } + } } func (s *Scheduler) executeTask(ctx context.Context, task *ScheduledTask) { defer func() { - s.mu.Lock() task.nextRun = time.Now().Add(task.interval) - s.mu.Unlock() }() - task.check.Execute(ctx) + for attempt := 0; attempt <= s.retryConf.MaxRetries; attempt++ { + err := task.check.Execute(ctx) + if err != nil { + if attempt < s.retryConf.MaxRetries { + backoff := utils.CalculateBackOff(s.retryConf.Delay, attempt) + select { + case <-time.After(backoff): + continue + case <-ctx.Done(): + return + } + } + return + } + break + } + } From 7f74cde94fe05110c2553387966e7daabaf03598 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 13 Dec 2024 11:17:47 +0900 Subject: [PATCH 101/364] Refactor Checks To enforce the single responsibility principle and confine the Check's duties to metric tasks, retry logic has been moved to the Scheduler. To support database rollbacks in check execution failures, add explicit transaction management. 
--- .../check/batch/daily/cleanup/cleanup.go | 136 ++++++++++++----- pkg/collector/check/batch/daily/cpu/cpu.go | 94 +++--------- pkg/collector/check/batch/daily/disk/io/io.go | 94 +++--------- .../check/batch/daily/disk/usage/usage.go | 94 +++--------- .../check/batch/daily/memory/memory.go | 94 +++--------- pkg/collector/check/batch/daily/net/net.go | 91 +++--------- pkg/collector/check/batch/hourly/cpu/cpu.go | 140 ++++-------------- .../check/batch/hourly/disk/io/io.go | 137 ++++------------- .../check/batch/hourly/disk/usage/usage.go | 137 ++++------------- .../check/batch/hourly/memory/memory.go | 140 ++++-------------- pkg/collector/check/batch/hourly/net/net.go | 134 ++++------------- pkg/collector/check/realtime/cpu/cpu.go | 83 ++--------- pkg/collector/check/realtime/disk/io/io.go | 89 ++--------- .../check/realtime/disk/usage/usage.go | 78 +--------- pkg/collector/check/realtime/memory/memory.go | 83 ++--------- pkg/collector/check/realtime/net/net.go | 101 ++++--------- 16 files changed, 403 insertions(+), 1322 deletions(-) diff --git a/pkg/collector/check/batch/daily/cleanup/cleanup.go b/pkg/collector/check/batch/daily/cleanup/cleanup.go index 9dc29c9..ffe9081 100644 --- a/pkg/collector/check/batch/daily/cleanup/cleanup.go +++ b/pkg/collector/check/batch/daily/cleanup/cleanup.go @@ -2,11 +2,9 @@ package cpu import ( "context" - "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/utils" ) var ( @@ -40,45 +38,25 @@ type deleteQuery func(context.Context, *ent.Client) error type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxDeleteRetries: base.MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { - start := 
time.Now() - - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - if err := c.deleteAllMetric(ctx); err != nil { - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - continue - case <-ctx.Done(): - return - } - } - } - break +func (c *Check) Execute(ctx context.Context) error { + err := c.deleteAllMetric(ctx) + if err != nil { + return err } if ctx.Err() != nil { - return + return ctx.Err() } + + return nil } func (c *Check) deleteAllMetric(ctx context.Context) error { @@ -94,91 +72,171 @@ func (c *Check) deleteAllMetric(ctx context.Context) error { } func deleteAllCPU(ctx context.Context, client *ent.Client) error { - _, err := client.CPU.Delete().Exec(ctx) + tx, err := client.Tx(ctx) if err != nil { return err } + defer tx.Rollback() + + _, err = tx.CPU.Delete().Exec(ctx) + if err != nil { + return err + } + + _ = tx.Commit() return nil } func deleteAllCPUPerHour(ctx context.Context, client *ent.Client) error { - _, err := client.CPUPerHour.Delete().Exec(ctx) + tx, err := client.Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + _, err = tx.CPUPerHour.Delete().Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } func deleteAllMemory(ctx context.Context, client *ent.Client) error { - _, err := client.Memory.Delete().Exec(ctx) + tx, err := client.Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + _, err = tx.Memory.Delete().Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client) error { - _, err := client.MemoryPerHour.Delete().Exec(ctx) + tx, err := client.Tx(ctx) if err != nil { return err } + defer tx.Rollback() + + _, err = tx.MemoryPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + _ = tx.Commit() return nil } func 
deleteAllDiskUsage(ctx context.Context, client *ent.Client) error { - _, err := client.DiskUsage.Delete().Exec(ctx) + tx, err := client.Tx(ctx) if err != nil { return err } + defer tx.Rollback() + + _, err = tx.DiskUsage.Delete().Exec(ctx) + if err != nil { + return err + } + + _ = tx.Commit() return nil } func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client) error { - _, err := client.DiskIOPerHour.Delete().Exec(ctx) + tx, err := client.Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + _, err = tx.DiskIOPerHour.Delete().Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } func deleteAllDiskIO(ctx context.Context, client *ent.Client) error { - _, err := client.DiskIO.Delete().Exec(ctx) + tx, err := client.Tx(ctx) if err != nil { return err } + defer tx.Rollback() + + _, err = client.DiskIO.Delete().Exec(ctx) + if err != nil { + return err + } + + _ = tx.Commit() return nil } func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client) error { - _, err := client.DiskIOPerHour.Delete().Exec(ctx) + tx, err := client.Tx(ctx) if err != nil { return err } + defer tx.Rollback() + + _, err = tx.DiskIOPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + _ = tx.Commit() return nil } func deleteAllTraffic(ctx context.Context, client *ent.Client) error { - _, err := client.Traffic.Delete().Exec(ctx) + tx, err := client.Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + _, err = tx.Traffic.Delete().Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } func deleteAllTrafficPerHour(ctx context.Context, client *ent.Client) error { - _, err := client.TrafficPerHour.Delete().Exec(ctx) + tx, err := client.Tx(ctx) if err != nil { return err } + defer tx.Rollback() + + _, err = tx.TrafficPerHour.Delete().Exec(ctx) + if err != nil { + return err + } + + _ = tx.Commit() return nil } diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go 
b/pkg/collector/check/batch/daily/cpu/cpu.go index fc57aae..e1e5774 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -2,49 +2,41 @@ package cpu import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpuperhour" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: 3, - MaxDeleteRetries: 2, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryCPUPerHour(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetCPUPerHour(ctx) + queryset, err := c.getCPUPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -59,7 +51,7 @@ func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { Data: []base.CheckResult{data}, } - err = c.retryDeleteCPUPerHour(ctx) + err = c.deleteCPUPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -67,60 +59,6 @@ func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) retryGetCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getCPUPerHour(ctx) - if 
err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get cpu usage per hour queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get cpu usage per hour queryset") -} - -func (c *Check) retryDeleteCPUPerHour(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteCPUPerHour(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete cpu usage per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete cpu usage per hour") -} - func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { client := c.GetClient() now := time.Now() @@ -132,10 +70,8 @@ func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { Aggregate( ent.Max(cpuperhour.FieldPeakUsage), ent.Mean(cpuperhour.FieldAvgUsage), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } @@ -143,16 +79,22 @@ func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { } func (c *Check) deleteCPUPerHour(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-24 * time.Hour) - _, err := client.CPUPerHour.Delete(). - Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)). 
- Exec(ctx) + _, err = tx.CPUPerHour.Delete(). + Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index fbf7224..1340c0a 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -2,49 +2,41 @@ package io import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskioperhour" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: 3, - MaxDeleteRetries: 2, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryDiskIOPerHour(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetDiskIOPerHour(ctx) + queryset, err := c.getDiskIOPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -65,7 +57,7 @@ func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) Data: data, } - err = c.retryDeleteDiskIOPerHour(ctx) + err = c.deleteDiskIOPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -73,60 +65,6 @@ func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) return metric, nil } -func (c *Check) 
retryGetDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getDiskIOPerHour(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get disk io per hour queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get disk io per hour queryset") -} - -func (c *Check) retryDeleteDiskIOPerHour(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteDiskIOPerHour(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete disk io per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete disk io per hour") -} - func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, error) { client := c.GetClient() now := time.Now() @@ -141,10 +79,8 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er ent.As(ent.Max(diskioperhour.FieldPeakWriteBytes), "peak_write_bytes"), ent.As(ent.Mean(diskioperhour.FieldAvgReadBytes), "avg_read_bytes"), ent.As(ent.Mean(diskioperhour.FieldAvgWriteBytes), "avg_write_bytes"), - ). 
- Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } @@ -152,16 +88,22 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er } func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-24 * time.Hour) - _, err := client.DiskIOPerHour.Delete(). - Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)). - Exec(ctx) + _, err = tx.DiskIOPerHour.Delete(). + Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index 83d2836..1b28906 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -2,49 +2,41 @@ package usage import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusageperhour" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: 3, - MaxDeleteRetries: 2, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryDiskUsagePerHour(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil 
} func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetDiskUsagePerHour(ctx) + queryset, err := c.getDiskUsagePerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -64,7 +56,7 @@ func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, err Data: data, } - err = c.retryDeleteDiskUsagePerHour(ctx) + err = c.deleteDiskUsagePerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -72,60 +64,6 @@ func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, err return metric, nil } -func (c *Check) retryGetDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getDiskUsagePerHour(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get disk usage per hour queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get disk usage per hour queryset") -} - -func (c *Check) retryDeleteDiskUsagePerHour(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteDiskUsagePerHour(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete disk usage per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete disk usage per 
hour") -} - func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() now := time.Now() @@ -138,10 +76,8 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS Aggregate( ent.Max(diskusageperhour.FieldPeakUsage), ent.Mean(diskusageperhour.FieldAvgUsage), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } @@ -149,16 +85,22 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS } func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-24 * time.Hour) - _, err := client.DiskUsagePerHour.Delete(). - Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). - Exec(ctx) + _, err = tx.DiskUsagePerHour.Delete(). + Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index c6cf035..ce69995 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -2,49 +2,41 @@ package memory import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memoryperhour" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: 3, - MaxDeleteRetries: 2, - MaxRetryTime: 
base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryMemoryPerHour(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetMemoryPerHour(ctx) + queryset, err := c.getMemoryPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -59,7 +51,7 @@ func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) Data: []base.CheckResult{data}, } - err = c.retryDeleteMemoryPerHour(ctx) + err = c.deleteMemoryPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -67,60 +59,6 @@ func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) return metric, nil } -func (c *Check) retryGetMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getMemoryPerHour(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get memory usage per hour queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get memory usage per hour queryset") -} - -func (c *Check) retryDeleteMemoryPerHour(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteMemoryPerHour(ctx) - if err == nil { - return nil - } 
- - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete memory usage per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete memory usage per hour") -} - func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, error) { client := c.GetClient() now := time.Now() @@ -132,10 +70,8 @@ func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, er Aggregate( ent.Max(memoryperhour.FieldPeakUsage), ent.Mean(memoryperhour.FieldAvgUsage), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } @@ -143,16 +79,22 @@ func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, er } func (c *Check) deleteMemoryPerHour(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-24 * time.Hour) - _, err := client.MemoryPerHour.Delete(). - Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)). - Exec(ctx) + _, err = tx.MemoryPerHour.Delete(). 
+ Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 9722e9a..30ea49e 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -2,49 +2,41 @@ package net import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/trafficperhour" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: 3, - MaxDeleteRetries: 2, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryTrafficPerHour(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetTrafficPerHour(ctx) + queryset, err := c.getTrafficPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -69,7 +61,7 @@ func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error Data: data, } - err = c.retryDeleteTrafficPerHour(ctx) + err = c.deleteTrafficPerHour(ctx) if err != nil { return base.MetricData{}, err } @@ -77,60 +69,6 @@ func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error return metric, nil } -func (c *Check) retryGetTrafficPerHour(ctx context.Context) 
([]base.TrafficQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getTrafficPerHour(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get traffic per hour queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get traffic per hour queryset") -} - -func (c *Check) retryDeleteTrafficPerHour(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteTrafficPerHour(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete traffic per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete traffic per hour") -} - func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, error) { client := c.GetClient() now := time.Now() @@ -151,7 +89,6 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, ent.As(ent.Mean(trafficperhour.FieldAvgOutputBps), "avg_output_bps"), ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } @@ -159,16 +96,22 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, } func (c *Check) deleteTrafficPerHour(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer 
tx.Rollback() + now := time.Now() from := now.Add(-24 * time.Hour) - _, err := client.TrafficPerHour.Delete(). - Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)). - Exec(ctx) + _, err = tx.TrafficPerHour.Delete(). + Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index e75ddf0..ee1d067 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -2,50 +2,41 @@ package cpu import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: base.GET_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxDeleteRetries: base.DELETE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryCPUUsage(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetCPU(ctx) + queryset, err := c.getCPU(ctx) if err != nil { return base.MetricData{}, err } @@ -60,12 +51,12 @@ func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { Data: []base.CheckResult{data}, } - err = 
c.retrySaveCPUPerHour(ctx, data) + err = c.saveCPUPerHour(data, ctx) if err != nil { return base.MetricData{}, err } - err = c.retryDeleteCPU(ctx) + err = c.deleteCPU(ctx) if err != nil { return base.MetricData{}, err } @@ -73,87 +64,6 @@ func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) retryGetCPU(ctx context.Context) ([]base.CPUQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getCPU(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get cpu queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get cpu queryset") -} - -func (c *Check) retrySaveCPUPerHour(ctx context.Context, data base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveCPUPerHour(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save cpu usage per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save cpu usage per hour") -} - -func (c *Check) retryDeleteCPU(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteCPU(ctx) - if err == nil { - return nil - } - - if attempt < 
c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete cpu usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete cpu usage") -} - func (c *Check) getCPU(ctx context.Context) ([]base.CPUQuerySet, error) { client := c.GetClient() now := time.Now() @@ -165,39 +75,51 @@ func (c *Check) getCPU(ctx context.Context) ([]base.CPUQuerySet, error) { Aggregate( ent.Max(cpu.FieldUsage), ent.Mean(cpu.FieldUsage), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } return queryset, nil } -func (c *Check) saveCPUPerHour(ctx context.Context, data base.CheckResult) error { - client := c.GetClient() - if err := client.CPUPerHour.Create(). +func (c *Check) saveCPUPerHour(data base.CheckResult, ctx context.Context) error { + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + err = tx.CPUPerHour.Create(). SetTimestamp(data.Timestamp). SetPeakUsage(data.PeakUsage). - SetAvgUsage(data.AvgUsage).Exec(ctx); err != nil { + SetAvgUsage(data.AvgUsage).Exec(ctx) + if err != nil { return err } + _ = tx.Commit() + return nil } func (c *Check) deleteCPU(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-1 * time.Hour) - _, err := client.CPU.Delete(). - Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)). - Exec(ctx) + _, err = tx.CPU.Delete(). 
+ Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index 8840ee7..793485d 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -2,50 +2,41 @@ package io import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: base.GET_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxDeleteRetries: base.DELETE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryDiskIO(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryDiskIO(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetDiskIO(ctx) + queryset, err := c.getDiskIO(ctx) if err != nil { return base.MetricData{}, err } @@ -66,12 +57,12 @@ func (c *Check) queryDiskIO(ctx context.Context) (base.MetricData, error) { Data: data, } - err = c.retrySaveDiskIOPerHour(ctx, data) + err = c.saveDiskIOPerHour(data, ctx) if err != nil { return base.MetricData{}, err } - err = c.retryDeleteDiskIO(ctx) + err = c.deleteDiskIO(ctx) if err != nil { return base.MetricData{}, err } @@ -79,87 +70,6 @@ func (c *Check) queryDiskIO(ctx 
context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) retryGetDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getDiskIO(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get disk io queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get disk io queryset") -} - -func (c *Check) retrySaveDiskIOPerHour(ctx context.Context, data []base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveDiskIOPerHour(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save disk io per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save disk io per hour") -} - -func (c *Check) retryDeleteDiskIO(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteDiskIO(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete disk io: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() 
- } - } - } - - return fmt.Errorf("failed to delete disk io") -} - func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { client := c.GetClient() now := time.Now() @@ -174,19 +84,22 @@ func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { ent.As(ent.Max(diskio.FieldWriteBytes), "peak_write_bytes"), ent.As(ent.Mean(diskio.FieldReadBytes), "avg_read_bytes"), ent.As(ent.Mean(diskio.FieldWriteBytes), "avg_write_bytes"), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } return queryset, nil } -func (c *Check) saveDiskIOPerHour(ctx context.Context, data []base.CheckResult) error { - client := c.GetClient() - err := client.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { +func (c *Check) saveDiskIOPerHour(data []base.CheckResult, ctx context.Context) error { + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + err = tx.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). SetPeakReadBytes(int64(data[i].PeakReadBytes)). @@ -198,20 +111,28 @@ func (c *Check) saveDiskIOPerHour(ctx context.Context, data []base.CheckResult) return err } + _ = tx.Commit() + return nil } func (c *Check) deleteDiskIO(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-1 * time.Hour) - _, err := client.DiskIO.Delete(). - Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). - Exec(ctx) + _, err = tx.DiskIO.Delete(). 
+ Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index d56b08a..4fe0c2e 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -2,50 +2,41 @@ package usage import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: base.GET_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxDeleteRetries: base.DELETE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryDiskUsage(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetDiskUsage(ctx) + queryset, err := c.getDiskUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -65,12 +56,12 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { Data: data, } - err = c.retrySaveDiskUsagePerHour(ctx, data) + err = c.saveDiskUsagePerHour(data, ctx) if err != nil { return base.MetricData{}, err } - err = c.retryDeleteDiskUsage(ctx) + err = c.deleteDiskUsage(ctx) if err != nil { return 
base.MetricData{}, err } @@ -78,87 +69,6 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) retryGetDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getDiskUsage(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get disk usage queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get disk usage queryset") -} - -func (c *Check) retrySaveDiskUsagePerHour(ctx context.Context, data []base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveDiskUsagePerHour(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save disk usage per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save disk usage per hour") -} - -func (c *Check) retryDeleteDiskUsage(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteDiskUsage(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - 
log.Debug().Msgf("Retry to delete disk usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete disk usage") -} - func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() now := time.Now() @@ -171,19 +81,22 @@ func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, err Aggregate( ent.Max(diskusage.FieldUsage), ent.Mean(diskusage.FieldUsage), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } return queryset, nil } -func (c *Check) saveDiskUsagePerHour(ctx context.Context, data []base.CheckResult) error { - client := c.GetClient() - err := client.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { +func (c *Check) saveDiskUsagePerHour(data []base.CheckResult, ctx context.Context) error { + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + err = tx.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). SetMountPoint(data[i].MountPoint). @@ -194,20 +107,28 @@ func (c *Check) saveDiskUsagePerHour(ctx context.Context, data []base.CheckResul return err } + _ = tx.Commit() + return nil } func (c *Check) deleteDiskUsage(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-1 * time.Hour) - _, err := client.DiskUsage.Delete(). - Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)). - Exec(ctx) + _, err = tx.DiskUsage.Delete(). 
+ Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 3454f8f..cf7face 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -2,50 +2,41 @@ package memory import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: base.GET_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxDeleteRetries: base.DELETE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryMemoryUsage(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetMemory(ctx) + queryset, err := c.getMemory(ctx) if err != nil { return base.MetricData{}, err } @@ -60,12 +51,12 @@ func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { Data: []base.CheckResult{data}, } - err = c.retrySaveMemoryPerHour(ctx, data) + err = c.saveMemoryPerHour(data, ctx) if err != nil { return base.MetricData{}, err } - err = c.retryDeleteMemory(ctx) + err = c.deleteMemory(ctx) if err != nil { return 
base.MetricData{}, err } @@ -73,87 +64,6 @@ func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) retryGetMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getMemory(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get memory queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get memory queryset") -} - -func (c *Check) retrySaveMemoryPerHour(ctx context.Context, data base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveMemoryPerHour(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save memory usage per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save memory usage per hour") -} - -func (c *Check) retryDeleteMemory(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteMemory(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to 
delete memory usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete memory usage") -} - func (c *Check) getMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { client := c.GetClient() now := time.Now() @@ -165,39 +75,51 @@ func (c *Check) getMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { Aggregate( ent.Max(memory.FieldUsage), ent.Mean(memory.FieldUsage), - ). - Scan(ctx, &queryset) + ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } return queryset, nil } -func (c *Check) saveMemoryPerHour(ctx context.Context, data base.CheckResult) error { - client := c.GetClient() - if err := client.MemoryPerHour.Create(). +func (c *Check) saveMemoryPerHour(data base.CheckResult, ctx context.Context) error { + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + err = tx.MemoryPerHour.Create(). SetTimestamp(data.Timestamp). SetPeakUsage(data.PeakUsage). - SetAvgUsage(data.AvgUsage).Exec(ctx); err != nil { + SetAvgUsage(data.AvgUsage).Exec(ctx) + if err != nil { return err } + _ = tx.Commit() + return nil } func (c *Check) deleteMemory(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-1 * time.Hour) - _, err := client.Memory.Delete(). - Where(memory.TimestampGTE(from), memory.TimestampLTE(now)). - Exec(ctx) + _, err = tx.Memory.Delete(). 
+ Where(memory.TimestampGTE(from), memory.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 8f16118..259f884 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -2,50 +2,41 @@ package net import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxGetRetries: base.GET_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxDeleteRetries: base.DELETE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.queryTraffic(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) queryTraffic(ctx context.Context) (base.MetricData, error) { - queryset, err := c.retryGetTraffic(ctx) + queryset, err := c.getTraffic(ctx) if err != nil { return base.MetricData{}, err } @@ -70,12 +61,12 @@ func (c *Check) queryTraffic(ctx context.Context) (base.MetricData, error) { Data: data, } - err = c.retrySaveTrafficPerHour(ctx, data) + err = c.saveTrafficPerHour(data, ctx) if err != nil { return base.MetricData{}, err } - err = c.retryDeleteTraffic(ctx) + err = c.deleteTraffic(ctx) if err != nil { return base.MetricData{}, err } @@ -83,87 +74,6 @@ func (c *Check) 
queryTraffic(ctx context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) retryGetTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxGetRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - queryset, err := c.getTraffic(ctx) - if err == nil { - return queryset, nil - } - - if attempt < c.retryCount.MaxGetRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to get traffic queryset: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to get traffic queryset") -} - -func (c *Check) retrySaveTrafficPerHour(ctx context.Context, data []base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveTrafficPerHour(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save traffic per hour: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save traffic per hour") -} - -func (c *Check) retryDeleteTraffic(ctx context.Context) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxDeleteRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.deleteTraffic(ctx) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxDeleteRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to delete traffic: %d attempt", attempt) - continue - case 
<-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to delete traffic") -} - func (c *Check) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) { client := c.GetClient() now := time.Now() @@ -184,16 +94,20 @@ func (c *Check) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) ent.As(ent.Mean(traffic.FieldOutputBps), "avg_output_bps"), ).Scan(ctx, &queryset) if err != nil { - log.Debug().Msg(err.Error()) return queryset, err } return queryset, nil } -func (c *Check) saveTrafficPerHour(ctx context.Context, data []base.CheckResult) error { - client := c.GetClient() - err := client.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { +func (c *Check) saveTrafficPerHour(data []base.CheckResult, ctx context.Context) error { + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + err = tx.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetName(data[i].Name). SetPeakInputPps(data[i].PeakInputPps). @@ -209,20 +123,28 @@ func (c *Check) saveTrafficPerHour(ctx context.Context, data []base.CheckResult) return err } + _ = tx.Commit() + return nil } func (c *Check) deleteTraffic(ctx context.Context) error { - client := c.GetClient() + tx, err := c.GetClient().Tx(ctx) + if err != nil { + return err + } + defer tx.Rollback() + now := time.Now() from := now.Add(-1 * time.Hour) - _, err := client.Traffic.Delete(). - Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)). - Exec(ctx) + _, err = tx.Traffic.Delete(). 
+ Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)).Exec(ctx) if err != nil { return err } + _ = tx.Commit() + return nil } diff --git a/pkg/collector/check/realtime/cpu/cpu.go b/pkg/collector/check/realtime/cpu/cpu.go index b0f0c72..e3238c7 100644 --- a/pkg/collector/check/realtime/cpu/cpu.go +++ b/pkg/collector/check/realtime/cpu/cpu.go @@ -6,44 +6,37 @@ import ( "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/cpu" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxCollectRetries: base.COLLECT_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.collectAndSaveCPUUsage(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) collectAndSaveCPUUsage(ctx context.Context) (base.MetricData, error) { - usage, err := c.retryCollectCPUUsage(ctx) + usage, err := c.collectCPUUsage() if err != nil { return base.MetricData{}, err } @@ -57,7 +50,7 @@ func (c *Check) collectAndSaveCPUUsage(ctx context.Context) (base.MetricData, er Data: []base.CheckResult{data}, } - err = c.retrySaveCPUUsage(ctx, data) + err = c.saveCPUUsage(data, ctx) if err != nil { return base.MetricData{}, err } @@ -65,61 +58,6 @@ func (c *Check) collectAndSaveCPUUsage(ctx context.Context) (base.MetricData, er return metric, nil } -func (c *Check) retryCollectCPUUsage(ctx context.Context) (float64, error) { - start := time.Now() - for attempt := 0; attempt <= 
c.retryCount.MaxCollectRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - usage, err := c.collectCPUUsage() - if err == nil { - return usage, nil - } - - if attempt < c.retryCount.MaxCollectRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to collect cpu usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return 0, ctx.Err() - } - } - } - - return 0, fmt.Errorf("failed to collect cpu usage") -} - -func (c *Check) retrySaveCPUUsage(ctx context.Context, data base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - log.Debug().Msg("asdf") - break - } - - err := c.saveCPUUsage(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save cpu usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save cpu usage") -} - func (c *Check) collectCPUUsage() (float64, error) { usage, err := cpu.Percent(0, false) if err != nil { @@ -133,11 +71,12 @@ func (c *Check) collectCPUUsage() (float64, error) { return usage[0], nil } -func (c *Check) saveCPUUsage(ctx context.Context, data base.CheckResult) error { +func (c *Check) saveCPUUsage(data base.CheckResult, ctx context.Context) error { client := c.GetClient() - if err := client.CPU.Create(). + err := client.CPU.Create(). SetTimestamp(data.Timestamp). 
- SetUsage(data.Usage).Exec(ctx); err != nil { + SetUsage(data.Usage).Exec(ctx) + if err != nil { return err } diff --git a/pkg/collector/check/realtime/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go index dc11cbf..f13f2fc 100644 --- a/pkg/collector/check/realtime/disk/io/io.go +++ b/pkg/collector/check/realtime/disk/io/io.go @@ -2,51 +2,43 @@ package diskio import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/disk" ) type Check struct { base.BaseCheck - retryCount base.RetryCount lastMetric map[string]disk.IOCountersStat } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxCollectRetries: base.COLLECT_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, + BaseCheck: base.NewBaseCheck(args), lastMetric: make(map[string]disk.IOCountersStat), } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.collectAndSaveDiskIO(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) collectAndSaveDiskIO(ctx context.Context) (base.MetricData, error) { - ioCounters, err := c.retryCollectDiskIO(ctx) + ioCounters, err := c.collectDiskIO() if err != nil { return base.MetricData{}, err } @@ -56,7 +48,7 @@ func (c *Check) collectAndSaveDiskIO(ctx context.Context) (base.MetricData, erro Data: c.parseDiskIO(ioCounters), } - err = c.retrySaveDiskIO(ctx, metric.Data) + err = c.saveDiskIO(metric.Data, ctx) if err != nil { return base.MetricData{}, err } @@ -64,34 +56,6 @@ func (c *Check) collectAndSaveDiskIO(ctx 
context.Context) (base.MetricData, erro return metric, nil } -func (c *Check) retryCollectDiskIO(ctx context.Context) (map[string]disk.IOCountersStat, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - ioCounters, err := c.collectDiskIO() - - if err == nil { - return ioCounters, nil - } - - if attempt < c.retryCount.MaxCollectRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to collect disk io: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to collect disk io") -} - func (c *Check) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.CheckResult { var data []base.CheckResult for name, ioCounter := range ioCounters { @@ -109,41 +73,14 @@ func (c *Check) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.Ch data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: name, - ReadBytes: readBytes, - WriteBytes: writeBytes, + ReadBytes: &readBytes, + WriteBytes: &writeBytes, }) } return data } -func (c *Check) retrySaveDiskIO(ctx context.Context, data []base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveDiskIO(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save disk io: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save disk io") -} - func (c *Check) collectDiskIO() (map[string]disk.IOCountersStat, error) { ioCounters, err := disk.IOCounters() if err != 
nil { @@ -153,13 +90,13 @@ func (c *Check) collectDiskIO() (map[string]disk.IOCountersStat, error) { return ioCounters, nil } -func (c *Check) saveDiskIO(ctx context.Context, data []base.CheckResult) error { +func (c *Check) saveDiskIO(data []base.CheckResult, ctx context.Context) error { client := c.GetClient() err := client.DiskIO.MapCreateBulk(data, func(q *ent.DiskIOCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). - SetReadBytes(int64(data[i].ReadBytes)). - SetWriteBytes(int64(data[i].WriteBytes)) + SetReadBytes(int64(*data[i].ReadBytes)). + SetWriteBytes(int64(*data[i].WriteBytes)) }).Exec(ctx) if err != nil { return err diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index 94b23dc..9f54c22 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -2,13 +2,10 @@ package diskusage import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/disk" ) @@ -24,37 +21,32 @@ var excludedFileSystems = map[string]bool{ type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxCollectRetries: base.COLLECT_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.collectAndSaveDiskUsage(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) 
collectAndSaveDiskUsage(ctx context.Context) (base.MetricData, error) { - partitions, err := c.retryCollectDiskPartitions(ctx) + partitions, err := c.collectDiskPartitions() if err != nil { return base.MetricData{}, err } @@ -64,7 +56,7 @@ func (c *Check) collectAndSaveDiskUsage(ctx context.Context) (base.MetricData, e Data: c.parseDiskUsage(partitions), } - err = c.retrySaveDiskUsage(ctx, metric.Data) + err = c.saveDiskUsage(metric.Data, ctx) if err != nil { return base.MetricData{}, err } @@ -72,33 +64,6 @@ func (c *Check) collectAndSaveDiskUsage(ctx context.Context) (base.MetricData, e return metric, nil } -func (c *Check) retryCollectDiskPartitions(ctx context.Context) ([]disk.PartitionStat, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - partitions, err := c.collectDiskPartitions() - if err == nil && len(partitions) > 0 { - return partitions, nil - } - - if attempt < c.retryCount.MaxCollectRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to collect disk partitions: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - return nil, fmt.Errorf("failed to collect disk partitions") -} - func (c *Check) parseDiskUsage(partitions []disk.PartitionStat) []base.CheckResult { var data []base.CheckResult for _, partition := range partitions { @@ -119,33 +84,6 @@ func (c *Check) parseDiskUsage(partitions []disk.PartitionStat) []base.CheckResu return data } -func (c *Check) retrySaveDiskUsage(ctx context.Context, data []base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveDiskUsage(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries 
{ - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save disk usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save disk usage") -} - func (c *Check) collectDiskPartitions() ([]disk.PartitionStat, error) { partitions, err := disk.Partitions(true) if err != nil { @@ -171,7 +109,7 @@ func (c *Check) collectDiskUsage(path string) (*disk.UsageStat, error) { return usage, nil } -func (c *Check) saveDiskUsage(ctx context.Context, data []base.CheckResult) error { +func (c *Check) saveDiskUsage(data []base.CheckResult, ctx context.Context) error { client := c.GetClient() err := client.DiskUsage.MapCreateBulk(data, func(q *ent.DiskUsageCreate, i int) { q.SetTimestamp(data[i].Timestamp). diff --git a/pkg/collector/check/realtime/memory/memory.go b/pkg/collector/check/realtime/memory/memory.go index 3bd1254..640c596 100644 --- a/pkg/collector/check/realtime/memory/memory.go +++ b/pkg/collector/check/realtime/memory/memory.go @@ -2,48 +2,40 @@ package memory import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/mem" ) type Check struct { base.BaseCheck - retryCount base.RetryCount } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxCollectRetries: base.COLLECT_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.collectAndSaveMemoryUsage(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- 
metric + + return nil } func (c *Check) collectAndSaveMemoryUsage(ctx context.Context) (base.MetricData, error) { - usage, err := c.retryCollectMemoryUsage(ctx) + usage, err := c.collectMemoryUsage() if err != nil { return base.MetricData{}, err } @@ -57,7 +49,7 @@ func (c *Check) collectAndSaveMemoryUsage(ctx context.Context) (base.MetricData, Data: []base.CheckResult{data}, } - err = c.retrySaveMemoryUsage(ctx, data) + err = c.saveMemoryUsage(data, ctx) if err != nil { return base.MetricData{}, err } @@ -65,60 +57,6 @@ func (c *Check) collectAndSaveMemoryUsage(ctx context.Context) (base.MetricData, return metric, nil } -func (c *Check) retryCollectMemoryUsage(ctx context.Context) (float64, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - usage, err := c.collectMemoryUsage() - if err == nil { - return usage, nil - } - - if attempt < c.retryCount.MaxCollectRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to collect memory usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return 0, ctx.Err() - } - } - } - - return 0, fmt.Errorf("failed to collect memory usage") -} - -func (c *Check) retrySaveMemoryUsage(ctx context.Context, data base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveMemoryUsage(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save memory usage: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save memory usage") -} - func (c *Check) 
collectMemoryUsage() (float64, error) { memory, err := mem.VirtualMemory() if err != nil { @@ -128,11 +66,12 @@ func (c *Check) collectMemoryUsage() (float64, error) { return memory.UsedPercent, nil } -func (c *Check) saveMemoryUsage(ctx context.Context, data base.CheckResult) error { +func (c *Check) saveMemoryUsage(data base.CheckResult, ctx context.Context) error { client := c.GetClient() - if err := client.Memory.Create(). + err := client.Memory.Create(). SetTimestamp(data.Timestamp). - SetUsage(data.Usage).Exec(ctx); err != nil { + SetUsage(data.Usage).Exec(ctx) + if err != nil { return err } diff --git a/pkg/collector/check/realtime/net/net.go b/pkg/collector/check/realtime/net/net.go index d34e3ca..d9338d7 100644 --- a/pkg/collector/check/realtime/net/net.go +++ b/pkg/collector/check/realtime/net/net.go @@ -2,51 +2,44 @@ package net import ( "context" - "fmt" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/net" ) type Check struct { base.BaseCheck - retryCount base.RetryCount lastMetric map[string]net.IOCountersStat } func NewCheck(args *base.CheckArgs) base.CheckStrategy { return &Check{ - BaseCheck: base.NewBaseCheck(args), - retryCount: base.RetryCount{ - MaxCollectRetries: base.COLLECT_MAX_RETRIES, - MaxSaveRetries: base.SAVE_MAX_RETRIES, - MaxRetryTime: base.MAX_RETRY_TIMES, - Delay: base.DEFAULT_DELAY, - }, + BaseCheck: base.NewBaseCheck(args), lastMetric: make(map[string]net.IOCountersStat), } } -func (c *Check) Execute(ctx context.Context) { +func (c *Check) Execute(ctx context.Context) error { metric, err := c.collectAndSaveTraffic(ctx) if err != nil { - return + return err } if ctx.Err() != nil { - return + return ctx.Err() } buffer := c.GetBuffer() buffer.SuccessQueue <- metric + + return nil } func (c *Check) collectAndSaveTraffic(ctx context.Context) 
(base.MetricData, error) { - ioCounters, interfaces, err := c.retryCollectTraffic(ctx) + ioCounters, interfaces, err := c.collectTraffic() if err != nil { return base.MetricData{}, err } @@ -56,7 +49,7 @@ func (c *Check) collectAndSaveTraffic(ctx context.Context) (base.MetricData, err Data: c.parseTraffic(ioCounters, interfaces), } - err = c.retrySaveTraffic(ctx, metric.Data) + err = c.saveTraffic(metric.Data, ctx) if err != nil { return base.MetricData{}, err } @@ -64,33 +57,18 @@ func (c *Check) collectAndSaveTraffic(ctx context.Context) (base.MetricData, err return metric, nil } -func (c *Check) retryCollectTraffic(ctx context.Context) ([]net.IOCountersStat, map[string]net.InterfaceStat, error) { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxCollectRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - ioCounters, ioErr := c.collectIOCounters() - interfaces, ifaceErr := c.collectInterfaces() - - if ioErr == nil && ifaceErr == nil { - return ioCounters, interfaces, nil - } +func (c *Check) collectTraffic() ([]net.IOCountersStat, map[string]net.InterfaceStat, error) { + ioCounters, err := c.collectIOCounters() + if err != nil { + return nil, nil, err + } - if attempt < c.retryCount.MaxCollectRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to collect traffic: %d attempt", attempt) - continue - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - } + interfaces, err := c.collectInterfaces() + if err != nil { + return nil, nil, err } - return nil, nil, fmt.Errorf("failed to collect traffic") + return ioCounters, interfaces, nil } func (c *Check) parseTraffic(ioCOunters []net.IOCountersStat, interfaces map[string]net.InterfaceStat) []base.CheckResult { @@ -113,43 +91,16 @@ func (c *Check) parseTraffic(ioCOunters []net.IOCountersStat, interfaces map[str data = append(data, base.CheckResult{ Timestamp: time.Now(), 
Name: ioCounter.Name, - InputPps: inputPps, - InputBps: inputBps, - OutputPps: outputPps, - OutputBps: outputBps, + InputPps: &inputPps, + InputBps: &inputBps, + OutputPps: &outputPps, + OutputBps: &outputBps, }) } } return data } -func (c *Check) retrySaveTraffic(ctx context.Context, data []base.CheckResult) error { - start := time.Now() - for attempt := 0; attempt <= c.retryCount.MaxSaveRetries; attempt++ { - if time.Since(start) >= c.retryCount.MaxRetryTime { - break - } - - err := c.saveTraffic(ctx, data) - if err == nil { - return nil - } - - if attempt < c.retryCount.MaxSaveRetries { - backoff := utils.CalculateBackOff(c.retryCount.Delay, attempt) - select { - case <-time.After(backoff): - log.Debug().Msgf("Retry to save traffic: %d attempt", attempt) - continue - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return fmt.Errorf("failed to save traffic") -} - func (c *Check) collectInterfaces() (map[string]net.InterfaceStat, error) { ifaces, err := net.Interfaces() if err != nil { @@ -177,15 +128,15 @@ func (c *Check) collectIOCounters() ([]net.IOCountersStat, error) { return ioCounters, nil } -func (c *Check) saveTraffic(ctx context.Context, data []base.CheckResult) error { +func (c *Check) saveTraffic(data []base.CheckResult, ctx context.Context) error { client := c.GetClient() err := client.Traffic.MapCreateBulk(data, func(q *ent.TrafficCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetName(data[i].Name). - SetInputPps(data[i].InputPps). - SetInputBps(data[i].InputBps). - SetOutputPps(data[i].OutputPps). - SetOutputBps(data[i].OutputBps) + SetInputPps(*data[i].InputPps). + SetInputBps(*data[i].InputBps). + SetOutputPps(*data[i].OutputPps). + SetOutputBps(*data[i].OutputBps) }).Exec(ctx) if err != nil { return err From 5869ef425ab1b0247d943be98eebf479c546b084 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 13 Dec 2024 11:19:55 +0900 Subject: [PATCH 102/364] Update due to apply changes Update due to follow a refactoring of Checks and Scheduler. 
--- pkg/collector/check/base/base.go | 2 +- pkg/collector/check/base/types.go | 60 ++++++++++++------------------- pkg/collector/check/check.go | 2 +- pkg/collector/collector.go | 6 ++-- 4 files changed, 26 insertions(+), 44 deletions(-) diff --git a/pkg/collector/check/base/base.go b/pkg/collector/check/base/base.go index af38662..99bb6cb 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -8,7 +8,7 @@ import ( ) type CheckStrategy interface { - Execute(ctx context.Context) + Execute(ctx context.Context) error GetInterval() time.Duration GetName() string GetBuffer() *CheckBuffer diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index c4813eb..0f2289b 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -7,29 +7,22 @@ import ( ) const ( - CPU CheckType = "cpu" - CPU_PER_HOUR CheckType = "cpu_per_hour" - CPU_PER_DAY CheckType = "cpu_per_day" - MEM CheckType = "memory" - MEM_PER_HOUR CheckType = "memory_per_hour" - MEM_PER_DAY CheckType = "memory_per_day" - DISK_USAGE CheckType = "disk_usage" - DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" - DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" - DISK_IO CheckType = "disk_io" - DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" - DISK_IO_PER_DAY CheckType = "disk_io_per_day" - NET CheckType = "net" - NET_PER_HOUR CheckType = "net_per_hour" - NET_PER_DAY CheckType = "net_per_day" - CLEANUP CheckType = "cleanup" - MAX_RETRIES int = 5 - MAX_RETRY_TIMES time.Duration = 1 * time.Minute - COLLECT_MAX_RETRIES int = 3 - GET_MAX_RETRIES int = 2 - SAVE_MAX_RETRIES int = 2 - DELETE_MAX_RETRIES int = 1 - DEFAULT_DELAY time.Duration = 1 * time.Second + CPU CheckType = "cpu" + CPU_PER_HOUR CheckType = "cpu_per_hour" + CPU_PER_DAY CheckType = "cpu_per_day" + MEM CheckType = "memory" + MEM_PER_HOUR CheckType = "memory_per_hour" + MEM_PER_DAY CheckType = "memory_per_day" + DISK_USAGE CheckType = "disk_usage" + 
DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" + DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" + DISK_IO CheckType = "disk_io" + DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" + DISK_IO_PER_DAY CheckType = "disk_io_per_day" + NET CheckType = "net" + NET_PER_HOUR CheckType = "net_per_hour" + NET_PER_DAY CheckType = "net_per_day" + CLEANUP CheckType = "cleanup" ) type CheckType string @@ -42,15 +35,6 @@ type CheckArgs struct { Client *ent.Client } -type RetryCount struct { - MaxCollectRetries int - MaxGetRetries int - MaxSaveRetries int - MaxDeleteRetries int - MaxRetryTime time.Duration - Delay time.Duration -} - type CPUQuerySet struct { Max float64 AVG float64 @@ -97,12 +81,12 @@ type CheckResult struct { Total uint64 `json:"total,omitempty"` Free uint64 `json:"free,omitempty"` Used uint64 `json:"used,omitempty"` - WriteBytes uint64 `json:"write_bytes,omitempty"` - ReadBytes uint64 `json:"read_bytes,omitempty"` - InputPps float64 `json:"input_pps,omitempty"` - InputBps float64 `json:"input_bps,omitempty"` - OutputPps float64 `json:"output_pps,omitempty"` - OutputBps float64 `json:"output_bps,omitempty"` + WriteBytes *uint64 `json:"write_bytes,omitempty"` + ReadBytes *uint64 `json:"read_bytes,omitempty"` + InputPps *float64 `json:"input_pps,omitempty"` + InputBps *float64 `json:"input_bps,omitempty"` + OutputPps *float64 `json:"output_pps,omitempty"` + OutputBps *float64 `json:"output_bps,omitempty"` PeakUsage float64 `json:"peak_usage,omitempty"` AvgUsage float64 `json:"avg_usage,omitempty"` PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 2c75d05..c85f59b 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -43,7 +43,7 @@ var checkFactories = map[base.CheckType]newCheck{ } type Check interface { - Execute(ctx context.Context) + Execute(ctx context.Context) error } type CheckFactory interface { diff --git a/pkg/collector/collector.go 
b/pkg/collector/collector.go index de1658d..6a1f913 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -110,16 +110,14 @@ func NewCollector(args collectorArgs) (*Collector, error) { if err != nil { return nil, err } - if err := collector.scheduler.AddTask(check); err != nil { - return nil, err - } + collector.scheduler.AddTask(check) } return collector, nil } func (c *Collector) Start(ctx context.Context) error { - go c.scheduler.Start(ctx) + go c.scheduler.Start(ctx, c.buffer.Capacity) for i := 0; i < c.buffer.Capacity; i++ { c.wg.Add(1) From 0cf3f95b5c4c2949b9467f41897827e761f26008 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 13 Dec 2024 13:41:45 +0900 Subject: [PATCH 103/364] Refactor Transporter Refactor Transporter that was violating Dependency Inversion Principle. Update Collector due to apply Transporter's change. --- pkg/collector/collector.go | 6 ++- pkg/collector/transporter/transporter.go | 57 +++++++--------------- pkg/collector/transporter/utils.go | 60 ++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 41 deletions(-) create mode 100644 pkg/collector/transporter/utils.go diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 6a1f913..23605e2 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -47,7 +47,9 @@ type collectConf struct { func InitCollector(session *session.Session, client *ent.Client) *Collector { checkFactory := &check.DefaultCheckFactory{} - transporterFactory := &transporter.DefaultTransporterFactory{} + + urlResolver := transporter.NewURLResolver() + transporterFactory := transporter.NewDefaultTransporterFactory(urlResolver) var conf []collectConf resp, statusCode, err := session.Get(confURL, 10) @@ -96,7 +98,7 @@ func NewCollector(args collectorArgs) (*Collector, error) { } for _, entry := range args.conf { - duration := time.Duration(entry.Interval) * time.Minute + duration := time.Duration(entry.Interval) * time.Second name := string(entry.Type) + "_" 
+ uuid.NewString() checkArgs := base.CheckArgs{ Type: entry.Type, diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index c524764..ad309ed 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -1,30 +1,10 @@ package transporter import ( - "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/scheduler" ) -var checkTypeUrlMap = map[base.CheckType]string{ - base.CPU: "/api/metrics/realtime/cpu/", - base.CPU_PER_HOUR: "/api/metrics/hourly/cpu/", - base.CPU_PER_DAY: "/api/metrics/daily/cpu/", - base.MEM: "/api/metrics/realtime/memory/", - base.MEM_PER_HOUR: "/api/metrics/hourly/memory/", - base.MEM_PER_DAY: "/api/metrics/daily/memory/", - base.DISK_USAGE: "/api/metrics/realtime/disk-usage/", - base.DISK_USAGE_PER_HOUR: "/api/metrics/hourly/disk-usage/", - base.DISK_USAGE_PER_DAY: "/api/metrics/daily/disk-usage/", - base.DISK_IO: "/api/metrics/realtime/disk-io/", - base.DISK_IO_PER_HOUR: "/api/metrics/hourly/disk-io/", - base.DISK_IO_PER_DAY: "/api/metrics/daily/disk-io/", - base.NET: "/api/metrics/realtime/traffic/", - base.NET_PER_HOUR: "/api/metrics/hourly/traffic/", - base.NET_PER_DAY: "/api/metrics/daily/traffic/", -} - type TransportStrategy interface { Send(data base.MetricData) error } @@ -33,39 +13,38 @@ type TransporterFactory interface { CreateTransporter(session *scheduler.Session) (TransportStrategy, error) } -type DefaultTransporterFactory struct{} +type DefaultTransporterFactory struct { + resolver *URLResolver +} -type Transporter struct { - session *scheduler.Session +func NewDefaultTransporterFactory(resolver *URLResolver) *DefaultTransporterFactory { + return &DefaultTransporterFactory{resolver: resolver} } // TODO: Support for various transporters will be required in the future func (f *DefaultTransporterFactory) CreateTransporter(session *scheduler.Session) (TransportStrategy, error) { - return 
NewTransporter(session), nil + return NewTransporter(session, f.resolver), nil } -func NewTransporter(session *scheduler.Session) *Transporter { +type Transporter struct { + session *scheduler.Session + resolver *URLResolver +} + +func NewTransporter(session *scheduler.Session, resolver *URLResolver) *Transporter { return &Transporter{ - session: session, + session: session, + resolver: resolver, } } func (t *Transporter) Send(data base.MetricData) error { - checkType := data.Type - - var err error - switch checkType { - case base.CPU, base.CPU_PER_HOUR, base.CPU_PER_DAY, - base.MEM, base.MEM_PER_HOUR, base.MEM_PER_DAY: - _, _, err = t.session.Post(checkTypeUrlMap[checkType], data.Data[0], 10) - case base.DISK_USAGE, base.DISK_USAGE_PER_HOUR, base.DISK_USAGE_PER_DAY, - base.DISK_IO, base.DISK_IO_PER_HOUR, base.DISK_IO_PER_DAY, - base.NET, base.NET_PER_HOUR, base.NET_PER_DAY: - _, _, err = t.session.Post(checkTypeUrlMap[checkType], data.Data, 10) - default: - err = fmt.Errorf("unknown check type: %s", checkType) + url, err := t.resolver.ResolveURL(data.Type) + if err != nil { + return err } + _, _, err = t.session.Post(url, data.Data, 10) if err != nil { return err } diff --git a/pkg/collector/transporter/utils.go b/pkg/collector/transporter/utils.go new file mode 100644 index 0000000..aabdd96 --- /dev/null +++ b/pkg/collector/transporter/utils.go @@ -0,0 +1,60 @@ +package transporter + +import ( + "fmt" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" +) + +const ( + CPU string = "/api/metrics/realtime/cpu/" + CPU_PER_HOUR string = "/api/metrics/hourly/cpu/" + CPU_PER_DAY string = "/api/metrics/daily/cpu/" + MEM string = "/api/metrics/realtime/memory/" + MEM_PER_HOUR string = "/api/metrics/hourly/memory/" + MEM_PER_DAY string = "/api/metrics/daily/memory/" + DISK_USAGE string = "/api/metrics/realtime/disk-usage/" + DISK_USAGE_PER_HOUR string = "/api/metrics/hourly/disk-usage/" + DISK_USAGE_PER_DAY string = "/api/metrics/daily/disk-usage/" + 
DISK_IO string = "/api/metrics/realtime/disk-io/" + DISK_IO_PER_HOUR string = "/api/metrics/hourly/disk-io/" + DISK_IO_PER_DAY string = "/api/metrics/daily/disk-io/" + NET string = "/api/metrics/realtime/traffic/" + NET_PER_HOUR string = "/api/metrics/hourly/traffic/" + NET_PER_DAY string = "/api/metrics/daily/traffic/" +) + +type URLResolver struct { + checkTypeToURL map[base.CheckType]string +} + +func NewURLResolver() *URLResolver { + return &URLResolver{ + checkTypeToURL: map[base.CheckType]string{ + base.CPU: CPU, + base.CPU_PER_HOUR: CPU_PER_HOUR, + base.CPU_PER_DAY: CPU_PER_DAY, + base.MEM: MEM, + base.MEM_PER_HOUR: MEM_PER_HOUR, + base.MEM_PER_DAY: MEM_PER_DAY, + base.DISK_USAGE: DISK_USAGE, + base.DISK_USAGE_PER_HOUR: DISK_USAGE_PER_HOUR, + base.DISK_USAGE_PER_DAY: DISK_USAGE_PER_DAY, + base.DISK_IO: DISK_IO, + base.DISK_IO_PER_HOUR: DISK_IO_PER_HOUR, + base.DISK_IO_PER_DAY: DISK_IO_PER_DAY, + base.NET: NET, + base.NET_PER_HOUR: NET_PER_HOUR, + base.NET_PER_DAY: NET_PER_DAY, + }, + } +} + +func (r *URLResolver) ResolveURL(checkType base.CheckType) (string, error) { + url, exists := r.checkTypeToURL[checkType] + if !exists { + return "", fmt.Errorf("unknown check type: %s", checkType) + } + + return url, nil +} From 0046605728b0bc7d95488fdd384ae223df550f61 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Fri, 13 Dec 2024 14:47:42 +0900 Subject: [PATCH 104/364] Minor fix --- pkg/runner/client.go | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 4080475..e39b294 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -13,8 +13,8 @@ import ( ) const ( - MinConnectInterval = 5 * time.Second - MaxConnectInterval = 60 * time.Second + minConnectInterval = 5 * time.Second + maxConnectInterval = 60 * time.Second eventCommandAckURL = "/api/events/commands/%s/ack/" eventCommandFinURL = "/api/events/commands/%s/fin/" @@ -55,11 +55,23 @@ func (wc 
*WebsocketClient) RunForever() { if err != nil { wc.closeAndReconnect() } + // Sends "ping" query for Alpacon to verify WebSocket session status without error handling. + _ = wc.sendPingQuery() wc.commandRequestHandler(message) } } } +func (wc *WebsocketClient) sendPingQuery() error { + pingQuery := map[string]string{"query": "ping"} + err := wc.writeJSON(pingQuery) + if err != nil { + return err + } + + return nil +} + func (wc *WebsocketClient) readMessage() (messageType int, message []byte, err error) { messageType, message, err = wc.conn.ReadMessage() if err != nil { @@ -70,13 +82,15 @@ func (wc *WebsocketClient) readMessage() (messageType int, message []byte, err e } func (wc *WebsocketClient) connect() { + log.Info().Msgf("Connecting to websocket at %s", config.GlobalSettings.WSPath) + wsBackoff := backoff.NewExponentialBackOff() - wsBackoff.InitialInterval = MinConnectInterval - wsBackoff.MaxInterval = MaxConnectInterval + wsBackoff.InitialInterval = minConnectInterval + wsBackoff.MaxInterval = maxConnectInterval wsBackoff.MaxElapsedTime = 0 // No time limit for retries (infinite retry) wsBackoff.RandomizationFactor = 0 // Retry forever - err := backoff.Retry(func() error { + operation := func() error { conn, _, err := websocket.DefaultDialer.Dial(config.GlobalSettings.WSPath, wc.requestHeader) if err != nil { nextInterval := wsBackoff.NextBackOff() @@ -87,10 +101,12 @@ func (wc *WebsocketClient) connect() { wc.conn = conn log.Debug().Msg("Backhaul connection established") return nil - }, wsBackoff) + } + err := backoff.Retry(operation, wsBackoff) if err != nil { - log.Error().Err(err).Msgf("Could not connect to %s: terminated unexpectedly", config.GlobalSettings.WSPath) + log.Error().Err(err).Msg("Unexpected error occurred during backoff") + return } } @@ -144,10 +160,6 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { } } - // Sends "hello" for Alpacon to verify WebSocket session status without error handling. 
- helloQuery := map[string]string{"query": "hello"} - _ = wc.writeJSON(helloQuery) - switch content.Query { case "command": scheduler.Rqueue.Post(fmt.Sprintf(eventCommandAckURL, content.Command.ID), From ba365383933764fe37da7ea6ef8d914adef91fa6 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Fri, 13 Dec 2024 14:59:12 +0900 Subject: [PATCH 105/364] Add initialization logs for agent startup --- cmd/alpamon/command/root.go | 6 ++++-- pkg/scheduler/reporter.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 01a6294..d50a19c 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/version" "os" "syscall" @@ -11,7 +12,6 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/alpacanetworks/alpamon-go/pkg/scheduler" "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/alpacanetworks/alpamon-go/pkg/version" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -40,10 +40,11 @@ func runAgent() { } defer func() { _ = os.Remove(pidFilePath) }() + fmt.Printf("alpamon version %s starting.\n", version.Version) + // Config & Settings settings := config.LoadConfig() config.InitSettings(settings) - fmt.Printf("alpamon-go %s starting.\n", version.Version) // Session session := scheduler.InitSession() @@ -55,6 +56,7 @@ func runAgent() { // Logger logFile := logger.InitLogger() defer func() { _ = logFile.Close() }() + log.Info().Msg("alpamon initialized and running.") // Commit runner.CommitAsync(session, commissioned) diff --git a/pkg/scheduler/reporter.go b/pkg/scheduler/reporter.go index 04d06d7..e9db9ff 100644 --- a/pkg/scheduler/reporter.go +++ b/pkg/scheduler/reporter.go @@ -51,7 +51,7 @@ func reportStartupEvent() { eventData, _ := json.Marshal(map[string]string{ "reporter": "alpamon", "record": "started", - "description": fmt.Sprintf("alpamon-go %s 
started running.", version.Version), + "description": fmt.Sprintf("alpamon %s started running.", version.Version), }) Rqueue.Post(startUpEventURL, eventData, 10, time.Time{}) From 850dcea2afb85483150c8172b6efcf29e35cbc18 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 13 Dec 2024 15:02:37 +0900 Subject: [PATCH 106/364] Add retry logic at Transporter & Collector Refactor Collector and Transporte to retry with exponential backoff in Collector when data transmission to Alpacon Server via Transporter's Send() fails. --- pkg/collector/collector.go | 99 +++++++++++++++++------- pkg/collector/transporter/transporter.go | 8 +- 2 files changed, 76 insertions(+), 31 deletions(-) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 23605e2..3166230 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -19,8 +19,10 @@ import ( "github.com/rs/zerolog/log" ) -var ( - confURL = "/api/metrics/config/" +const ( + confURL = "/api/metrics/config/" + maxRetryCount = 5 + delay = 1 * time.Second ) type Collector struct { @@ -32,6 +34,11 @@ type Collector struct { stopChan chan struct{} } +type collectConf struct { + Type base.CheckType + Interval int +} + type collectorArgs struct { session *session.Session client *ent.Client @@ -40,30 +47,16 @@ type collectorArgs struct { transportFactory transporter.TransporterFactory } -type collectConf struct { - Type base.CheckType - Interval int -} - func InitCollector(session *session.Session, client *ent.Client) *Collector { - checkFactory := &check.DefaultCheckFactory{} - - urlResolver := transporter.NewURLResolver() - transporterFactory := transporter.NewDefaultTransporterFactory(urlResolver) - - var conf []collectConf - resp, statusCode, err := session.Get(confURL, 10) - if statusCode == http.StatusOK { - err = json.Unmarshal(resp, &conf) - if err != nil { - log.Error().Err(err).Msg("Failed to unmarshal collection config") - os.Exit(1) - } - } else { - log.Error().Err(err).Msgf("HTTP %d: Failed to 
get collection config", statusCode) + conf, err := fetchConfig(session) + if err != nil { + log.Error().Err(err).Msg("Failed to fetch collector config") os.Exit(1) } + checkFactory := &check.DefaultCheckFactory{} + urlResolver := transporter.NewURLResolver() + transporterFactory := transporter.NewDefaultTransporterFactory(urlResolver) args := collectorArgs{ session: session, client: client, @@ -81,6 +74,24 @@ func InitCollector(session *session.Session, client *ent.Client) *Collector { return collector } +func fetchConfig(session *session.Session) ([]collectConf, error) { + resp, statusCode, err := session.Get(confURL, 10) + if err != nil { + return nil, err + } + if statusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get collection config: %d status code", statusCode) + } + + var conf []collectConf + err = json.Unmarshal(resp, &conf) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return conf, nil +} + func NewCollector(args collectorArgs) (*Collector, error) { transporter, err := args.transportFactory.CreateTransporter(args.session) if err != nil { @@ -88,7 +99,6 @@ func NewCollector(args collectorArgs) (*Collector, error) { } checkBuffer := base.NewCheckBuffer(len(args.conf) * 2) - collector := &Collector{ transporter: transporter, scheduler: scheduler.NewScheduler(), @@ -97,6 +107,15 @@ func NewCollector(args collectorArgs) (*Collector, error) { stopChan: make(chan struct{}), } + err = collector.initTasks(args) + if err != nil { + return nil, err + } + + return collector, nil +} + +func (c *Collector) initTasks(args collectorArgs) error { for _, entry := range args.conf { duration := time.Duration(entry.Interval) * time.Second name := string(entry.Type) + "_" + uuid.NewString() @@ -104,18 +123,17 @@ func NewCollector(args collectorArgs) (*Collector, error) { Type: entry.Type, Name: name, Interval: time.Duration(duration.Minutes() * float64(time.Minute)), - Buffer: checkBuffer, + Buffer: c.buffer, Client: 
args.client, } check, err := args.checkFactory.CreateCheck(&checkArgs) if err != nil { - return nil, err + return err } - collector.scheduler.AddTask(check) + c.scheduler.AddTask(check) } - - return collector, nil + return nil } func (c *Collector) Start(ctx context.Context) error { @@ -167,11 +185,32 @@ func (c *Collector) failureQueueWorker(ctx context.Context) { return case <-retryTicker.C: metric := <-c.buffer.FailureQueue - if err := c.transporter.Send(metric); err != nil { - c.buffer.FailureQueue <- metric + err := c.retryWithBackoff(ctx, metric) + if err != nil { + log.Error().Err(err).Msgf("Failed to check metric: %s", metric.Type) + } + } + } +} + +func (c *Collector) retryWithBackoff(ctx context.Context, metric base.MetricData) error { + retryCount := 0 + for retryCount < maxRetryCount { + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Duration(1< 300 { + return fmt.Errorf("%d Bad Request: %s", statusCode, resp) + } + return nil } From e708dc33b5c6c3a10405051bab98b98752c8890f Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Fri, 13 Dec 2024 21:22:35 +0900 Subject: [PATCH 107/364] Add SetReadDeadline to websocket client to reconnect on read timeout --- pkg/runner/client.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index e39b294..7fbb06f 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -13,8 +13,9 @@ import ( ) const ( - minConnectInterval = 5 * time.Second - maxConnectInterval = 60 * time.Second + minConnectInterval = 5 * time.Second + maxConnectInterval = 60 * time.Second + connectionReadTimeout = 35 * time.Minute eventCommandAckURL = "/api/events/commands/%s/ack/" eventCommandFinURL = "/api/events/commands/%s/fin/" @@ -51,6 +52,10 @@ func (wc *WebsocketClient) RunForever() { case <-wc.quitChan: return default: + err := wc.conn.SetReadDeadline(time.Now().Add(connectionReadTimeout)) + if err != nil { + wc.closeAndReconnect() + } _, message, 
err := wc.readMessage() if err != nil { wc.closeAndReconnect() From 46906de459e1b18c5bcde4348a9cb61bc6e757cf Mon Sep 17 00:00:00 2001 From: Eunyoung Jeong Date: Sun, 15 Dec 2024 11:47:43 +0900 Subject: [PATCH 108/364] Log received commands for debugging --- pkg/runner/command.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 706dee4..1ea940f 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -46,6 +46,8 @@ func (cr *CommandRunner) Run() { var exitCode int var result string + log.Debug().Msgf("Received command: %s> %s", cr.command.Shell, cr.command.Line) + start := time.Now() switch cr.command.Shell { case "internal": @@ -93,7 +95,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { cmd = "apt-get update -y && " + "apt-get upgrade -y alpamon" } else if utils.PlatformLike == "rhel" { - cmd = "yum update- y &&" + + cmd = "yum update -y &&" + "yum upgrade -y alpamon" } else { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) From 02e91e05b510e5a2c7abe6a69674feda2ebc0b38 Mon Sep 17 00:00:00 2001 From: Eunyoung Jeong Date: Sun, 15 Dec 2024 23:11:48 +0900 Subject: [PATCH 109/364] Fix log messages --- pkg/runner/client.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 7fbb06f..5a1fbb3 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -87,7 +87,7 @@ func (wc *WebsocketClient) readMessage() (messageType int, message []byte, err e } func (wc *WebsocketClient) connect() { - log.Info().Msgf("Connecting to websocket at %s", config.GlobalSettings.WSPath) + log.Info().Msgf("Connecting to websocket at %s...", config.GlobalSettings.WSPath) wsBackoff := backoff.NewExponentialBackOff() wsBackoff.InitialInterval = minConnectInterval @@ -99,18 +99,18 @@ func (wc *WebsocketClient) connect() { conn, _, err := websocket.DefaultDialer.Dial(config.GlobalSettings.WSPath, 
wc.requestHeader) if err != nil { nextInterval := wsBackoff.NextBackOff() - log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds", config.GlobalSettings.WSPath, int(nextInterval.Seconds())) + log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds.", config.GlobalSettings.WSPath, int(nextInterval.Seconds())) return err } wc.conn = conn - log.Debug().Msg("Backhaul connection established") + log.Debug().Msg("Backhaul connection established.") return nil } err := backoff.Retry(operation, wsBackoff) if err != nil { - log.Error().Err(err).Msg("Unexpected error occurred during backoff") + log.Error().Err(err).Msg("Unexpected error occurred during backoff.") return } } @@ -127,7 +127,7 @@ func (wc *WebsocketClient) close() { if wc.conn != nil { err := wc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) if err != nil { - log.Debug().Err(err).Msg("Failed to write close message to websocket") + log.Debug().Err(err).Msg("Failed to write close message to websocket.") } _ = wc.conn.Close() } @@ -188,7 +188,7 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { func (wc *WebsocketClient) writeJSON(data interface{}) error { err := wc.conn.WriteJSON(data) if err != nil { - log.Debug().Err(err).Msgf("Failed to write json data to websocket") + log.Debug().Err(err).Msgf("Failed to write json data to websocket.") return err } return nil From e10bcba5c9ce22025ebf76b3c5d76780e1217a17 Mon Sep 17 00:00:00 2001 From: Eunyoung Jeong Date: Sun, 15 Dec 2024 23:12:00 +0900 Subject: [PATCH 110/364] Fix the command to upgrade alpamon --- pkg/runner/command.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 1ea940f..d6886fd 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -93,10 +93,9 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { case "upgrade": if utils.PlatformLike == 
"debian" { cmd = "apt-get update -y && " + - "apt-get upgrade -y alpamon" + "apt-get install --only-upgrade alpamon" } else if utils.PlatformLike == "rhel" { - cmd = "yum update -y &&" + - "yum upgrade -y alpamon" + cmd = "yum update -y alpamon" } else { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) } From 6c2ef28174e9105078548a26765c4f856edffe8f Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Mon, 16 Dec 2024 11:59:55 +0900 Subject: [PATCH 111/364] Use preremove instead of postremove to preserve config files during upgrade --- .goreleaser.yaml | 2 +- scripts/postremove.sh | 39 --------------------------------------- scripts/preremove.sh | 12 ++++++++++++ 3 files changed, 13 insertions(+), 40 deletions(-) delete mode 100644 scripts/postremove.sh create mode 100644 scripts/preremove.sh diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 7b3580f..0767516 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -43,7 +43,7 @@ nfpms: scripts: postinstall: "scripts/postinstall.sh" - postremove: "scripts/postremove.sh" + preremove: "scripts/preremove.sh" changelog: sort: asc diff --git a/scripts/postremove.sh b/scripts/postremove.sh deleted file mode 100644 index f6cdb00..0000000 --- a/scripts/postremove.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh - -TMP_FILE_PATH="/usr/lib/tmpfiles.d/alpamon.conf" -SVC_FILE_PATH="/lib/systemd/system/alpamon.service" -LOG_FILE_PATH="/var/log/alpamon/alpamon.log" - -main() { - clean_systemd - clean_files - clean_directories - echo "Alpamon has been removed successfully!" -} - -clean_systemd() { - echo "Uninstalling systemd service for Alpamon..." - - systemctl stop alpamon.service || true - systemctl disable alpamon.service || true - systemctl daemon-reload || true -} - -clean_files() { - echo "Removing configuration files..." 
- - rm -f /etc/alpamon/alpamon.conf || true - rm -f "$TMP_FILE_PATH" || true - rm -f "$SVC_FILE_PATH" || true - rm -f "$LOG_FILE_PATH" || true -} - -clean_directories() { - echo "Removing directories..." - - rm -rf /etc/alpamon 2>/dev/null || true - rm -rf /var/log/alpamon 2>/dev/null || true -} - -set -ue -main \ No newline at end of file diff --git a/scripts/preremove.sh b/scripts/preremove.sh new file mode 100644 index 0000000..a3b8f42 --- /dev/null +++ b/scripts/preremove.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +if [ "$1" = 'remove' ]; then + echo 'Stopping and disabling Alpamon service...' + + if command -v systemctl >/dev/null; then + systemctl stop alpamon.service || true + systemctl disable alpamon.service || true + else + echo "Systemctl is not available. Skipping service management." + fi +fi \ No newline at end of file From 4398f873f5d3888ddff36114c957f4c78000f558 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Tue, 17 Dec 2024 11:40:48 +0900 Subject: [PATCH 112/364] Include previously removed packages in commit --- pkg/runner/commit.go | 16 ++++++++-------- pkg/runner/commit_types.go | 20 ++++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 5df95cd..73507d0 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -144,11 +144,11 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { log.Debug().Err(err).Msg("Failed to retrieve network addresses") } remoteData = &[]Address{} - //case "packages": - // if currentData, err = getSystemPackages(); err != nil { - // log.Debug().Err(err).Msg("Failed to retrieve system packages") - // } - // remoteData = &[]SystemPackageData{} + case "packages": + if currentData, err = getSystemPackages(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve system packages") + } + remoteData = &[]SystemPackageData{} default: log.Warn().Msgf("Unknown key: %s", key) continue @@ -250,9 +250,9 @@ func collectData() *commitData { 
if data.Addresses, err = getNetworkAddresses(); err != nil { log.Debug().Err(err).Msg("Failed to retrieve network addresses") } - //if data.Packages, err = getSystemPackages(); err != nil { - // log.Debug().Err(err).Msg("Failed to retrieve system packages") - //} + if data.Packages, err = getSystemPackages(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve system packages") + } return data } diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index ccb7dbc..02c4308 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -137,16 +137,16 @@ type Address struct { } type commitData struct { - Version string `json:"version"` - Load float64 `json:"load"` - Info SystemData `json:"info"` - OS OSData `json:"os"` - Time TimeData `json:"time"` - Users []UserData `json:"users"` - Groups []GroupData `json:"groups"` - Interfaces []Interface `json:"interfaces"` - Addresses []Address `json:"addresses"` - // Packages []SystemPackageData `json:"packages"` + Version string `json:"version"` + Load float64 `json:"load"` + Info SystemData `json:"info"` + OS OSData `json:"os"` + Time TimeData `json:"time"` + Users []UserData `json:"users"` + Groups []GroupData `json:"groups"` + Interfaces []Interface `json:"interfaces"` + Addresses []Address `json:"addresses"` + Packages []SystemPackageData `json:"packages"` } // Defines the ComparableData interface for comparing different types. 
From 9327ffd32beeb77ebc44c878572329e1f10ece76 Mon Sep 17 00:00:00 2001 From: Younghwan Kim Date: Thu, 19 Dec 2024 16:19:43 +0900 Subject: [PATCH 113/364] Add Dockerfiles for tests --- Dockerfiles/build.sh | 13 +++++++++++ Dockerfiles/centos/7/Dockerfile | 29 ++++++++++++++++++++++++ Dockerfiles/centos/7/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/debian/10/Dockerfile | 31 ++++++++++++++++++++++++++ Dockerfiles/debian/10/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/debian/11/Dockerfile | 31 ++++++++++++++++++++++++++ Dockerfiles/debian/11/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/redhat/8/Dockerfile | 29 ++++++++++++++++++++++++ Dockerfiles/redhat/8/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/redhat/9/Dockerfile | 29 ++++++++++++++++++++++++ Dockerfiles/redhat/9/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/ubuntu/18.04/Dockerfile | 31 ++++++++++++++++++++++++++ Dockerfiles/ubuntu/18.04/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/ubuntu/20.04/Dockerfile | 31 ++++++++++++++++++++++++++ Dockerfiles/ubuntu/20.04/entrypoint.sh | 22 ++++++++++++++++++ Dockerfiles/ubuntu/22.04/Dockerfile | 31 ++++++++++++++++++++++++++ Dockerfiles/ubuntu/22.04/entrypoint.sh | 22 ++++++++++++++++++ 17 files changed, 431 insertions(+) create mode 100755 Dockerfiles/build.sh create mode 100644 Dockerfiles/centos/7/Dockerfile create mode 100644 Dockerfiles/centos/7/entrypoint.sh create mode 100644 Dockerfiles/debian/10/Dockerfile create mode 100644 Dockerfiles/debian/10/entrypoint.sh create mode 100644 Dockerfiles/debian/11/Dockerfile create mode 100644 Dockerfiles/debian/11/entrypoint.sh create mode 100644 Dockerfiles/redhat/8/Dockerfile create mode 100644 Dockerfiles/redhat/8/entrypoint.sh create mode 100644 Dockerfiles/redhat/9/Dockerfile create mode 100644 Dockerfiles/redhat/9/entrypoint.sh create mode 100644 Dockerfiles/ubuntu/18.04/Dockerfile create mode 100644 Dockerfiles/ubuntu/18.04/entrypoint.sh create mode 100644 
Dockerfiles/ubuntu/20.04/Dockerfile create mode 100644 Dockerfiles/ubuntu/20.04/entrypoint.sh create mode 100644 Dockerfiles/ubuntu/22.04/Dockerfile create mode 100644 Dockerfiles/ubuntu/22.04/entrypoint.sh diff --git a/Dockerfiles/build.sh b/Dockerfiles/build.sh new file mode 100755 index 0000000..3b72552 --- /dev/null +++ b/Dockerfiles/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +docker build -t alpamon:debian-10 -f Dockerfiles/debian/10/Dockerfile . +docker build -t alpamon:debian-11 -f Dockerfiles/debian/11/Dockerfile . + +docker build -t alpamon:ubuntu-18.04 -f Dockerfiles/ubuntu/18.04/Dockerfile . +docker build -t alpamon:ubuntu-20.04 -f Dockerfiles/ubuntu/20.04/Dockerfile . +docker build -t alpamon:ubuntu-22.04 -f Dockerfiles/ubuntu/22.04/Dockerfile . + +docker build -t alpamon:redhat-8 -f Dockerfiles/redhat/8/Dockerfile . +docker build -t alpamon:redhat-9 -f Dockerfiles/redhat/9/Dockerfile . + +docker build -t alpamon:centos-7 -f Dockerfiles/centos/7/Dockerfile . \ No newline at end of file diff --git a/Dockerfiles/centos/7/Dockerfile b/Dockerfiles/centos/7/Dockerfile new file mode 100644 index 0000000..bdedd19 --- /dev/null +++ b/Dockerfiles/centos/7/Dockerfile @@ -0,0 +1,29 @@ +FROM golang:1.22 AS builder + +# Set golang env +ENV GO111MODULE=on \ + CGO_ENABLED=0 \ + GOOS=linux \ + GOARCH=amd64 + +WORKDIR /build + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY . . 
+ +RUN go build -o alpamon ./cmd/alpamon/main.go + +FROM centos:centos7 + +WORKDIR /usr/local/alpamon + +COPY --from=builder /build/alpamon ./alpamon + +COPY Dockerfiles/centos/7/entrypoint.sh /usr/local/alpamon/entrypoint.sh + +RUN chmod +x /usr/local/alpamon/entrypoint.sh + +ENTRYPOINT ["/usr/local/alpamon/entrypoint.sh"] \ No newline at end of file diff --git a/Dockerfiles/centos/7/entrypoint.sh b/Dockerfiles/centos/7/entrypoint.sh new file mode 100644 index 0000000..9729630 --- /dev/null +++ b/Dockerfiles/centos/7/entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +ALPACON_URL=${ALPACON_URL:-"http://host.docker.internal:8000"} +PLUGIN_ID=${PLUGIN_ID:-"959ae5c7-84b0-4fba-8c1e-5b8a3d6dcadc"} +PLUGIN_KEY=${PLUGIN_KEY:-"alpaca"} + +mkdir -p /etc/alpamon + +cat > /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < /etc/alpamon/alpamon.conf < Date: Fri, 20 Dec 2024 15:35:06 +0900 Subject: [PATCH 114/364] Update README.md --- README.md | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 19648f1..4277ccb 100644 --- a/README.md +++ b/README.md @@ -88,6 +88,38 @@ go run main.go ``` Ensure that you are in the correct directory (`/cmd/alpamon`), as this is where the `main.go` file resides. +### Docker +You can also use docker to test alpamon in various Linux distributions. We use Docker Desktop to test alpamon on following distributions. + +- Ubuntu: 18.04, 20.04, 22.04 +- Debian: 10, 11 +- RedHat: 8, 9 +- CentOS: 7 + +#### Build +Build docker images with the build script. +``` +./Dockerfiles/build.sh +``` + +#### Run +You can run containers for these images in Docker Desktop or using command line like below. 
+``` +docker run alpamon:ubuntu-22.04 +``` +- Note : This will run the container with the default workspace URL (http://localhost:8000), plugin ID, and key values. +For more details, refer to the `entrypoint.sh` file in the Dockerfiles directory corresponding to each operating system. + +To run the container with a custom workspace URL, plugin ID, and key, use the following command: +``` +docker run \ + -e ALPACON_URL="your_workspace_url" \ + -e PLUGIN_ID="your_plugin_id" \ + -e PLUGIN_KEY="your_plugin_key" \ + alpamon:latest +``` +- Replace the environment variable values (your_workspace_url, your_plugin_id, your_plugin_key) with your actual workspace configuration. + ### Deploy as a service For Linux systems supporting `systemd`, you can run `alpamon` as a systemd service. In this case, you need to adapt `alpamon/config/alpamon.service` for your environment. @@ -117,4 +149,4 @@ The result would look like the following. The status must be loaded and active ( alpamon.service - alpamon agent for Alpacon Loaded: loaded (/lib/systemd/system/alpamon.service; enabled; vendor preset: enabled) Active: active (running) since Thu 2023-09-28 23:48:55 KST; 4 days ago -``` \ No newline at end of file +``` From 7c04bafdf8dfb7a33755a340c3fb329c0a666042 Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 20 Dec 2024 17:40:53 +0900 Subject: [PATCH 115/364] Apply versioned migrations Apply versioned migration to ensure stability when adding new metrics or expanding the system in the future. 
--- pkg/db/migrate.go | 31 ++++++++++---- .../migration/20241220080719_init_schemas.sql | 40 +++++++++++++++++++ pkg/db/migration/atlas.sum | 2 + 3 files changed, 65 insertions(+), 8 deletions(-) create mode 100644 pkg/db/migration/20241220080719_init_schemas.sql create mode 100644 pkg/db/migration/atlas.sum diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index db6f84c..b8caa6c 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -2,18 +2,33 @@ package db import ( "context" + "fmt" + "os" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/migrate" + "ariga.io/atlas-go-sdk/atlasexec" ) -// TODO: Apply Versioned Migrations -func RunMigration(ctx context.Context, client *ent.Client) error { - err := client.Schema.Create( - ctx, - migrate.WithDropIndex(true), - migrate.WithDropColumn(true), +func RunMigration(path string, ctx context.Context) error { + workDir, err := atlasexec.NewWorkingDir( + atlasexec.WithMigrations( + os.DirFS("../../pkg/db/migration"), + ), ) + if err != nil { + return err + } + defer workDir.Close() + + client, err := atlasexec.NewClient(workDir.Path(), "atlas") + if err != nil { + return err + } + + url := fmt.Sprintf("sqlite://%s", path) + + _, err = client.MigrateApply(ctx, &atlasexec.MigrateApplyParams{ + URL: url, + }) if err != nil { return err diff --git a/pkg/db/migration/20241220080719_init_schemas.sql b/pkg/db/migration/20241220080719_init_schemas.sql new file mode 100644 index 0000000..d7fe147 --- /dev/null +++ b/pkg/db/migration/20241220080719_init_schemas.sql @@ -0,0 +1,40 @@ +-- Create "cp_us" table +CREATE TABLE `cp_us` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `usage` real NOT NULL); +-- Create index "cpu_timestamp" to table: "cp_us" +CREATE INDEX `cpu_timestamp` ON `cp_us` (`timestamp`); +-- Create "cpu_per_hours" table +CREATE TABLE `cpu_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, 
`peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +-- Create index "cpuperhour_timestamp" to table: "cpu_per_hours" +CREATE INDEX `cpuperhour_timestamp` ON `cpu_per_hours` (`timestamp`); +-- Create "disk_ios" table +CREATE TABLE `disk_ios` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `read_bytes` integer NOT NULL, `write_bytes` integer NOT NULL); +-- Create index "diskio_timestamp" to table: "disk_ios" +CREATE INDEX `diskio_timestamp` ON `disk_ios` (`timestamp`); +-- Create "disk_io_per_hours" table +CREATE TABLE `disk_io_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_read_bytes` integer NOT NULL, `peak_write_bytes` integer NOT NULL, `avg_read_bytes` integer NOT NULL, `avg_write_bytes` integer NOT NULL); +-- Create index "diskioperhour_timestamp" to table: "disk_io_per_hours" +CREATE INDEX `diskioperhour_timestamp` ON `disk_io_per_hours` (`timestamp`); +-- Create "disk_usages" table +CREATE TABLE `disk_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `mount_point` text NOT NULL, `usage` real NOT NULL, `total` integer NOT NULL, `free` integer NOT NULL, `used` integer NOT NULL); +-- Create index "diskusage_timestamp" to table: "disk_usages" +CREATE INDEX `diskusage_timestamp` ON `disk_usages` (`timestamp`); +-- Create "disk_usage_per_hours" table +CREATE TABLE `disk_usage_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `mount_point` text NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +-- Create index "diskusageperhour_timestamp" to table: "disk_usage_per_hours" +CREATE INDEX `diskusageperhour_timestamp` ON `disk_usage_per_hours` (`timestamp`); +-- Create "memories" table +CREATE TABLE `memories` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, 
`usage` real NOT NULL); +-- Create index "memory_timestamp" to table: "memories" +CREATE INDEX `memory_timestamp` ON `memories` (`timestamp`); +-- Create "memory_per_hours" table +CREATE TABLE `memory_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +-- Create index "memoryperhour_timestamp" to table: "memory_per_hours" +CREATE INDEX `memoryperhour_timestamp` ON `memory_per_hours` (`timestamp`); +-- Create "traffics" table +CREATE TABLE `traffics` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `name` text NOT NULL, `input_pps` real NOT NULL, `input_bps` real NOT NULL, `output_pps` real NOT NULL, `output_bps` real NOT NULL); +-- Create index "traffic_timestamp" to table: "traffics" +CREATE INDEX `traffic_timestamp` ON `traffics` (`timestamp`); +-- Create "traffic_per_hours" table +CREATE TABLE `traffic_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `name` text NOT NULL, `peak_input_pps` real NOT NULL, `peak_input_bps` real NOT NULL, `peak_output_pps` real NOT NULL, `peak_output_bps` real NOT NULL, `avg_input_pps` real NOT NULL, `avg_input_bps` real NOT NULL, `avg_output_pps` real NOT NULL, `avg_output_bps` real NOT NULL); +-- Create index "trafficperhour_timestamp" to table: "traffic_per_hours" +CREATE INDEX `trafficperhour_timestamp` ON `traffic_per_hours` (`timestamp`); diff --git a/pkg/db/migration/atlas.sum b/pkg/db/migration/atlas.sum new file mode 100644 index 0000000..ed4267f --- /dev/null +++ b/pkg/db/migration/atlas.sum @@ -0,0 +1,2 @@ +h1:wmp9Vxk94yD6vXpCuqcdTB/kjhZwA6IFh8az5B1NUuw= +20241220080719_init_schemas.sql h1:Ac2KWGJHarbrqr4oKE/sM2aHt6WKmzGsLCxC0iJJJRg= From 41351f515e213e553983f0a7ab6686bdbb85307a Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 20 Dec 2024 17:42:25 +0900 Subject: [PATCH 116/364] Add ariga.io/atlas-go-sdk package Add ariga.io/atlas-go-sdk package to 
support versioned migration. --- go.mod | 16 +++++++++------- go.sum | 21 +++++++++++++++++++++ 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index a7c69ca..8aa77f8 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/knqyf263/go-rpmdb v0.1.1 - github.com/mattn/go-sqlite3 v1.14.16 + github.com/mattn/go-sqlite3 v1.14.17 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v4 v4.24.8 github.com/spf13/cobra v1.8.1 @@ -21,9 +21,11 @@ require ( ) require ( - ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect - github.com/agext/levenshtein v1.2.1 // indirect + ariga.io/atlas v0.21.2-0.20240418081819-02b3f6239b04 // indirect + ariga.io/atlas-go-sdk v0.6.5 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -31,13 +33,13 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/hashicorp/hcl/v2 v2.18.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/remyoudompheng/bigfft 
v0.0.0-20230126093431-47fa9a501578 // indirect @@ -46,10 +48,10 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - github.com/zclconf/go-cty v1.8.0 // indirect + github.com/zclconf/go-cty v1.14.1 // indirect golang.org/x/mod v0.15.0 // indirect golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 7de8d1c..88e2c64 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,9 @@ ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +ariga.io/atlas v0.21.2-0.20240418081819-02b3f6239b04 h1:YF3qiqtnhn+y4tfhZKTfZKfizpjqHYt7rWPUb+eA4ZA= +ariga.io/atlas v0.21.2-0.20240418081819-02b3f6239b04/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE= +ariga.io/atlas-go-sdk v0.6.5 h1:tl0L3ObGtHjitP9N/56njjDHUrj5jJTQBjftMNwJBcM= +ariga.io/atlas-go-sdk v0.6.5/go.mod h1:9Q+/04PVyJHUse1lEE9Kp6E18xj/6mIzaUTcWYSjSnQ= entgo.io/ent v0.14.0 h1:EO3Z9aZ5bXJatJeGqu/EVdnNr6K4mRq3rWe5owt0MC4= entgo.io/ent v0.14.0/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= @@ -8,8 +12,12 @@ github.com/adrianbrad/queue v1.3.0 h1:8FH1N+93HXbqta5+URa1AL+diV7MP3VDXAEnP+DNp4 github.com/adrianbrad/queue v1.3.0/go.mod h1:wYiPC/3MPbyT45QHLrPR4zcqJWPePubM1oEP/xTwhUs= github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= 
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -47,12 +55,15 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= +github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/knqyf263/go-rpmdb v0.1.1 h1:oh68mTCvp1XzxdU7EfafcWzzfstUZAEa3MW0IJye584= github.com/knqyf263/go-rpmdb v0.1.1/go.mod h1:9LQcoMCMQ9vrF7HcDtXfvqGO4+ddxFQ8+YF/0CVGDww= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -73,8 +84,12 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -91,6 +106,7 @@ github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWR github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/shirou/gopsutil/v4 v4.24.8 h1:pVQjIenQkIhqO81mwTaXjTzOMT7d3TZkf43PlVFHENI= 
github.com/shirou/gopsutil/v4 v4.24.8/go.mod h1:wE0OrJtj4dG+hYkxqDH3QiBICdKSf04/npcvLLc/oRg= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -113,6 +129,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= +github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= @@ -135,6 +153,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= @@ -145,6 +165,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 
h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= From c4a70bab514cdec6802eab55c3034afff05a10fd Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 20 Dec 2024 17:48:34 +0900 Subject: [PATCH 117/364] Add InitDB() Add InitDB() to execute both the DB client acquisition logic and DB migration logic at once, which were previously executed separately for readability. --- cmd/alpamon/command/root.go | 14 +++--------- pkg/db/client.go | 6 +++-- pkg/db/db.go | 44 +++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 13 deletions(-) create mode 100644 pkg/db/db.go diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 7cab8fb..6f79120 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -65,18 +65,10 @@ func runAgent() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, err := db.GetClient() - if err != nil { - log.Error().Err(err).Msg("Failed to open database client") - return - } - defer db.Close() - - if err := db.RunMigration(ctx, client); err != nil { - log.Error().Err(err).Msg("Failed to migrate schema") - return - } + // DB + client := db.InitDB(ctx) + // Collector collector := collector.InitCollector(session, client) if err := collector.Start(ctx); err != nil { log.Error().Err(err).Msg("Failed to start collector") diff --git a/pkg/db/client.go b/pkg/db/client.go index d18fd9e..4b3c3ea 100644 --- a/pkg/db/client.go +++ b/pkg/db/client.go @@ -1,6 +1,7 @@ package db import ( + "fmt" "sync" 
"entgo.io/ent/dialect" @@ -14,10 +15,11 @@ var ( initErr error ) -func GetClient() (*ent.Client, error) { +func GetClient(path string) (*ent.Client, error) { once.Do(func() { var err error - client, err = ent.Open(dialect.SQLite, "file:./metrics.db?cache=shared&_fk=1") + url := fmt.Sprintf("file:%s?cache=shared&_fk=1", path) + client, err = ent.Open(dialect.SQLite, url) if err != nil { initErr = err client = nil diff --git a/pkg/db/db.go b/pkg/db/db.go new file mode 100644 index 0000000..1765281 --- /dev/null +++ b/pkg/db/db.go @@ -0,0 +1,44 @@ +package db + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + _ "github.com/mattn/go-sqlite3" + "github.com/rs/zerolog/log" +) + +const ( + dbDir = "/var/lib/alpamon" + dbFileName = "alpamon.db" +) + +func InitDB(ctx context.Context) *ent.Client { + fileName := fmt.Sprintf("%s/%s", dbDir, dbFileName) + if _, err := os.Stat(dbDir); os.IsNotExist(err) { + fileName, _ = filepath.Abs(dbFileName) + } + + dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0777) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Failed to open db file: %v\n", err) + os.Exit(1) + } + + err = RunMigration(dbFile.Name(), ctx) + if err != nil { + log.Error().Err(err).Msgf("Failed to migrate db: %v\n", err) + os.Exit(1) + } + + client, err := GetClient(dbFile.Name()) + if err != nil { + log.Error().Err(err).Msgf("Failed to get db client: %v\n", err) + os.Exit(1) + } + + return client +} From 42cea59ee830fd7a232c404930de6515c9d99a0c Mon Sep 17 00:00:00 2001 From: geunwoo Date: Fri, 20 Dec 2024 17:49:04 +0900 Subject: [PATCH 118/364] Minor fix Fix .gitignore Fix typo in pkg/collector/collector.go --- .gitignore | 2 +- pkg/collector/collector.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index e34047a..ae4438c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ /pkg/db/ent/* !/pkg/db/ent/generate.go alpamon.log 
-metrics.db +alpamon.db .DS_Store \ No newline at end of file diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 3166230..897cabe 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -117,7 +117,7 @@ func NewCollector(args collectorArgs) (*Collector, error) { func (c *Collector) initTasks(args collectorArgs) error { for _, entry := range args.conf { - duration := time.Duration(entry.Interval) * time.Second + duration := time.Duration(entry.Interval) * time.Minute name := string(entry.Type) + "_" + uuid.NewString() checkArgs := base.CheckArgs{ Type: entry.Type, From c5e319282ef7e6477d0cef5503f3be85a3e80b3f Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 23 Dec 2024 13:10:46 +0900 Subject: [PATCH 119/364] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4277ccb..636a8c4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# alpamon-go +# Alpamon New Go-based Secure Server Agent for Alpacon **Alpamon** is a server agent for **Alpacon**. Each server should have Alpamon installed to be controlled via Alpacon. 
From 9bdfac6921c2aab889733d7fb2492c291dc5cc79 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 23 Dec 2024 16:53:47 +0900 Subject: [PATCH 120/364] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 636a8c4..489a436 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ sudo yum install alpamon #### Clone the source code To get started on macOS, clone the source code from the repository: ```bash -git clone https://github.com/alpacanetworks/alpamon-go.git +git clone https://github.com/alpacanetworks/alpamon.git ``` #### Install Go dependencies @@ -82,7 +82,7 @@ debug = true To run Alpamon in a local development environment, navigate to the cmd/alpamon directory and run the application using Go: ```sh -cd /path/to/alpamon-go/cmd/alpamon +cd /path/to/alpamon/cmd/alpamon go run main.go ``` From c4dbe65440804147e5b601c338a7adef50e8b74c Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 26 Dec 2024 15:16:11 +0900 Subject: [PATCH 121/364] Fix SQL query for disk usage A defect in the select query of hourly disk usage batch job check was causing incorrect data retrieval. To fix this, the query was refactored to employ a subquery, ensuring accurate data extraction. DiskUsageQuerySet has been correspondingly adjusted. 
--- pkg/collector/check/base/types.go | 7 ++- .../check/batch/daily/disk/usage/usage.go | 11 +++-- .../check/batch/hourly/disk/usage/usage.go | 45 +++++++++++++------ 3 files changed, 39 insertions(+), 24 deletions(-) diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 0f2289b..bafdb21 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -54,10 +54,9 @@ type DiskIOQuerySet struct { } type DiskUsageQuerySet struct { - Device string `json:"device"` - MountPoint string `json:"mount_point"` - Max float64 `json:"max"` - AVG float64 `json:"avg"` + Device string `json:"device"` + Max float64 `json:"max"` + AVG float64 `json:"avg"` } type TrafficQuerySet struct { diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index 1b28906..ef48db9 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -44,11 +44,10 @@ func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, err var data []base.CheckResult for _, row := range queryset { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - MountPoint: row.MountPoint, - PeakUsage: row.Max, - AvgUsage: row.AVG, + Timestamp: time.Now(), + Device: row.Device, + PeakUsage: row.Max, + AvgUsage: row.AVG, }) } metric := base.MetricData{ @@ -72,7 +71,7 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS var queryset []base.DiskUsageQuerySet err := client.DiskUsagePerHour.Query(). Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). - GroupBy(diskusageperhour.FieldDevice, diskusageperhour.FieldMountPoint). + GroupBy(diskusageperhour.FieldDevice). 
Aggregate( ent.Max(diskusageperhour.FieldPeakUsage), ent.Mean(diskusageperhour.FieldAvgUsage), diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index 4fe0c2e..75bbfa8 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -4,6 +4,7 @@ import ( "context" "time" + "entgo.io/ent/dialect/sql" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" @@ -44,11 +45,10 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { var data []base.CheckResult for _, row := range queryset { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - MountPoint: row.MountPoint, - PeakUsage: row.Max, - AvgUsage: row.AVG, + Timestamp: time.Now(), + Device: row.Device, + PeakUsage: row.Max, + AvgUsage: row.AVG, }) } metric := base.MetricData{ @@ -71,17 +71,35 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() - now := time.Now() - from := now.Add(-1 * time.Hour) var queryset []base.DiskUsageQuerySet err := client.DiskUsage.Query(). - Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)). - GroupBy(diskusage.FieldDevice, diskusage.FieldMountPoint). - Aggregate( - ent.Max(diskusage.FieldUsage), - ent.Mean(diskusage.FieldUsage), - ).Scan(ctx, &queryset) + Modify(func(s *sql.Selector) { + now := time.Now() + from := now.Add(-1 * time.Hour) + usageExpr := "(CAST(SUM(used) AS FLOAT) * 100.0) / NULLIF(SUM(total), 0)" + t := sql.Table(diskusage.Table) + + subq := sql.Select( + "device", + "timestamp", + sql.As(usageExpr, "usage"), + ). + From(t). 
+ Where( + sql.And( + sql.GTE(t.C(diskusage.FieldTimestamp), from), + sql.LTE(t.C(diskusage.FieldTimestamp), now), + ), + ). + GroupBy("device", "timestamp") + + *s = *sql.Select( + "device", + sql.As(sql.Max("usage"), "max"), + sql.As(sql.Avg("usage"), "avg"), + ).From(subq).GroupBy("device") + }).Scan(ctx, &queryset) if err != nil { return queryset, err } @@ -99,7 +117,6 @@ func (c *Check) saveDiskUsagePerHour(data []base.CheckResult, ctx context.Contex err = tx.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). - SetMountPoint(data[i].MountPoint). SetPeakUsage(data[i].PeakUsage). SetAvgUsage(data[i].AvgUsage) }).Exec(ctx) From 70a652b5dd16260abc65096eef1a2e9ce13f7e66 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 26 Dec 2024 15:21:56 +0900 Subject: [PATCH 122/364] Delete mount_point field Delete the unnecessary mount_point field from diskusageperhour table Fix the corresponding migration file. 
--- ...80719_init_schemas.sql => 20241226055814_init_schemas.sql} | 2 +- pkg/db/migration/atlas.sum | 4 ++-- pkg/db/schema/diskusage_per_hour.go | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) rename pkg/db/migration/{20241220080719_init_schemas.sql => 20241226055814_init_schemas.sql} (97%) diff --git a/pkg/db/migration/20241220080719_init_schemas.sql b/pkg/db/migration/20241226055814_init_schemas.sql similarity index 97% rename from pkg/db/migration/20241220080719_init_schemas.sql rename to pkg/db/migration/20241226055814_init_schemas.sql index d7fe147..a8b45c1 100644 --- a/pkg/db/migration/20241220080719_init_schemas.sql +++ b/pkg/db/migration/20241226055814_init_schemas.sql @@ -19,7 +19,7 @@ CREATE TABLE `disk_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `ti -- Create index "diskusage_timestamp" to table: "disk_usages" CREATE INDEX `diskusage_timestamp` ON `disk_usages` (`timestamp`); -- Create "disk_usage_per_hours" table -CREATE TABLE `disk_usage_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `mount_point` text NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +CREATE TABLE `disk_usage_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); -- Create index "diskusageperhour_timestamp" to table: "disk_usage_per_hours" CREATE INDEX `diskusageperhour_timestamp` ON `disk_usage_per_hours` (`timestamp`); -- Create "memories" table diff --git a/pkg/db/migration/atlas.sum b/pkg/db/migration/atlas.sum index ed4267f..9af891c 100644 --- a/pkg/db/migration/atlas.sum +++ b/pkg/db/migration/atlas.sum @@ -1,2 +1,2 @@ -h1:wmp9Vxk94yD6vXpCuqcdTB/kjhZwA6IFh8az5B1NUuw= -20241220080719_init_schemas.sql h1:Ac2KWGJHarbrqr4oKE/sM2aHt6WKmzGsLCxC0iJJJRg= +h1:Ua6PYlur0VFyOYZWiXefY4a8EwrW9OTZ9monNCM67Bw= +20241226055814_init_schemas.sql 
h1:1tpxOm5guI/FF8f3k8bDT+qJiLfze7vuhkMsH5QWm0U= diff --git a/pkg/db/schema/diskusage_per_hour.go b/pkg/db/schema/diskusage_per_hour.go index f8e2daa..43fba74 100644 --- a/pkg/db/schema/diskusage_per_hour.go +++ b/pkg/db/schema/diskusage_per_hour.go @@ -18,7 +18,6 @@ func (DiskUsagePerHour) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), - field.String("mount_point"), field.Float("peak_usage"), field.Float("avg_usage"), } From c150a8c576ad6105707577ed3a12d39bc7b20007 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 26 Dec 2024 15:24:46 +0900 Subject: [PATCH 123/364] Add --feature flag at ent command To support sql/modifier feature in ent package, add --feature flag and the corresponding functionality to command --- .goreleaser.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index e77cabf..a6d9ad3 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -4,7 +4,7 @@ before: hooks: - go mod tidy - go generate ./... - - go run -mod=mod entgo.io/ent/cmd/ent generate --target ./pkg/db/ent ./pkg/db/schema + - go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema builds: - main: ./cmd/alpamon From 9c3b6813de00e707dc35e6e980993100f81a1ffb Mon Sep 17 00:00:00 2001 From: Chae-Jisung Date: Thu, 26 Dec 2024 17:43:35 +0900 Subject: [PATCH 124/364] Update README.md - add config file value Matters not mentioned in the issue - The configuration file for developers has been updated in the Alpacon Server GitLab repository, but it has not been updated in the Alpamon Agent GitHub repository. - The PR adds instructions to the README.md to include the update of id and key values in the /etc/alpamon/alpamon.conf file for the connection between Alpamon agent and Alpacon server. - Well, I'm not sure if these values are publicly accessible. 
--- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 489a436..d8cc376 100644 --- a/README.md +++ b/README.md @@ -55,8 +55,8 @@ It is recommended to use `/etc/alpamon/alpamon.conf` for deployment, but you can ```ini [server] url = http://localhost:8000 -id = -key = +id = 7a50ea6c-2138-4d3f-9633-e50694c847c4 +key = alpaca [ssl] verify = true From 8e6beb195697386cdb8b5170e90e54933c2edb2c Mon Sep 17 00:00:00 2001 From: Chae-Jisung Date: Fri, 27 Dec 2024 10:35:45 +0900 Subject: [PATCH 125/364] Update README.md - add comments about testing values - add explaination about values for testing --- README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/README.md b/README.md index 489a436..8655574 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,22 @@ debug = true - `logging`: Logging settings - `debug`: Whether to print debug logs or not +For testing with the Alpacon-server, you can use the following values: +```ini +[server] +url = http://localhost:8000 +id = 7a50ea6c-2138-4d3f-9633-e50694c847c4 +key = alpaca + +[ssl] +verify = true +ca_cert = + +[logging] +debug = true +``` + + ## Run ### Local environment From cd0281da81f0448c56bcc8465c6c82e977438836 Mon Sep 17 00:00:00 2001 From: Chae-Jisung Date: Fri, 27 Dec 2024 10:46:51 +0900 Subject: [PATCH 126/364] Update README.md - add comments about testing values - add explaination about values for testing --- README.md | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d8cc376..768475a 100644 --- a/README.md +++ b/README.md @@ -55,8 +55,8 @@ It is recommended to use `/etc/alpamon/alpamon.conf` for deployment, but you can ```ini [server] url = http://localhost:8000 -id = 7a50ea6c-2138-4d3f-9633-e50694c847c4 -key = alpaca +id = +key = [ssl] verify = true @@ -75,6 +75,22 @@ debug = true - `ca_cert`: Path for the CA certificate - `logging`: Logging settings - `debug`: Whether to print 
debug logs or not + +For testing with the Alpacon-server, you can use the following values: + +```ini +[server] +url = http://localhost:8000 +id = 7a50ea6c-2138-4d3f-9633-e50694c847c4 +key = alpaca + +[ssl] +verify = true +ca_cert = + +[logging] +debug = true +``` ## Run From 4c4c770509d0fac5cebb370c9bc79207182cdef6 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 27 Dec 2024 10:53:18 +0900 Subject: [PATCH 127/364] Minor fix --- README.md | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 768475a..7c3dafc 100644 --- a/README.md +++ b/README.md @@ -76,21 +76,10 @@ debug = true - `logging`: Logging settings - `debug`: Whether to print debug logs or not -For testing with the Alpacon-server, you can use the following values: - -```ini -[server] -url = http://localhost:8000 -id = 7a50ea6c-2138-4d3f-9633-e50694c847c4 -key = alpaca - -[ssl] -verify = true -ca_cert = - -[logging] -debug = true -``` +For testing with the `Alpacon-Server`, you can use the following values: +- `url` = `http://localhost:8000` +- `id` = `7a50ea6c-2138-4d3f-9633-e50694c847c4` +- `key` = `alpaca` ## Run From 900a8346427e876b82cb5b8285d7d1a19b4c7aa3 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 27 Dec 2024 10:55:46 +0900 Subject: [PATCH 128/364] Minor fix --- README.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/README.md b/README.md index 626f43d..6a7e0da 100644 --- a/README.md +++ b/README.md @@ -81,21 +81,6 @@ For testing with the `Alpacon-Server`, you can use the following values: - `id` = `7a50ea6c-2138-4d3f-9633-e50694c847c4` - `key` = `alpaca` -For testing with the Alpacon-server, you can use the following values: -```ini -[server] -url = http://localhost:8000 -id = 7a50ea6c-2138-4d3f-9633-e50694c847c4 -key = alpaca - -[ssl] -verify = true -ca_cert = - -[logging] -debug = true -``` - ## Run From 0f04dc3f839cfdcd26811d5601f59c0663b0f09d Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 30 Dec 2024 
13:39:21 +0900 Subject: [PATCH 129/364] Minor fix Fix retry logic to ensure that subsequent tasks continue to execute even if a retry fails. --- pkg/collector/scheduler/scheduler.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index eba74da..8164705 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -124,9 +124,7 @@ func (s *Scheduler) executeTask(ctx context.Context, task *ScheduledTask) { return } } - return } break } - } From 599b58b31580713122787a9c214e869edc1153db Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 30 Dec 2024 22:09:21 +0900 Subject: [PATCH 130/364] Add logger Add logger for debugging purposes in case of errors. --- pkg/db/db.go | 7 ++++--- pkg/db/migrate.go | 4 ++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index 1765281..b14d652 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -22,21 +22,22 @@ func InitDB(ctx context.Context) *ent.Client { fileName, _ = filepath.Abs(dbFileName) } - dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0777) + dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0750) if err != nil { + log.Error().Err(err).Msgf("failed to open db file: %v", err) _, _ = fmt.Fprintf(os.Stderr, "Failed to open db file: %v\n", err) os.Exit(1) } err = RunMigration(dbFile.Name(), ctx) if err != nil { - log.Error().Err(err).Msgf("Failed to migrate db: %v\n", err) + log.Error().Err(err).Msgf("failed to migrate db: %v\n", err) os.Exit(1) } client, err := GetClient(dbFile.Name()) if err != nil { - log.Error().Err(err).Msgf("Failed to get db client: %v\n", err) + log.Error().Err(err).Msgf("failed to get db client: %v\n", err) os.Exit(1) } diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index b8caa6c..2897c80 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -6,6 +6,7 @@ import ( "os" 
"ariga.io/atlas-go-sdk/atlasexec" + "github.com/rs/zerolog/log" ) func RunMigration(path string, ctx context.Context) error { @@ -15,12 +16,14 @@ func RunMigration(path string, ctx context.Context) error { ), ) if err != nil { + log.Error().Err(err).Msgf("failed to get migration dir: %v", err) return err } defer workDir.Close() client, err := atlasexec.NewClient(workDir.Path(), "atlas") if err != nil { + log.Error().Err(err).Msgf("failed to get atlas client: %v", err) return err } @@ -31,6 +34,7 @@ func RunMigration(path string, ctx context.Context) error { }) if err != nil { + log.Error().Err(err).Msgf("failed to migrate db: %v", err) return err } From f101acbe42e35174c080be24ac1c43ea720d0a29 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 30 Dec 2024 22:11:00 +0900 Subject: [PATCH 131/364] Add ariga.io/atlas-go-sdk package Add ariga.io/atlas-go-sdk package for versioned migrations. --- go.mod | 6 +++--- go.sum | 53 +++++++++++------------------------------------------ 2 files changed, 14 insertions(+), 45 deletions(-) diff --git a/go.mod b/go.mod index 8aa77f8..98824c0 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/alpacanetworks/alpamon-go go 1.22.5 require ( + ariga.io/atlas-go-sdk v0.6.5 entgo.io/ent v0.14.0 github.com/adrianbrad/queue v1.3.0 github.com/cenkalti/backoff v2.2.1+incompatible @@ -22,9 +23,7 @@ require ( require ( ariga.io/atlas v0.21.2-0.20240418081819-02b3f6239b04 // indirect - ariga.io/atlas-go-sdk v0.6.5 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -43,13 +42,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 // indirect + 
github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zclconf/go-cty v1.14.1 // indirect - golang.org/x/mod v0.15.0 // indirect + golang.org/x/mod v0.18.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/go.sum b/go.sum index 88e2c64..bf87103 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= -ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= ariga.io/atlas v0.21.2-0.20240418081819-02b3f6239b04 h1:YF3qiqtnhn+y4tfhZKTfZKfizpjqHYt7rWPUb+eA4ZA= ariga.io/atlas v0.21.2-0.20240418081819-02b3f6239b04/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE= ariga.io/atlas-go-sdk v0.6.5 h1:tl0L3ObGtHjitP9N/56njjDHUrj5jJTQBjftMNwJBcM= @@ -10,12 +8,8 @@ github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20O github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/adrianbrad/queue v1.3.0 h1:8FH1N+93HXbqta5+URa1AL+diV7MP3VDXAEnP+DNp48= github.com/adrianbrad/queue v1.3.0/go.mod h1:wYiPC/3MPbyT45QHLrPR4zcqJWPePubM1oEP/xTwhUs= -github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= 
-github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -41,9 +35,6 @@ github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -53,19 +44,14 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= -github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= 
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/knqyf263/go-rpmdb v0.1.1 h1:oh68mTCvp1XzxdU7EfafcWzzfstUZAEa3MW0IJye584= github.com/knqyf263/go-rpmdb v0.1.1/go.mod h1:9LQcoMCMQ9vrF7HcDtXfvqGO4+ddxFQ8+YF/0CVGDww= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -82,14 +68,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= -github.com/mitchellh/go-wordwrap 
v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -100,13 +84,14 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 h1:VstopitMQi3hZP0fzvnsLmzXZdQGc4bEcgu24cp+d4M= github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.3.1 
h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shirou/gopsutil/v4 v4.24.8 h1:pVQjIenQkIhqO81mwTaXjTzOMT7d3TZkf43PlVFHENI= github.com/shirou/gopsutil/v4 v4.24.8/go.mod h1:wE0OrJtj4dG+hYkxqDH3QiBICdKSf04/npcvLLc/oRg= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -123,22 +108,14 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= -github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/mod v0.18.0 
h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -148,24 +125,16 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod 
h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M= From ba492eeb00a146eb4d6a482ca2eed71779242d19 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 30 Dec 2024 22:13:32 +0900 Subject: [PATCH 132/364] Refactor for building Linux packages Add Atlas CLI installation script to postinstall.sh for Linux package builds. Refactor .goreleaser.yaml to generate ent-related code. Add the database file path to tmpfile.conf. 
--- .goreleaser.yaml | 3 +-- cmd/alpamon/command/configs/tmpfile.conf | 1 + scripts/postinstall.sh | 10 ++++++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index a6d9ad3..8d32d72 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -2,9 +2,8 @@ project_name: alpamon before: hooks: + - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema - go mod tidy - - go generate ./... - - go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema builds: - main: ./cmd/alpamon diff --git a/cmd/alpamon/command/configs/tmpfile.conf b/cmd/alpamon/command/configs/tmpfile.conf index e998fc6..8c3fbf7 100644 --- a/cmd/alpamon/command/configs/tmpfile.conf +++ b/cmd/alpamon/command/configs/tmpfile.conf @@ -1,4 +1,5 @@ d /etc/alpamon 0700 root root - - f /etc/alpamon/alpamon.conf 0600 root root - - d /var/lib/alpamon 0750 root root - - +f /var/lib/alpamon/alpamon.db 0750 root root - - d /var/log/alpamon 0750 root root - - diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 7def911..76886b3 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -6,6 +6,7 @@ main() { check_root_permission check_systemd_status check_alpamon_binary + install_atlas_cli install_alpamon start_systemd_service } @@ -31,6 +32,15 @@ check_alpamon_binary() { fi } +install_atlas_cli() { + echo "Installing Atlas CLI..." + curl -sSf https://atlasgo.sh | sh -s -- -y + if [ $? -ne 0 ]; then + echo "Error: Failed to install Atlas CLI." + exit 1 + fi +} + install_alpamon() { "$ALPAMON_BIN" install if [ $? -ne 0 ]; then From 407916cfa49e1339606ed4ef29332d2011064fdc Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 30 Dec 2024 22:14:57 +0900 Subject: [PATCH 133/364] Update README.md Add explanations on how to generate ent-related code. 
--- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 4277ccb..1bf63d4 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,18 @@ To get started on macOS, clone the source code from the repository: git clone https://github.com/alpacanetworks/alpamon-go.git ``` +#### Generate Ent Schema Code with Entgo +To generate Ent schema code with custom features, navigate to the root of the project and use the following command: +```bash +go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema +``` + +#### Install Atlas CLI +To enable versioned migrations, install Atlas CLI using the following command: +```bash +curl -sSf https://atlasgo.sh | sh +``` + #### Install Go dependencies Make sure you have Go installed. Then, navigate to the project root and download the necessary Go packages: ```bash From ef927fcb0ce39fa3b58021bfc816bf61dd30057c Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 14:26:57 +0900 Subject: [PATCH 134/364] Fix migration directory path A problem occurred where the migration directory was specified with a relative path, making it impossible for the binary file to locate this path. To resolve this issue, embed package was used to specify migration directory path. 
--- pkg/db/migrate.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index 2897c80..28ac070 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -2,17 +2,27 @@ package db import ( "context" + "embed" "fmt" - "os" + "io/fs" "ariga.io/atlas-go-sdk/atlasexec" "github.com/rs/zerolog/log" ) +//go:embed migration/* +var migrations embed.FS + func RunMigration(path string, ctx context.Context) error { + migrationFS, err := getMigrationDir() + if err != nil { + log.Error().Err(err).Msg("failed to get migration filesystem") + return err + } + workDir, err := atlasexec.NewWorkingDir( atlasexec.WithMigrations( - os.DirFS("../../pkg/db/migration"), + migrationFS, ), ) if err != nil { @@ -40,3 +50,12 @@ func RunMigration(path string, ctx context.Context) error { return nil } + +func getMigrationDir() (fs.FS, error) { + migrationFS, err := fs.Sub(migrations, "migration") + if err != nil { + return nil, err + } + + return migrationFS, nil +} From e9a1dff8287d28329874296a115c4fe1ba64b2e1 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 14:45:31 +0900 Subject: [PATCH 135/364] Add logger Add logger for debugging purposes in case of errors. 
--- pkg/collector/scheduler/scheduler.go | 2 ++ pkg/db/migrate.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index 8164705..2e62c7a 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -7,6 +7,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/rs/zerolog/log" ) const ( @@ -115,6 +116,7 @@ func (s *Scheduler) executeTask(ctx context.Context, task *ScheduledTask) { for attempt := 0; attempt <= s.retryConf.MaxRetries; attempt++ { err := task.check.Execute(ctx) if err != nil { + log.Error().Err(err).Msgf("failed to execute check: %v", err) if attempt < s.retryConf.MaxRetries { backoff := utils.CalculateBackOff(s.retryConf.Delay, attempt) select { diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index 28ac070..4fe3bcf 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -26,7 +26,7 @@ func RunMigration(path string, ctx context.Context) error { ), ) if err != nil { - log.Error().Err(err).Msgf("failed to get migration dir: %v", err) + log.Error().Err(err).Msgf("failed to open migration dir: %v", err) return err } defer workDir.Close() From 41b275e02375e46cff4ba1777c0711362d5c5686 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 15:14:17 +0900 Subject: [PATCH 136/364] Fix builds env Fix .goreleaser.yaml to enable cgo for using go-sqlite3 package. 
--- .goreleaser.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 8d32d72..c6ed902 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -11,7 +11,9 @@ builds: ldflags: - -X github.com/alpacanetworks/alpamon-go/pkg/version.Version={{.Version}} env: - - CGO_ENABLED=0 + - CGO_ENABLED=1 + - CGO_CFLAGS=-I/usr/include + - CGO_LDFLAGS=-L/usr/lib goos: - linux - darwin From 0eba3d830a8173aa98fca71f15add3a5e079bf69 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 17:34:22 +0900 Subject: [PATCH 137/364] Delete mattn/go-sqlite3 package Remove mattn/go-sqlite3 package to avoid using cgo. --- go.mod | 17 +++++++++-------- go.sum | 55 ++++++++++++++++++++++++++++++++++--------------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 98824c0..364746a 100644 --- a/go.mod +++ b/go.mod @@ -8,11 +8,10 @@ require ( github.com/adrianbrad/queue v1.3.0 github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 - github.com/glebarez/go-sqlite v1.20.3 + github.com/glebarez/go-sqlite v1.22.0 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/knqyf263/go-rpmdb v0.1.1 - github.com/mattn/go-sqlite3 v1.14.17 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v4 v4.24.8 github.com/spf13/cobra v1.8.1 @@ -39,9 +38,10 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect 
github.com/spf13/pflag v1.0.5 // indirect @@ -52,11 +52,12 @@ require ( golang.org/x/mod v0.18.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/tools v0.22.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.22.2 // indirect - modernc.org/mathutil v1.5.0 // indirect - modernc.org/memory v1.5.0 // indirect - modernc.org/sqlite v1.20.3 // indirect + modernc.org/libc v1.55.3 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/sqlite v1.28.0 // indirect ) diff --git a/go.sum b/go.sum index bf87103..c14f6ac 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/glebarez/go-sqlite v1.20.3 h1:89BkqGOXR9oRmG58ZrzgoY/Fhy5x0M+/WV48U5zVrZ4= -github.com/glebarez/go-sqlite v1.20.3/go.mod h1:u3N6D/wftiAzIOJtZl6BmedqxmmkDfH3q+ihjqxC9u0= +github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= +github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= @@ -66,24 +66,21 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 
-github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 h1:VstopitMQi3hZP0fzvnsLmzXZdQGc4bEcgu24cp+d4M= -github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= @@ -114,8 +111,8 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -130,8 +127,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -143,11 +140,27 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= -modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= -modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= -modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/sqlite v1.20.3 h1:SqGJMMxjj1PHusLxdYxeQSodg7Jxn9WWkaAQjKrntZs= -modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A= +modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= +modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.19.2 
h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y= +modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= +modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= +modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= From aad628b33c53aea58a1b83118a69ddf25bf8256c Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 17:35:51 +0900 Subject: [PATCH 138/364] Update due to apply changes Update to reflect the removal of mattn/go-sqlite3 package. 
--- pkg/db/client.go | 7 +++---- pkg/db/db.go | 5 ++++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/db/client.go b/pkg/db/client.go index 4b3c3ea..cfa530b 100644 --- a/pkg/db/client.go +++ b/pkg/db/client.go @@ -4,9 +4,8 @@ import ( "fmt" "sync" - "entgo.io/ent/dialect" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - _ "github.com/mattn/go-sqlite3" + _ "github.com/glebarez/go-sqlite" ) var ( @@ -18,8 +17,8 @@ var ( func GetClient(path string) (*ent.Client, error) { once.Do(func() { var err error - url := fmt.Sprintf("file:%s?cache=shared&_fk=1", path) - client, err = ent.Open(dialect.SQLite, url) + url := fmt.Sprintf("file:%s?cache=shared&__pragma=foreign_keys(1)", path) + client, err = ent.Open("sqlite3", url) if err != nil { initErr = err client = nil diff --git a/pkg/db/db.go b/pkg/db/db.go index b14d652..d624aae 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -2,12 +2,13 @@ package db import ( "context" + "database/sql" "fmt" "os" "path/filepath" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - _ "github.com/mattn/go-sqlite3" + "github.com/glebarez/go-sqlite" "github.com/rs/zerolog/log" ) @@ -29,6 +30,8 @@ func InitDB(ctx context.Context) *ent.Client { os.Exit(1) } + sql.Register("sqlite3", &sqlite.Driver{}) + err = RunMigration(dbFile.Name(), ctx) if err != nil { log.Error().Err(err).Msgf("failed to migrate db: %v\n", err) From 2940137c32af567fc7a7946e5244a18ab7d165e5 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 17:36:14 +0900 Subject: [PATCH 139/364] Minor fix Undo CGO_ENABLED --- .goreleaser.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index c6ed902..8d32d72 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -11,9 +11,7 @@ builds: ldflags: - -X github.com/alpacanetworks/alpamon-go/pkg/version.Version={{.Version}} env: - - CGO_ENABLED=1 - - CGO_CFLAGS=-I/usr/include - - CGO_LDFLAGS=-L/usr/lib + - CGO_ENABLED=0 goos: - linux - darwin From 
919f46d581dbddef08ff87abf325f06365fef611 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 23:29:54 +0900 Subject: [PATCH 140/364] Update github action yml Update github action yml due to apply changes --- .github/workflows/build-and-test.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index c8a0a6c..28aa791 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -26,6 +26,10 @@ jobs: - name: Check out code uses: actions/checkout@v4 + + - name: Generate go code + run: go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema + working-directory: ./ - name: Build run: go build -v . From 5b1cfb54603b2892037ed8c3e32639c7ab96335c Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 31 Dec 2024 23:41:50 +0900 Subject: [PATCH 141/364] Update github action yml Update github action yml due to apply changes --- .github/workflows/lint.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4cc46b3..6dfefd8 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,6 +32,10 @@ jobs: - uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} + + - name: Generate go code + run: go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema + working-directory: ./ - name: GolangCI-Lint uses: golangci/golangci-lint-action@v6 From 2d3ceecee5a4bf681e98a66da0a221b7d4a87c77 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 2 Jan 2025 11:04:18 +0900 Subject: [PATCH 142/364] Minor fix Apply go-lint. 
--- .../check/batch/daily/cleanup/cleanup.go | 20 +++++++++---------- pkg/collector/check/batch/daily/cpu/cpu.go | 2 +- pkg/collector/check/batch/daily/disk/io/io.go | 2 +- .../check/batch/daily/disk/usage/usage.go | 2 +- .../check/batch/daily/memory/memory.go | 2 +- pkg/collector/check/batch/daily/net/net.go | 2 +- pkg/collector/check/batch/hourly/cpu/cpu.go | 4 ++-- .../check/batch/hourly/disk/io/io.go | 4 ++-- .../check/batch/hourly/disk/usage/usage.go | 4 ++-- .../check/batch/hourly/memory/memory.go | 4 ++-- pkg/collector/check/batch/hourly/net/net.go | 4 ++-- 11 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/collector/check/batch/daily/cleanup/cleanup.go b/pkg/collector/check/batch/daily/cleanup/cleanup.go index ffe9081..671dbb8 100644 --- a/pkg/collector/check/batch/daily/cleanup/cleanup.go +++ b/pkg/collector/check/batch/daily/cleanup/cleanup.go @@ -76,7 +76,7 @@ func deleteAllCPU(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.CPU.Delete().Exec(ctx) if err != nil { @@ -93,7 +93,7 @@ func deleteAllCPUPerHour(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.CPUPerHour.Delete().Exec(ctx) if err != nil { @@ -110,7 +110,7 @@ func deleteAllMemory(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.Memory.Delete().Exec(ctx) if err != nil { @@ -127,7 +127,7 @@ func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.MemoryPerHour.Delete().Exec(ctx) if err != nil { @@ -144,7 +144,7 @@ func deleteAllDiskUsage(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = 
tx.Rollback() }() _, err = tx.DiskUsage.Delete().Exec(ctx) if err != nil { @@ -161,7 +161,7 @@ func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.DiskIOPerHour.Delete().Exec(ctx) if err != nil { @@ -178,7 +178,7 @@ func deleteAllDiskIO(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = client.DiskIO.Delete().Exec(ctx) if err != nil { @@ -195,7 +195,7 @@ func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.DiskIOPerHour.Delete().Exec(ctx) if err != nil { @@ -212,7 +212,7 @@ func deleteAllTraffic(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.Traffic.Delete().Exec(ctx) if err != nil { @@ -229,7 +229,7 @@ func deleteAllTrafficPerHour(ctx context.Context, client *ent.Client) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() _, err = tx.TrafficPerHour.Delete().Exec(ctx) if err != nil { diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index e1e5774..6da8e13 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -83,7 +83,7 @@ func (c *Check) deleteCPUPerHour(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-24 * time.Hour) diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index 1340c0a..0acf4ce 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -92,7 
+92,7 @@ func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-24 * time.Hour) diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index ef48db9..4dae6b2 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -88,7 +88,7 @@ func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-24 * time.Hour) diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index ce69995..d2a8ef1 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -83,7 +83,7 @@ func (c *Check) deleteMemoryPerHour(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-24 * time.Hour) diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 30ea49e..e217524 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -100,7 +100,7 @@ func (c *Check) deleteTrafficPerHour(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-24 * time.Hour) diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index ee1d067..57a79c0 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -88,7 +88,7 @@ func (c *Check) saveCPUPerHour(data base.CheckResult, ctx context.Context) error if err != nil { return err 
} - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() err = tx.CPUPerHour.Create(). SetTimestamp(data.Timestamp). @@ -108,7 +108,7 @@ func (c *Check) deleteCPU(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-1 * time.Hour) diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index 793485d..9868a28 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -97,7 +97,7 @@ func (c *Check) saveDiskIOPerHour(data []base.CheckResult, ctx context.Context) if err != nil { return nil } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() err = tx.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). @@ -121,7 +121,7 @@ func (c *Check) deleteDiskIO(ctx context.Context) error { if err != nil { return nil } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-1 * time.Hour) diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index 75bbfa8..ab9f24a 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -112,7 +112,7 @@ func (c *Check) saveDiskUsagePerHour(data []base.CheckResult, ctx context.Contex if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() err = tx.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). 
@@ -134,7 +134,7 @@ func (c *Check) deleteDiskUsage(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-1 * time.Hour) diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index cf7face..148eabf 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -88,7 +88,7 @@ func (c *Check) saveMemoryPerHour(data base.CheckResult, ctx context.Context) er if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() err = tx.MemoryPerHour.Create(). SetTimestamp(data.Timestamp). @@ -108,7 +108,7 @@ func (c *Check) deleteMemory(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-1 * time.Hour) diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 259f884..858c9ce 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -105,7 +105,7 @@ func (c *Check) saveTrafficPerHour(data []base.CheckResult, ctx context.Context) if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() err = tx.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). @@ -133,7 +133,7 @@ func (c *Check) deleteTraffic(ctx context.Context) error { if err != nil { return err } - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() now := time.Now() from := now.Add(-1 * time.Hour) From 1b1018c32b84c4975e5fd8cbfbd17412477f8fd9 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 3 Jan 2025 17:25:01 +0900 Subject: [PATCH 143/364] Add exception handling for file extensions Add exception handling to prevent automatic unzipping of compressed file like .whl or .jar. 
--- pkg/runner/command.go | 8 ++++++-- pkg/runner/command_types.go | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index d6886fd..2a6f3ca 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -756,7 +756,7 @@ func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode return 1, err.Error() } - isZip := isZipFile(content) + isZip := isZipFile(content, filepath.Ext(data.Path)) if isZip { command := fmt.Sprintf("tee -a %s > /dev/null && unzip -n %s -d %s; rm %s", strings.ReplaceAll(data.Path, " ", "\\ "), @@ -780,7 +780,11 @@ func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode return 0, fmt.Sprintf("Successfully downloaded %s.", data.Path) } -func isZipFile(content []byte) bool { +func isZipFile(content []byte, ext string) bool { + if _, found := nonZipExt[ext]; found { + return false + } + _, err := zip.NewReader(bytes.NewReader(content), int64(len(content))) return err == nil diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 36140b3..156eb33 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -103,3 +103,20 @@ type commandFin struct { Result string `json:"result"` ElapsedTime float64 `json:"elapsed_time"` } + +var nonZipExt = map[string]bool{ + ".jar": true, + ".war": true, + ".ear": true, + ".apk": true, + ".xpi": true, + ".vsix": true, + ".crx": true, + ".egg": true, + ".whl": true, + ".appx": true, + ".msix": true, + ".ipk": true, + ".nupkg": true, + ".kmz": true, +} From 788e52744667e70e6d82b20a8aa0979977a00311 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 7 Jan 2025 19:41:58 +0900 Subject: [PATCH 144/364] Refactor realtime NET & DISK_IO check Refactor Checks that collect, store, and transmit real-time network traffic and disk I/O to ensure collected data is meaningful. 
Fix disk I/O check to collect bytes per second (BPS) to measure speed, whereas it previously only collected disk read/write bytes. Due to the long collection interval, the data collected by the existing traffic check for input/output bps and pps was not meaningful. To improve data accuracy and timeliness, refactor the check to separate the collection and storage processes from the data transmission process. --- pkg/collector/check/realtime/disk/io/base.go | 23 ++++ pkg/collector/check/realtime/disk/io/io.go | 106 ------------------ .../check/realtime/disk/io/io_collect.go | 91 +++++++++++++++ .../check/realtime/disk/io/io_send.go | 78 +++++++++++++ pkg/collector/check/realtime/net/base.go | 23 ++++ .../realtime/net/{net.go => net_collect.go} | 45 +++----- pkg/collector/check/realtime/net/net_send.go | 86 ++++++++++++++ 7 files changed, 316 insertions(+), 136 deletions(-) create mode 100644 pkg/collector/check/realtime/disk/io/base.go delete mode 100644 pkg/collector/check/realtime/disk/io/io.go create mode 100644 pkg/collector/check/realtime/disk/io/io_collect.go create mode 100644 pkg/collector/check/realtime/disk/io/io_send.go create mode 100644 pkg/collector/check/realtime/net/base.go rename pkg/collector/check/realtime/net/{net.go => net_collect.go} (62%) create mode 100644 pkg/collector/check/realtime/net/net_send.go diff --git a/pkg/collector/check/realtime/disk/io/base.go b/pkg/collector/check/realtime/disk/io/base.go new file mode 100644 index 0000000..0787923 --- /dev/null +++ b/pkg/collector/check/realtime/disk/io/base.go @@ -0,0 +1,23 @@ +package diskio + +import ( + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/disk" +) + +func NewCheck(args *base.CheckArgs) base.CheckStrategy { + var check base.CheckStrategy + switch args.Type { + case base.DISK_IO_COLLECTOR: + check = &CollectCheck{ + BaseCheck: base.NewBaseCheck(args), + lastMetric: make(map[string]disk.IOCountersStat), + } + case base.DISK_IO: + 
check = &SendCheck{ + BaseCheck: base.NewBaseCheck(args), + } + } + + return check +} diff --git a/pkg/collector/check/realtime/disk/io/io.go b/pkg/collector/check/realtime/disk/io/io.go deleted file mode 100644 index f13f2fc..0000000 --- a/pkg/collector/check/realtime/disk/io/io.go +++ /dev/null @@ -1,106 +0,0 @@ -package diskio - -import ( - "context" - "time" - - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/shirou/gopsutil/v4/disk" -) - -type Check struct { - base.BaseCheck - lastMetric map[string]disk.IOCountersStat -} - -func NewCheck(args *base.CheckArgs) base.CheckStrategy { - return &Check{ - BaseCheck: base.NewBaseCheck(args), - lastMetric: make(map[string]disk.IOCountersStat), - } -} - -func (c *Check) Execute(ctx context.Context) error { - metric, err := c.collectAndSaveDiskIO(ctx) - if err != nil { - return err - } - - if ctx.Err() != nil { - return ctx.Err() - } - - buffer := c.GetBuffer() - buffer.SuccessQueue <- metric - - return nil -} - -func (c *Check) collectAndSaveDiskIO(ctx context.Context) (base.MetricData, error) { - ioCounters, err := c.collectDiskIO() - if err != nil { - return base.MetricData{}, err - } - - metric := base.MetricData{ - Type: base.DISK_IO, - Data: c.parseDiskIO(ioCounters), - } - - err = c.saveDiskIO(metric.Data, ctx) - if err != nil { - return base.MetricData{}, err - } - - return metric, nil -} - -func (c *Check) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.CheckResult { - var data []base.CheckResult - for name, ioCounter := range ioCounters { - var readBytes, writeBytes uint64 - - if lastCounter, exist := c.lastMetric[name]; exist { - readBytes = ioCounter.ReadBytes - lastCounter.ReadBytes - writeBytes = ioCounter.WriteBytes - lastCounter.WriteBytes - } else { - readBytes = 0 - writeBytes = 0 - } - - c.lastMetric[name] = ioCounter - data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Device: name, - 
ReadBytes: &readBytes, - WriteBytes: &writeBytes, - }) - } - - return data -} - -func (c *Check) collectDiskIO() (map[string]disk.IOCountersStat, error) { - ioCounters, err := disk.IOCounters() - if err != nil { - return nil, err - } - - return ioCounters, nil -} - -func (c *Check) saveDiskIO(data []base.CheckResult, ctx context.Context) error { - client := c.GetClient() - err := client.DiskIO.MapCreateBulk(data, func(q *ent.DiskIOCreate, i int) { - q.SetTimestamp(data[i].Timestamp). - SetDevice(data[i].Device). - SetReadBytes(int64(*data[i].ReadBytes)). - SetWriteBytes(int64(*data[i].WriteBytes)) - }).Exec(ctx) - if err != nil { - return err - } - - return nil -} diff --git a/pkg/collector/check/realtime/disk/io/io_collect.go b/pkg/collector/check/realtime/disk/io/io_collect.go new file mode 100644 index 0000000..415e156 --- /dev/null +++ b/pkg/collector/check/realtime/disk/io/io_collect.go @@ -0,0 +1,91 @@ +package diskio + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/shirou/gopsutil/v4/disk" +) + +type CollectCheck struct { + base.BaseCheck + lastMetric map[string]disk.IOCountersStat +} + +func (c *CollectCheck) Execute(ctx context.Context) error { + err := c.collectAndSaveDiskIO(ctx) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + + return nil +} + +func (c *CollectCheck) collectAndSaveDiskIO(ctx context.Context) error { + ioCounters, err := c.collectDiskIO() + if err != nil { + return err + } + + err = c.saveDiskIO(c.parseDiskIO(ioCounters), ctx) + if err != nil { + return err + } + + return nil +} + +func (c *CollectCheck) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.CheckResult { + var data []base.CheckResult + for name, ioCounter := range ioCounters { + var readBps, writeBps float64 + + if lastCounter, exist := c.lastMetric[name]; 
exist { + readBps, writeBps = utils.CalculateDiskIOBps(ioCounter, lastCounter, c.GetInterval()) + } else { + readBps = 0 + writeBps = 0 + } + + c.lastMetric[name] = ioCounter + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Device: name, + ReadBps: &readBps, + WriteBps: &writeBps, + }) + } + + return data +} + +func (c *CollectCheck) collectDiskIO() (map[string]disk.IOCountersStat, error) { + ioCounters, err := disk.IOCounters() + if err != nil { + return nil, err + } + + return ioCounters, nil +} + +func (c *CollectCheck) saveDiskIO(data []base.CheckResult, ctx context.Context) error { + client := c.GetClient() + err := client.DiskIO.MapCreateBulk(data, func(q *ent.DiskIOCreate, i int) { + q.SetTimestamp(data[i].Timestamp). + SetDevice(data[i].Device). + SetReadBps(*data[i].ReadBps). + SetWriteBps(*data[i].WriteBps) + }).Exec(ctx) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/collector/check/realtime/disk/io/io_send.go b/pkg/collector/check/realtime/disk/io/io_send.go new file mode 100644 index 0000000..89c33b3 --- /dev/null +++ b/pkg/collector/check/realtime/disk/io/io_send.go @@ -0,0 +1,78 @@ +package diskio + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" +) + +type SendCheck struct { + base.BaseCheck +} + +func (c *SendCheck) Execute(ctx context.Context) error { + metric, err := c.queryDiskIO(ctx) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric + + return nil +} + +func (c *SendCheck) queryDiskIO(ctx context.Context) (base.MetricData, error) { + queryset, err := c.getDiskIO(ctx) + if err != nil { + return base.MetricData{}, err + } + + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + 
Device: row.Device, + PeakWriteBps: row.PeakWriteBps, + PeakReadBps: row.PeakReadBps, + AvgWriteBps: row.AvgWriteBps, + AvgReadBps: row.AvgReadBps, + }) + } + metric := base.MetricData{ + Type: base.DISK_IO, + Data: data, + } + + return metric, nil +} + +func (c *SendCheck) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { + client := c.GetClient() + interval := c.GetInterval() + now := time.Now() + from := now.Add(-1 * interval * time.Second) + + var queryset []base.DiskIOQuerySet + err := client.DiskIO.Query(). + Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). + GroupBy(diskio.FieldDevice). + Aggregate( + ent.As(ent.Max(diskio.FieldReadBps), "peak_read_bps"), + ent.As(ent.Max(diskio.FieldWriteBps), "peak_write_bps"), + ent.As(ent.Mean(diskio.FieldReadBps), "avg_read_bps"), + ent.As(ent.Mean(diskio.FieldWriteBps), "avg_write_bps"), + ).Scan(ctx, &queryset) + if err != nil { + return queryset, err + } + + return queryset, nil +} diff --git a/pkg/collector/check/realtime/net/base.go b/pkg/collector/check/realtime/net/base.go new file mode 100644 index 0000000..cc5d35f --- /dev/null +++ b/pkg/collector/check/realtime/net/base.go @@ -0,0 +1,23 @@ +package net + +import ( + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/shirou/gopsutil/v4/net" +) + +func NewCheck(args *base.CheckArgs) base.CheckStrategy { + var check base.CheckStrategy + switch args.Type { + case base.NET_COLLECTOR: + check = &CollectCheck{ + BaseCheck: base.NewBaseCheck(args), + lastMetric: make(map[string]net.IOCountersStat), + } + case base.NET: + check = &SendCheck{ + BaseCheck: base.NewBaseCheck(args), + } + } + + return check +} diff --git a/pkg/collector/check/realtime/net/net.go b/pkg/collector/check/realtime/net/net_collect.go similarity index 62% rename from pkg/collector/check/realtime/net/net.go rename to pkg/collector/check/realtime/net/net_collect.go index d9338d7..d93bdd9 100644 --- a/pkg/collector/check/realtime/net/net.go +++ 
b/pkg/collector/check/realtime/net/net_collect.go @@ -10,20 +10,13 @@ import ( "github.com/shirou/gopsutil/v4/net" ) -type Check struct { +type CollectCheck struct { base.BaseCheck lastMetric map[string]net.IOCountersStat } -func NewCheck(args *base.CheckArgs) base.CheckStrategy { - return &Check{ - BaseCheck: base.NewBaseCheck(args), - lastMetric: make(map[string]net.IOCountersStat), - } -} - -func (c *Check) Execute(ctx context.Context) error { - metric, err := c.collectAndSaveTraffic(ctx) +func (c *CollectCheck) Execute(ctx context.Context) error { + err := c.collectAndSaveTraffic(ctx) if err != nil { return err } @@ -32,32 +25,24 @@ func (c *Check) Execute(ctx context.Context) error { return ctx.Err() } - buffer := c.GetBuffer() - buffer.SuccessQueue <- metric - return nil } -func (c *Check) collectAndSaveTraffic(ctx context.Context) (base.MetricData, error) { +func (c *CollectCheck) collectAndSaveTraffic(ctx context.Context) error { ioCounters, interfaces, err := c.collectTraffic() if err != nil { - return base.MetricData{}, err - } - - metric := base.MetricData{ - Type: base.NET, - Data: c.parseTraffic(ioCounters, interfaces), + return err } - err = c.saveTraffic(metric.Data, ctx) + err = c.saveTraffic(c.parseTraffic(ioCounters, interfaces), ctx) if err != nil { - return base.MetricData{}, err + return err } - return metric, nil + return nil } -func (c *Check) collectTraffic() ([]net.IOCountersStat, map[string]net.InterfaceStat, error) { +func (c *CollectCheck) collectTraffic() ([]net.IOCountersStat, map[string]net.InterfaceStat, error) { ioCounters, err := c.collectIOCounters() if err != nil { return nil, nil, err @@ -71,15 +56,15 @@ func (c *Check) collectTraffic() ([]net.IOCountersStat, map[string]net.Interface return ioCounters, interfaces, nil } -func (c *Check) parseTraffic(ioCOunters []net.IOCountersStat, interfaces map[string]net.InterfaceStat) []base.CheckResult { +func (c *CollectCheck) parseTraffic(ioCOunters []net.IOCountersStat, interfaces 
map[string]net.InterfaceStat) []base.CheckResult { var data []base.CheckResult for _, ioCounter := range ioCOunters { if _, ok := interfaces[ioCounter.Name]; ok { var inputPps, inputBps, outputPps, outputBps float64 if lastCounter, exists := c.lastMetric[ioCounter.Name]; exists { - inputPps, outputPps = utils.CalculatePps(ioCounter, lastCounter, c.GetInterval()) - inputBps, outputBps = utils.CalculateBps(ioCounter, lastCounter, c.GetInterval()) + inputPps, outputPps = utils.CalculateNetworkPps(ioCounter, lastCounter, c.GetInterval()) + inputBps, outputBps = utils.CalculateNetworkBps(ioCounter, lastCounter, c.GetInterval()) } else { inputPps = 0 inputBps = 0 @@ -101,7 +86,7 @@ func (c *Check) parseTraffic(ioCOunters []net.IOCountersStat, interfaces map[str return data } -func (c *Check) collectInterfaces() (map[string]net.InterfaceStat, error) { +func (c *CollectCheck) collectInterfaces() (map[string]net.InterfaceStat, error) { ifaces, err := net.Interfaces() if err != nil { return nil, err @@ -119,7 +104,7 @@ func (c *Check) collectInterfaces() (map[string]net.InterfaceStat, error) { return interfaces, nil } -func (c *Check) collectIOCounters() ([]net.IOCountersStat, error) { +func (c *CollectCheck) collectIOCounters() ([]net.IOCountersStat, error) { ioCounters, err := net.IOCounters(true) if err != nil { return nil, err @@ -128,7 +113,7 @@ func (c *Check) collectIOCounters() ([]net.IOCountersStat, error) { return ioCounters, nil } -func (c *Check) saveTraffic(data []base.CheckResult, ctx context.Context) error { +func (c *CollectCheck) saveTraffic(data []base.CheckResult, ctx context.Context) error { client := c.GetClient() err := client.Traffic.MapCreateBulk(data, func(q *ent.TrafficCreate, i int) { q.SetTimestamp(data[i].Timestamp). 
diff --git a/pkg/collector/check/realtime/net/net_send.go b/pkg/collector/check/realtime/net/net_send.go new file mode 100644 index 0000000..01e7e6f --- /dev/null +++ b/pkg/collector/check/realtime/net/net_send.go @@ -0,0 +1,86 @@ +package net + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" +) + +type SendCheck struct { + base.BaseCheck +} + +func (c *SendCheck) Execute(ctx context.Context) error { + metric, err := c.queryTraffic(ctx) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + + buffer := c.GetBuffer() + buffer.SuccessQueue <- metric + + return nil +} + +func (c *SendCheck) queryTraffic(ctx context.Context) (base.MetricData, error) { + queryset, err := c.getTraffic(ctx) + if err != nil { + return base.MetricData{}, err + } + + var data []base.CheckResult + for _, row := range queryset { + data = append(data, base.CheckResult{ + Timestamp: time.Now(), + Name: row.Name, + PeakInputPps: row.PeakInputPps, + PeakInputBps: row.PeakInputBps, + PeakOutputPps: row.PeakOutputPps, + PeakOutputBps: row.PeakOutputBps, + AvgInputPps: row.AvgInputPps, + AvgInputBps: row.AvgInputBps, + AvgOutputPps: row.AvgOutputPps, + AvgOutputBps: row.AvgOutputBps, + }) + } + metric := base.MetricData{ + Type: base.NET, + Data: data, + } + + return metric, nil +} + +func (c *SendCheck) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) { + client := c.GetClient() + interval := c.GetInterval() + now := time.Now() + from := now.Add(-1 * interval * time.Second) + + var queryset []base.TrafficQuerySet + err := client.Traffic.Query(). + Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)). + GroupBy(traffic.FieldName). 
+ Aggregate( + ent.As(ent.Max(traffic.FieldInputPps), "peak_input_pps"), + ent.As(ent.Max(traffic.FieldInputBps), "peak_input_bps"), + ent.As(ent.Max(traffic.FieldOutputPps), "peak_output_pps"), + ent.As(ent.Max(traffic.FieldOutputBps), "peak_output_bps"), + ent.As(ent.Mean(traffic.FieldInputPps), "avg_input_pps"), + ent.As(ent.Mean(traffic.FieldInputBps), "avg_input_bps"), + ent.As(ent.Mean(traffic.FieldOutputPps), "avg_output_pps"), + ent.As(ent.Mean(traffic.FieldOutputBps), "avg_output_bps"), + ).Scan(ctx, &queryset) + if err != nil { + return queryset, err + } + + return queryset, nil +} From a63dd3b682cb84f5028f58275d21936c6eeb390a Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 7 Jan 2025 19:45:37 +0900 Subject: [PATCH 145/364] Update to apply the changes Update due to changes in alpacon-server and refactoring of alpamon. --- pkg/collector/check/base/types.go | 68 ++++++++++--------- .../check/batch/daily/cleanup/cleanup.go | 66 ++++++++++++------ pkg/collector/check/batch/daily/cpu/cpu.go | 13 ++-- pkg/collector/check/batch/daily/disk/io/io.go | 25 ++++--- .../check/batch/daily/disk/usage/usage.go | 13 ++-- .../check/batch/daily/memory/memory.go | 13 ++-- pkg/collector/check/batch/daily/net/net.go | 5 +- pkg/collector/check/batch/hourly/cpu/cpu.go | 13 ++-- .../check/batch/hourly/disk/io/io.go | 33 +++++---- .../check/batch/hourly/disk/usage/usage.go | 13 ++-- .../check/batch/hourly/memory/memory.go | 13 ++-- pkg/collector/check/batch/hourly/net/net.go | 5 +- pkg/collector/check/check.go | 2 + pkg/collector/collector.go | 4 +- ...as.sql => 20250107063722_init_schemas.sql} | 10 +-- pkg/db/migration/atlas.sum | 4 +- pkg/db/schema/cpu_per_hour.go | 4 +- pkg/db/schema/diskio.go | 4 +- pkg/db/schema/diskio_per_hour.go | 8 +-- pkg/db/schema/diskusage_per_hour.go | 4 +- pkg/db/schema/memory_per_hour.go | 4 +- pkg/utils/utils.go | 24 +++++-- 22 files changed, 190 insertions(+), 158 deletions(-) rename pkg/db/migration/{20241226055814_init_schemas.sql => 
20250107063722_init_schemas.sql} (86%) diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index bafdb21..6df934f 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -17,9 +17,11 @@ const ( DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" DISK_IO CheckType = "disk_io" + DISK_IO_COLLECTOR CheckType = "disk_io_collector" DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" DISK_IO_PER_DAY CheckType = "disk_io_per_day" NET CheckType = "net" + NET_COLLECTOR CheckType = "net_collector" NET_PER_HOUR CheckType = "net_per_hour" NET_PER_DAY CheckType = "net_per_day" CLEANUP CheckType = "cleanup" @@ -46,11 +48,11 @@ type MemoryQuerySet struct { } type DiskIOQuerySet struct { - Device string `json:"device" db:"device"` - PeakReadBytes float64 `json:"peak_read_bytes"` - PeakWriteBytes float64 `json:"peak_write_bytes"` - AvgReadBytes float64 `json:"avg_read_bytes"` - AvgWriteBytes float64 `json:"avg_write_bytes"` + Device string `json:"device" db:"device"` + PeakReadBps float64 `json:"peak_read_bps"` + PeakWriteBps float64 `json:"peak_write_bps"` + AvgReadBps float64 `json:"avg_read_bps"` + AvgWriteBps float64 `json:"avg_write_bps"` } type DiskUsageQuerySet struct { @@ -72,34 +74,34 @@ type TrafficQuerySet struct { } type CheckResult struct { - Timestamp time.Time `json:"timestamp"` - Usage float64 `json:"usage,omitempty"` - Name string `json:"name,omitempty"` - Device string `json:"device,omitempty"` - MountPoint string `json:"mount_point,omitempty"` - Total uint64 `json:"total,omitempty"` - Free uint64 `json:"free,omitempty"` - Used uint64 `json:"used,omitempty"` - WriteBytes *uint64 `json:"write_bytes,omitempty"` - ReadBytes *uint64 `json:"read_bytes,omitempty"` - InputPps *float64 `json:"input_pps,omitempty"` - InputBps *float64 `json:"input_bps,omitempty"` - OutputPps *float64 `json:"output_pps,omitempty"` - OutputBps *float64 
`json:"output_bps,omitempty"` - PeakUsage float64 `json:"peak_usage,omitempty"` - AvgUsage float64 `json:"avg_usage,omitempty"` - PeakWriteBytes uint64 `json:"peak_write_bytes,omitempty"` - PeakReadBytes uint64 `json:"peak_read_bytes,omitempty"` - AvgWriteBytes uint64 `json:"avg_write_bytes,omitempty"` - AvgReadBytes uint64 `json:"avg_read_bytes,omitempty"` - PeakInputPps float64 `json:"peak_input_pps,omitempty"` - PeakInputBps float64 `json:"peak_input_bps,omitempty"` - PeakOutputPps float64 `json:"peak_output_pps,omitempty"` - PeakOutputBps float64 `json:"peak_output_bps,omitempty"` - AvgInputPps float64 `json:"avg_input_pps,omitempty"` - AvgInputBps float64 `json:"avg_input_bps,omitempty"` - AvgOutputPps float64 `json:"avg_output_pps,omitempty"` - AvgOutputBps float64 `json:"avg_output_bps,omitempty"` + Timestamp time.Time `json:"timestamp"` + Usage float64 `json:"usage,omitempty"` + Name string `json:"name,omitempty"` + Device string `json:"device,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + Total uint64 `json:"total,omitempty"` + Free uint64 `json:"free,omitempty"` + Used uint64 `json:"used,omitempty"` + WriteBps *float64 `json:"write_bps,omitempty"` + ReadBps *float64 `json:"read_bps,omitempty"` + InputPps *float64 `json:"input_pps,omitempty"` + InputBps *float64 `json:"input_bps,omitempty"` + OutputPps *float64 `json:"output_pps,omitempty"` + OutputBps *float64 `json:"output_bps,omitempty"` + Peak float64 `json:"peak,omitempty"` + Avg float64 `json:"avg,omitempty"` + PeakWriteBps float64 `json:"peak_write_bps,omitempty"` + PeakReadBps float64 `json:"peak_read_bps,omitempty"` + AvgWriteBps float64 `json:"avg_write_bps,omitempty"` + AvgReadBps float64 `json:"avg_read_bps,omitempty"` + PeakInputPps float64 `json:"peak_input_pps,omitempty"` + PeakInputBps float64 `json:"peak_input_bps,omitempty"` + PeakOutputPps float64 `json:"peak_output_pps,omitempty"` + PeakOutputBps float64 `json:"peak_output_bps,omitempty"` + AvgInputPps float64 
`json:"avg_input_pps,omitempty"` + AvgInputBps float64 `json:"avg_input_bps,omitempty"` + AvgOutputPps float64 `json:"avg_output_pps,omitempty"` + AvgOutputBps float64 `json:"avg_output_bps,omitempty"` } type MetricData struct { diff --git a/pkg/collector/check/batch/daily/cleanup/cleanup.go b/pkg/collector/check/batch/daily/cleanup/cleanup.go index 671dbb8..889c824 100644 --- a/pkg/collector/check/batch/daily/cleanup/cleanup.go +++ b/pkg/collector/check/batch/daily/cleanup/cleanup.go @@ -2,9 +2,20 @@ package cpu import ( "context" + "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpuperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskioperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusageperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memoryperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/trafficperhour" ) var ( @@ -34,7 +45,7 @@ var ( } ) -type deleteQuery func(context.Context, *ent.Client) error +type deleteQuery func(context.Context, *ent.Client, time.Time) error type Check struct { base.BaseCheck @@ -60,9 +71,10 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) deleteAllMetric(ctx context.Context) error { + now := time.Now() for _, table := range tables { if query, exist := deleteQueryMap[table]; exist { - if err := query(ctx, c.GetClient()); err != nil { + if err := query(ctx, c.GetClient(), now); err != nil { return err } } @@ -71,14 +83,15 @@ func (c *Check) deleteAllMetric(ctx context.Context) error { return nil } -func deleteAllCPU(ctx context.Context, client 
*ent.Client) error { +func deleteAllCPU(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.CPU.Delete().Exec(ctx) + _, err = tx.CPU.Delete(). + Where(cpu.TimestampLTE(now.Add(-1 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -88,14 +101,15 @@ func deleteAllCPU(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllCPUPerHour(ctx context.Context, client *ent.Client) error { +func deleteAllCPUPerHour(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.CPUPerHour.Delete().Exec(ctx) + _, err = tx.CPUPerHour.Delete(). + Where(cpuperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -105,14 +119,15 @@ func deleteAllCPUPerHour(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllMemory(ctx context.Context, client *ent.Client) error { +func deleteAllMemory(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.Memory.Delete().Exec(ctx) + _, err = tx.Memory.Delete(). + Where(memory.TimestampLTE(now.Add(-1 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -122,14 +137,15 @@ func deleteAllMemory(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client) error { +func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.MemoryPerHour.Delete().Exec(ctx) + _, err = tx.MemoryPerHour.Delete(). 
+ Where(memoryperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -139,14 +155,15 @@ func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllDiskUsage(ctx context.Context, client *ent.Client) error { +func deleteAllDiskUsage(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.DiskUsage.Delete().Exec(ctx) + _, err = tx.DiskUsage.Delete(). + Where(diskusage.TimestampLTE(now.Add(-1 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -156,14 +173,15 @@ func deleteAllDiskUsage(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client) error { +func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.DiskIOPerHour.Delete().Exec(ctx) + _, err = tx.DiskUsagePerHour.Delete(). + Where(diskusageperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -173,14 +191,15 @@ func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllDiskIO(ctx context.Context, client *ent.Client) error { +func deleteAllDiskIO(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = client.DiskIO.Delete().Exec(ctx) + _, err = client.DiskIO.Delete(). 
+ Where(diskio.TimestampLTE(now.Add(-1 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -190,14 +209,15 @@ func deleteAllDiskIO(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client) error { +func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.DiskIOPerHour.Delete().Exec(ctx) + _, err = tx.DiskIOPerHour.Delete(). + Where(diskioperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -207,14 +227,15 @@ func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllTraffic(ctx context.Context, client *ent.Client) error { +func deleteAllTraffic(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.Traffic.Delete().Exec(ctx) + _, err = tx.Traffic.Delete(). + Where(traffic.TimestampLTE(now.Add(-1 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -224,14 +245,15 @@ func deleteAllTraffic(ctx context.Context, client *ent.Client) error { return nil } -func deleteAllTrafficPerHour(ctx context.Context, client *ent.Client) error { +func deleteAllTrafficPerHour(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.TrafficPerHour.Delete().Exec(ctx) + _, err = tx.TrafficPerHour.Delete(). 
+ Where(trafficperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index 6da8e13..5e90f60 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -43,8 +43,8 @@ func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { data := base.CheckResult{ Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + Peak: queryset[0].Max, + Avg: queryset[0].AVG, } metric := base.MetricData{ Type: base.CPU_PER_DAY, @@ -68,8 +68,8 @@ func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { err := client.CPUPerHour.Query(). Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)). Aggregate( - ent.Max(cpuperhour.FieldPeakUsage), - ent.Mean(cpuperhour.FieldAvgUsage), + ent.Max(cpuperhour.FieldPeak), + ent.Mean(cpuperhour.FieldAvg), ).Scan(ctx, &queryset) if err != nil { return queryset, err @@ -85,11 +85,10 @@ func (c *Check) deleteCPUPerHour(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-24 * time.Hour) + from := time.Now().Add(-24 * time.Hour) _, err = tx.CPUPerHour.Delete(). 
- Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)).Exec(ctx) + Where(cpuperhour.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index 0acf4ce..d99fb33 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -44,12 +44,12 @@ func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) var data []base.CheckResult for _, row := range queryset { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - PeakWriteBytes: uint64(row.PeakWriteBytes), - PeakReadBytes: uint64(row.PeakReadBytes), - AvgWriteBytes: uint64(row.AvgWriteBytes), - AvgReadBytes: uint64(row.AvgReadBytes), + Timestamp: time.Now(), + Device: row.Device, + PeakWriteBps: row.PeakWriteBps, + PeakReadBps: row.PeakReadBps, + AvgWriteBps: row.AvgWriteBps, + AvgReadBps: row.AvgReadBps, }) } metric := base.MetricData{ @@ -75,10 +75,10 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)). GroupBy(diskioperhour.FieldDevice). 
Aggregate( - ent.As(ent.Max(diskioperhour.FieldPeakReadBytes), "peak_read_bytes"), - ent.As(ent.Max(diskioperhour.FieldPeakWriteBytes), "peak_write_bytes"), - ent.As(ent.Mean(diskioperhour.FieldAvgReadBytes), "avg_read_bytes"), - ent.As(ent.Mean(diskioperhour.FieldAvgWriteBytes), "avg_write_bytes"), + ent.As(ent.Max(diskioperhour.FieldPeakReadBps), "peak_read_bps"), + ent.As(ent.Max(diskioperhour.FieldPeakWriteBps), "peak_write_bps"), + ent.As(ent.Mean(diskioperhour.FieldAvgReadBps), "avg_read_bps"), + ent.As(ent.Mean(diskioperhour.FieldAvgWriteBps), "avg_write_bps"), ).Scan(ctx, &queryset) if err != nil { return queryset, err @@ -94,11 +94,10 @@ func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-24 * time.Hour) + from := time.Now().Add(-24 * time.Hour) _, err = tx.DiskIOPerHour.Delete(). - Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)).Exec(ctx) + Where(diskioperhour.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index 4dae6b2..b6bc106 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -46,8 +46,8 @@ func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, err data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, - PeakUsage: row.Max, - AvgUsage: row.AVG, + Peak: row.Max, + Avg: row.AVG, }) } metric := base.MetricData{ @@ -73,8 +73,8 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). GroupBy(diskusageperhour.FieldDevice). 
Aggregate( - ent.Max(diskusageperhour.FieldPeakUsage), - ent.Mean(diskusageperhour.FieldAvgUsage), + ent.Max(diskusageperhour.FieldPeak), + ent.Mean(diskusageperhour.FieldAvg), ).Scan(ctx, &queryset) if err != nil { return queryset, err @@ -90,11 +90,10 @@ func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-24 * time.Hour) + from := time.Now().Add(-24 * time.Hour) _, err = tx.DiskUsagePerHour.Delete(). - Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)).Exec(ctx) + Where(diskusageperhour.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index d2a8ef1..dd60aad 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -43,8 +43,8 @@ func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) data := base.CheckResult{ Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + Peak: queryset[0].Max, + Avg: queryset[0].AVG, } metric := base.MetricData{ Type: base.MEM_PER_DAY, @@ -68,8 +68,8 @@ func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, er err := client.MemoryPerHour.Query(). Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)). Aggregate( - ent.Max(memoryperhour.FieldPeakUsage), - ent.Mean(memoryperhour.FieldAvgUsage), + ent.Max(memoryperhour.FieldPeak), + ent.Mean(memoryperhour.FieldAvg), ).Scan(ctx, &queryset) if err != nil { return queryset, err @@ -85,11 +85,10 @@ func (c *Check) deleteMemoryPerHour(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-24 * time.Hour) + from := time.Now().Add(-24 * time.Hour) _, err = tx.MemoryPerHour.Delete(). 
- Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)).Exec(ctx) + Where(memoryperhour.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index e217524..d16acda 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -102,11 +102,10 @@ func (c *Check) deleteTrafficPerHour(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-24 * time.Hour) + from := time.Now().Add(-24 * time.Hour) _, err = tx.TrafficPerHour.Delete(). - Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)).Exec(ctx) + Where(trafficperhour.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index 57a79c0..c95fa44 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -43,8 +43,8 @@ func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { data := base.CheckResult{ Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + Peak: queryset[0].Max, + Avg: queryset[0].AVG, } metric := base.MetricData{ Type: base.CPU_PER_HOUR, @@ -92,8 +92,8 @@ func (c *Check) saveCPUPerHour(data base.CheckResult, ctx context.Context) error err = tx.CPUPerHour.Create(). SetTimestamp(data.Timestamp). - SetPeakUsage(data.PeakUsage). - SetAvgUsage(data.AvgUsage).Exec(ctx) + SetPeak(data.Peak). + SetAvg(data.Avg).Exec(ctx) if err != nil { return err } @@ -110,11 +110,10 @@ func (c *Check) deleteCPU(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-1 * time.Hour) + from := time.Now().Add(-1 * time.Hour) _, err = tx.CPU.Delete(). 
- Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)).Exec(ctx) + Where(cpu.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index 9868a28..52287b6 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -44,12 +44,12 @@ func (c *Check) queryDiskIO(ctx context.Context) (base.MetricData, error) { var data []base.CheckResult for _, row := range queryset { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Device: row.Device, - PeakWriteBytes: uint64(row.PeakWriteBytes), - PeakReadBytes: uint64(row.PeakReadBytes), - AvgWriteBytes: uint64(row.AvgWriteBytes), - AvgReadBytes: uint64(row.AvgReadBytes), + Timestamp: time.Now(), + Device: row.Device, + PeakWriteBps: row.PeakWriteBps, + PeakReadBps: row.PeakReadBps, + AvgWriteBps: row.AvgWriteBps, + AvgReadBps: row.AvgReadBps, }) } metric := base.MetricData{ @@ -80,10 +80,10 @@ func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). GroupBy(diskio.FieldDevice). Aggregate( - ent.As(ent.Max(diskio.FieldReadBytes), "peak_read_bytes"), - ent.As(ent.Max(diskio.FieldWriteBytes), "peak_write_bytes"), - ent.As(ent.Mean(diskio.FieldReadBytes), "avg_read_bytes"), - ent.As(ent.Mean(diskio.FieldWriteBytes), "avg_write_bytes"), + ent.As(ent.Max(diskio.FieldReadBps), "peak_read_bps"), + ent.As(ent.Max(diskio.FieldWriteBps), "peak_write_bps"), + ent.As(ent.Mean(diskio.FieldReadBps), "avg_read_bps"), + ent.As(ent.Mean(diskio.FieldWriteBps), "avg_write_bps"), ).Scan(ctx, &queryset) if err != nil { return queryset, err @@ -102,10 +102,10 @@ func (c *Check) saveDiskIOPerHour(data []base.CheckResult, ctx context.Context) err = tx.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). 
- SetPeakReadBytes(int64(data[i].PeakReadBytes)). - SetPeakWriteBytes(int64(data[i].PeakWriteBytes)). - SetAvgReadBytes(int64(data[i].AvgReadBytes)). - SetAvgWriteBytes(int64(data[i].AvgWriteBytes)) + SetPeakReadBps(data[i].PeakReadBps). + SetPeakWriteBps(data[i].PeakWriteBps). + SetAvgReadBps(data[i].AvgReadBps). + SetAvgWriteBps(data[i].AvgWriteBps) }).Exec(ctx) if err != nil { return err @@ -123,11 +123,10 @@ func (c *Check) deleteDiskIO(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-1 * time.Hour) + from := time.Now().Add(-1 * time.Hour) _, err = tx.DiskIO.Delete(). - Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)).Exec(ctx) + Where(diskio.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index ab9f24a..14bc382 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -47,8 +47,8 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, - PeakUsage: row.Max, - AvgUsage: row.AVG, + Peak: row.Max, + Avg: row.AVG, }) } metric := base.MetricData{ @@ -117,8 +117,8 @@ func (c *Check) saveDiskUsagePerHour(data []base.CheckResult, ctx context.Contex err = tx.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). - SetPeakUsage(data[i].PeakUsage). - SetAvgUsage(data[i].AvgUsage) + SetPeak(data[i].Peak). + SetAvg(data[i].Avg) }).Exec(ctx) if err != nil { return err @@ -136,11 +136,10 @@ func (c *Check) deleteDiskUsage(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-1 * time.Hour) + from := time.Now().Add(-1 * time.Hour) _, err = tx.DiskUsage.Delete(). 
- Where(diskusage.TimestampGTE(from), diskusage.TimestampLTE(now)).Exec(ctx) + Where(diskusage.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 148eabf..5b2f276 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -43,8 +43,8 @@ func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { data := base.CheckResult{ Timestamp: time.Now(), - PeakUsage: queryset[0].Max, - AvgUsage: queryset[0].AVG, + Peak: queryset[0].Max, + Avg: queryset[0].AVG, } metric := base.MetricData{ Type: base.MEM_PER_HOUR, @@ -92,8 +92,8 @@ func (c *Check) saveMemoryPerHour(data base.CheckResult, ctx context.Context) er err = tx.MemoryPerHour.Create(). SetTimestamp(data.Timestamp). - SetPeakUsage(data.PeakUsage). - SetAvgUsage(data.AvgUsage).Exec(ctx) + SetPeak(data.Peak). + SetAvg(data.Avg).Exec(ctx) if err != nil { return err } @@ -110,11 +110,10 @@ func (c *Check) deleteMemory(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-1 * time.Hour) + from := time.Now().Add(-1 * time.Hour) _, err = tx.Memory.Delete(). - Where(memory.TimestampGTE(from), memory.TimestampLTE(now)).Exec(ctx) + Where(memory.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 858c9ce..030e74b 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -135,11 +135,10 @@ func (c *Check) deleteTraffic(ctx context.Context) error { } defer func() { _ = tx.Rollback() }() - now := time.Now() - from := now.Add(-1 * time.Hour) + from := time.Now().Add(-1 * time.Hour) _, err = tx.Traffic.Delete(). 
- Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)).Exec(ctx) + Where(traffic.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index c85f59b..66a2e09 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -34,9 +34,11 @@ var checkFactories = map[base.CheckType]newCheck{ base.DISK_USAGE_PER_HOUR: diskusagehourly.NewCheck, base.DISK_USAGE_PER_DAY: diskusagedaily.NewCheck, base.DISK_IO: diskio.NewCheck, + base.DISK_IO_COLLECTOR: diskio.NewCheck, base.DISK_IO_PER_HOUR: diskiohourly.NewCheck, base.DISK_IO_PER_DAY: diskiodaily.NewCheck, base.NET: net.NewCheck, + base.NET_COLLECTOR: net.NewCheck, base.NET_PER_HOUR: nethourly.NewCheck, base.NET_PER_DAY: netdaily.NewCheck, base.CLEANUP: cleanup.NewCheck, diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 897cabe..ef40945 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -117,12 +117,12 @@ func NewCollector(args collectorArgs) (*Collector, error) { func (c *Collector) initTasks(args collectorArgs) error { for _, entry := range args.conf { - duration := time.Duration(entry.Interval) * time.Minute + duration := time.Duration(entry.Interval) * time.Second name := string(entry.Type) + "_" + uuid.NewString() checkArgs := base.CheckArgs{ Type: entry.Type, Name: name, - Interval: time.Duration(duration.Minutes() * float64(time.Minute)), + Interval: time.Duration(duration.Seconds() * float64(time.Second)), Buffer: c.buffer, Client: args.client, } diff --git a/pkg/db/migration/20241226055814_init_schemas.sql b/pkg/db/migration/20250107063722_init_schemas.sql similarity index 86% rename from pkg/db/migration/20241226055814_init_schemas.sql rename to pkg/db/migration/20250107063722_init_schemas.sql index a8b45c1..12378cc 100644 --- a/pkg/db/migration/20241226055814_init_schemas.sql +++ b/pkg/db/migration/20250107063722_init_schemas.sql @@ -3,15 +3,15 @@ CREATE TABLE 
`cp_us` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestam -- Create index "cpu_timestamp" to table: "cp_us" CREATE INDEX `cpu_timestamp` ON `cp_us` (`timestamp`); -- Create "cpu_per_hours" table -CREATE TABLE `cpu_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +CREATE TABLE `cpu_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); -- Create index "cpuperhour_timestamp" to table: "cpu_per_hours" CREATE INDEX `cpuperhour_timestamp` ON `cpu_per_hours` (`timestamp`); -- Create "disk_ios" table -CREATE TABLE `disk_ios` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `read_bytes` integer NOT NULL, `write_bytes` integer NOT NULL); +CREATE TABLE `disk_ios` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `read_bps` real NOT NULL, `write_bps` real NOT NULL); -- Create index "diskio_timestamp" to table: "disk_ios" CREATE INDEX `diskio_timestamp` ON `disk_ios` (`timestamp`); -- Create "disk_io_per_hours" table -CREATE TABLE `disk_io_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_read_bytes` integer NOT NULL, `peak_write_bytes` integer NOT NULL, `avg_read_bytes` integer NOT NULL, `avg_write_bytes` integer NOT NULL); +CREATE TABLE `disk_io_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_read_bps` real NOT NULL, `peak_write_bps` real NOT NULL, `avg_read_bps` real NOT NULL, `avg_write_bps` real NOT NULL); -- Create index "diskioperhour_timestamp" to table: "disk_io_per_hours" CREATE INDEX `diskioperhour_timestamp` ON `disk_io_per_hours` (`timestamp`); -- Create "disk_usages" table @@ -19,7 +19,7 @@ CREATE TABLE `disk_usages` (`id` 
integer NOT NULL PRIMARY KEY AUTOINCREMENT, `ti -- Create index "diskusage_timestamp" to table: "disk_usages" CREATE INDEX `diskusage_timestamp` ON `disk_usages` (`timestamp`); -- Create "disk_usage_per_hours" table -CREATE TABLE `disk_usage_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +CREATE TABLE `disk_usage_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); -- Create index "diskusageperhour_timestamp" to table: "disk_usage_per_hours" CREATE INDEX `diskusageperhour_timestamp` ON `disk_usage_per_hours` (`timestamp`); -- Create "memories" table @@ -27,7 +27,7 @@ CREATE TABLE `memories` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `times -- Create index "memory_timestamp" to table: "memories" CREATE INDEX `memory_timestamp` ON `memories` (`timestamp`); -- Create "memory_per_hours" table -CREATE TABLE `memory_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak_usage` real NOT NULL, `avg_usage` real NOT NULL); +CREATE TABLE `memory_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); -- Create index "memoryperhour_timestamp" to table: "memory_per_hours" CREATE INDEX `memoryperhour_timestamp` ON `memory_per_hours` (`timestamp`); -- Create "traffics" table diff --git a/pkg/db/migration/atlas.sum b/pkg/db/migration/atlas.sum index 9af891c..193486d 100644 --- a/pkg/db/migration/atlas.sum +++ b/pkg/db/migration/atlas.sum @@ -1,2 +1,2 @@ -h1:Ua6PYlur0VFyOYZWiXefY4a8EwrW9OTZ9monNCM67Bw= -20241226055814_init_schemas.sql h1:1tpxOm5guI/FF8f3k8bDT+qJiLfze7vuhkMsH5QWm0U= +h1:k69B7UlJuvRJOvyjX4gVPNwJvuSsyldCoHWvTFEo8yw= +20250107063722_init_schemas.sql h1:LD/GOMLp+gzyK5owvHj++opu9v3nHF/6rTr/aRS0A1k= diff --git 
a/pkg/db/schema/cpu_per_hour.go b/pkg/db/schema/cpu_per_hour.go index 3f4ec0d..b872f46 100644 --- a/pkg/db/schema/cpu_per_hour.go +++ b/pkg/db/schema/cpu_per_hour.go @@ -17,8 +17,8 @@ type CPUPerHour struct { func (CPUPerHour) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), - field.Float("peak_usage"), - field.Float("avg_usage"), + field.Float("peak"), + field.Float("avg"), } } diff --git a/pkg/db/schema/diskio.go b/pkg/db/schema/diskio.go index 02fc945..e9fb145 100644 --- a/pkg/db/schema/diskio.go +++ b/pkg/db/schema/diskio.go @@ -18,8 +18,8 @@ func (DiskIO) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), - field.Int64("read_bytes"), - field.Int64("write_bytes"), + field.Float("read_bps"), + field.Float("write_bps"), } } diff --git a/pkg/db/schema/diskio_per_hour.go b/pkg/db/schema/diskio_per_hour.go index a426367..a78934f 100644 --- a/pkg/db/schema/diskio_per_hour.go +++ b/pkg/db/schema/diskio_per_hour.go @@ -18,10 +18,10 @@ func (DiskIOPerHour) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), - field.Int64("peak_read_bytes"), - field.Int64("peak_write_bytes"), - field.Int64("avg_read_bytes"), - field.Int64("avg_write_bytes"), + field.Float("peak_read_bps"), + field.Float("peak_write_bps"), + field.Float("avg_read_bps"), + field.Float("avg_write_bps"), } } diff --git a/pkg/db/schema/diskusage_per_hour.go b/pkg/db/schema/diskusage_per_hour.go index 43fba74..8a894d9 100644 --- a/pkg/db/schema/diskusage_per_hour.go +++ b/pkg/db/schema/diskusage_per_hour.go @@ -18,8 +18,8 @@ func (DiskUsagePerHour) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), - field.Float("peak_usage"), - field.Float("avg_usage"), + field.Float("peak"), + field.Float("avg"), } } diff --git a/pkg/db/schema/memory_per_hour.go b/pkg/db/schema/memory_per_hour.go index 
9ead465..db5fa4e 100644 --- a/pkg/db/schema/memory_per_hour.go +++ b/pkg/db/schema/memory_per_hour.go @@ -17,8 +17,8 @@ type MemoryPerHour struct { func (MemoryPerHour) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), - field.Float("peak_usage"), - field.Float("avg_usage"), + field.Float("peak"), + field.Float("avg"), } } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 7fe349a..5acd236 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -13,6 +13,7 @@ import ( "time" "github.com/rs/zerolog/log" + "github.com/shirou/gopsutil/v4/disk" "github.com/shirou/gopsutil/v4/host" "github.com/shirou/gopsutil/v4/net" ) @@ -118,7 +119,7 @@ func CalculateBackOff(delay time.Duration, attempt int) time.Duration { return backoff * jitter } -func CalculateBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { +func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { if interval == 0 { return 0, 0 } @@ -133,7 +134,7 @@ func CalculateBps(current net.IOCountersStat, last net.IOCountersStat, interval return inputBps, outputBps } -func CalculatePps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputPps float64, outputPps float64) { +func CalculateNetworkPps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputPps float64, outputPps float64) { if interval == 0 { return 0, 0 } @@ -142,8 +143,23 @@ func CalculatePps(current net.IOCountersStat, last net.IOCountersStat, interval outputPktsDiff := float64(current.PacketsSent - last.PacketsSent) seconds := interval.Seconds() - inputPps = (inputPktsDiff * 8) / seconds - outputPps = (outputPktsDiff * 8) / seconds + inputPps = inputPktsDiff / seconds + outputPps = outputPktsDiff / seconds return inputPps, outputPps } + +func CalculateDiskIOBps(current disk.IOCountersStat, last 
disk.IOCountersStat, interval time.Duration) (readBps float64, writeBps float64) { + if interval == 0 { + return 0, 0 + } + + readBytesDiff := float64(current.ReadBytes - last.ReadBytes) + writeBytesDiff := float64(current.WriteBytes - last.WriteBytes) + seconds := interval.Seconds() + + readBps = readBytesDiff / seconds + writeBps = writeBytesDiff / seconds + + return readBps, writeBps +} From f84abee148ae288cfc783ee477db0d2f4da01d32 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 10 Jan 2025 16:01:53 +0900 Subject: [PATCH 146/364] Minor fix Apply go code convention. Apply code review --- cmd/alpamon/command/root.go | 17 +++++------- pkg/collector/check/batch/daily/cpu/cpu.go | 14 +++++----- pkg/collector/check/batch/daily/disk/io/io.go | 12 ++++----- .../check/batch/daily/disk/usage/usage.go | 12 ++++----- .../check/batch/daily/memory/memory.go | 14 +++++----- pkg/collector/check/batch/daily/net/net.go | 12 ++++----- pkg/collector/check/batch/hourly/cpu/cpu.go | 14 +++++----- .../check/batch/hourly/disk/io/io.go | 12 ++++----- .../check/batch/hourly/disk/usage/usage.go | 12 ++++----- .../check/batch/hourly/memory/memory.go | 14 +++++----- .../check/realtime/disk/io/io_send.go | 12 ++++----- pkg/collector/check/realtime/net/net_send.go | 12 ++++----- pkg/collector/collector.go | 27 ++++++++----------- pkg/collector/transporter/transporter.go | 16 ++++++----- pkg/db/migrate.go | 2 +- pkg/utils/utils.go | 9 ------- 16 files changed, 98 insertions(+), 113 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 508c235..cf64f55 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -3,7 +3,6 @@ package command import ( "context" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/version" "os" "syscall" @@ -15,6 +14,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/runner" "github.com/alpacanetworks/alpamon-go/pkg/scheduler" "github.com/alpacanetworks/alpamon-go/pkg/utils" + 
"github.com/alpacanetworks/alpamon-go/pkg/version" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -71,17 +71,12 @@ func runAgent() { client := db.InitDB(ctx) // Collector - collector := collector.InitCollector(session, client) - if err := collector.Start(ctx); err != nil { - log.Error().Err(err).Msg("Failed to start collector") - return - } + metricCollector := collector.InitCollector(session, client) + metricCollector.Start(ctx) - go func() { - for err := range collector.Errors() { - log.Error().Err(err).Msg("Collector error") - } - }() + for err := range metricCollector.Errors() { + log.Error().Err(err).Msgf("Collector error: %v", err) + } // Websocket Client wsClient := runner.NewWebsocketClient(session) diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index 5e90f60..904ca14 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -36,15 +36,15 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getCPUPerHour(ctx) + querySet, err := c.getCPUPerHour(ctx) if err != nil { return base.MetricData{}, err } data := base.CheckResult{ Timestamp: time.Now(), - Peak: queryset[0].Max, - Avg: queryset[0].AVG, + Peak: querySet[0].Max, + Avg: querySet[0].AVG, } metric := base.MetricData{ Type: base.CPU_PER_DAY, @@ -64,18 +64,18 @@ func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { now := time.Now() from := now.Add(-24 * time.Hour) - var queryset []base.CPUQuerySet + var querySet []base.CPUQuerySet err := client.CPUPerHour.Query(). Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)). 
Aggregate( ent.Max(cpuperhour.FieldPeak), ent.Mean(cpuperhour.FieldAvg), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) deleteCPUPerHour(ctx context.Context) error { diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index d99fb33..4450b7c 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -36,13 +36,13 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getDiskIOPerHour(ctx) + querySet, err := c.getDiskIOPerHour(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, @@ -70,7 +70,7 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er now := time.Now() from := now.Add(-24 * time.Hour) - var queryset []base.DiskIOQuerySet + var querySet []base.DiskIOQuerySet err := client.DiskIOPerHour.Query(). Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)). GroupBy(diskioperhour.FieldDevice). 
@@ -79,12 +79,12 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er ent.As(ent.Max(diskioperhour.FieldPeakWriteBps), "peak_write_bps"), ent.As(ent.Mean(diskioperhour.FieldAvgReadBps), "avg_read_bps"), ent.As(ent.Mean(diskioperhour.FieldAvgWriteBps), "avg_write_bps"), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index b6bc106..37d20b0 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -36,13 +36,13 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getDiskUsagePerHour(ctx) + querySet, err := c.getDiskUsagePerHour(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, @@ -68,19 +68,19 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS now := time.Now() from := now.Add(-24 * time.Hour) - var queryset []base.DiskUsageQuerySet + var querySet []base.DiskUsageQuerySet err := client.DiskUsagePerHour.Query(). Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). GroupBy(diskusageperhour.FieldDevice). 
Aggregate( ent.Max(diskusageperhour.FieldPeak), ent.Mean(diskusageperhour.FieldAvg), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index dd60aad..a654daf 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -36,15 +36,15 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getMemoryPerHour(ctx) + querySet, err := c.getMemoryPerHour(ctx) if err != nil { return base.MetricData{}, err } data := base.CheckResult{ Timestamp: time.Now(), - Peak: queryset[0].Max, - Avg: queryset[0].AVG, + Peak: querySet[0].Max, + Avg: querySet[0].AVG, } metric := base.MetricData{ Type: base.MEM_PER_DAY, @@ -64,18 +64,18 @@ func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, er now := time.Now() from := now.Add(-24 * time.Hour) - var queryset []base.MemoryQuerySet + var querySet []base.MemoryQuerySet err := client.MemoryPerHour.Query(). Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)). 
Aggregate( ent.Max(memoryperhour.FieldPeak), ent.Mean(memoryperhour.FieldAvg), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) deleteMemoryPerHour(ctx context.Context) error { diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index d16acda..a85ed34 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -36,13 +36,13 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getTrafficPerHour(ctx) + querySet, err := c.getTrafficPerHour(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Name: row.Name, @@ -74,7 +74,7 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, now := time.Now() from := now.Add(-24 * time.Hour) - var queryset []base.TrafficQuerySet + var querySet []base.TrafficQuerySet err := client.TrafficPerHour.Query(). Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)). GroupBy(trafficperhour.FieldName). 
@@ -87,12 +87,12 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, ent.As(ent.Mean(trafficperhour.FieldAvgInputBps), "avg_input_bps"), ent.As(ent.Mean(trafficperhour.FieldAvgOutputPps), "avg_output_pps"), ent.As(ent.Mean(trafficperhour.FieldAvgOutputBps), "avg_output_bps"), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) deleteTrafficPerHour(ctx context.Context) error { diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index c95fa44..d11bf88 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -36,15 +36,15 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getCPU(ctx) + querySet, err := c.getCPU(ctx) if err != nil { return base.MetricData{}, err } data := base.CheckResult{ Timestamp: time.Now(), - Peak: queryset[0].Max, - Avg: queryset[0].AVG, + Peak: querySet[0].Max, + Avg: querySet[0].AVG, } metric := base.MetricData{ Type: base.CPU_PER_HOUR, @@ -69,18 +69,18 @@ func (c *Check) getCPU(ctx context.Context) ([]base.CPUQuerySet, error) { now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []base.CPUQuerySet + var querySet []base.CPUQuerySet err := client.CPU.Query(). Where(cpu.TimestampGTE(from), cpu.TimestampLTE(now)). 
Aggregate( ent.Max(cpu.FieldUsage), ent.Mean(cpu.FieldUsage), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) saveCPUPerHour(data base.CheckResult, ctx context.Context) error { diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index 52287b6..a8c435a 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -36,13 +36,13 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryDiskIO(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getDiskIO(ctx) + querySet, err := c.getDiskIO(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, @@ -75,7 +75,7 @@ func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []base.DiskIOQuerySet + var querySet []base.DiskIOQuerySet err := client.DiskIO.Query(). Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). GroupBy(diskio.FieldDevice). 
@@ -84,12 +84,12 @@ func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { ent.As(ent.Max(diskio.FieldWriteBps), "peak_write_bps"), ent.As(ent.Mean(diskio.FieldReadBps), "avg_read_bps"), ent.As(ent.Mean(diskio.FieldWriteBps), "avg_write_bps"), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) saveDiskIOPerHour(data []base.CheckResult, ctx context.Context) error { diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index 14bc382..a14d676 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -37,13 +37,13 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getDiskUsage(ctx) + querySet, err := c.getDiskUsage(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, @@ -72,7 +72,7 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() - var queryset []base.DiskUsageQuerySet + var querySet []base.DiskUsageQuerySet err := client.DiskUsage.Query(). 
Modify(func(s *sql.Selector) { now := time.Now() @@ -99,12 +99,12 @@ func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, err sql.As(sql.Max("usage"), "max"), sql.As(sql.Avg("usage"), "avg"), ).From(subq).GroupBy("device") - }).Scan(ctx, &queryset) + }).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) saveDiskUsagePerHour(data []base.CheckResult, ctx context.Context) error { diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 5b2f276..f79f697 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -36,15 +36,15 @@ func (c *Check) Execute(ctx context.Context) error { } func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getMemory(ctx) + querySet, err := c.getMemory(ctx) if err != nil { return base.MetricData{}, err } data := base.CheckResult{ Timestamp: time.Now(), - Peak: queryset[0].Max, - Avg: queryset[0].AVG, + Peak: querySet[0].Max, + Avg: querySet[0].AVG, } metric := base.MetricData{ Type: base.MEM_PER_HOUR, @@ -69,18 +69,18 @@ func (c *Check) getMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { now := time.Now() from := now.Add(-1 * time.Hour) - var queryset []base.MemoryQuerySet + var querySet []base.MemoryQuerySet err := client.Memory.Query(). Where(memory.TimestampGTE(from), memory.TimestampLTE(now)). 
Aggregate( ent.Max(memory.FieldUsage), ent.Mean(memory.FieldUsage), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } func (c *Check) saveMemoryPerHour(data base.CheckResult, ctx context.Context) error { diff --git a/pkg/collector/check/realtime/disk/io/io_send.go b/pkg/collector/check/realtime/disk/io/io_send.go index 89c33b3..dadb885 100644 --- a/pkg/collector/check/realtime/disk/io/io_send.go +++ b/pkg/collector/check/realtime/disk/io/io_send.go @@ -30,13 +30,13 @@ func (c *SendCheck) Execute(ctx context.Context) error { } func (c *SendCheck) queryDiskIO(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getDiskIO(ctx) + querySet, err := c.getDiskIO(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Device: row.Device, @@ -60,7 +60,7 @@ func (c *SendCheck) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error now := time.Now() from := now.Add(-1 * interval * time.Second) - var queryset []base.DiskIOQuerySet + var querySet []base.DiskIOQuerySet err := client.DiskIO.Query(). Where(diskio.TimestampGTE(from), diskio.TimestampLTE(now)). GroupBy(diskio.FieldDevice). 
@@ -69,10 +69,10 @@ func (c *SendCheck) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error ent.As(ent.Max(diskio.FieldWriteBps), "peak_write_bps"), ent.As(ent.Mean(diskio.FieldReadBps), "avg_read_bps"), ent.As(ent.Mean(diskio.FieldWriteBps), "avg_write_bps"), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } diff --git a/pkg/collector/check/realtime/net/net_send.go b/pkg/collector/check/realtime/net/net_send.go index 01e7e6f..1e09999 100644 --- a/pkg/collector/check/realtime/net/net_send.go +++ b/pkg/collector/check/realtime/net/net_send.go @@ -30,13 +30,13 @@ func (c *SendCheck) Execute(ctx context.Context) error { } func (c *SendCheck) queryTraffic(ctx context.Context) (base.MetricData, error) { - queryset, err := c.getTraffic(ctx) + querySet, err := c.getTraffic(ctx) if err != nil { return base.MetricData{}, err } var data []base.CheckResult - for _, row := range queryset { + for _, row := range querySet { data = append(data, base.CheckResult{ Timestamp: time.Now(), Name: row.Name, @@ -64,7 +64,7 @@ func (c *SendCheck) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, err now := time.Now() from := now.Add(-1 * interval * time.Second) - var queryset []base.TrafficQuerySet + var querySet []base.TrafficQuerySet err := client.Traffic.Query(). Where(traffic.TimestampGTE(from), traffic.TimestampLTE(now)). GroupBy(traffic.FieldName). 
@@ -77,10 +77,10 @@ func (c *SendCheck) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, err ent.As(ent.Mean(traffic.FieldInputBps), "avg_input_bps"), ent.As(ent.Mean(traffic.FieldOutputPps), "avg_output_pps"), ent.As(ent.Mean(traffic.FieldOutputBps), "avg_output_bps"), - ).Scan(ctx, &queryset) + ).Scan(ctx, &querySet) if err != nil { - return queryset, err + return querySet, err } - return queryset, nil + return querySet, nil } diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index ef40945..c712949 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -93,26 +93,26 @@ func fetchConfig(session *session.Session) ([]collectConf, error) { } func NewCollector(args collectorArgs) (*Collector, error) { - transporter, err := args.transportFactory.CreateTransporter(args.session) + metricTransporter, err := args.transportFactory.CreateTransporter(args.session) if err != nil { return nil, err } checkBuffer := base.NewCheckBuffer(len(args.conf) * 2) - collector := &Collector{ - transporter: transporter, + metricCollector := &Collector{ + transporter: metricTransporter, scheduler: scheduler.NewScheduler(), buffer: checkBuffer, errorChan: make(chan error, 10), stopChan: make(chan struct{}), } - err = collector.initTasks(args) + err = metricCollector.initTasks(args) if err != nil { return nil, err } - return collector, nil + return metricCollector, nil } func (c *Collector) initTasks(args collectorArgs) error { @@ -127,16 +127,16 @@ func (c *Collector) initTasks(args collectorArgs) error { Client: args.client, } - check, err := args.checkFactory.CreateCheck(&checkArgs) + metricCheck, err := args.checkFactory.CreateCheck(&checkArgs) if err != nil { return err } - c.scheduler.AddTask(check) + c.scheduler.AddTask(metricCheck) } return nil } -func (c *Collector) Start(ctx context.Context) error { +func (c *Collector) Start(ctx context.Context) { go c.scheduler.Start(ctx, c.buffer.Capacity) for i := 0; i < c.buffer.Capacity; i++ { @@ 
-146,8 +146,6 @@ func (c *Collector) Start(ctx context.Context) error { c.wg.Add(1) go c.failureQueueWorker(ctx) - - return nil } func (c *Collector) successQueueWorker(ctx context.Context) { @@ -160,12 +158,9 @@ func (c *Collector) successQueueWorker(ctx context.Context) { case <-c.stopChan: return case metric := <-c.buffer.SuccessQueue: - if err := c.transporter.Send(metric); err != nil { - select { - case c.buffer.FailureQueue <- metric: - default: - c.errorChan <- fmt.Errorf("failed to move metric to failure queue: %v", err) - } + err := c.transporter.Send(metric) + if err != nil { + c.buffer.FailureQueue <- metric } } } diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index 1994adb..58cb5c8 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -2,9 +2,11 @@ package transporter import ( "fmt" + "net/http" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/scheduler" + "github.com/alpacanetworks/alpamon-go/pkg/utils" ) type TransportStrategy interface { @@ -49,11 +51,13 @@ func (t *Transporter) Send(data base.MetricData) error { resp, statusCode, err := t.session.Post(url, data.Data, 10) if err != nil { return err + } else if utils.IsSuccessStatusCode(statusCode) { + return nil + } else { + if statusCode == http.StatusBadRequest { + return fmt.Errorf("%d Bad Request: %s", statusCode, resp) + } else { + return fmt.Errorf("%s %s Error: %d %s", "POST", url, statusCode, resp) + } } - - if statusCode > 300 { - return fmt.Errorf("%d Bad Request: %s", statusCode, resp) - } - - return nil } diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index 4fe3bcf..7728794 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -29,7 +29,7 @@ func RunMigration(path string, ctx context.Context) error { log.Error().Err(err).Msgf("failed to open migration dir: %v", err) return err } - defer workDir.Close() + defer func() { _ = 
workDir.Close() }() client, err := atlasexec.NewClient(workDir.Path(), "atlas") if err != nil { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 5acd236..807fc61 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -3,8 +3,6 @@ package utils import ( "bytes" "fmt" - "math" - "math/rand" "net/url" "os" "runtime" @@ -112,13 +110,6 @@ func ConvertGroupIds(groupIds []string) []uint32 { return gids } -func CalculateBackOff(delay time.Duration, attempt int) time.Duration { - backoff := delay * time.Duration(math.Pow(2, float64(attempt))) - jitter := time.Duration(rand.Float64() * float64(backoff) * 0.2) - - return backoff * jitter -} - func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { if interval == 0 { return 0, 0 From 237f14d4b8ed9b1930dd8a1228d90b7638f60818 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 10 Jan 2025 16:05:29 +0900 Subject: [PATCH 147/364] Fix retry logic in collector's scheduler To simplify and improve readability of the retry logic with exponential backoff, RetryStatus has been added to ScheduledTask. 
--- pkg/collector/scheduler/scheduler.go | 82 ++++++++++++++++++---------- 1 file changed, 54 insertions(+), 28 deletions(-) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index 2e62c7a..bd622cc 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -2,18 +2,18 @@ package scheduler import ( "context" + "math" "sync" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" ) const ( - MAX_RETRIES int = 5 - MAX_RETRY_TIMES time.Duration = 1 * time.Minute - DEFAULT_DELAY time.Duration = 1 * time.Second + MAX_RETRIES = 5 + MAX_RETRY_TIMES = 1 * time.Minute + DEFAULT_DELAY = 1 * time.Second ) type Scheduler struct { @@ -24,9 +24,11 @@ type Scheduler struct { } type ScheduledTask struct { - check base.CheckStrategy - nextRun time.Time - interval time.Duration + check base.CheckStrategy + nextRun time.Time + retryStatus RetryStatus + isSuccess bool + interval time.Duration } type RetryConf struct { @@ -35,6 +37,12 @@ type RetryConf struct { Delay time.Duration } +type RetryStatus struct { + due time.Time + expiry time.Time + attempt int +} + func NewScheduler() *Scheduler { return &Scheduler{ retryConf: RetryConf{ @@ -49,10 +57,17 @@ func NewScheduler() *Scheduler { func (s *Scheduler) AddTask(check base.CheckStrategy) { interval := check.GetInterval() + retryStatus := RetryStatus{ + due: time.Now(), + expiry: time.Now().Add(s.retryConf.MaxRetryTime), + attempt: 0, + } task := &ScheduledTask{ - check: check, - nextRun: time.Now().Add(interval), - interval: interval, + check: check, + nextRun: time.Now().Add(interval), + retryStatus: retryStatus, + isSuccess: true, + interval: interval, } s.tasks.Store(check.GetName(), task) } @@ -89,8 +104,14 @@ func (s *Scheduler) dispatcher(ctx context.Context) { } if now.After(task.nextRun) { + task.nextRun = now.Add(task.interval) s.taskQueue <- task } + + if 
task.isRetryRequired(now) { + s.taskQueue <- task + } + return true }) } @@ -109,24 +130,29 @@ func (s *Scheduler) worker(ctx context.Context) { } func (s *Scheduler) executeTask(ctx context.Context, task *ScheduledTask) { - defer func() { - task.nextRun = time.Now().Add(task.interval) - }() - - for attempt := 0; attempt <= s.retryConf.MaxRetries; attempt++ { - err := task.check.Execute(ctx) - if err != nil { - log.Error().Err(err).Msgf("failed to execute check: %v", err) - if attempt < s.retryConf.MaxRetries { - backoff := utils.CalculateBackOff(s.retryConf.Delay, attempt) - select { - case <-time.After(backoff): - continue - case <-ctx.Done(): - return - } - } + err := task.check.Execute(ctx) + if err != nil { + log.Error().Err(err).Msgf("failed to execute check: %v", err) + + if task.retryStatus.attempt < s.retryConf.MaxRetries { + now := time.Now() + backoff := time.Duration(math.Pow(2, float64(task.retryStatus.attempt))) * time.Second + + task.isSuccess = false + task.retryStatus.due = now.Add(backoff) + task.retryStatus.expiry = now.Add(s.retryConf.MaxRetryTime) + task.retryStatus.attempt++ } - break + } else { + task.isSuccess = true + task.retryStatus.attempt = 0 } } + +func (st *ScheduledTask) isRetryRequired(now time.Time) bool { + isRetryTask := !st.isSuccess + isDue := now.After(st.retryStatus.due) + isExpire := now.After(st.retryStatus.expiry) + + return isRetryTask && isDue && !isExpire +} From 12ac49a3df423d728ec27fa37c02b945ed272270 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 10 Jan 2025 16:06:38 +0900 Subject: [PATCH 148/364] Add DBClientManager To manage the database client as a singleton, add DBClientManager. 
--- pkg/db/client.go | 37 ++++++++++++++++--------------------- pkg/db/db.go | 3 ++- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/pkg/db/client.go b/pkg/db/client.go index cfa530b..034351c 100644 --- a/pkg/db/client.go +++ b/pkg/db/client.go @@ -8,28 +8,23 @@ import ( _ "github.com/glebarez/go-sqlite" ) -var ( - client *ent.Client - once sync.Once - initErr error -) - -func GetClient(path string) (*ent.Client, error) { - once.Do(func() { - var err error - url := fmt.Sprintf("file:%s?cache=shared&__pragma=foreign_keys(1)", path) - client, err = ent.Open("sqlite3", url) - if err != nil { - initErr = err - client = nil - } - }) - return client, initErr +type DBClientManager struct { + client *ent.Client + once sync.Once + path string } -func Close() error { - if client != nil { - return client.Close() +func NewDBClientManager(path string) *DBClientManager { + return &DBClientManager{ + path: path, } - return nil +} + +func (cm *DBClientManager) GetClient() (*ent.Client, error) { + var err error + cm.once.Do(func() { + url := fmt.Sprintf("file:%s?cache=shared&__pragma=foreign_keys(1)", cm.path) + cm.client, err = ent.Open("sqlite3", url) + }) + return cm.client, err } diff --git a/pkg/db/db.go b/pkg/db/db.go index d624aae..b5f16b1 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -38,7 +38,8 @@ func InitDB(ctx context.Context) *ent.Client { os.Exit(1) } - client, err := GetClient(dbFile.Name()) + dbManager := NewDBClientManager(dbFile.Name()) + client, err := dbManager.GetClient() if err != nil { log.Error().Err(err).Msgf("failed to get db client: %v\n", err) os.Exit(1) From 4958b956eeabd5f7543d1a020a7d487b5f93a52a Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 13 Jan 2025 01:46:30 +0900 Subject: [PATCH 149/364] Add test cases for checks To ensure reliability and maintainability, add test cases for checks. 
--- .../check/batch/daily/cpu/daily_cpu_test.go | 57 +++++++++++ .../batch/daily/disk/io/daily_io_test.go | 64 ++++++++++++ .../daily/disk/usage/daily_usage_test.go | 59 +++++++++++ .../batch/daily/memory/daily_memory_test.go | 57 +++++++++++ .../check/batch/daily/net/daily_net_test.go | 72 ++++++++++++++ .../check/batch/hourly/cpu/hourly_cpu_test.go | 69 +++++++++++++ .../batch/hourly/disk/io/hourly_io_test.go | 86 ++++++++++++++++ .../hourly/disk/usage/hourly_usage_test.go | 88 +++++++++++++++++ .../batch/hourly/memory/hourly_memory_test.go | 69 +++++++++++++ .../check/batch/hourly/net/hourly_net_test.go | 98 +++++++++++++++++++ pkg/collector/check/realtime/cpu/cpu_test.go | 51 ++++++++++ .../check/realtime/disk/io/io_test.go | 72 ++++++++++++++ .../check/realtime/disk/usage/usage_test.go | 63 ++++++++++++ .../check/realtime/memory/memory_test.go | 51 ++++++++++ pkg/collector/check/realtime/net/net_test.go | 78 +++++++++++++++ 15 files changed, 1034 insertions(+) create mode 100644 pkg/collector/check/batch/daily/cpu/daily_cpu_test.go create mode 100644 pkg/collector/check/batch/daily/disk/io/daily_io_test.go create mode 100644 pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go create mode 100644 pkg/collector/check/batch/daily/memory/daily_memory_test.go create mode 100644 pkg/collector/check/batch/daily/net/daily_net_test.go create mode 100644 pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go create mode 100644 pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go create mode 100644 pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go create mode 100644 pkg/collector/check/batch/hourly/memory/hourly_memory_test.go create mode 100644 pkg/collector/check/batch/hourly/net/hourly_net_test.go create mode 100644 pkg/collector/check/realtime/cpu/cpu_test.go create mode 100644 pkg/collector/check/realtime/disk/io/io_test.go create mode 100644 pkg/collector/check/realtime/disk/usage/usage_test.go create mode 100644 
pkg/collector/check/realtime/memory/memory_test.go create mode 100644 pkg/collector/check/realtime/net/net_test.go diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go new file mode 100644 index 0000000..e7e70c1 --- /dev/null +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -0,0 +1,57 @@ +package cpu + +import ( + "context" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.CPU_PER_DAY, + Name: string(base.CPU_PER_DAY) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetCPUPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().CPUPerHour.Create(). + SetTimestamp(time.Now()). + SetPeak(50.0). + SetAvg(50.0).Exec(ctx) + assert.NoError(t, err, "Failed to create cpu usage per hour.") + + querySet, err := check.getCPUPerHour(ctx) + assert.NoError(t, err, "Failed to get cpu usage per hour.") + assert.NotEmpty(t, querySet, "CPUPerHour queryset should not be empty") +} + +func TestDeleteCPUPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().CPUPerHour.Create(). + SetTimestamp(time.Now().Add(-25 * time.Hour)). + SetPeak(50.0). 
+ SetAvg(50.0).Exec(ctx) + assert.NoError(t, err, "Failed to create cpu usage per hour.") + + err = check.deleteCPUPerHour(ctx) + assert.NoError(t, err, "Failed to delete cpu usage per hour.") +} diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go new file mode 100644 index 0000000..ab9f6e9 --- /dev/null +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -0,0 +1,64 @@ +package io + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.DISK_IO_PER_DAY, + Name: string(base.DISK_IO_PER_DAY) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetDiskIOPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskIOPerHour.Create(). + SetTimestamp(time.Now()). + SetDevice(uuid.NewString()). + SetPeakReadBps(rand.Float64()). + SetPeakWriteBps(rand.Float64()). + SetAvgReadBps(rand.Float64()). + SetAvgWriteBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create disk io per hour.") + + querySet, err := check.getDiskIOPerHour(ctx) + assert.NoError(t, err, "Failed to get disk io per hour.") + assert.NotEmpty(t, querySet, "DiskIOPerHour queryset should not be empty") +} + +func TestDeleteDiskIOPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskIOPerHour.Create(). + SetTimestamp(time.Now().Add(-25 * time.Hour)). + SetDevice(uuid.NewString()). + SetPeakReadBps(rand.Float64()). + SetPeakWriteBps(rand.Float64()). 
+ SetAvgReadBps(rand.Float64()). + SetAvgWriteBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create disk io per hour.") + + err = check.deleteDiskIOPerHour(ctx) + assert.NoError(t, err, "Failed to delete disk io per hour.") +} diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go new file mode 100644 index 0000000..c45a783 --- /dev/null +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -0,0 +1,59 @@ +package usage + +import ( + "context" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.DISK_USAGE_PER_DAY, + Name: string(base.DISK_USAGE_PER_DAY) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetDiskUsagePerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskUsagePerHour.Create(). + SetTimestamp(time.Now()). + SetDevice(uuid.NewString()). + SetPeak(50.0). + SetAvg(50.0).Exec(ctx) + assert.NoError(t, err, "Failed to create disk usage per hour.") + + querySet, err := check.getDiskUsagePerHour(ctx) + assert.NoError(t, err, "Failed to get disk usage per hour.") + assert.NotEmpty(t, querySet, "DiskUsagePerHour queryset should not be empty") +} + +func TestDeleteDiskUsagePerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskUsagePerHour.Create(). + SetTimestamp(time.Now().Add(-25 * time.Hour)). + SetDevice(uuid.NewString()). + SetPeak(50.0). 
+ SetAvg(50.0).Exec(ctx) + assert.NoError(t, err, "Failed to create disk usage per hour.") + + err = check.deleteDiskUsagePerHour(ctx) + assert.NoError(t, err, "Failed to delete disk usage per hour.") +} diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go new file mode 100644 index 0000000..a810840 --- /dev/null +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -0,0 +1,57 @@ +package memory + +import ( + "context" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.MEM_PER_DAY, + Name: string(base.MEM_PER_DAY) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetMemoryPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().MemoryPerHour.Create(). + SetTimestamp(time.Now()). + SetPeak(50.0). + SetAvg(50.0).Exec(ctx) + assert.NoError(t, err, "Failed to create memory usage per hour.") + + querySet, err := check.getMemoryPerHour(ctx) + assert.NoError(t, err, "Failed to get memory usage per hour.") + assert.NotEmpty(t, querySet, "MemoryPerHour queryset should not be empty") +} + +func TestDeleteMemoryPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().MemoryPerHour.Create(). + SetTimestamp(time.Now().Add(-25 * time.Hour)). + SetPeak(50.0). 
+ SetAvg(50.0).Exec(ctx) + assert.NoError(t, err, "Failed to create memory usage per hour.") + + err = check.deleteMemoryPerHour(ctx) + assert.NoError(t, err, "Failed to delete memory usage per hour.") +} diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go new file mode 100644 index 0000000..c8e36ea --- /dev/null +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -0,0 +1,72 @@ +package net + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.NET_PER_DAY, + Name: string(base.NET_PER_DAY) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetTrafficPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().TrafficPerHour.Create(). + SetTimestamp(time.Now()). + SetName(uuid.NewString()). + SetPeakInputPps(rand.Float64()). + SetPeakInputBps(rand.Float64()). + SetPeakOutputPps(rand.Float64()). + SetPeakOutputBps(rand.Float64()). + SetAvgInputPps(rand.Float64()). + SetAvgInputBps(rand.Float64()). + SetAvgOutputPps(rand.Float64()). + SetAvgOutputBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create traffic per hour.") + + querySet, err := check.getTrafficPerHour(ctx) + assert.NoError(t, err, "Failed to get traffic per hour.") + assert.NotEmpty(t, querySet, "TrafficPerHour queryset should not be empty") +} + +func TestDeleteTrafficPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().TrafficPerHour.Create(). 
+ SetTimestamp(time.Now().Add(-25 * time.Hour)). + SetName(uuid.NewString()). + SetPeakInputPps(rand.Float64()). + SetPeakInputBps(rand.Float64()). + SetPeakOutputPps(rand.Float64()). + SetPeakOutputBps(rand.Float64()). + SetAvgInputPps(rand.Float64()). + SetAvgInputBps(rand.Float64()). + SetAvgOutputPps(rand.Float64()). + SetAvgOutputBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create traffic per hour.") + + err = check.deleteTrafficPerHour(ctx) + assert.NoError(t, err, "Failed to delete traffic per hour.") +} diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go new file mode 100644 index 0000000..09d4762 --- /dev/null +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -0,0 +1,69 @@ +package cpu + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.CPU_PER_HOUR, + Name: string(base.CPU_PER_HOUR) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetCPU(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().CPU.Create(). + SetTimestamp(time.Now()). 
+ SetUsage(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create cpu usage.") + + querySet, err := check.getCPU(ctx) + assert.NoError(t, err, "Failed to get cpu usage.") + assert.NotEmpty(t, querySet, "CPU queryset should not be empty") +} + +func TestSaveCPUPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + data := base.CheckResult{ + Timestamp: time.Now(), + Peak: 50.0, + Avg: 50.0, + } + + err := check.saveCPUPerHour(data, ctx) + assert.NoError(t, err, "Failed to save cpu usage per hour.") +} + +func TestDeleteCPU(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().CPU.Create(). + SetTimestamp(time.Now().Add(-2 * time.Hour)). + SetUsage(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create cpu usage.") + + err = check.deleteCPU(ctx) + assert.NoError(t, err, "Failed to delete cpu usage.") +} diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go new file mode 100644 index 0000000..562c9ec --- /dev/null +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -0,0 +1,86 @@ +package io + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.DISK_IO_PER_HOUR, + Name: string(base.DISK_IO_PER_HOUR) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetDiskIO(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskIO.Create(). + SetTimestamp(time.Now()). + SetDevice(uuid.NewString()). 
+ SetReadBps(rand.Float64()). + SetWriteBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create disk io.") + + querySet, err := check.getDiskIO(ctx) + assert.NoError(t, err, "Failed to get disk io.") + assert.NotEmpty(t, querySet, "Disk io queryset should not be empty") +} + +func TestSaveDiskIOPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + data := []base.CheckResult{ + { + Timestamp: time.Now(), + Device: uuid.NewString(), + PeakWriteBps: rand.Float64(), + PeakReadBps: rand.Float64(), + AvgWriteBps: rand.Float64(), + AvgReadBps: rand.Float64(), + }, + { + Timestamp: time.Now(), + Device: uuid.NewString(), + PeakWriteBps: rand.Float64(), + PeakReadBps: rand.Float64(), + AvgWriteBps: rand.Float64(), + AvgReadBps: rand.Float64(), + }, + } + + err := check.saveDiskIOPerHour(data, ctx) + assert.NoError(t, err, "Failed to save disk io per hour.") +} + +func TestDeleteDiskIO(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskIO.Create(). + SetTimestamp(time.Now().Add(-2 * time.Hour)). + SetDevice(uuid.NewString()). + SetReadBps(rand.Float64()). 
+ SetWriteBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create disk io.") + + err = check.deleteDiskIO(ctx) + assert.NoError(t, err, "Failed to delete disk io.") +} diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go new file mode 100644 index 0000000..735f1f3 --- /dev/null +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -0,0 +1,88 @@ +package usage + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.DISK_USAGE_PER_HOUR, + Name: string(base.DISK_USAGE_PER_HOUR) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetDiskUsage(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskUsage.Create(). + SetTimestamp(time.Now()). + SetDevice(uuid.NewString()). + SetMountPoint(uuid.NewString()). + SetUsage(rand.Float64()). + SetTotal(int64(rand.Int())). + SetFree(int64(rand.Int())). 
+ SetUsed(int64(rand.Int())).Exec(ctx) + assert.NoError(t, err, "Failed to create disk usage.") + + querySet, err := check.getDiskUsage(ctx) + assert.NoError(t, err, "Failed to get disk usage.") + assert.NotEmpty(t, querySet, "Disk usage queryset should not be empty") +} + +func TestSaveDiskUsagePerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + data := []base.CheckResult{ + { + Timestamp: time.Now(), + Device: uuid.NewString(), + Peak: 50.0, + Avg: 50.0, + }, + { + Timestamp: time.Now(), + Device: uuid.NewString(), + Peak: 50.0, + Avg: 50.0, + }, + } + + err := check.saveDiskUsagePerHour(data, ctx) + assert.NoError(t, err, "Failed to save disk usage per hour.") +} + +func TestDeleteDiskUsage(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().DiskUsage.Create(). + SetTimestamp(time.Now().Add(-2 * time.Hour)). + SetDevice(uuid.NewString()). + SetMountPoint(uuid.NewString()). + SetUsage(rand.Float64()). + SetTotal(int64(rand.Int())). + SetFree(int64(rand.Int())). 
+ SetUsed(int64(rand.Int())).Exec(ctx) + assert.NoError(t, err, "Failed to create disk usage.") + + err = check.deleteDiskUsage(ctx) + assert.NoError(t, err, "Failed to delete disk usage.") +} diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go new file mode 100644 index 0000000..172d6aa --- /dev/null +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -0,0 +1,69 @@ +package memory + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.MEM_PER_HOUR, + Name: string(base.MEM_PER_HOUR) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetMemory(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().CPU.Create(). + SetTimestamp(time.Now()). + SetUsage(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create memory usage.") + + querySet, err := check.getMemory(ctx) + assert.NoError(t, err, "Failed to get memory usage.") + assert.NotEmpty(t, querySet, "Memory queryset should not be empty") +} + +func TestSaveMemoryPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + data := base.CheckResult{ + Timestamp: time.Now(), + Peak: 50.0, + Avg: 50.0, + } + + err := check.saveMemoryPerHour(data, ctx) + assert.NoError(t, err, "Failed to save memory usage per hour.") +} + +func TestDeleteMemory(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().Memory.Create(). + SetTimestamp(time.Now().Add(-2 * time.Hour)). 
+ SetUsage(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create memory usage.") + + err = check.deleteMemory(ctx) + assert.NoError(t, err, "Failed to delete memory usage.") +} diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go new file mode 100644 index 0000000..13c69cd --- /dev/null +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -0,0 +1,98 @@ +package net + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.NET_PER_HOUR, + Name: string(base.NET_PER_HOUR) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestGetTraffic(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().Traffic.Create(). + SetTimestamp(time.Now()). + SetName(uuid.NewString()). + SetInputPps(rand.Float64()). + SetInputBps(rand.Float64()). + SetOutputPps(rand.Float64()). 
+ SetOutputBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create traffic.") + + querySet, err := check.getTraffic(ctx) + assert.NoError(t, err, "Failed to get traffic.") + assert.NotEmpty(t, querySet, "Traffic queryset should not be empty") +} + +func TestSaveTrafficPerHour(t *testing.T) { + check := setUp() + ctx := context.Background() + data := []base.CheckResult{ + { + Timestamp: time.Now(), + Name: uuid.NewString(), + PeakInputPps: rand.Float64(), + PeakInputBps: rand.Float64(), + AvgInputPps: rand.Float64(), + AvgInputBps: rand.Float64(), + PeakOutputPps: rand.Float64(), + PeakOutputBps: rand.Float64(), + AvgOutputPps: rand.Float64(), + AvgOutputBps: rand.Float64(), + }, + { + Timestamp: time.Now(), + Name: uuid.NewString(), + PeakInputPps: rand.Float64(), + PeakInputBps: rand.Float64(), + AvgInputPps: rand.Float64(), + AvgInputBps: rand.Float64(), + PeakOutputPps: rand.Float64(), + PeakOutputBps: rand.Float64(), + AvgOutputPps: rand.Float64(), + AvgOutputBps: rand.Float64(), + }, + } + + err := check.saveTrafficPerHour(data, ctx) + assert.NoError(t, err, "Failed to save traffic per hour.") +} + +func TestDeleteTraffic(t *testing.T) { + check := setUp() + ctx := context.Background() + + err := check.GetClient().Traffic.Create(). + SetTimestamp(time.Now().Add(-2 * time.Hour)). + SetName(uuid.NewString()). + SetInputPps(rand.Float64()). + SetInputBps(rand.Float64()). + SetOutputPps(rand.Float64()). 
+ SetOutputBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create traffic.") + + err = check.deleteTraffic(ctx) + assert.NoError(t, err, "Failed to delete traffic.") +} diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go new file mode 100644 index 0000000..f4fdccf --- /dev/null +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -0,0 +1,51 @@ +package cpu + +import ( + "context" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.CPU, + Name: string(base.CPU) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestCollectCPUUsage(t *testing.T) { + check := setUp() + + usage, err := check.collectCPUUsage() + + assert.NoError(t, err, "Failed to get cpu usage.") + assert.GreaterOrEqual(t, usage, 0.0, "CPU usage should be non-negative.") + assert.LessOrEqual(t, usage, 100.0, "CPU usage should not exceed 100%.") +} + +func TestSaveCPUUsage(t *testing.T) { + check := setUp() + ctx := context.Background() + data := base.CheckResult{ + Timestamp: time.Now(), + Usage: 50.0, + } + + err := check.saveCPUUsage(data, ctx) + + assert.NoError(t, err, "Failed to save cpu usage.") +} diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go new file mode 100644 index 0000000..3c98a4e --- /dev/null +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -0,0 +1,72 @@ +package diskio + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + 
"github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp(checkType base.CheckType) base.CheckStrategy { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: checkType, + Name: string(checkType) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args) + + return check +} + +func TestCollectDiskIO(t *testing.T) { + check := setUp(base.DISK_IO_COLLECTOR).(*CollectCheck) + + ioCounters, err := check.collectDiskIO() + assert.NoError(t, err, "Failed to get disk io.") + + assert.NotEmpty(t, ioCounters, "Disk IO should not be empty") + for name, ioCounter := range ioCounters { + assert.NotEmpty(t, name, "Device name should not be empty") + assert.True(t, ioCounter.ReadBytes > 0, "Read bytes should be non-negative.") + assert.True(t, ioCounter.WriteBytes > 0, "Write bytes should be non-negative.") + } +} + +func TestSaveDiskIO(t *testing.T) { + check := setUp(base.DISK_IO_COLLECTOR).(*CollectCheck) + ctx := context.Background() + + ioCounters, err := check.collectDiskIO() + assert.NoError(t, err, "Failed to get disk io.") + + data := check.parseDiskIO(ioCounters) + + err = check.saveDiskIO(data, ctx) + assert.NoError(t, err, "Failed to save cpu usage.") +} + +func TestGetDiskIO(t *testing.T) { + check := setUp(base.DISK_IO).(*SendCheck) + ctx := context.Background() + + err := check.GetClient().DiskIO.Create(). + SetTimestamp(time.Now()). + SetDevice(uuid.NewString()). + SetReadBps(rand.Float64()). 
+ SetWriteBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create disk io.") + + querySet, err := check.getDiskIO(ctx) + assert.NoError(t, err, "Failed to get disk io queryset.") + assert.NotEmpty(t, querySet, "Disk IO queryset should not be empty") +} diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go new file mode 100644 index 0000000..60839da --- /dev/null +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -0,0 +1,63 @@ +package diskusage + +import ( + "context" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.DISK_USAGE, + Name: string(base.DISK_USAGE) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestCollectDiskPartitions(t *testing.T) { + check := setUp() + + partitions, err := check.collectDiskPartitions() + + assert.NoError(t, err, "Failed to get disk partitions.") + assert.NotEmpty(t, partitions, "Disk partitions should not be empty") +} + +func TestCollectDiskUsage(t *testing.T) { + check := setUp() + + partitions, err := check.collectDiskPartitions() + assert.NoError(t, err, "Failed to get disk partitions.") + + assert.NotEmpty(t, partitions, "Disk partitions should not be empty") + for _, partition := range partitions { + usage, err := check.collectDiskUsage(partition.Mountpoint) + assert.NoError(t, err, "Failed to get disk usage.") + assert.GreaterOrEqual(t, usage.UsedPercent, 0.0, "Disk usage should be non-negative.") + assert.LessOrEqual(t, usage.UsedPercent, 100.0, "Disk usage should not exceed 100%.") + } +} + +func 
TestSaveDiskUsage(t *testing.T) { + check := setUp() + ctx := context.Background() + + partitions, err := check.collectDiskPartitions() + assert.NoError(t, err, "Failed to get disk partitions.") + + err = check.saveDiskUsage(check.parseDiskUsage(partitions), ctx) + assert.NoError(t, err, "Failed to save disk usage.") +} diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go new file mode 100644 index 0000000..8c76bc4 --- /dev/null +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -0,0 +1,51 @@ +package memory + +import ( + "context" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp() *Check { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: base.MEM, + Name: string(base.MEM) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args).(*Check) + + return check +} + +func TestCollectMemoryUsage(t *testing.T) { + check := setUp() + + usage, err := check.collectMemoryUsage() + + assert.NoError(t, err, "Failed to get memory usage.") + assert.GreaterOrEqual(t, usage, 0.0, "Memory usage should be non-negative.") + assert.LessOrEqual(t, usage, 100.0, "Memory usage should not exceed 100%.") +} + +func TestSaveMemoryUsage(t *testing.T) { + check := setUp() + ctx := context.Background() + data := base.CheckResult{ + Timestamp: time.Now(), + Usage: 50.0, + } + + err := check.saveMemoryUsage(data, ctx) + + assert.NoError(t, err, "Failed to save memory usage.") +} diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go new file mode 100644 index 0000000..01e5f05 --- /dev/null +++ b/pkg/collector/check/realtime/net/net_test.go @@ -0,0 +1,78 @@ +package net + 
+import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func setUp(checkType base.CheckType) base.CheckStrategy { + buffer := base.NewCheckBuffer(10) + ctx := context.Background() + args := &base.CheckArgs{ + Type: checkType, + Name: string(checkType) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: db.InitDB(ctx), + } + + check := NewCheck(args) + + return check +} + +func TestCollectIOCounters(t *testing.T) { + check := setUp(base.NET_COLLECTOR).(*CollectCheck) + + ioCounters, err := check.collectIOCounters() + assert.NoError(t, err, "Failed to get network IO.") + assert.NotEmpty(t, ioCounters, "Network IO should not be empty") +} + +func TestCollectInterfaces(t *testing.T) { + check := setUp(base.NET_COLLECTOR).(*CollectCheck) + + interfaces, err := check.collectInterfaces() + assert.NoError(t, err, "Failed to get interfaces.") + assert.NotEmpty(t, interfaces, "Interfaces should not be empty") +} + +func TestSaveTraffic(t *testing.T) { + check := setUp(base.NET_COLLECTOR).(*CollectCheck) + ctx := context.Background() + + ioCounters, interfaces, err := check.collectTraffic() + assert.NoError(t, err, "Failed to get traffic.") + assert.NotEmpty(t, ioCounters, "Network IO should not be empty") + assert.NotEmpty(t, interfaces, "Interfaces should not be empty") + + data := check.parseTraffic(ioCounters, interfaces) + + err = check.saveTraffic(data, ctx) + assert.NoError(t, err, "Failed to save traffic.") +} + +func TestGetTraffic(t *testing.T) { + check := setUp(base.NET).(*SendCheck) + ctx := context.Background() + + err := check.GetClient().Traffic.Create(). + SetTimestamp(time.Now()). + SetName(uuid.NewString()). + SetInputPps(rand.Float64()). + SetInputBps(rand.Float64()). + SetOutputPps(rand.Float64()). 
+ SetOutputBps(rand.Float64()).Exec(ctx) + assert.NoError(t, err, "Failed to create traffic.") + + querySet, err := check.getTraffic(ctx) + assert.NoError(t, err, "Failed to get traffic queryset.") + assert.NotEmpty(t, querySet, "Traffic queryset should not be empty") +} From 94ba7e5d42af1153056e09e15880df025f9aaf25 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 13 Jan 2025 11:42:40 +0900 Subject: [PATCH 150/364] Add context for migration The context used in the migration process has been updated to limit 5, superseding the context originally defined in root.go. --- pkg/db/db.go | 8 ++++-- pkg/db/migrate.go | 64 +++++++++++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index b5f16b1..73cb57e 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/glebarez/go-sqlite" @@ -17,7 +18,7 @@ const ( dbFileName = "alpamon.db" ) -func InitDB(ctx context.Context) *ent.Client { +func InitDB(parentCtx context.Context) *ent.Client { fileName := fmt.Sprintf("%s/%s", dbDir, dbFileName) if _, err := os.Stat(dbDir); os.IsNotExist(err) { fileName, _ = filepath.Abs(dbFileName) @@ -32,7 +33,10 @@ func InitDB(ctx context.Context) *ent.Client { sql.Register("sqlite3", &sqlite.Driver{}) - err = RunMigration(dbFile.Name(), ctx) + migrationCtx, migrationCancle := context.WithTimeout(parentCtx, 5*time.Minute) + defer migrationCancle() + + err = RunMigration(dbFile.Name(), migrationCtx) if err != nil { log.Error().Err(err).Msgf("failed to migrate db: %v\n", err) os.Exit(1) diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index 7728794..43cca81 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -14,41 +14,51 @@ import ( var migrations embed.FS func RunMigration(path string, ctx context.Context) error { - migrationFS, err := getMigrationDir() - if err != nil { - 
log.Error().Err(err).Msg("failed to get migration filesystem") + if err := ctx.Err(); err != nil { + log.Error().Err(err).Msgf("context cancelled before migration: %v", err) return err } - workDir, err := atlasexec.NewWorkingDir( - atlasexec.WithMigrations( - migrationFS, - ), - ) - if err != nil { - log.Error().Err(err).Msgf("failed to open migration dir: %v", err) - return err - } - defer func() { _ = workDir.Close() }() + select { + case <-ctx.Done(): + return ctx.Err() + default: + migrationFS, err := getMigrationDir() + if err != nil { + log.Error().Err(err).Msg("failed to get migration filesystem") + return err + } - client, err := atlasexec.NewClient(workDir.Path(), "atlas") - if err != nil { - log.Error().Err(err).Msgf("failed to get atlas client: %v", err) - return err - } + workDir, err := atlasexec.NewWorkingDir( + atlasexec.WithMigrations( + migrationFS, + ), + ) + if err != nil { + log.Error().Err(err).Msgf("failed to open migration dir: %v", err) + return err + } + defer func() { _ = workDir.Close() }() - url := fmt.Sprintf("sqlite://%s", path) + client, err := atlasexec.NewClient(workDir.Path(), "atlas") + if err != nil { + log.Error().Err(err).Msgf("failed to get atlas client: %v", err) + return err + } - _, err = client.MigrateApply(ctx, &atlasexec.MigrateApplyParams{ - URL: url, - }) + url := fmt.Sprintf("sqlite://%s", path) - if err != nil { - log.Error().Err(err).Msgf("failed to migrate db: %v", err) - return err - } + _, err = client.MigrateApply(ctx, &atlasexec.MigrateApplyParams{ + URL: url, + }) + + if err != nil { + log.Error().Err(err).Msgf("failed to migrate db: %v", err) + return err + } - return nil + return nil + } } func getMigrationDir() (fs.FS, error) { From 333d54f34f643fa1fa004e50e2840fd4819cee2d Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 13 Jan 2025 15:06:34 +0900 Subject: [PATCH 151/364] Prevent infinite retries by setting 3-day timeout --- pkg/runner/client.go | 34 ++++++++++++++++++--------- pkg/scheduler/session.go | 
50 ++++++++++++++++++++++++---------------- 2 files changed, 53 insertions(+), 31 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 5a1fbb3..c76726d 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -1,6 +1,7 @@ package runner import ( + "context" "encoding/json" "fmt" "github.com/alpacanetworks/alpamon-go/pkg/config" @@ -9,6 +10,7 @@ import ( "github.com/gorilla/websocket" "github.com/rs/zerolog/log" "net/http" + "os" "time" ) @@ -16,6 +18,7 @@ const ( minConnectInterval = 5 * time.Second maxConnectInterval = 60 * time.Second connectionReadTimeout = 35 * time.Minute + maxRetryTimeout = 3 * 24 * time.Hour eventCommandAckURL = "/api/events/commands/%s/ack/" eventCommandFinURL = "/api/events/commands/%s/fin/" @@ -89,6 +92,9 @@ func (wc *WebsocketClient) readMessage() (messageType int, message []byte, err e func (wc *WebsocketClient) connect() { log.Info().Msgf("Connecting to websocket at %s...", config.GlobalSettings.WSPath) + ctx, cancel := context.WithTimeout(context.Background(), maxRetryTimeout) + defer cancel() + wsBackoff := backoff.NewExponentialBackOff() wsBackoff.InitialInterval = minConnectInterval wsBackoff.MaxInterval = maxConnectInterval @@ -96,21 +102,27 @@ func (wc *WebsocketClient) connect() { wsBackoff.RandomizationFactor = 0 // Retry forever operation := func() error { - conn, _, err := websocket.DefaultDialer.Dial(config.GlobalSettings.WSPath, wc.requestHeader) - if err != nil { - nextInterval := wsBackoff.NextBackOff() - log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds.", config.GlobalSettings.WSPath, int(nextInterval.Seconds())) - return err - } + select { + case <-ctx.Done(): + log.Error().Msg("Maximum retry duration reached. 
Shutting down.") + return ctx.Err() + default: + conn, _, err := websocket.DefaultDialer.Dial(config.GlobalSettings.WSPath, wc.requestHeader) + if err != nil { + nextInterval := wsBackoff.NextBackOff() + log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds.", config.GlobalSettings.WSPath, int(nextInterval.Seconds())) + return err + } - wc.conn = conn - log.Debug().Msg("Backhaul connection established.") - return nil + wc.conn = conn + log.Debug().Msg("Backhaul connection established.") + return nil + } } - err := backoff.Retry(operation, wsBackoff) + err := backoff.Retry(operation, backoff.WithContext(wsBackoff, ctx)) if err != nil { - log.Error().Err(err).Msg("Unexpected error occurred during backoff.") + os.Exit(1) return } } diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index f2ea087..296ec56 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -2,6 +2,7 @@ package scheduler import ( "bytes" + "context" "crypto/tls" "crypto/x509" "encoding/json" @@ -18,6 +19,7 @@ import ( const ( checkSessionURL = "/api/servers/servers/-/" + maxRetryTimeout = 3 * 24 * time.Hour ) func InitSession() *Session { @@ -51,31 +53,39 @@ func InitSession() *Session { func (session *Session) CheckSession() bool { timeout := config.MinConnectInterval + ctx, cancel := context.WithTimeout(context.Background(), maxRetryTimeout) + defer cancel() for { - resp, _, err := session.Get(checkSessionURL, 5) - if err != nil { - log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds", config.GlobalSettings.ServerURL, int(timeout.Seconds())) - time.Sleep(timeout) - timeout *= 2 - if timeout > config.MaxConnectInterval { - timeout = config.MaxConnectInterval + select { + case <-ctx.Done(): + log.Error().Msg("Maximum retry duration reached. 
Shutting down.") + os.Exit(1) + default: + resp, _, err := session.Get(checkSessionURL, 5) + if err != nil { + log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds", config.GlobalSettings.ServerURL, int(timeout.Seconds())) + time.Sleep(timeout) + timeout *= 2 + if timeout > config.MaxConnectInterval { + timeout = config.MaxConnectInterval + } + continue } - continue - } - var response map[string]interface{} - err = json.Unmarshal(resp, &response) - if err != nil { - log.Debug().Err(err).Msg("Failed to unmarshal JSON") - continue - } + var response map[string]interface{} + err = json.Unmarshal(resp, &response) + if err != nil { + log.Debug().Err(err).Msg("Failed to unmarshal JSON") + continue + } - if commissioned, ok := response["commissioned"].(bool); ok { - return commissioned - } else { - log.Error().Msg("Unable to find 'commissioned' field in the response") - continue + if commissioned, ok := response["commissioned"].(bool); ok { + return commissioned + } else { + log.Error().Msg("Unable to find 'commissioned' field in the response") + continue + } } } } From 1bd2131593e2ca272e5bf060467f694c57695238 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 13 Jan 2025 15:58:21 +0900 Subject: [PATCH 152/364] Add exception handling for hidden file Exception handling has been added to ensure hidden files are only displayed when the "show hidden" field is true. 
--- pkg/runner/ftp.go | 14 +++++++++----- pkg/runner/ftp_types.go | 11 ++++++----- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 4e8ffab..d3da81a 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -134,7 +134,7 @@ func (fc *FtpClient) close() { func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (CommandResult, error) { switch command { case List: - return fc.list(data.Path, data.Depth) + return fc.list(data.Path, data.Depth, data.ShowHidden) case Mkd: return fc.mkd(data.Path) case Cwd: @@ -168,13 +168,13 @@ func (fc *FtpClient) parsePath(path string) string { return parsedPath } -func (fc *FtpClient) list(rootDir string, depth int) (CommandResult, error) { +func (fc *FtpClient) list(rootDir string, depth int, showHidden bool) (CommandResult, error) { path := fc.parsePath(rootDir) - cmdResult, err := fc.listRecursive(path, depth, 0) + cmdResult, err := fc.listRecursive(path, depth, 0, showHidden) return cmdResult, err } -func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResult, error) { +func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden bool) (CommandResult, error) { if depth > 3 { return CommandResult{ Message: ErrTooLargeDepth, @@ -203,6 +203,10 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu } for _, entry := range entries { + if !showHidden && strings.HasPrefix(entry.Name(), ".") { + continue + } + fullPath := filepath.Join(path, entry.Name()) info, err := os.Lstat(fullPath) if err != nil { @@ -233,7 +237,7 @@ func (fc *FtpClient) listRecursive(path string, depth, current int) (CommandResu if entry.IsDir() { child.Type = "folder" if current < depth-1 { - childResult, err := fc.listRecursive(fullPath, depth, current+1) + childResult, err := fc.listRecursive(fullPath, depth, current+1, showHidden) if err != nil { result.Children = append(result.Children, childResult) continue diff --git 
a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 503c9e0..52c816b 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -38,11 +38,12 @@ type FtpConfigData struct { } type FtpData struct { - Path string `json:"path,omitempty"` - Depth int `json:"depth,omitempty"` - Recursive bool `json:"recursive,omitempty"` - Src string `json:"src,omitempty"` - Dst string `json:"dst,omitempty"` + Path string `json:"path,omitempty"` + Depth int `json:"depth,omitempty"` + Recursive bool `json:"recursive,omitempty"` + ShowHidden bool `json:"show_hidden,omitempty"` + Src string `json:"src,omitempty"` + Dst string `json:"dst,omitempty"` } type FtpContent struct { From f23ead09fe266d264691f1d299358e3d83756aa3 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 13 Jan 2025 16:52:53 +0900 Subject: [PATCH 153/364] Minor fix --- pkg/collector/collector.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index c712949..635a8d3 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -117,12 +117,10 @@ func NewCollector(args collectorArgs) (*Collector, error) { func (c *Collector) initTasks(args collectorArgs) error { for _, entry := range args.conf { - duration := time.Duration(entry.Interval) * time.Second - name := string(entry.Type) + "_" + uuid.NewString() checkArgs := base.CheckArgs{ Type: entry.Type, - Name: name, - Interval: time.Duration(duration.Seconds() * float64(time.Second)), + Name: fmt.Sprintf("%s_%s", entry.Type, uuid.NewString()), + Interval: time.Duration(entry.Interval) * time.Second, Buffer: c.buffer, Client: args.client, } From 5c2ac6f78bf3579885eec66fff8ab99c6302a824 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 13 Jan 2025 17:04:55 +0900 Subject: [PATCH 154/364] Minor fix --- pkg/collector/collector.go | 2 +- pkg/config/config.go | 4 ++-- pkg/pidfile/pidfile.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/pkg/collector/collector.go b/pkg/collector/collector.go index 635a8d3..42e094b 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -191,7 +191,7 @@ func (c *Collector) retryWithBackoff(ctx context.Context, metric base.MetricData for retryCount < maxRetryCount { select { case <-ctx.Done(): - return nil + return ctx.Err() case <-time.After(time.Duration(1< Date: Mon, 13 Jan 2025 17:10:46 +0900 Subject: [PATCH 155/364] Fix list command Fix list command to omit folder size from its output. --- pkg/runner/ftp.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index d3da81a..33fced5 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -185,7 +185,6 @@ func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden b Name: filepath.Base(path), Type: "folder", Path: path, - Size: int64(0), ModTime: nil, Children: []CommandResult{}, } @@ -230,7 +229,6 @@ func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden b Name: entry.Name(), Path: fullPath, Code: returnCodes[List].Success, - Size: info.Size(), ModTime: &modTime, } @@ -247,10 +245,10 @@ func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden b } else { child.Type = "file" child.Code = returnCodes[List].Success + child.Size = info.Size() } result.Children = append(result.Children, child) - result.Size += child.Size } dirInfo, err := os.Stat(path) From 4c5a6005c69f57d31c3141752c31da3485816200 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 13 Jan 2025 17:18:12 +0900 Subject: [PATCH 156/364] Minor fix --- pkg/collector/scheduler/scheduler.go | 12 ++++++------ pkg/collector/transporter/transporter.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index bd622cc..0c8e9af 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -11,9 +11,9 
@@ import ( ) const ( - MAX_RETRIES = 5 - MAX_RETRY_TIMES = 1 * time.Minute - DEFAULT_DELAY = 1 * time.Second + MaxRetries = 5 + MaxRetryTimes = 1 * time.Minute + DefaultDelay = 1 * time.Second ) type Scheduler struct { @@ -46,9 +46,9 @@ type RetryStatus struct { func NewScheduler() *Scheduler { return &Scheduler{ retryConf: RetryConf{ - MaxRetries: MAX_RETRIES, - MaxRetryTime: MAX_RETRY_TIMES, - Delay: DEFAULT_DELAY, + MaxRetries: MaxRetries, + MaxRetryTime: MaxRetryTimes, + Delay: DefaultDelay, }, taskQueue: make(chan *ScheduledTask), stopChan: make(chan struct{}), diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index 58cb5c8..ee6d38f 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -57,7 +57,7 @@ func (t *Transporter) Send(data base.MetricData) error { if statusCode == http.StatusBadRequest { return fmt.Errorf("%d Bad Request: %s", statusCode, resp) } else { - return fmt.Errorf("%s %s Error: %d %s", "POST", url, statusCode, resp) + return fmt.Errorf("%s %s Error: %d %s", http.MethodPost, url, statusCode, resp) } } } From 312542b5901179cc728ff97ab7f43b0029addad0 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 13 Jan 2025 17:41:55 +0900 Subject: [PATCH 157/364] Refactor declaration location Refactor the code to move the context declaration for database migrations outside of root.go, allowing root.go to focus solely on initialization tasks. 
--- cmd/alpamon/command/root.go | 2 +- pkg/collector/check/batch/daily/cpu/daily_cpu_test.go | 3 +-- pkg/collector/check/batch/daily/disk/io/daily_io_test.go | 3 +-- .../check/batch/daily/disk/usage/daily_usage_test.go | 3 +-- .../check/batch/daily/memory/daily_memory_test.go | 3 +-- pkg/collector/check/batch/daily/net/daily_net_test.go | 3 +-- pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go | 3 +-- .../check/batch/hourly/disk/io/hourly_io_test.go | 3 +-- .../check/batch/hourly/disk/usage/hourly_usage_test.go | 3 +-- .../check/batch/hourly/memory/hourly_memory_test.go | 3 +-- pkg/collector/check/batch/hourly/net/hourly_net_test.go | 3 +-- pkg/collector/check/realtime/cpu/cpu_test.go | 3 +-- pkg/collector/check/realtime/disk/io/io_test.go | 3 +-- pkg/collector/check/realtime/disk/usage/usage_test.go | 3 +-- pkg/collector/check/realtime/memory/memory_test.go | 3 +-- pkg/collector/check/realtime/net/net_test.go | 3 +-- pkg/db/db.go | 8 ++++---- 17 files changed, 20 insertions(+), 35 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index cf64f55..765931e 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -68,7 +68,7 @@ func runAgent() { defer cancel() // DB - client := db.InitDB(ctx) + client := db.InitDB() // Collector metricCollector := collector.InitCollector(session, client) diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index e7e70c1..4c33207 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -13,13 +13,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.CPU_PER_DAY, Name: string(base.CPU_PER_DAY) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := 
NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index ab9f6e9..fb306f1 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.DISK_IO_PER_DAY, Name: string(base.DISK_IO_PER_DAY) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index c45a783..65dba46 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -13,13 +13,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.DISK_USAGE_PER_DAY, Name: string(base.DISK_USAGE_PER_DAY) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index a810840..0dbecb8 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -13,13 +13,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.MEM_PER_DAY, Name: string(base.MEM_PER_DAY) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check 
:= NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index c8e36ea..d2fc008 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.NET_PER_DAY, Name: string(base.NET_PER_DAY) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index 09d4762..ea95b25 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.CPU_PER_HOUR, Name: string(base.CPU_PER_HOUR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index 562c9ec..c9f6152 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.DISK_IO_PER_HOUR, Name: string(base.DISK_IO_PER_HOUR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git 
a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index 735f1f3..ef1ed7b 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.DISK_USAGE_PER_HOUR, Name: string(base.DISK_USAGE_PER_HOUR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index 172d6aa..be220b2 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.MEM_PER_HOUR, Name: string(base.MEM_PER_HOUR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index 13c69cd..9055ddb 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -14,13 +14,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.NET_PER_HOUR, Name: string(base.NET_PER_HOUR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := 
NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go index f4fdccf..fac6e60 100644 --- a/pkg/collector/check/realtime/cpu/cpu_test.go +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -13,13 +13,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.CPU, Name: string(base.CPU) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index 3c98a4e..f168823 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -14,13 +14,12 @@ import ( func setUp(checkType base.CheckType) base.CheckStrategy { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: checkType, Name: string(checkType) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args) diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go index 60839da..930882e 100644 --- a/pkg/collector/check/realtime/disk/usage/usage_test.go +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -13,13 +13,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.DISK_USAGE, Name: string(base.DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go index 
8c76bc4..a89176f 100644 --- a/pkg/collector/check/realtime/memory/memory_test.go +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -13,13 +13,12 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: base.MEM, Name: string(base.MEM) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index 01e5f05..39f0991 100644 --- a/pkg/collector/check/realtime/net/net_test.go +++ b/pkg/collector/check/realtime/net/net_test.go @@ -14,13 +14,12 @@ import ( func setUp(checkType base.CheckType) base.CheckStrategy { buffer := base.NewCheckBuffer(10) - ctx := context.Background() args := &base.CheckArgs{ Type: checkType, Name: string(checkType) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(ctx), + Client: db.InitDB(), } check := NewCheck(args) diff --git a/pkg/db/db.go b/pkg/db/db.go index 73cb57e..67c0852 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -18,7 +18,7 @@ const ( dbFileName = "alpamon.db" ) -func InitDB(parentCtx context.Context) *ent.Client { +func InitDB() *ent.Client { fileName := fmt.Sprintf("%s/%s", dbDir, dbFileName) if _, err := os.Stat(dbDir); os.IsNotExist(err) { fileName, _ = filepath.Abs(dbFileName) @@ -33,10 +33,10 @@ func InitDB(parentCtx context.Context) *ent.Client { sql.Register("sqlite3", &sqlite.Driver{}) - migrationCtx, migrationCancle := context.WithTimeout(parentCtx, 5*time.Minute) - defer migrationCancle() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() - err = RunMigration(dbFile.Name(), migrationCtx) + err = RunMigration(dbFile.Name(), ctx) if err != nil { log.Error().Err(err).Msgf("failed to migrate db: %v\n", err) os.Exit(1) From 
1cb6a8f1016246883a67124b3b55ab84813e87f7 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 13 Jan 2025 19:11:22 +0900 Subject: [PATCH 158/364] Refactor collector initialization logic Refactor the code to move the context declaration for collector outside of root.go, allowing root.go to focus solely on initialization tasks. Refactor collector to utilize the "comma-ok" idiom for managing goroutines. --- cmd/alpamon/command/root.go | 11 ++------- pkg/collector/collector.go | 47 +++++++++++++++++++++++-------------- 2 files changed, 32 insertions(+), 26 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 765931e..9bd7a9c 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -1,7 +1,6 @@ package command import ( - "context" "fmt" "os" "syscall" @@ -64,19 +63,13 @@ func runAgent() { // Commit runner.CommitAsync(session, commissioned) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - // DB client := db.InitDB() // Collector metricCollector := collector.InitCollector(session, client) - metricCollector.Start(ctx) - - for err := range metricCollector.Errors() { - log.Error().Err(err).Msgf("Collector error: %v", err) - } + metricCollector.Start() + defer metricCollector.Stop() // Websocket Client wsClient := runner.NewWebsocketClient(session) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index c712949..01ddc11 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -31,7 +31,8 @@ type Collector struct { buffer *base.CheckBuffer errorChan chan error wg sync.WaitGroup - stopChan chan struct{} + ctx context.Context + cancel context.CancelFunc } type collectConf struct { @@ -104,7 +105,6 @@ func NewCollector(args collectorArgs) (*Collector, error) { scheduler: scheduler.NewScheduler(), buffer: checkBuffer, errorChan: make(chan error, 10), - stopChan: make(chan struct{}), } err = metricCollector.initTasks(args) @@ -136,16 +136,20 @@ func (c 
*Collector) initTasks(args collectorArgs) error { return nil } -func (c *Collector) Start(ctx context.Context) { - go c.scheduler.Start(ctx, c.buffer.Capacity) +func (c *Collector) Start() { + c.ctx, c.cancel = context.WithCancel(context.Background()) + + go c.scheduler.Start(c.ctx, c.buffer.Capacity) for i := 0; i < c.buffer.Capacity; i++ { c.wg.Add(1) - go c.successQueueWorker(ctx) + go c.successQueueWorker(c.ctx) } c.wg.Add(1) - go c.failureQueueWorker(ctx) + go c.failureQueueWorker(c.ctx) + + go c.handleErrors() } func (c *Collector) successQueueWorker(ctx context.Context) { @@ -155,9 +159,11 @@ func (c *Collector) successQueueWorker(ctx context.Context) { select { case <-ctx.Done(): return - case <-c.stopChan: - return - case metric := <-c.buffer.SuccessQueue: + case metric, ok := <-c.buffer.SuccessQueue: + if !ok { + return + } + err := c.transporter.Send(metric) if err != nil { c.buffer.FailureQueue <- metric @@ -176,10 +182,12 @@ func (c *Collector) failureQueueWorker(ctx context.Context) { select { case <-ctx.Done(): return - case <-c.stopChan: - return case <-retryTicker.C: - metric := <-c.buffer.FailureQueue + metric, ok := <-c.buffer.FailureQueue + if !ok { + return + } + err := c.retryWithBackoff(ctx, metric) if err != nil { log.Error().Err(err).Msgf("Failed to check metric: %s", metric.Type) @@ -208,8 +216,17 @@ func (c *Collector) retryWithBackoff(ctx context.Context, metric base.MetricData return fmt.Errorf("max retries exceeded for metric %s", metric.Type) } +func (c *Collector) handleErrors() { + for err := range c.errorChan { + log.Error().Err(err).Msgf("Collector error: %v", err) + } +} + func (c *Collector) Stop() { - close(c.stopChan) + if c.cancel != nil { + c.cancel() + } + c.scheduler.Stop() c.wg.Wait() @@ -217,7 +234,3 @@ func (c *Collector) Stop() { close(c.buffer.FailureQueue) close(c.errorChan) } - -func (c *Collector) Errors() <-chan error { - return c.errorChan -} From 090474b9863a24b64ce0de29ddbb7dd5ec0c2ebd Mon Sep 17 00:00:00 2001 
From: Apdul0329 Date: Tue, 14 Jan 2025 13:56:45 +0900 Subject: [PATCH 159/364] Fix special character-related errors Fix errors caused by special characters in data.path when executing download command by adding Quote() function to utils.go to escape special characters. --- pkg/runner/command.go | 14 ++++++++------ pkg/utils/utils.go | 14 ++++++++++++++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 2a6f3ca..af3e3b3 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -758,14 +758,16 @@ func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode isZip := isZipFile(content, filepath.Ext(data.Path)) if isZip { - command := fmt.Sprintf("tee -a %s > /dev/null && unzip -n %s -d %s; rm %s", - strings.ReplaceAll(data.Path, " ", "\\ "), - strings.ReplaceAll(data.Path, " ", "\\ "), - strings.ReplaceAll(filepath.Dir(data.Path), " ", "\\ "), - strings.ReplaceAll(data.Path, " ", "\\ ")) + escapePath := utils.Quote(data.Path) + escapeDirPath := utils.Quote(filepath.Dir(data.Path)) + command := fmt.Sprintf("tee %s > /dev/null && unzip -n %s -d %s; rm %s", + escapePath, + escapePath, + escapeDirPath, + escapePath) cmd = exec.Command("sh", "-c", command) } else { - cmd = exec.Command("sh", "-c", fmt.Sprintf("tee -a %s > /dev/null", strings.ReplaceAll(data.Path, " ", "\\ "))) + cmd = exec.Command("sh", "-c", fmt.Sprintf("tee %s > /dev/null", utils.Quote(data.Path))) } cmd.SysProcAttr = sysProcAttr diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index b7b2621..a7d9343 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -5,6 +5,7 @@ import ( "fmt" "net/url" "os" + "regexp" "runtime" "strconv" "strings" @@ -15,6 +16,7 @@ import ( var ( PlatformLike string + pattern = regexp.MustCompile(`[^\w@%+=:,./-]`) ) func InitPlatform() { @@ -106,3 +108,15 @@ func ConvertGroupIds(groupIds []string) []uint32 { } return gids } + +func Quote(s string) string { + if len(s) == 0 { + return 
"''" + } + + if pattern.MatchString(s) { + return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'" + } + + return s +} From 29e22962877dca3cb7beef8070b087aa05376d66 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 14 Jan 2025 19:25:32 +0900 Subject: [PATCH 160/364] Add postrm script to delete files on package purge --- .goreleaser.yaml | 1 + scripts/postremove.sh | 15 +++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 scripts/postremove.sh diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 0767516..ab514af 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -44,6 +44,7 @@ nfpms: scripts: postinstall: "scripts/postinstall.sh" preremove: "scripts/preremove.sh" + postremove: "scripts/postremove.sh" changelog: sort: asc diff --git a/scripts/postremove.sh b/scripts/postremove.sh new file mode 100644 index 0000000..f878a85 --- /dev/null +++ b/scripts/postremove.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +CONF_FILE_PATH="/etc/alpamon/alpamon.conf" +TMP_FILE_PATH="/usr/lib/tmpfiles.d/alpamon.conf" +SVC_FILE_PATH="/lib/systemd/system/alpamon.service" +LOG_FILE_PATH="/var/log/alpamon/alpamon.log" + +if [ "$1" = 'purge' ]; then + rm -f "$CONF_FILE_PATH" || true + rm -f "$TMP_FILE_PATH" || true + rm -f "$SVC_FILE_PATH" || true + rm -f "$LOG_FILE_PATH" || true + + echo "All related configuration, service, and log files have been deleted." +fi \ No newline at end of file From 0c70b4958df2fd799d9e9911d1ff97543f800a82 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 16 Jan 2025 15:16:21 +0900 Subject: [PATCH 161/364] Rename due to changes in alpacon-server Rename due to changes in alpacon-server. 
--- pkg/collector/check/base/types.go | 36 +++++----- .../check/batch/daily/cleanup/cleanup.go | 70 +++++++++---------- pkg/collector/check/batch/daily/cpu/cpu.go | 28 ++++---- .../check/batch/daily/cpu/daily_cpu_test.go | 26 +++---- .../batch/daily/disk/io/daily_io_test.go | 26 +++---- pkg/collector/check/batch/daily/disk/io/io.go | 34 ++++----- .../daily/disk/usage/daily_usage_test.go | 26 +++---- .../check/batch/daily/disk/usage/usage.go | 30 ++++---- .../batch/daily/memory/daily_memory_test.go | 26 +++---- .../check/batch/daily/memory/memory.go | 28 ++++---- .../check/batch/daily/net/daily_net_test.go | 26 +++---- pkg/collector/check/batch/daily/net/net.go | 42 +++++------ pkg/collector/check/batch/hourly/cpu/cpu.go | 8 +-- .../check/batch/hourly/cpu/hourly_cpu_test.go | 10 +-- .../batch/hourly/disk/io/hourly_io_test.go | 10 +-- .../check/batch/hourly/disk/io/io.go | 8 +-- .../hourly/disk/usage/hourly_usage_test.go | 10 +-- .../check/batch/hourly/disk/usage/usage.go | 8 +-- .../batch/hourly/memory/hourly_memory_test.go | 6 +- .../check/batch/hourly/memory/memory.go | 8 +-- .../check/batch/hourly/net/hourly_net_test.go | 10 +-- pkg/collector/check/batch/hourly/net/net.go | 8 +-- pkg/collector/check/check.go | 56 +++++++-------- pkg/collector/transporter/utils.go | 60 ++++++++-------- .../migration/20250107063722_init_schemas.sql | 40 ----------- .../migration/20250116061438_init_schemas.sql | 40 +++++++++++ pkg/db/migration/atlas.sum | 4 +- ...memory_per_hour.go => hourly_cpu_usage.go} | 10 +-- .../{diskio_per_hour.go => hourly_disk_io.go} | 10 +-- ...usage_per_hour.go => hourly_disk_usage.go} | 10 +-- ...cpu_per_hour.go => hourly_memory_usage.go} | 10 +-- ...{traffic_per_hour.go => hourly_traffic.go} | 10 +-- 32 files changed, 367 insertions(+), 367 deletions(-) delete mode 100644 pkg/db/migration/20250107063722_init_schemas.sql create mode 100644 pkg/db/migration/20250116061438_init_schemas.sql rename pkg/db/schema/{memory_per_hour.go => hourly_cpu_usage.go} 
(56%) rename pkg/db/schema/{diskio_per_hour.go => hourly_disk_io.go} (64%) rename pkg/db/schema/{diskusage_per_hour.go => hourly_disk_usage.go} (57%) rename pkg/db/schema/{cpu_per_hour.go => hourly_memory_usage.go} (54%) rename pkg/db/schema/{traffic_per_hour.go => hourly_traffic.go} (70%) diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 6df934f..9bf8e82 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -7,24 +7,24 @@ import ( ) const ( - CPU CheckType = "cpu" - CPU_PER_HOUR CheckType = "cpu_per_hour" - CPU_PER_DAY CheckType = "cpu_per_day" - MEM CheckType = "memory" - MEM_PER_HOUR CheckType = "memory_per_hour" - MEM_PER_DAY CheckType = "memory_per_day" - DISK_USAGE CheckType = "disk_usage" - DISK_USAGE_PER_HOUR CheckType = "disk_usage_per_hour" - DISK_USAGE_PER_DAY CheckType = "disk_usage_per_day" - DISK_IO CheckType = "disk_io" - DISK_IO_COLLECTOR CheckType = "disk_io_collector" - DISK_IO_PER_HOUR CheckType = "disk_io_per_hour" - DISK_IO_PER_DAY CheckType = "disk_io_per_day" - NET CheckType = "net" - NET_COLLECTOR CheckType = "net_collector" - NET_PER_HOUR CheckType = "net_per_hour" - NET_PER_DAY CheckType = "net_per_day" - CLEANUP CheckType = "cleanup" + CPU CheckType = "cpu" + HOURLY_CPU_USAGE CheckType = "hourly-cpu-usage" + DAILY_CPU_USAGE CheckType = "daily-cpu-usage" + MEM CheckType = "memory" + HOURLY_MEM_USAGE CheckType = "hourly-memory-usage" + DAILY_MEM_USAGE CheckType = "daily-memory-usage" + DISK_USAGE CheckType = "disk_usage" + HOURLY_DISK_USAGE CheckType = "hourly-disk-usage" + DAILY_DISK_USAGE CheckType = "daily-disk-usage" + DISK_IO CheckType = "disk_io" + DISK_IO_COLLECTOR CheckType = "disk_io_collector" + HOURLY_DISK_IO CheckType = "hourly-disk-io" + DAILY_DISK_IO CheckType = "daily-disk-io" + NET CheckType = "net" + NET_COLLECTOR CheckType = "net_collector" + HOURLY_NET CheckType = "hourly-net" + DAILY_NET CheckType = "daily-net" + CLEANUP CheckType = "cleanup" ) 
type CheckType string diff --git a/pkg/collector/check/batch/daily/cleanup/cleanup.go b/pkg/collector/check/batch/daily/cleanup/cleanup.go index 889c824..c382fcc 100644 --- a/pkg/collector/check/batch/daily/cleanup/cleanup.go +++ b/pkg/collector/check/batch/daily/cleanup/cleanup.go @@ -7,41 +7,41 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpuperhour" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskioperhour" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusageperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlycpuusage" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskio" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskusage" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlymemoryusage" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlytraffic" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memoryperhour" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/trafficperhour" ) var ( tables = []base.CheckType{ base.CPU, - base.CPU_PER_HOUR, + base.HOURLY_CPU_USAGE, base.MEM, - base.MEM_PER_HOUR, + base.HOURLY_MEM_USAGE, base.DISK_USAGE, - base.DISK_USAGE_PER_HOUR, + base.HOURLY_DISK_USAGE, base.DISK_IO, - base.DISK_IO_PER_HOUR, + base.HOURLY_DISK_IO, base.NET, - base.NET_PER_HOUR, + base.HOURLY_NET, } deleteQueryMap = map[base.CheckType]deleteQuery{ - base.CPU: deleteAllCPU, - base.CPU_PER_HOUR: deleteAllCPUPerHour, - base.MEM: deleteAllMemory, - base.MEM_PER_HOUR: deleteAllMemoryPerHour, - base.DISK_USAGE: deleteAllDiskUsage, - base.DISK_USAGE_PER_HOUR: deleteAllDiskUsagePerHour, - 
base.DISK_IO: deleteAllDiskIO, - base.DISK_IO_PER_HOUR: deleteAllDiskIOPerHour, - base.NET: deleteAllTraffic, - base.NET_PER_HOUR: deleteAllTrafficPerHour, + base.CPU: deleteAllCPU, + base.HOURLY_CPU_USAGE: deleteAllHourlyCPUUsage, + base.MEM: deleteAllMemory, + base.HOURLY_MEM_USAGE: deleteAllHourlyMemoryUsage, + base.DISK_USAGE: deleteAllDiskUsage, + base.HOURLY_DISK_USAGE: deleteAllHourlyDiskUsage, + base.DISK_IO: deleteAllDiskIO, + base.HOURLY_DISK_IO: deleteAllHourlyDiskIO, + base.NET: deleteAllTraffic, + base.HOURLY_NET: deleteAllHourlyTraffic, } ) @@ -101,15 +101,15 @@ func deleteAllCPU(ctx context.Context, client *ent.Client, now time.Time) error return nil } -func deleteAllCPUPerHour(ctx context.Context, client *ent.Client, now time.Time) error { +func deleteAllHourlyCPUUsage(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.CPUPerHour.Delete(). - Where(cpuperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) + _, err = tx.HourlyCPUUsage.Delete(). + Where(hourlycpuusage.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -137,15 +137,15 @@ func deleteAllMemory(ctx context.Context, client *ent.Client, now time.Time) err return nil } -func deleteAllMemoryPerHour(ctx context.Context, client *ent.Client, now time.Time) error { +func deleteAllHourlyMemoryUsage(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.MemoryPerHour.Delete(). - Where(memoryperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) + _, err = tx.HourlyMemoryUsage.Delete(). 
+ Where(hourlymemoryusage.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -173,15 +173,15 @@ func deleteAllDiskUsage(ctx context.Context, client *ent.Client, now time.Time) return nil } -func deleteAllDiskUsagePerHour(ctx context.Context, client *ent.Client, now time.Time) error { +func deleteAllHourlyDiskUsage(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.DiskUsagePerHour.Delete(). - Where(diskusageperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) + _, err = tx.HourlyDiskUsage.Delete(). + Where(hourlydiskusage.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -209,15 +209,15 @@ func deleteAllDiskIO(ctx context.Context, client *ent.Client, now time.Time) err return nil } -func deleteAllDiskIOPerHour(ctx context.Context, client *ent.Client, now time.Time) error { +func deleteAllHourlyDiskIO(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.DiskIOPerHour.Delete(). - Where(diskioperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) + _, err = tx.HourlyDiskIO.Delete(). + Where(hourlydiskio.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } @@ -245,15 +245,15 @@ func deleteAllTraffic(ctx context.Context, client *ent.Client, now time.Time) er return nil } -func deleteAllTrafficPerHour(ctx context.Context, client *ent.Client, now time.Time) error { +func deleteAllHourlyTraffic(ctx context.Context, client *ent.Client, now time.Time) error { tx, err := client.Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - _, err = tx.TrafficPerHour.Delete(). - Where(trafficperhour.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) + _, err = tx.HourlyTraffic.Delete(). 
+ Where(hourlytraffic.TimestampLTE(now.Add(-24 * time.Hour))).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index 904ca14..bbe0960 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -6,7 +6,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpuperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlycpuusage" ) type Check struct { @@ -20,7 +20,7 @@ func NewCheck(args *base.CheckArgs) base.CheckStrategy { } func (c *Check) Execute(ctx context.Context) error { - metric, err := c.queryCPUPerHour(ctx) + metric, err := c.queryHourlyCPUUsage(ctx) if err != nil { return err } @@ -35,8 +35,8 @@ func (c *Check) Execute(ctx context.Context) error { return nil } -func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { - querySet, err := c.getCPUPerHour(ctx) +func (c *Check) queryHourlyCPUUsage(ctx context.Context) (base.MetricData, error) { + querySet, err := c.getHourlyCPUUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -47,11 +47,11 @@ func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { Avg: querySet[0].AVG, } metric := base.MetricData{ - Type: base.CPU_PER_DAY, + Type: base.DAILY_CPU_USAGE, Data: []base.CheckResult{data}, } - err = c.deleteCPUPerHour(ctx) + err = c.deleteHourlyCPUUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -59,17 +59,17 @@ func (c *Check) queryCPUPerHour(ctx context.Context) (base.MetricData, error) { return metric, nil } -func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { +func (c *Check) getHourlyCPUUsage(ctx context.Context) ([]base.CPUQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-24 * time.Hour) var querySet []base.CPUQuerySet 
- err := client.CPUPerHour.Query(). - Where(cpuperhour.TimestampGTE(from), cpuperhour.TimestampLTE(now)). + err := client.HourlyCPUUsage.Query(). + Where(hourlycpuusage.TimestampGTE(from), hourlycpuusage.TimestampLTE(now)). Aggregate( - ent.Max(cpuperhour.FieldPeak), - ent.Mean(cpuperhour.FieldAvg), + ent.Max(hourlycpuusage.FieldPeak), + ent.Mean(hourlycpuusage.FieldAvg), ).Scan(ctx, &querySet) if err != nil { return querySet, err @@ -78,7 +78,7 @@ func (c *Check) getCPUPerHour(ctx context.Context) ([]base.CPUQuerySet, error) { return querySet, nil } -func (c *Check) deleteCPUPerHour(ctx context.Context) error { +func (c *Check) deleteHourlyCPUUsage(ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err @@ -87,8 +87,8 @@ func (c *Check) deleteCPUPerHour(ctx context.Context) error { from := time.Now().Add(-24 * time.Hour) - _, err = tx.CPUPerHour.Delete(). - Where(cpuperhour.TimestampLTE(from)).Exec(ctx) + _, err = tx.HourlyCPUUsage.Delete(). + Where(hourlycpuusage.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index 4c33207..cce5e05 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -14,8 +14,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.CPU_PER_DAY, - Name: string(base.CPU_PER_DAY) + "_" + uuid.NewString(), + Type: base.DAILY_CPU_USAGE, + Name: string(base.DAILY_CPU_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -26,31 +26,31 @@ func setUp() *Check { return check } -func TestGetCPUPerHour(t *testing.T) { +func TestGetHourlyCPUUsage(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().CPUPerHour.Create(). + err := check.GetClient().HourlyCPUUsage.Create(). 
SetTimestamp(time.Now()). SetPeak(50.0). SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create cpu usage per hour.") + assert.NoError(t, err, "Failed to create hourly cpu usage.") - querySet, err := check.getCPUPerHour(ctx) - assert.NoError(t, err, "Failed to get cpu usage per hour.") - assert.NotEmpty(t, querySet, "CPUPerHour queryset should not be empty") + querySet, err := check.getHourlyCPUUsage(ctx) + assert.NoError(t, err, "Failed to get hourly cpu usage.") + assert.NotEmpty(t, querySet, "HourlyCPUUsage queryset should not be empty") } -func TestDeleteCPUPerHour(t *testing.T) { +func TestDeleteHourlyCPUUsage(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().CPUPerHour.Create(). + err := check.GetClient().HourlyCPUUsage.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetPeak(50.0). SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create cpu usage per hour.") + assert.NoError(t, err, "Failed to create hourly cpu usage.") - err = check.deleteCPUPerHour(ctx) - assert.NoError(t, err, "Failed to delete cpu usage per hour.") + err = check.deleteHourlyCPUUsage(ctx) + assert.NoError(t, err, "Failed to delete hourly cpu usage.") } diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index fb306f1..36c2edc 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.DISK_IO_PER_DAY, - Name: string(base.DISK_IO_PER_DAY) + "_" + uuid.NewString(), + Type: base.DAILY_DISK_IO, + Name: string(base.DAILY_DISK_IO) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -27,37 +27,37 @@ func setUp() *Check { return check } -func TestGetDiskIOPerHour(t *testing.T) { +func TestGetHourlyDiskIO(t 
*testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().DiskIOPerHour.Create(). + err := check.GetClient().HourlyDiskIO.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetPeakReadBps(rand.Float64()). SetPeakWriteBps(rand.Float64()). SetAvgReadBps(rand.Float64()). SetAvgWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create disk io per hour.") + assert.NoError(t, err, "Failed to create hourly disk io.") - querySet, err := check.getDiskIOPerHour(ctx) - assert.NoError(t, err, "Failed to get disk io per hour.") - assert.NotEmpty(t, querySet, "DiskIOPerHour queryset should not be empty") + querySet, err := check.getHourlyDiskIO(ctx) + assert.NoError(t, err, "Failed to get hourly disk io.") + assert.NotEmpty(t, querySet, "HourlyDiskIO queryset should not be empty") } -func TestDeleteDiskIOPerHour(t *testing.T) { +func TestDeleteHourlyDiskIO(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().DiskIOPerHour.Create(). + err := check.GetClient().HourlyDiskIO.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetDevice(uuid.NewString()). SetPeakReadBps(rand.Float64()). SetPeakWriteBps(rand.Float64()). SetAvgReadBps(rand.Float64()). 
SetAvgWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create disk io per hour.") + assert.NoError(t, err, "Failed to create hourly disk io.") - err = check.deleteDiskIOPerHour(ctx) - assert.NoError(t, err, "Failed to delete disk io per hour.") + err = check.deleteHourlyDiskIO(ctx) + assert.NoError(t, err, "Failed to delete hourly disk io.") } diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index 4450b7c..b8ff337 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -6,7 +6,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskioperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskio" ) type Check struct { @@ -20,7 +20,7 @@ func NewCheck(args *base.CheckArgs) base.CheckStrategy { } func (c *Check) Execute(ctx context.Context) error { - metric, err := c.queryDiskIOPerHour(ctx) + metric, err := c.queryHourlyDiskIO(ctx) if err != nil { return err } @@ -35,8 +35,8 @@ func (c *Check) Execute(ctx context.Context) error { return nil } -func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) { - querySet, err := c.getDiskIOPerHour(ctx) +func (c *Check) queryHourlyDiskIO(ctx context.Context) (base.MetricData, error) { + querySet, err := c.getHourlyDiskIO(ctx) if err != nil { return base.MetricData{}, err } @@ -53,11 +53,11 @@ func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) }) } metric := base.MetricData{ - Type: base.DISK_IO_PER_DAY, + Type: base.DAILY_DISK_IO, Data: data, } - err = c.deleteDiskIOPerHour(ctx) + err = c.deleteHourlyDiskIO(ctx) if err != nil { return base.MetricData{}, err } @@ -65,20 +65,20 @@ func (c *Check) queryDiskIOPerHour(ctx context.Context) (base.MetricData, error) return metric, nil } -func (c *Check) 
getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, error) { +func (c *Check) getHourlyDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-24 * time.Hour) var querySet []base.DiskIOQuerySet - err := client.DiskIOPerHour.Query(). - Where(diskioperhour.TimestampGTE(from), diskioperhour.TimestampLTE(now)). - GroupBy(diskioperhour.FieldDevice). + err := client.HourlyDiskIO.Query(). + Where(hourlydiskio.TimestampGTE(from), hourlydiskio.TimestampLTE(now)). + GroupBy(hourlydiskio.FieldDevice). Aggregate( - ent.As(ent.Max(diskioperhour.FieldPeakReadBps), "peak_read_bps"), - ent.As(ent.Max(diskioperhour.FieldPeakWriteBps), "peak_write_bps"), - ent.As(ent.Mean(diskioperhour.FieldAvgReadBps), "avg_read_bps"), - ent.As(ent.Mean(diskioperhour.FieldAvgWriteBps), "avg_write_bps"), + ent.As(ent.Max(hourlydiskio.FieldPeakReadBps), "peak_read_bps"), + ent.As(ent.Max(hourlydiskio.FieldPeakWriteBps), "peak_write_bps"), + ent.As(ent.Mean(hourlydiskio.FieldAvgReadBps), "avg_read_bps"), + ent.As(ent.Mean(hourlydiskio.FieldAvgWriteBps), "avg_write_bps"), ).Scan(ctx, &querySet) if err != nil { return querySet, err @@ -87,7 +87,7 @@ func (c *Check) getDiskIOPerHour(ctx context.Context) ([]base.DiskIOQuerySet, er return querySet, nil } -func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { +func (c *Check) deleteHourlyDiskIO(ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err @@ -96,8 +96,8 @@ func (c *Check) deleteDiskIOPerHour(ctx context.Context) error { from := time.Now().Add(-24 * time.Hour) - _, err = tx.DiskIOPerHour.Delete(). - Where(diskioperhour.TimestampLTE(from)).Exec(ctx) + _, err = tx.HourlyDiskIO.Delete(). 
+ Where(hourlydiskio.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index 65dba46..e9b8de4 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -14,8 +14,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.DISK_USAGE_PER_DAY, - Name: string(base.DISK_USAGE_PER_DAY) + "_" + uuid.NewString(), + Type: base.DAILY_DISK_USAGE, + Name: string(base.DAILY_DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -26,33 +26,33 @@ func setUp() *Check { return check } -func TestGetDiskUsagePerHour(t *testing.T) { +func TestGetHourlyDiskUsage(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().DiskUsagePerHour.Create(). + err := check.GetClient().HourlyDiskUsage.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetPeak(50.0). SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create disk usage per hour.") + assert.NoError(t, err, "Failed to create hourly disk usage.") - querySet, err := check.getDiskUsagePerHour(ctx) - assert.NoError(t, err, "Failed to get disk usage per hour.") - assert.NotEmpty(t, querySet, "DiskUsagePerHour queryset should not be empty") + querySet, err := check.getHourlyDiskUsage(ctx) + assert.NoError(t, err, "Failed to get hourly disk usage.") + assert.NotEmpty(t, querySet, "HourlyDiskUsage queryset should not be empty") } -func TestDeleteDiskUsagePerHour(t *testing.T) { +func TestDeleteHourlyDiskUsage(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().DiskUsagePerHour.Create(). + err := check.GetClient().HourlyDiskUsage.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). 
SetDevice(uuid.NewString()). SetPeak(50.0). SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create disk usage per hour.") + assert.NoError(t, err, "Failed to create hourly disk usage.") - err = check.deleteDiskUsagePerHour(ctx) - assert.NoError(t, err, "Failed to delete disk usage per hour.") + err = check.deleteHourlyDiskUsage(ctx) + assert.NoError(t, err, "Failed to delete hourly disk usage.") } diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index 37d20b0..a989625 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -6,7 +6,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusageperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskusage" ) type Check struct { @@ -20,7 +20,7 @@ func NewCheck(args *base.CheckArgs) base.CheckStrategy { } func (c *Check) Execute(ctx context.Context) error { - metric, err := c.queryDiskUsagePerHour(ctx) + metric, err := c.queryHourlyDiskUsage(ctx) if err != nil { return err } @@ -35,8 +35,8 @@ func (c *Check) Execute(ctx context.Context) error { return nil } -func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, error) { - querySet, err := c.getDiskUsagePerHour(ctx) +func (c *Check) queryHourlyDiskUsage(ctx context.Context) (base.MetricData, error) { + querySet, err := c.getHourlyDiskUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -51,11 +51,11 @@ func (c *Check) queryDiskUsagePerHour(ctx context.Context) (base.MetricData, err }) } metric := base.MetricData{ - Type: base.DISK_USAGE_PER_DAY, + Type: base.DAILY_DISK_USAGE, Data: data, } - err = c.deleteDiskUsagePerHour(ctx) + err = c.deleteHourlyDiskUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -63,18 +63,18 @@ func (c *Check) 
queryDiskUsagePerHour(ctx context.Context) (base.MetricData, err return metric, nil } -func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQuerySet, error) { +func (c *Check) getHourlyDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-24 * time.Hour) var querySet []base.DiskUsageQuerySet - err := client.DiskUsagePerHour.Query(). - Where(diskusageperhour.TimestampGTE(from), diskusageperhour.TimestampLTE(now)). - GroupBy(diskusageperhour.FieldDevice). + err := client.HourlyDiskUsage.Query(). + Where(hourlydiskusage.TimestampGTE(from), hourlydiskusage.TimestampLTE(now)). + GroupBy(hourlydiskusage.FieldDevice). Aggregate( - ent.Max(diskusageperhour.FieldPeak), - ent.Mean(diskusageperhour.FieldAvg), + ent.Max(hourlydiskusage.FieldPeak), + ent.Mean(hourlydiskusage.FieldAvg), ).Scan(ctx, &querySet) if err != nil { return querySet, err @@ -83,7 +83,7 @@ func (c *Check) getDiskUsagePerHour(ctx context.Context) ([]base.DiskUsageQueryS return querySet, nil } -func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { +func (c *Check) deleteHourlyDiskUsage(ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err @@ -92,8 +92,8 @@ func (c *Check) deleteDiskUsagePerHour(ctx context.Context) error { from := time.Now().Add(-24 * time.Hour) - _, err = tx.DiskUsagePerHour.Delete(). - Where(diskusageperhour.TimestampLTE(from)).Exec(ctx) + _, err = tx.HourlyDiskUsage.Delete(). 
+ Where(hourlydiskusage.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index 0dbecb8..9165cf1 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -14,8 +14,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.MEM_PER_DAY, - Name: string(base.MEM_PER_DAY) + "_" + uuid.NewString(), + Type: base.DAILY_MEM_USAGE, + Name: string(base.DAILY_MEM_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -26,31 +26,31 @@ func setUp() *Check { return check } -func TestGetMemoryPerHour(t *testing.T) { +func TestGetHourlyMemoryUsage(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().MemoryPerHour.Create(). + err := check.GetClient().HourlyMemoryUsage.Create(). SetTimestamp(time.Now()). SetPeak(50.0). SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create memory usage per hour.") + assert.NoError(t, err, "Failed to create hourly memory usage.") - querySet, err := check.getMemoryPerHour(ctx) - assert.NoError(t, err, "Failed to get memory usage per hour.") - assert.NotEmpty(t, querySet, "MemoryPerHour queryset should not be empty") + querySet, err := check.getHourlyMemoryUsage(ctx) + assert.NoError(t, err, "Failed to get hourly memory usage.") + assert.NotEmpty(t, querySet, "HouryMemoryUsage queryset should not be empty") } -func TestDeleteMemoryPerHour(t *testing.T) { +func TestDeleteHourlyMemoryUsage(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().MemoryPerHour.Create(). + err := check.GetClient().HourlyMemoryUsage.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetPeak(50.0). 
SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create memory usage per hour.") + assert.NoError(t, err, "Failed to create hourly memory usage.") - err = check.deleteMemoryPerHour(ctx) - assert.NoError(t, err, "Failed to delete memory usage per hour.") + err = check.deleteHourlyMemoryUsage(ctx) + assert.NoError(t, err, "Failed to delete hourly memory usage.") } diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index a654daf..088bfb1 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -6,7 +6,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memoryperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlymemoryusage" ) type Check struct { @@ -20,7 +20,7 @@ func NewCheck(args *base.CheckArgs) base.CheckStrategy { } func (c *Check) Execute(ctx context.Context) error { - metric, err := c.queryMemoryPerHour(ctx) + metric, err := c.queryHourlyMemoryUsage(ctx) if err != nil { return err } @@ -35,8 +35,8 @@ func (c *Check) Execute(ctx context.Context) error { return nil } -func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) { - querySet, err := c.getMemoryPerHour(ctx) +func (c *Check) queryHourlyMemoryUsage(ctx context.Context) (base.MetricData, error) { + querySet, err := c.getHourlyMemoryUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -47,11 +47,11 @@ func (c *Check) queryMemoryPerHour(ctx context.Context) (base.MetricData, error) Avg: querySet[0].AVG, } metric := base.MetricData{ - Type: base.MEM_PER_DAY, + Type: base.DAILY_MEM_USAGE, Data: []base.CheckResult{data}, } - err = c.deleteMemoryPerHour(ctx) + err = c.deleteHourlyMemoryUsage(ctx) if err != nil { return base.MetricData{}, err } @@ -59,17 +59,17 @@ func (c *Check) queryMemoryPerHour(ctx 
context.Context) (base.MetricData, error) return metric, nil } -func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, error) { +func (c *Check) getHourlyMemoryUsage(ctx context.Context) ([]base.MemoryQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-24 * time.Hour) var querySet []base.MemoryQuerySet - err := client.MemoryPerHour.Query(). - Where(memoryperhour.TimestampGTE(from), memoryperhour.TimestampLTE(now)). + err := client.HourlyMemoryUsage.Query(). + Where(hourlymemoryusage.TimestampGTE(from), hourlymemoryusage.TimestampLTE(now)). Aggregate( - ent.Max(memoryperhour.FieldPeak), - ent.Mean(memoryperhour.FieldAvg), + ent.Max(hourlymemoryusage.FieldPeak), + ent.Mean(hourlymemoryusage.FieldAvg), ).Scan(ctx, &querySet) if err != nil { return querySet, err @@ -78,7 +78,7 @@ func (c *Check) getMemoryPerHour(ctx context.Context) ([]base.MemoryQuerySet, er return querySet, nil } -func (c *Check) deleteMemoryPerHour(ctx context.Context) error { +func (c *Check) deleteHourlyMemoryUsage(ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err @@ -87,8 +87,8 @@ func (c *Check) deleteMemoryPerHour(ctx context.Context) error { from := time.Now().Add(-24 * time.Hour) - _, err = tx.MemoryPerHour.Delete(). - Where(memoryperhour.TimestampLTE(from)).Exec(ctx) + _, err = tx.HourlyMemoryUsage.Delete(). 
+ Where(hourlymemoryusage.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index d2fc008..2855a9a 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.NET_PER_DAY, - Name: string(base.NET_PER_DAY) + "_" + uuid.NewString(), + Type: base.DAILY_NET, + Name: string(base.DAILY_NET) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -27,11 +27,11 @@ func setUp() *Check { return check } -func TestGetTrafficPerHour(t *testing.T) { +func TestGetHourlyTraffic(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().TrafficPerHour.Create(). + err := check.GetClient().HourlyTraffic.Create(). SetTimestamp(time.Now()). SetName(uuid.NewString()). SetPeakInputPps(rand.Float64()). @@ -42,18 +42,18 @@ func TestGetTrafficPerHour(t *testing.T) { SetAvgInputBps(rand.Float64()). SetAvgOutputPps(rand.Float64()). SetAvgOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create traffic per hour.") + assert.NoError(t, err, "Failed to create hourly traffic.") - querySet, err := check.getTrafficPerHour(ctx) - assert.NoError(t, err, "Failed to get traffic per hour.") - assert.NotEmpty(t, querySet, "TrafficPerHour queryset should not be empty") + querySet, err := check.getHourlyTraffic(ctx) + assert.NoError(t, err, "Failed to get hourly traffic.") + assert.NotEmpty(t, querySet, "HourlyTraffic queryset should not be empty") } -func TestDeleteTrafficPerHour(t *testing.T) { +func TestDeleteHourlyTraffic(t *testing.T) { check := setUp() ctx := context.Background() - err := check.GetClient().TrafficPerHour.Create(). + err := check.GetClient().HourlyTraffic.Create(). 
SetTimestamp(time.Now().Add(-25 * time.Hour)). SetName(uuid.NewString()). SetPeakInputPps(rand.Float64()). @@ -64,8 +64,8 @@ func TestDeleteTrafficPerHour(t *testing.T) { SetAvgInputBps(rand.Float64()). SetAvgOutputPps(rand.Float64()). SetAvgOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create traffic per hour.") + assert.NoError(t, err, "Failed to create hourly traffic.") - err = check.deleteTrafficPerHour(ctx) - assert.NoError(t, err, "Failed to delete traffic per hour.") + err = check.deleteHourlyTraffic(ctx) + assert.NoError(t, err, "Failed to delete hourly traffic.") } diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index a85ed34..3e7b5b2 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -6,7 +6,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/trafficperhour" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlytraffic" ) type Check struct { @@ -20,7 +20,7 @@ func NewCheck(args *base.CheckArgs) base.CheckStrategy { } func (c *Check) Execute(ctx context.Context) error { - metric, err := c.queryTrafficPerHour(ctx) + metric, err := c.queryHourlyTraffic(ctx) if err != nil { return err } @@ -35,8 +35,8 @@ func (c *Check) Execute(ctx context.Context) error { return nil } -func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error) { - querySet, err := c.getTrafficPerHour(ctx) +func (c *Check) queryHourlyTraffic(ctx context.Context) (base.MetricData, error) { + querySet, err := c.getHourlyTraffic(ctx) if err != nil { return base.MetricData{}, err } @@ -57,11 +57,11 @@ func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error }) } metric := base.MetricData{ - Type: base.NET_PER_DAY, + Type: base.DAILY_NET, Data: data, } - err = c.deleteTrafficPerHour(ctx) + 
err = c.deleteHourlyTraffic(ctx) if err != nil { return base.MetricData{}, err } @@ -69,24 +69,24 @@ func (c *Check) queryTrafficPerHour(ctx context.Context) (base.MetricData, error return metric, nil } -func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, error) { +func (c *Check) getHourlyTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) { client := c.GetClient() now := time.Now() from := now.Add(-24 * time.Hour) var querySet []base.TrafficQuerySet - err := client.TrafficPerHour.Query(). - Where(trafficperhour.TimestampGTE(from), trafficperhour.TimestampLTE(now)). - GroupBy(trafficperhour.FieldName). + err := client.HourlyTraffic.Query(). + Where(hourlytraffic.TimestampGTE(from), hourlytraffic.TimestampLTE(now)). + GroupBy(hourlytraffic.FieldName). Aggregate( - ent.As(ent.Max(trafficperhour.FieldPeakInputPps), "peak_input_pps"), - ent.As(ent.Max(trafficperhour.FieldPeakInputBps), "peak_input_bps"), - ent.As(ent.Max(trafficperhour.FieldPeakOutputPps), "peak_output_pps"), - ent.As(ent.Max(trafficperhour.FieldPeakOutputBps), "peak_output_bps"), - ent.As(ent.Mean(trafficperhour.FieldAvgInputPps), "avg_input_pps"), - ent.As(ent.Mean(trafficperhour.FieldAvgInputBps), "avg_input_bps"), - ent.As(ent.Mean(trafficperhour.FieldAvgOutputPps), "avg_output_pps"), - ent.As(ent.Mean(trafficperhour.FieldAvgOutputBps), "avg_output_bps"), + ent.As(ent.Max(hourlytraffic.FieldPeakInputPps), "peak_input_pps"), + ent.As(ent.Max(hourlytraffic.FieldPeakInputBps), "peak_input_bps"), + ent.As(ent.Max(hourlytraffic.FieldPeakOutputPps), "peak_output_pps"), + ent.As(ent.Max(hourlytraffic.FieldPeakOutputBps), "peak_output_bps"), + ent.As(ent.Mean(hourlytraffic.FieldAvgInputPps), "avg_input_pps"), + ent.As(ent.Mean(hourlytraffic.FieldAvgInputBps), "avg_input_bps"), + ent.As(ent.Mean(hourlytraffic.FieldAvgOutputPps), "avg_output_pps"), + ent.As(ent.Mean(hourlytraffic.FieldAvgOutputBps), "avg_output_bps"), ).Scan(ctx, &querySet) if err != nil { return 
querySet, err @@ -95,7 +95,7 @@ func (c *Check) getTrafficPerHour(ctx context.Context) ([]base.TrafficQuerySet, return querySet, nil } -func (c *Check) deleteTrafficPerHour(ctx context.Context) error { +func (c *Check) deleteHourlyTraffic(ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err @@ -104,8 +104,8 @@ func (c *Check) deleteTrafficPerHour(ctx context.Context) error { from := time.Now().Add(-24 * time.Hour) - _, err = tx.TrafficPerHour.Delete(). - Where(trafficperhour.TimestampLTE(from)).Exec(ctx) + _, err = tx.HourlyTraffic.Delete(). + Where(hourlytraffic.TimestampLTE(from)).Exec(ctx) if err != nil { return err } diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index d11bf88..cbc050a 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -47,11 +47,11 @@ func (c *Check) queryCPUUsage(ctx context.Context) (base.MetricData, error) { Avg: querySet[0].AVG, } metric := base.MetricData{ - Type: base.CPU_PER_HOUR, + Type: base.HOURLY_CPU_USAGE, Data: []base.CheckResult{data}, } - err = c.saveCPUPerHour(data, ctx) + err = c.saveHourlyCPUUsage(data, ctx) if err != nil { return base.MetricData{}, err } @@ -83,14 +83,14 @@ func (c *Check) getCPU(ctx context.Context) ([]base.CPUQuerySet, error) { return querySet, nil } -func (c *Check) saveCPUPerHour(data base.CheckResult, ctx context.Context) error { +func (c *Check) saveHourlyCPUUsage(data base.CheckResult, ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - err = tx.CPUPerHour.Create(). + err = tx.HourlyCPUUsage.Create(). SetTimestamp(data.Timestamp). SetPeak(data.Peak). 
SetAvg(data.Avg).Exec(ctx) diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index ea95b25..7a91445 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.CPU_PER_HOUR, - Name: string(base.CPU_PER_HOUR) + "_" + uuid.NewString(), + Type: base.HOURLY_CPU_USAGE, + Name: string(base.HOURLY_CPU_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -41,7 +41,7 @@ func TestGetCPU(t *testing.T) { assert.NotEmpty(t, querySet, "CPU queryset should not be empty") } -func TestSaveCPUPerHour(t *testing.T) { +func TestSaveHourlyCPUUsage(t *testing.T) { check := setUp() ctx := context.Background() data := base.CheckResult{ @@ -50,8 +50,8 @@ func TestSaveCPUPerHour(t *testing.T) { Avg: 50.0, } - err := check.saveCPUPerHour(data, ctx) - assert.NoError(t, err, "Failed to save cpu usage per hour.") + err := check.saveHourlyCPUUsage(data, ctx) + assert.NoError(t, err, "Failed to save hourly cpu usage.") } func TestDeleteCPU(t *testing.T) { diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index c9f6152..583e9f1 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.DISK_IO_PER_HOUR, - Name: string(base.DISK_IO_PER_HOUR) + "_" + uuid.NewString(), + Type: base.HOURLY_DISK_IO, + Name: string(base.HOURLY_DISK_IO) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -43,7 +43,7 @@ func TestGetDiskIO(t *testing.T) { 
assert.NotEmpty(t, querySet, "Disk io queryset should not be empty") } -func TestSaveDiskIOPerHour(t *testing.T) { +func TestSaveHourlyDiskIO(t *testing.T) { check := setUp() ctx := context.Background() data := []base.CheckResult{ @@ -65,8 +65,8 @@ func TestSaveDiskIOPerHour(t *testing.T) { }, } - err := check.saveDiskIOPerHour(data, ctx) - assert.NoError(t, err, "Failed to save disk io per hour.") + err := check.saveHourlyDiskIO(data, ctx) + assert.NoError(t, err, "Failed to save hourly disk io.") } func TestDeleteDiskIO(t *testing.T) { diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index a8c435a..e2e03c5 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -53,11 +53,11 @@ func (c *Check) queryDiskIO(ctx context.Context) (base.MetricData, error) { }) } metric := base.MetricData{ - Type: base.DISK_IO_PER_HOUR, + Type: base.HOURLY_DISK_IO, Data: data, } - err = c.saveDiskIOPerHour(data, ctx) + err = c.saveHourlyDiskIO(data, ctx) if err != nil { return base.MetricData{}, err } @@ -92,14 +92,14 @@ func (c *Check) getDiskIO(ctx context.Context) ([]base.DiskIOQuerySet, error) { return querySet, nil } -func (c *Check) saveDiskIOPerHour(data []base.CheckResult, ctx context.Context) error { +func (c *Check) saveHourlyDiskIO(data []base.CheckResult, ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return nil } defer func() { _ = tx.Rollback() }() - err = tx.DiskIOPerHour.MapCreateBulk(data, func(q *ent.DiskIOPerHourCreate, i int) { + err = tx.HourlyDiskIO.MapCreateBulk(data, func(q *ent.HourlyDiskIOCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). SetPeakReadBps(data[i].PeakReadBps). 
diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index ef1ed7b..f8b3ff3 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.DISK_USAGE_PER_HOUR, - Name: string(base.DISK_USAGE_PER_HOUR) + "_" + uuid.NewString(), + Type: base.HOURLY_DISK_USAGE, + Name: string(base.HOURLY_DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -46,7 +46,7 @@ func TestGetDiskUsage(t *testing.T) { assert.NotEmpty(t, querySet, "Disk usage queryset should not be empty") } -func TestSaveDiskUsagePerHour(t *testing.T) { +func TestSaveHourlyDiskUsage(t *testing.T) { check := setUp() ctx := context.Background() data := []base.CheckResult{ @@ -64,8 +64,8 @@ func TestSaveDiskUsagePerHour(t *testing.T) { }, } - err := check.saveDiskUsagePerHour(data, ctx) - assert.NoError(t, err, "Failed to save disk usage per hour.") + err := check.saveHourlyDiskUsage(data, ctx) + assert.NoError(t, err, "Failed to save hourly disk usage.") } func TestDeleteDiskUsage(t *testing.T) { diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index a14d676..beb06bb 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -52,11 +52,11 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { }) } metric := base.MetricData{ - Type: base.DISK_USAGE_PER_HOUR, + Type: base.HOURLY_DISK_USAGE, Data: data, } - err = c.saveDiskUsagePerHour(data, ctx) + err = c.saveHourlyDiskUsage(data, ctx) if err != nil { return base.MetricData{}, err } @@ -107,14 +107,14 @@ func (c *Check) getDiskUsage(ctx 
context.Context) ([]base.DiskUsageQuerySet, err return querySet, nil } -func (c *Check) saveDiskUsagePerHour(data []base.CheckResult, ctx context.Context) error { +func (c *Check) saveHourlyDiskUsage(data []base.CheckResult, ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - err = tx.DiskUsagePerHour.MapCreateBulk(data, func(q *ent.DiskUsagePerHourCreate, i int) { + err = tx.HourlyDiskUsage.MapCreateBulk(data, func(q *ent.HourlyDiskUsageCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). SetPeak(data[i].Peak). diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index be220b2..2a479b6 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.MEM_PER_HOUR, - Name: string(base.MEM_PER_HOUR) + "_" + uuid.NewString(), + Type: base.HOURLY_MEM_USAGE, + Name: string(base.HOURLY_MEM_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -50,7 +50,7 @@ func TestSaveMemoryPerHour(t *testing.T) { Avg: 50.0, } - err := check.saveMemoryPerHour(data, ctx) + err := check.saveHourlyMemoryUsage(data, ctx) assert.NoError(t, err, "Failed to save memory usage per hour.") } diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index f79f697..12b88f6 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -47,11 +47,11 @@ func (c *Check) queryMemoryUsage(ctx context.Context) (base.MetricData, error) { Avg: querySet[0].AVG, } metric := base.MetricData{ - Type: base.MEM_PER_HOUR, + Type: base.HOURLY_MEM_USAGE, 
Data: []base.CheckResult{data}, } - err = c.saveMemoryPerHour(data, ctx) + err = c.saveHourlyMemoryUsage(data, ctx) if err != nil { return base.MetricData{}, err } @@ -83,14 +83,14 @@ func (c *Check) getMemory(ctx context.Context) ([]base.MemoryQuerySet, error) { return querySet, nil } -func (c *Check) saveMemoryPerHour(data base.CheckResult, ctx context.Context) error { +func (c *Check) saveHourlyMemoryUsage(data base.CheckResult, ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - err = tx.MemoryPerHour.Create(). + err = tx.HourlyMemoryUsage.Create(). SetTimestamp(data.Timestamp). SetPeak(data.Peak). SetAvg(data.Avg).Exec(ctx) diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index 9055ddb..d21eccb 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -15,8 +15,8 @@ import ( func setUp() *Check { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ - Type: base.NET_PER_HOUR, - Name: string(base.NET_PER_HOUR) + "_" + uuid.NewString(), + Type: base.HOURLY_NET, + Name: string(base.HOURLY_NET) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, Client: db.InitDB(), @@ -45,7 +45,7 @@ func TestGetTraffic(t *testing.T) { assert.NotEmpty(t, querySet, "Traffic queryset should not be empty") } -func TestSaveTrafficPerHour(t *testing.T) { +func TestSaveHourlyTraffic(t *testing.T) { check := setUp() ctx := context.Background() data := []base.CheckResult{ @@ -75,8 +75,8 @@ func TestSaveTrafficPerHour(t *testing.T) { }, } - err := check.saveTrafficPerHour(data, ctx) - assert.NoError(t, err, "Failed to save hourly traffic.") + err := check.saveHourlyTraffic(data, ctx) + assert.NoError(t, err, "Failed to save hourly traffic.") } func TestDeleteTraffic(t *testing.T) { diff --git 
a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 030e74b..10c7a24 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -57,11 +57,11 @@ func (c *Check) queryTraffic(ctx context.Context) (base.MetricData, error) { }) } metric := base.MetricData{ - Type: base.NET_PER_HOUR, + Type: base.HOURLY_NET, Data: data, } - err = c.saveTrafficPerHour(data, ctx) + err = c.saveHourlyTraffic(data, ctx) if err != nil { return base.MetricData{}, err } @@ -100,14 +100,14 @@ func (c *Check) getTraffic(ctx context.Context) ([]base.TrafficQuerySet, error) return queryset, nil } -func (c *Check) saveTrafficPerHour(data []base.CheckResult, ctx context.Context) error { +func (c *Check) saveHourlyTraffic(data []base.CheckResult, ctx context.Context) error { tx, err := c.GetClient().Tx(ctx) if err != nil { return err } defer func() { _ = tx.Rollback() }() - err = tx.TrafficPerHour.MapCreateBulk(data, func(q *ent.TrafficPerHourCreate, i int) { + err = tx.HourlyTraffic.MapCreateBulk(data, func(q *ent.HourlyTrafficCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetName(data[i].Name). SetPeakInputPps(data[i].PeakInputPps). 
diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 66a2e09..2fee31f 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -6,16 +6,16 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" cleanup "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cleanup" - cpudaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cpu" - diskiodaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/io" - diskusagedaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/usage" - memorydaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/memory" - netdaily "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/net" - cpuhourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/cpu" - diskiohourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/io" - diskusagehourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/usage" - memoryhourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/memory" - nethourly "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/net" + dailycpu "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cpu" + dailydiskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/io" + dailydiskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/usage" + dailymemory "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/memory" + dailynet "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/net" + hourlycpu "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/cpu" + hourlydiskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/io" + hourlydiskusage 
"github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/usage" + hourlymemory "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/memory" + hourlynet "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/net" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/cpu" diskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/io" diskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/usage" @@ -24,24 +24,24 @@ import ( ) var checkFactories = map[base.CheckType]newCheck{ - base.CPU: cpu.NewCheck, - base.CPU_PER_HOUR: cpuhourly.NewCheck, - base.CPU_PER_DAY: cpudaily.NewCheck, - base.MEM: memory.NewCheck, - base.MEM_PER_HOUR: memoryhourly.NewCheck, - base.MEM_PER_DAY: memorydaily.NewCheck, - base.DISK_USAGE: diskusage.NewCheck, - base.DISK_USAGE_PER_HOUR: diskusagehourly.NewCheck, - base.DISK_USAGE_PER_DAY: diskusagedaily.NewCheck, - base.DISK_IO: diskio.NewCheck, - base.DISK_IO_COLLECTOR: diskio.NewCheck, - base.DISK_IO_PER_HOUR: diskiohourly.NewCheck, - base.DISK_IO_PER_DAY: diskiodaily.NewCheck, - base.NET: net.NewCheck, - base.NET_COLLECTOR: net.NewCheck, - base.NET_PER_HOUR: nethourly.NewCheck, - base.NET_PER_DAY: netdaily.NewCheck, - base.CLEANUP: cleanup.NewCheck, + base.CPU: cpu.NewCheck, + base.HOURLY_CPU_USAGE: hourlycpu.NewCheck, + base.DAILY_CPU_USAGE: dailycpu.NewCheck, + base.MEM: memory.NewCheck, + base.HOURLY_MEM_USAGE: hourlymemory.NewCheck, + base.DAILY_MEM_USAGE: dailymemory.NewCheck, + base.DISK_USAGE: diskusage.NewCheck, + base.HOURLY_DISK_USAGE: hourlydiskusage.NewCheck, + base.DAILY_DISK_USAGE: dailydiskusage.NewCheck, + base.DISK_IO: diskio.NewCheck, + base.DISK_IO_COLLECTOR: diskio.NewCheck, + base.HOURLY_DISK_IO: hourlydiskio.NewCheck, + base.DAILY_DISK_IO: dailydiskio.NewCheck, + base.NET: net.NewCheck, + base.NET_COLLECTOR: net.NewCheck, + base.HOURLY_NET: hourlynet.NewCheck, + base.DAILY_NET: dailynet.NewCheck, + base.CLEANUP: 
cleanup.NewCheck, } type Check interface { diff --git a/pkg/collector/transporter/utils.go b/pkg/collector/transporter/utils.go index aabdd96..14c2df0 100644 --- a/pkg/collector/transporter/utils.go +++ b/pkg/collector/transporter/utils.go @@ -7,21 +7,21 @@ import ( ) const ( - CPU string = "/api/metrics/realtime/cpu/" - CPU_PER_HOUR string = "/api/metrics/hourly/cpu/" - CPU_PER_DAY string = "/api/metrics/daily/cpu/" - MEM string = "/api/metrics/realtime/memory/" - MEM_PER_HOUR string = "/api/metrics/hourly/memory/" - MEM_PER_DAY string = "/api/metrics/daily/memory/" - DISK_USAGE string = "/api/metrics/realtime/disk-usage/" - DISK_USAGE_PER_HOUR string = "/api/metrics/hourly/disk-usage/" - DISK_USAGE_PER_DAY string = "/api/metrics/daily/disk-usage/" - DISK_IO string = "/api/metrics/realtime/disk-io/" - DISK_IO_PER_HOUR string = "/api/metrics/hourly/disk-io/" - DISK_IO_PER_DAY string = "/api/metrics/daily/disk-io/" - NET string = "/api/metrics/realtime/traffic/" - NET_PER_HOUR string = "/api/metrics/hourly/traffic/" - NET_PER_DAY string = "/api/metrics/daily/traffic/" + CPU string = "/api/metrics/realtime/cpu/" + HOURLY_CPU_USAGE string = "/api/metrics/hourly/cpu/" + DAILY_CPU_USAGE string = "/api/metrics/daily/cpu/" + MEM string = "/api/metrics/realtime/memory/" + HOURLY_MEM_USAGE string = "/api/metrics/hourly/memory/" + DAILY_MEM_USAGE string = "/api/metrics/daily/memory/" + DISK_USAGE string = "/api/metrics/realtime/disk-usage/" + HOURLY_DISK_USAGE string = "/api/metrics/hourly/disk-usage/" + DAILY_DISK_USAGE string = "/api/metrics/daily/disk-usage/" + DISK_IO string = "/api/metrics/realtime/disk-io/" + HOURLY_DISK_IO string = "/api/metrics/hourly/disk-io/" + DAILY_DISK_IO string = "/api/metrics/daily/disk-io/" + NET string = "/api/metrics/realtime/traffic/" + HOURLY_NET string = "/api/metrics/hourly/traffic/" + DAILY_NET string = "/api/metrics/daily/traffic/" ) type URLResolver struct { @@ -31,21 +31,21 @@ type URLResolver struct { func NewURLResolver() 
*URLResolver { return &URLResolver{ checkTypeToURL: map[base.CheckType]string{ - base.CPU: CPU, - base.CPU_PER_HOUR: CPU_PER_HOUR, - base.CPU_PER_DAY: CPU_PER_DAY, - base.MEM: MEM, - base.MEM_PER_HOUR: MEM_PER_HOUR, - base.MEM_PER_DAY: MEM_PER_DAY, - base.DISK_USAGE: DISK_USAGE, - base.DISK_USAGE_PER_HOUR: DISK_USAGE_PER_HOUR, - base.DISK_USAGE_PER_DAY: DISK_USAGE_PER_DAY, - base.DISK_IO: DISK_IO, - base.DISK_IO_PER_HOUR: DISK_IO_PER_HOUR, - base.DISK_IO_PER_DAY: DISK_IO_PER_DAY, - base.NET: NET, - base.NET_PER_HOUR: NET_PER_HOUR, - base.NET_PER_DAY: NET_PER_DAY, + base.CPU: CPU, + base.HOURLY_CPU_USAGE: HOURLY_CPU_USAGE, + base.DAILY_CPU_USAGE: DAILY_CPU_USAGE, + base.MEM: MEM, + base.HOURLY_MEM_USAGE: HOURLY_MEM_USAGE, + base.DAILY_MEM_USAGE: DAILY_MEM_USAGE, + base.DISK_USAGE: DISK_USAGE, + base.HOURLY_DISK_USAGE: HOURLY_DISK_USAGE, + base.DAILY_DISK_USAGE: DAILY_DISK_USAGE, + base.DISK_IO: DISK_IO, + base.HOURLY_DISK_IO: HOURLY_DISK_IO, + base.DAILY_DISK_IO: DAILY_DISK_IO, + base.NET: NET, + base.HOURLY_NET: HOURLY_NET, + base.DAILY_NET: DAILY_NET, }, } } diff --git a/pkg/db/migration/20250107063722_init_schemas.sql b/pkg/db/migration/20250107063722_init_schemas.sql deleted file mode 100644 index 12378cc..0000000 --- a/pkg/db/migration/20250107063722_init_schemas.sql +++ /dev/null @@ -1,40 +0,0 @@ --- Create "cp_us" table -CREATE TABLE `cp_us` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `usage` real NOT NULL); --- Create index "cpu_timestamp" to table: "cp_us" -CREATE INDEX `cpu_timestamp` ON `cp_us` (`timestamp`); --- Create "cpu_per_hours" table -CREATE TABLE `cpu_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); --- Create index "cpuperhour_timestamp" to table: "cpu_per_hours" -CREATE INDEX `cpuperhour_timestamp` ON `cpu_per_hours` (`timestamp`); --- Create "disk_ios" table -CREATE TABLE `disk_ios` (`id` integer NOT NULL PRIMARY KEY 
AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `read_bps` real NOT NULL, `write_bps` real NOT NULL); --- Create index "diskio_timestamp" to table: "disk_ios" -CREATE INDEX `diskio_timestamp` ON `disk_ios` (`timestamp`); --- Create "disk_io_per_hours" table -CREATE TABLE `disk_io_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_read_bps` real NOT NULL, `peak_write_bps` real NOT NULL, `avg_read_bps` real NOT NULL, `avg_write_bps` real NOT NULL); --- Create index "diskioperhour_timestamp" to table: "disk_io_per_hours" -CREATE INDEX `diskioperhour_timestamp` ON `disk_io_per_hours` (`timestamp`); --- Create "disk_usages" table -CREATE TABLE `disk_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `mount_point` text NOT NULL, `usage` real NOT NULL, `total` integer NOT NULL, `free` integer NOT NULL, `used` integer NOT NULL); --- Create index "diskusage_timestamp" to table: "disk_usages" -CREATE INDEX `diskusage_timestamp` ON `disk_usages` (`timestamp`); --- Create "disk_usage_per_hours" table -CREATE TABLE `disk_usage_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); --- Create index "diskusageperhour_timestamp" to table: "disk_usage_per_hours" -CREATE INDEX `diskusageperhour_timestamp` ON `disk_usage_per_hours` (`timestamp`); --- Create "memories" table -CREATE TABLE `memories` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `usage` real NOT NULL); --- Create index "memory_timestamp" to table: "memories" -CREATE INDEX `memory_timestamp` ON `memories` (`timestamp`); --- Create "memory_per_hours" table -CREATE TABLE `memory_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); --- Create index 
"memoryperhour_timestamp" to table: "memory_per_hours" -CREATE INDEX `memoryperhour_timestamp` ON `memory_per_hours` (`timestamp`); --- Create "traffics" table -CREATE TABLE `traffics` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `name` text NOT NULL, `input_pps` real NOT NULL, `input_bps` real NOT NULL, `output_pps` real NOT NULL, `output_bps` real NOT NULL); --- Create index "traffic_timestamp" to table: "traffics" -CREATE INDEX `traffic_timestamp` ON `traffics` (`timestamp`); --- Create "traffic_per_hours" table -CREATE TABLE `traffic_per_hours` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `name` text NOT NULL, `peak_input_pps` real NOT NULL, `peak_input_bps` real NOT NULL, `peak_output_pps` real NOT NULL, `peak_output_bps` real NOT NULL, `avg_input_pps` real NOT NULL, `avg_input_bps` real NOT NULL, `avg_output_pps` real NOT NULL, `avg_output_bps` real NOT NULL); --- Create index "trafficperhour_timestamp" to table: "traffic_per_hours" -CREATE INDEX `trafficperhour_timestamp` ON `traffic_per_hours` (`timestamp`); diff --git a/pkg/db/migration/20250116061438_init_schemas.sql b/pkg/db/migration/20250116061438_init_schemas.sql new file mode 100644 index 0000000..9a3ac94 --- /dev/null +++ b/pkg/db/migration/20250116061438_init_schemas.sql @@ -0,0 +1,40 @@ +-- Create "cp_us" table +CREATE TABLE `cp_us` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `usage` real NOT NULL); +-- Create index "cpu_timestamp" to table: "cp_us" +CREATE INDEX `cpu_timestamp` ON `cp_us` (`timestamp`); +-- Create "disk_ios" table +CREATE TABLE `disk_ios` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `read_bps` real NOT NULL, `write_bps` real NOT NULL); +-- Create index "diskio_timestamp" to table: "disk_ios" +CREATE INDEX `diskio_timestamp` ON `disk_ios` (`timestamp`); +-- Create "disk_usages" table +CREATE TABLE `disk_usages` 
(`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `mount_point` text NOT NULL, `usage` real NOT NULL, `total` integer NOT NULL, `free` integer NOT NULL, `used` integer NOT NULL); +-- Create index "diskusage_timestamp" to table: "disk_usages" +CREATE INDEX `diskusage_timestamp` ON `disk_usages` (`timestamp`); +-- Create "hourly_cpu_usages" table +CREATE TABLE `hourly_cpu_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); +-- Create index "hourlycpuusage_timestamp" to table: "hourly_cpu_usages" +CREATE INDEX `hourlycpuusage_timestamp` ON `hourly_cpu_usages` (`timestamp`); +-- Create "hourly_disk_ios" table +CREATE TABLE `hourly_disk_ios` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak_read_bps` real NOT NULL, `peak_write_bps` real NOT NULL, `avg_read_bps` real NOT NULL, `avg_write_bps` real NOT NULL); +-- Create index "hourlydiskio_timestamp" to table: "hourly_disk_ios" +CREATE INDEX `hourlydiskio_timestamp` ON `hourly_disk_ios` (`timestamp`); +-- Create "hourly_disk_usages" table +CREATE TABLE `hourly_disk_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); +-- Create index "hourlydiskusage_timestamp" to table: "hourly_disk_usages" +CREATE INDEX `hourlydiskusage_timestamp` ON `hourly_disk_usages` (`timestamp`); +-- Create "hourly_memory_usages" table +CREATE TABLE `hourly_memory_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL); +-- Create index "hourlymemoryusage_timestamp" to table: "hourly_memory_usages" +CREATE INDEX `hourlymemoryusage_timestamp` ON `hourly_memory_usages` (`timestamp`); +-- Create "hourly_traffics" table +CREATE TABLE `hourly_traffics` (`id` integer NOT NULL PRIMARY 
KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `name` text NOT NULL, `peak_input_pps` real NOT NULL, `peak_input_bps` real NOT NULL, `peak_output_pps` real NOT NULL, `peak_output_bps` real NOT NULL, `avg_input_pps` real NOT NULL, `avg_input_bps` real NOT NULL, `avg_output_pps` real NOT NULL, `avg_output_bps` real NOT NULL); +-- Create index "hourlytraffic_timestamp" to table: "hourly_traffics" +CREATE INDEX `hourlytraffic_timestamp` ON `hourly_traffics` (`timestamp`); +-- Create "memories" table +CREATE TABLE `memories` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `usage` real NOT NULL); +-- Create index "memory_timestamp" to table: "memories" +CREATE INDEX `memory_timestamp` ON `memories` (`timestamp`); +-- Create "traffics" table +CREATE TABLE `traffics` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `name` text NOT NULL, `input_pps` real NOT NULL, `input_bps` real NOT NULL, `output_pps` real NOT NULL, `output_bps` real NOT NULL); +-- Create index "traffic_timestamp" to table: "traffics" +CREATE INDEX `traffic_timestamp` ON `traffics` (`timestamp`); diff --git a/pkg/db/migration/atlas.sum b/pkg/db/migration/atlas.sum index 193486d..7e755fd 100644 --- a/pkg/db/migration/atlas.sum +++ b/pkg/db/migration/atlas.sum @@ -1,2 +1,2 @@ -h1:k69B7UlJuvRJOvyjX4gVPNwJvuSsyldCoHWvTFEo8yw= -20250107063722_init_schemas.sql h1:LD/GOMLp+gzyK5owvHj++opu9v3nHF/6rTr/aRS0A1k= +h1:LXufv4SaVfmeeKLJ1RW1XrGphNbplJ1fd7m8GOUFhhk= +20250116061438_init_schemas.sql h1:/JHZWxaROODWtCQJJ9qOVEsCWR2xt3dnOH+0KrRZInw= diff --git a/pkg/db/schema/memory_per_hour.go b/pkg/db/schema/hourly_cpu_usage.go similarity index 56% rename from pkg/db/schema/memory_per_hour.go rename to pkg/db/schema/hourly_cpu_usage.go index db5fa4e..a074235 100644 --- a/pkg/db/schema/memory_per_hour.go +++ b/pkg/db/schema/hourly_cpu_usage.go @@ -8,13 +8,13 @@ import ( "entgo.io/ent/schema/index" ) -// MemoryPerHour holds the schema definition for the 
MemoryPerHour entity. -type MemoryPerHour struct { +// HourlyCPUUsage holds the schema definition for the HourlyCPUUsage entity. +type HourlyCPUUsage struct { ent.Schema } -// Fields of the MemoryPerHour. -func (MemoryPerHour) Fields() []ent.Field { +// Fields of the HourlyCPUUsage. +func (HourlyCPUUsage) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.Float("peak"), @@ -22,7 +22,7 @@ func (MemoryPerHour) Fields() []ent.Field { } } -func (MemoryPerHour) Indexes() []ent.Index { +func (HourlyCPUUsage) Indexes() []ent.Index { return []ent.Index{ index.Fields("timestamp"), } diff --git a/pkg/db/schema/diskio_per_hour.go b/pkg/db/schema/hourly_disk_io.go similarity index 64% rename from pkg/db/schema/diskio_per_hour.go rename to pkg/db/schema/hourly_disk_io.go index a78934f..6cad48f 100644 --- a/pkg/db/schema/diskio_per_hour.go +++ b/pkg/db/schema/hourly_disk_io.go @@ -8,13 +8,13 @@ import ( "entgo.io/ent/schema/index" ) -// DiskIOPerHour holds the schema definition for the DiskIOPerHour entity. -type DiskIOPerHour struct { +// HourlyDiskIO holds the schema definition for the HourlyDiskIO entity. +type HourlyDiskIO struct { ent.Schema } -// Fields of the DiskIOPerHour. -func (DiskIOPerHour) Fields() []ent.Field { +// Fields of the HourlyDiskIO. 
+func (HourlyDiskIO) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), @@ -25,7 +25,7 @@ func (DiskIOPerHour) Fields() []ent.Field { } } -func (DiskIOPerHour) Indexes() []ent.Index { +func (HourlyDiskIO) Indexes() []ent.Index { return []ent.Index{ index.Fields("timestamp"), } diff --git a/pkg/db/schema/diskusage_per_hour.go b/pkg/db/schema/hourly_disk_usage.go similarity index 57% rename from pkg/db/schema/diskusage_per_hour.go rename to pkg/db/schema/hourly_disk_usage.go index 8a894d9..6d7170b 100644 --- a/pkg/db/schema/diskusage_per_hour.go +++ b/pkg/db/schema/hourly_disk_usage.go @@ -8,13 +8,13 @@ import ( "entgo.io/ent/schema/index" ) -// DiskUsagePerHour holds the schema definition for the DiskUsagePerHour entity. -type DiskUsagePerHour struct { +// HourlyDiskUsage holds the schema definition for the HourlyDiskUsage entity. +type HourlyDiskUsage struct { ent.Schema } -// Fields of the DiskUsagePerHour. -func (DiskUsagePerHour) Fields() []ent.Field { +// Fields of the HourlyDiskUsage. +func (HourlyDiskUsage) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), @@ -23,7 +23,7 @@ func (DiskUsagePerHour) Fields() []ent.Field { } } -func (DiskUsagePerHour) Indexes() []ent.Index { +func (HourlyDiskUsage) Indexes() []ent.Index { return []ent.Index{ index.Fields("timestamp"), } diff --git a/pkg/db/schema/cpu_per_hour.go b/pkg/db/schema/hourly_memory_usage.go similarity index 54% rename from pkg/db/schema/cpu_per_hour.go rename to pkg/db/schema/hourly_memory_usage.go index b872f46..c387086 100644 --- a/pkg/db/schema/cpu_per_hour.go +++ b/pkg/db/schema/hourly_memory_usage.go @@ -8,13 +8,13 @@ import ( "entgo.io/ent/schema/index" ) -// CPUPerHour holds the schema definition for the CPUPerHour entity. -type CPUPerHour struct { +// HourlyMemoryUsage holds the schema definition for the HourlyMemoryUsage entity. 
+type HourlyMemoryUsage struct { ent.Schema } -// Fields of the CPUPerHour. -func (CPUPerHour) Fields() []ent.Field { +// Fields of the HourlyMemoryUsage. +func (HourlyMemoryUsage) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.Float("peak"), @@ -22,7 +22,7 @@ func (CPUPerHour) Fields() []ent.Field { } } -func (CPUPerHour) Indexes() []ent.Index { +func (HourlyMemoryUsage) Indexes() []ent.Index { return []ent.Index{ index.Fields("timestamp"), } diff --git a/pkg/db/schema/traffic_per_hour.go b/pkg/db/schema/hourly_traffic.go similarity index 70% rename from pkg/db/schema/traffic_per_hour.go rename to pkg/db/schema/hourly_traffic.go index d0700d6..8248014 100644 --- a/pkg/db/schema/traffic_per_hour.go +++ b/pkg/db/schema/hourly_traffic.go @@ -8,13 +8,13 @@ import ( "entgo.io/ent/schema/index" ) -// TrafficPerHour holds the schema definition for the TrafficPerHour entity. -type TrafficPerHour struct { +// HourlyTraffic holds the schema definition for the HourlyTraffic entity. +type HourlyTraffic struct { ent.Schema } -// Fields of the TrafficPerHour. -func (TrafficPerHour) Fields() []ent.Field { +// Fields of the HourlyTraffic. +func (HourlyTraffic) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("name"), @@ -29,7 +29,7 @@ func (TrafficPerHour) Fields() []ent.Field { } } -func (TrafficPerHour) Indexes() []ent.Index { +func (HourlyTraffic) Indexes() []ent.Index { return []ent.Index{ index.Fields("timestamp"), } From 211ad1dc760b2d0e7b503a2d5fd6368c1047d34f Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 16 Jan 2025 15:18:29 +0900 Subject: [PATCH 162/364] Add step for install atlas cli Add a step to install Atlas CLI for running check-related tests. 
--- .github/workflows/build-and-test.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 28aa791..ae4a3cf 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -30,6 +30,9 @@ jobs: - name: Generate go code run: go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema working-directory: ./ + + - name: Install Atlas CLI + run: curl -sSf https://atlasgo.sh | sh - name: Build run: go build -v . From 9592caedd93fffe93fe7ab14630b5ef7fb456eaa Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 16 Jan 2025 15:22:07 +0900 Subject: [PATCH 163/364] Minor fix Fix typo. --- pkg/collector/check/base/types.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 9bf8e82..3268fb2 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -13,15 +13,15 @@ const ( MEM CheckType = "memory" HOURLY_MEM_USAGE CheckType = "hourly-memory-usage" DAILY_MEM_USAGE CheckType = "daily-memory-usage" - DISK_USAGE CheckType = "disk_usage" + DISK_USAGE CheckType = "disk-usage" HOURLY_DISK_USAGE CheckType = "hourly-disk-usage" DAILY_DISK_USAGE CheckType = "daily-disk-usage" - DISK_IO CheckType = "disk_io" - DISK_IO_COLLECTOR CheckType = "disk_io_collector" + DISK_IO CheckType = "disk-io" + DISK_IO_COLLECTOR CheckType = "disk-io-collector" HOURLY_DISK_IO CheckType = "hourly-disk-io" DAILY_DISK_IO CheckType = "daily-disk-io" NET CheckType = "net" - NET_COLLECTOR CheckType = "net_collector" + NET_COLLECTOR CheckType = "net-collector" HOURLY_NET CheckType = "hourly-net" DAILY_NET CheckType = "daily-net" CLEANUP CheckType = "cleanup" From 58f1c8dd174ea33552bef063d805906037919ec0 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 16 Jan 2025 15:40:39 +0900 Subject: [PATCH 164/364] Add devfs to 
excludedFileSystems Add devfs to excludedFileSystems as it's a virtual filesystem and doesn't require separate collection. --- pkg/collector/check/realtime/disk/usage/usage.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index 9f54c22..5e7fc76 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -17,6 +17,7 @@ var excludedFileSystems = map[string]bool{ "cgroup": true, "overlay": true, "autofs": true, + "devfs": true, } type Check struct { From d6464f70ab408683b4843290d793f365caa8526d Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 16 Jan 2025 17:00:06 +0900 Subject: [PATCH 165/364] Add script to delete db file Add script to delete db file. --- scripts/postremove.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/postremove.sh b/scripts/postremove.sh index f878a85..9d2e350 100644 --- a/scripts/postremove.sh +++ b/scripts/postremove.sh @@ -4,12 +4,14 @@ CONF_FILE_PATH="/etc/alpamon/alpamon.conf" TMP_FILE_PATH="/usr/lib/tmpfiles.d/alpamon.conf" SVC_FILE_PATH="/lib/systemd/system/alpamon.service" LOG_FILE_PATH="/var/log/alpamon/alpamon.log" +DB_FILE_PATH="/var/lib/alpamon/alpamon.db" if [ "$1" = 'purge' ]; then rm -f "$CONF_FILE_PATH" || true rm -f "$TMP_FILE_PATH" || true rm -f "$SVC_FILE_PATH" || true rm -f "$LOG_FILE_PATH" || true + rm -f "$DB_FILE_PATH" || true echo "All related configuration, service, and log files have been deleted." fi \ No newline at end of file From d2e76f7f83486d318cc2da0dce80da89fedc992e Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 16 Jan 2025 19:29:38 +0900 Subject: [PATCH 166/364] Add InitTestDB() Add InitTestDB() to fix errors where SQLite driver was being registered twice in Build and Test pipeline. 
--- .../check/batch/daily/cpu/daily_cpu_test.go | 2 +- .../batch/daily/disk/io/daily_io_test.go | 2 +- .../daily/disk/usage/daily_usage_test.go | 2 +- .../batch/daily/memory/daily_memory_test.go | 2 +- .../check/batch/daily/net/daily_net_test.go | 2 +- .../check/batch/hourly/cpu/hourly_cpu_test.go | 2 +- .../batch/hourly/disk/io/hourly_io_test.go | 2 +- .../hourly/disk/usage/hourly_usage_test.go | 2 +- .../batch/hourly/memory/hourly_memory_test.go | 2 +- .../check/batch/hourly/net/hourly_net_test.go | 2 +- pkg/collector/check/realtime/cpu/cpu_test.go | 2 +- .../check/realtime/disk/io/io_test.go | 2 +- .../check/realtime/disk/usage/usage_test.go | 2 +- .../check/realtime/memory/memory_test.go | 2 +- pkg/collector/check/realtime/net/net_test.go | 2 +- pkg/db/db.go | 33 +++++++++++++++++++ 16 files changed, 48 insertions(+), 15 deletions(-) diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index cce5e05..28144c9 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -18,7 +18,7 @@ func setUp() *Check { Name: string(base.DAILY_CPU_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index 36c2edc..04fa988 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -19,7 +19,7 @@ func setUp() *Check { Name: string(base.DAILY_DISK_IO) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go 
b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index e9b8de4..62d2af0 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -18,7 +18,7 @@ func setUp() *Check { Name: string(base.DAILY_DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index 9165cf1..b9dc70a 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -18,7 +18,7 @@ func setUp() *Check { Name: string(base.DAILY_MEM_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index 2855a9a..e9a3b95 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -19,7 +19,7 @@ func setUp() *Check { Name: string(base.DAILY_NET) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index 7a91445..017a1c4 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -19,7 +19,7 @@ func setUp() *Check { Name: string(base.HOURLY_CPU_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + 
Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index 583e9f1..66222ad 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -19,7 +19,7 @@ func setUp() *Check { Name: string(base.HOURLY_DISK_IO) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index f8b3ff3..5af1d54 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -19,7 +19,7 @@ func setUp() *Check { Name: string(base.HOURLY_DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index 2a479b6..108c31d 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -19,7 +19,7 @@ func setUp() *Check { Name: string(base.HOURLY_MEM_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index d21eccb..e754e00 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -19,7 
+19,7 @@ func setUp() *Check { Name: string(base.HOURLY_NET) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go index fac6e60..2be1df1 100644 --- a/pkg/collector/check/realtime/cpu/cpu_test.go +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -18,7 +18,7 @@ func setUp() *Check { Name: string(base.CPU) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index f168823..5f3954e 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -19,7 +19,7 @@ func setUp(checkType base.CheckType) base.CheckStrategy { Name: string(checkType) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args) diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go index 930882e..a9e0c8d 100644 --- a/pkg/collector/check/realtime/disk/usage/usage_test.go +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -18,7 +18,7 @@ func setUp() *Check { Name: string(base.DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go index a89176f..fc88b9f 100644 --- a/pkg/collector/check/realtime/memory/memory_test.go +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -18,7 +18,7 @@ 
func setUp() *Check { Name: string(base.MEM) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args).(*Check) diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index 39f0991..92e0863 100644 --- a/pkg/collector/check/realtime/net/net_test.go +++ b/pkg/collector/check/realtime/net/net_test.go @@ -19,7 +19,7 @@ func setUp(checkType base.CheckType) base.CheckStrategy { Name: string(checkType) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitDB(), + Client: db.InitTestDB(), } check := NewCheck(args) diff --git a/pkg/db/db.go b/pkg/db/db.go index 67c0852..59506f5 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "path/filepath" + "sync" "time" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" @@ -51,3 +52,35 @@ func InitDB() *ent.Client { return client } + +func InitTestDB() *ent.Client { + fileName, _ := filepath.Abs(dbFileName) + dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0750) + if err != nil { + log.Error().Err(err).Msgf("failed to open test db file: %v", err) + _, _ = fmt.Fprintf(os.Stderr, "Failed to open test db file: %v\n", err) + os.Exit(1) + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + var once sync.Once + once.Do(func() { + sql.Register("sqlite3", &sqlite.Driver{}) + err = RunMigration(dbFile.Name(), ctx) + if err != nil { + log.Error().Err(err).Msgf("failed to migrate test db: %v\n", err) + os.Exit(1) + } + }) + + dbManager := NewDBClientManager(dbFile.Name()) + client, err := dbManager.GetClient() + if err != nil { + log.Error().Err(err).Msgf("failed to get db client: %v\n", err) + os.Exit(1) + } + + return client +} From c37a819245f8c4e3c3321a124922138fbef39e7a Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 17 Jan 2025 11:56:40 
+0900 Subject: [PATCH 167/364] Test commit Commit for testing Suite --- pkg/collector/check/realtime/net/net_test.go | 57 ++++++++++++-------- 1 file changed, 36 insertions(+), 21 deletions(-) diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index 92e0863..d85a36b 100644 --- a/pkg/collector/check/realtime/net/net_test.go +++ b/pkg/collector/check/realtime/net/net_test.go @@ -8,18 +8,29 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp(checkType base.CheckType) base.CheckStrategy { +type NetCheckSuite struct { + suite.Suite + client *ent.Client +} + +func (suite *NetCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() +} + +func setUp(checkType base.CheckType, client *ent.Client) base.CheckStrategy { buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: checkType, Name: string(checkType) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: client, } check := NewCheck(args) @@ -27,39 +38,39 @@ func setUp(checkType base.CheckType) base.CheckStrategy { return check } -func TestCollectIOCounters(t *testing.T) { - check := setUp(base.NET_COLLECTOR).(*CollectCheck) +func (suite *NetCheckSuite) TestCollectIOCounters() { + check := setUp(base.NET_COLLECTOR, suite.client).(*CollectCheck) ioCounters, err := check.collectIOCounters() - assert.NoError(t, err, "Failed to get network IO.") - assert.NotEmpty(t, ioCounters, "Network IO should not be empty") + assert.NoError(suite.T(), err, "Failed to get network IO.") + assert.NotEmpty(suite.T(), ioCounters, "Network IO should not be empty") } -func TestCollectInterfaces(t *testing.T) { - check := setUp(base.NET_COLLECTOR).(*CollectCheck) +func (suite 
*NetCheckSuite) TestCollectInterfaces() { + check := setUp(base.NET_COLLECTOR, suite.client).(*CollectCheck) interfaces, err := check.collectInterfaces() - assert.NoError(t, err, "Failed to get interfaces.") - assert.NotEmpty(t, interfaces, "Interfaces should not be empty") + assert.NoError(suite.T(), err, "Failed to get interfaces.") + assert.NotEmpty(suite.T(), interfaces, "Interfaces should not be empty") } -func TestSaveTraffic(t *testing.T) { - check := setUp(base.NET_COLLECTOR).(*CollectCheck) +func (suite *NetCheckSuite) TestSaveTraffic() { + check := setUp(base.NET_COLLECTOR, suite.client).(*CollectCheck) ctx := context.Background() ioCounters, interfaces, err := check.collectTraffic() - assert.NoError(t, err, "Failed to get traffic.") - assert.NotEmpty(t, ioCounters, "Network IO should not be empty") - assert.NotEmpty(t, interfaces, "Interfaces should not be empty") + assert.NoError(suite.T(), err, "Failed to get traffic.") + assert.NotEmpty(suite.T(), ioCounters, "Network IO should not be empty") + assert.NotEmpty(suite.T(), interfaces, "Interfaces should not be empty") data := check.parseTraffic(ioCounters, interfaces) err = check.saveTraffic(data, ctx) - assert.NoError(t, err, "Failed to save traffic.") + assert.NoError(suite.T(), err, "Failed to save traffic.") } -func TestGetTraffic(t *testing.T) { - check := setUp(base.NET).(*SendCheck) +func (suite *NetCheckSuite) TestGetTraffic() { + check := setUp(base.NET, suite.client).(*SendCheck) ctx := context.Background() err := check.GetClient().Traffic.Create(). @@ -69,9 +80,13 @@ func TestGetTraffic(t *testing.T) { SetInputBps(rand.Float64()). SetOutputPps(rand.Float64()). 
SetOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create traffic.") + assert.NoError(suite.T(), err, "Failed to create traffic.") querySet, err := check.getTraffic(ctx) - assert.NoError(t, err, "Failed to get traffic queryset.") - assert.NotEmpty(t, querySet, "Traffic queryset should not be empty") + assert.NoError(suite.T(), err, "Failed to get traffic queryset.") + assert.NotEmpty(suite.T(), querySet, "Traffic queryset should not be empty") +} + +func TestNetCheckSuite(t *testing.T) { + suite.Run(t, new(NetCheckSuite)) } From bbc4f7177523545d43b4dc44e7f4d318ccbe51d0 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 17 Jan 2025 16:17:24 +0900 Subject: [PATCH 168/364] Refactor collector related test casas To prevent redundant database migrations and SQL driver registrations, refactore collector related test cases using github.com/stretchr/testify/suite to ensure these operations occur only once per test case. --- .../check/batch/daily/cpu/daily_cpu_test.go | 60 +++++++----- .../batch/daily/disk/io/daily_io_test.go | 60 +++++++----- .../daily/disk/usage/daily_usage_test.go | 60 +++++++----- .../batch/daily/memory/daily_memory_test.go | 60 +++++++----- .../check/batch/daily/net/daily_net_test.go | 60 +++++++----- .../check/batch/hourly/cpu/hourly_cpu_test.go | 68 ++++++++------ .../batch/hourly/disk/io/hourly_io_test.go | 68 ++++++++------ .../hourly/disk/usage/hourly_usage_test.go | 68 ++++++++------ .../batch/hourly/memory/hourly_memory_test.go | 68 ++++++++------ .../check/batch/hourly/net/hourly_net_test.go | 68 ++++++++------ pkg/collector/check/realtime/cpu/cpu_test.go | 48 ++++++---- .../check/realtime/disk/io/io_test.go | 91 +++++++++++-------- .../check/realtime/disk/usage/usage_test.go | 67 ++++++++------ .../check/realtime/memory/memory_test.go | 48 ++++++---- pkg/collector/check/realtime/net/net_test.go | 60 ++++++------ 15 files changed, 562 insertions(+), 392 deletions(-) diff --git 
a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index 28144c9..29136f4 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -2,55 +2,67 @@ package cpu import ( "context" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type DailyCPUUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *DailyCPUUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_CPU_USAGE, Name: string(base.DAILY_CPU_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetHourlyCPUUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *DailyCPUUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - err := check.GetClient().HourlyCPUUsage.Create(). +func (suite *DailyCPUUsageCheckSuite) TestGetHourlyCPUUsage() { + err := suite.check.GetClient().HourlyCPUUsage.Create(). SetTimestamp(time.Now()). SetPeak(50.0). 
- SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly cpu usage.") + SetAvg(50.0).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly cpu usage.") - querySet, err := check.getHourlyCPUUsage(ctx) - assert.NoError(t, err, "Failed to get hourly cpu usage.") - assert.NotEmpty(t, querySet, "HourlyCPUUsage queryset should not be empty") + querySet, err := suite.check.getHourlyCPUUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get hourly cpu usage.") + assert.NotEmpty(suite.T(), querySet, "HourlyCPUUsage queryset should not be empty") } -func TestDeleteHourlyCPUUsage(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().HourlyCPUUsage.Create(). +func (suite *DailyCPUUsageCheckSuite) TestDeleteHourlyCPUUsage() { + err := suite.check.GetClient().HourlyCPUUsage.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetPeak(50.0). - SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly cpu usage.") + SetAvg(50.0).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly cpu usage.") + + err = suite.check.deleteHourlyCPUUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete hourly cpu usage.") +} - err = check.deleteHourlyCPUUsage(ctx) - assert.NoError(t, err, "Failed to delete hourly cpu usage.") +func TestDailyCPUUsageCheckSuite(t *testing.T) { + suite.Run(t, new(DailyCPUUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index 04fa988..05d2888 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -3,61 +3,73 @@ package io import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" 
"github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type DailyDiskIOCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *DailyDiskIOCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_DISK_IO, Name: string(base.DAILY_DISK_IO) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetHourlyDiskIO(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *DailyDiskIOCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - err := check.GetClient().HourlyDiskIO.Create(). +func (suite *DailyDiskIOCheckSuite) TestGetHourlyDiskIO() { + err := suite.check.GetClient().HourlyDiskIO.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetPeakReadBps(rand.Float64()). SetPeakWriteBps(rand.Float64()). SetAvgReadBps(rand.Float64()). 
- SetAvgWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly disk io.") + SetAvgWriteBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly disk io.") - querySet, err := check.getHourlyDiskIO(ctx) - assert.NoError(t, err, "Failed to get hourly disk io.") - assert.NotEmpty(t, querySet, "HourlyDiskIO queryset should not be empty") + querySet, err := suite.check.getHourlyDiskIO(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get hourly disk io.") + assert.NotEmpty(suite.T(), querySet, "HourlyDiskIO queryset should not be empty") } -func TestDeleteHourlyDiskIO(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().HourlyDiskIO.Create(). +func (suite *DailyDiskIOCheckSuite) TestDeleteHourlyDiskIO() { + err := suite.check.GetClient().HourlyDiskIO.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetDevice(uuid.NewString()). SetPeakReadBps(rand.Float64()). SetPeakWriteBps(rand.Float64()). SetAvgReadBps(rand.Float64()). 
- SetAvgWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly disk io.") + SetAvgWriteBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly disk io.") + + err = suite.check.deleteHourlyDiskIO(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete hourly disk io.") +} - err = check.deleteHourlyDiskIO(ctx) - assert.NoError(t, err, "Failed to delete hourly disk io.") +func TestDailyDiskIOCheckSuite(t *testing.T) { + suite.Run(t, new(DailyDiskIOCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index 62d2af0..c560a14 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -2,57 +2,69 @@ package usage import ( "context" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type DailyDiskUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *DailyDiskUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_DISK_USAGE, Name: string(base.DAILY_DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetHourlyDiskUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *DailyDiskUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + 
suite.Require().NoError(err, "failed to delete test db file") +} - err := check.GetClient().HourlyDiskUsage.Create(). +func (suite *DailyDiskUsageCheckSuite) TestGetHourlyDiskUsage() { + err := suite.check.GetClient().HourlyDiskUsage.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetPeak(50.0). - SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly disk usage.") + SetAvg(50.0).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly disk usage.") - querySet, err := check.getHourlyDiskUsage(ctx) - assert.NoError(t, err, "Failed to get hourly disk usage.") - assert.NotEmpty(t, querySet, "HourlyDiskUsage queryset should not be empty") + querySet, err := suite.check.getHourlyDiskUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get hourly disk usage.") + assert.NotEmpty(suite.T(), querySet, "HourlyDiskUsage queryset should not be empty") } -func TestDeleteHourlyDiskUsage(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().HourlyDiskUsage.Create(). +func (suite *DailyDiskUsageCheckSuite) TestDeleteHourlyDiskUsage() { + err := suite.check.GetClient().HourlyDiskUsage.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetDevice(uuid.NewString()). SetPeak(50.0). 
- SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly disk usage.") + SetAvg(50.0).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly disk usage.") + + err = suite.check.deleteHourlyDiskUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete hourly disk usage.") +} - err = check.deleteHourlyDiskUsage(ctx) - assert.NoError(t, err, "Failed to delete hourly disk usage.") +func TestDailyDiskUsageCheckSuite(t *testing.T) { + suite.Run(t, new(DailyDiskUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index b9dc70a..d6478c6 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -2,55 +2,67 @@ package memory import ( "context" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type DailyMemoryUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *DailyMemoryUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_MEM_USAGE, Name: string(base.DAILY_MEM_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetHourlyMemoryUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *DailyMemoryUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + 
suite.Require().NoError(err, "failed to delete test db file") +} - err := check.GetClient().HourlyMemoryUsage.Create(). +func (suite *DailyMemoryUsageCheckSuite) TestGetHourlyMemoryUsage() { + err := suite.check.GetClient().HourlyMemoryUsage.Create(). SetTimestamp(time.Now()). SetPeak(50.0). - SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly memory usage.") + SetAvg(50.0).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly memory usage.") - querySet, err := check.getHourlyMemoryUsage(ctx) - assert.NoError(t, err, "Failed to get hourly memory usage.") - assert.NotEmpty(t, querySet, "HouryMemoryUsage queryset should not be empty") + querySet, err := suite.check.getHourlyMemoryUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get hourly memory usage.") + assert.NotEmpty(suite.T(), querySet, "HouryMemoryUsage queryset should not be empty") } -func TestDeleteHourlyMemoryUsage(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().HourlyMemoryUsage.Create(). +func (suite *DailyMemoryUsageCheckSuite) TestDeleteHourlyMemoryUsage() { + err := suite.check.GetClient().HourlyMemoryUsage.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetPeak(50.0). 
- SetAvg(50.0).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly memory usage.") + SetAvg(50.0).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly memory usage.") + + err = suite.check.deleteHourlyMemoryUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete hourly memory usage.") +} - err = check.deleteHourlyMemoryUsage(ctx) - assert.NoError(t, err, "Failed to delete hourly memory usage.") +func TestDailyMemoryUsageCheckSuite(t *testing.T) { + suite.Run(t, new(DailyMemoryUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index e9a3b95..fe7eb56 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -3,35 +3,46 @@ package net import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type DailyNetCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *DailyNetCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_NET, Name: string(base.DAILY_NET) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetHourlyTraffic(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *DailyNetCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") 
+} - err := check.GetClient().HourlyTraffic.Create(). +func (suite *DailyNetCheckSuite) TestGetHourlyTraffic() { + err := suite.check.GetClient().HourlyTraffic.Create(). SetTimestamp(time.Now()). SetName(uuid.NewString()). SetPeakInputPps(rand.Float64()). @@ -41,19 +52,16 @@ func TestGetHourlyTraffic(t *testing.T) { SetAvgInputPps(rand.Float64()). SetAvgInputBps(rand.Float64()). SetAvgOutputPps(rand.Float64()). - SetAvgOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly traffic.") + SetAvgOutputBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly traffic.") - querySet, err := check.getHourlyTraffic(ctx) - assert.NoError(t, err, "Failed to get hourly traffic.") - assert.NotEmpty(t, querySet, "HourlyTraffic queryset should not be empty") + querySet, err := suite.check.getHourlyTraffic(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get hourly traffic.") + assert.NotEmpty(suite.T(), querySet, "HourlyTraffic queryset should not be empty") } -func TestDeleteHourlyTraffic(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().HourlyTraffic.Create(). +func (suite *DailyNetCheckSuite) TestDeleteHourlyTraffic() { + err := suite.check.GetClient().HourlyTraffic.Create(). SetTimestamp(time.Now().Add(-25 * time.Hour)). SetName(uuid.NewString()). SetPeakInputPps(rand.Float64()). @@ -63,9 +71,13 @@ func TestDeleteHourlyTraffic(t *testing.T) { SetAvgInputPps(rand.Float64()). SetAvgInputBps(rand.Float64()). SetAvgOutputPps(rand.Float64()). 
- SetAvgOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create hourly traffic.") + SetAvgOutputBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create hourly traffic.") + + err = suite.check.deleteHourlyTraffic(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete hourly traffic.") +} - err = check.deleteHourlyTraffic(ctx) - assert.NoError(t, err, "Failed to delete hourly traffic.") +func TestDailyNetCheckSuite(t *testing.T) { + suite.Run(t, new(DailyNetCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index 017a1c4..022e07d 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -3,66 +3,76 @@ package cpu import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type HourlyCPUUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *HourlyCPUUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_CPU_USAGE, Name: string(base.HOURLY_CPU_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetCPU(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyCPUUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed 
to delete test db file") +} - err := check.GetClient().CPU.Create(). +func (suite *HourlyCPUUsageCheckSuite) TestGetCPU() { + err := suite.check.GetClient().CPU.Create(). SetTimestamp(time.Now()). - SetUsage(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create cpu usage.") + SetUsage(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create cpu usage.") - querySet, err := check.getCPU(ctx) - assert.NoError(t, err, "Failed to get cpu usage.") - assert.NotEmpty(t, querySet, "CPU queryset should not be empty") + querySet, err := suite.check.getCPU(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get cpu usage.") + assert.NotEmpty(suite.T(), querySet, "CPU queryset should not be empty") } -func TestSaveHourlyCPUUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyCPUUsageCheckSuite) TestSaveHourlyCPUUsage() { data := base.CheckResult{ Timestamp: time.Now(), Peak: 50.0, Avg: 50.0, } - err := check.saveHourlyCPUUsage(data, ctx) - assert.NoError(t, err, "Failed to save hourly cpu usage.") + err := suite.check.saveHourlyCPUUsage(data, suite.ctx) + assert.NoError(suite.T(), err, "Failed to save hourly cpu usage.") } -func TestDeleteCPU(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().CPU.Create(). +func (suite *HourlyCPUUsageCheckSuite) TestDeleteCPU() { + err := suite.check.GetClient().CPU.Create(). SetTimestamp(time.Now().Add(-2 * time.Hour)). 
- SetUsage(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create cpu usage.") + SetUsage(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create cpu usage.") + + err = suite.check.deleteCPU(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete cpu usage.") +} - err = check.deleteCPU(ctx) - assert.NoError(t, err, "Failed to delete cpu usage.") +func TestHourlyCPUUsageCheckSuite(t *testing.T) { + suite.Run(t, new(HourlyCPUUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index 66222ad..472f4b5 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -3,49 +3,58 @@ package io import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type HourlyDiskIOCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *HourlyDiskIOCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_DISK_IO, Name: string(base.HOURLY_DISK_IO) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetDiskIO(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyDiskIOCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - err := 
check.GetClient().DiskIO.Create(). +func (suite *HourlyDiskIOCheckSuite) TestGetDiskIO() { + err := suite.check.GetClient().DiskIO.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetReadBps(rand.Float64()). - SetWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create disk io.") + SetWriteBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create disk io.") - querySet, err := check.getDiskIO(ctx) - assert.NoError(t, err, "Failed to get disk io.") - assert.NotEmpty(t, querySet, "Disk io queryset should not be empty") + querySet, err := suite.check.getDiskIO(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get disk io.") + assert.NotEmpty(suite.T(), querySet, "Disk io queryset should not be empty") } -func TestSaveHourlyDiskIO(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyDiskIOCheckSuite) TestSaveHourlyDiskIO() { data := []base.CheckResult{ { Timestamp: time.Now(), @@ -65,21 +74,22 @@ func TestSaveHourlyDiskIO(t *testing.T) { }, } - err := check.saveHourlyDiskIO(data, ctx) - assert.NoError(t, err, "Failed to save hourly disk io.") + err := suite.check.saveHourlyDiskIO(data, suite.ctx) + assert.NoError(suite.T(), err, "Failed to save hourly disk io.") } -func TestDeleteDiskIO(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().DiskIO.Create(). +func (suite *HourlyDiskIOCheckSuite) TestDeleteDiskIO() { + err := suite.check.GetClient().DiskIO.Create(). SetTimestamp(time.Now().Add(-2 * time.Hour)). SetDevice(uuid.NewString()). SetReadBps(rand.Float64()). 
- SetWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create disk io.") + SetWriteBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create disk io.") + + err = suite.check.deleteDiskIO(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete disk io.") +} - err = check.deleteDiskIO(ctx) - assert.NoError(t, err, "Failed to delete disk io.") +func TestHourlyDiskIOCheckSuite(t *testing.T) { + suite.Run(t, new(HourlyDiskIOCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index 5af1d54..bdfa30d 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -3,52 +3,61 @@ package usage import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type HourlyDiskUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *HourlyDiskUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_DISK_USAGE, Name: string(base.HOURLY_DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetDiskUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyDiskUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, 
"failed to delete test db file") +} - err := check.GetClient().DiskUsage.Create(). +func (suite *HourlyDiskUsageCheckSuite) TestGetDiskUsage() { + err := suite.check.GetClient().DiskUsage.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetMountPoint(uuid.NewString()). SetUsage(rand.Float64()). SetTotal(int64(rand.Int())). SetFree(int64(rand.Int())). - SetUsed(int64(rand.Int())).Exec(ctx) - assert.NoError(t, err, "Failed to create disk usage.") + SetUsed(int64(rand.Int())).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create disk usage.") - querySet, err := check.getDiskUsage(ctx) - assert.NoError(t, err, "Failed to get disk usage.") - assert.NotEmpty(t, querySet, "Disk usage queryset should not be empty") + querySet, err := suite.check.getDiskUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get disk usage.") + assert.NotEmpty(suite.T(), querySet, "Disk usage queryset should not be empty") } -func TestSaveHourlyDiskUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyDiskUsageCheckSuite) TestSaveHourlyDiskUsage() { data := []base.CheckResult{ { Timestamp: time.Now(), @@ -64,24 +73,25 @@ func TestSaveHourlyDiskUsage(t *testing.T) { }, } - err := check.saveHourlyDiskUsage(data, ctx) - assert.NoError(t, err, "Failed to save hourly disk usage.") + err := suite.check.saveHourlyDiskUsage(data, suite.ctx) + assert.NoError(suite.T(), err, "Failed to save hourly disk usage.") } -func TestDeleteDiskUsage(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().DiskUsage.Create(). +func (suite *HourlyDiskUsageCheckSuite) TestDeleteDiskUsage() { + err := suite.check.GetClient().DiskUsage.Create(). SetTimestamp(time.Now().Add(-2 * time.Hour)). SetDevice(uuid.NewString()). SetMountPoint(uuid.NewString()). SetUsage(rand.Float64()). SetTotal(int64(rand.Int())). SetFree(int64(rand.Int())). 
- SetUsed(int64(rand.Int())).Exec(ctx) - assert.NoError(t, err, "Failed to create disk usage.") + SetUsed(int64(rand.Int())).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create disk usage.") + + err = suite.check.deleteDiskUsage(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete disk usage.") +} - err = check.deleteDiskUsage(ctx) - assert.NoError(t, err, "Failed to delete disk usage.") +func TestHourlyDiskUsageCheckSuite(t *testing.T) { + suite.Run(t, new(HourlyDiskUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index 108c31d..e3ce3f5 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -3,66 +3,76 @@ package memory import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type HourlyMemoryUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *HourlyMemoryUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_MEM_USAGE, Name: string(base.HOURLY_MEM_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetMemory(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyMemoryUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + 
suite.Require().NoError(err, "failed to delete test db file") +} - err := check.GetClient().CPU.Create(). +func (suite *HourlyMemoryUsageCheckSuite) TestGetMemory() { + err := suite.check.GetClient().CPU.Create(). SetTimestamp(time.Now()). - SetUsage(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create memory usage.") + SetUsage(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create memory usage.") - querySet, err := check.getMemory(ctx) - assert.NoError(t, err, "Failed to get memory usage.") - assert.NotEmpty(t, querySet, "Memory queryset should not be empty") + querySet, err := suite.check.getMemory(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get memory usage.") + assert.NotEmpty(suite.T(), querySet, "Memory queryset should not be empty") } -func TestSaveMemoryPerHour(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyMemoryUsageCheckSuite) TestSaveMemoryPerHour() { data := base.CheckResult{ Timestamp: time.Now(), Peak: 50.0, Avg: 50.0, } - err := check.saveHourlyMemoryUsage(data, ctx) - assert.NoError(t, err, "Failed to save memory usage per hour.") + err := suite.check.saveHourlyMemoryUsage(data, suite.ctx) + assert.NoError(suite.T(), err, "Failed to save memory usage per hour.") } -func TestDeleteMemory(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().Memory.Create(). +func (suite *HourlyMemoryUsageCheckSuite) TestDeleteMemory() { + err := suite.check.GetClient().Memory.Create(). SetTimestamp(time.Now().Add(-2 * time.Hour)). 
- SetUsage(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create memory usage.") + SetUsage(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create memory usage.") + + err = suite.check.deleteMemory(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete memory usage.") +} - err = check.deleteMemory(ctx) - assert.NoError(t, err, "Failed to delete memory usage.") +func TestHourlyMemoryCheckSuite(t *testing.T) { + suite.Run(t, new(HourlyMemoryUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index e754e00..0e24180 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -3,51 +3,60 @@ package net import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type HourlyNetCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *HourlyNetCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_NET, Name: string(base.HOURLY_NET) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestGetTraffic(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyNetCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - err := 
check.GetClient().Traffic.Create(). +func (suite *HourlyNetCheckSuite) TestGetTraffic() { + err := suite.check.GetClient().Traffic.Create(). SetTimestamp(time.Now()). SetName(uuid.NewString()). SetInputPps(rand.Float64()). SetInputBps(rand.Float64()). SetOutputPps(rand.Float64()). - SetOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create traffic.") + SetOutputBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create traffic.") - querySet, err := check.getTraffic(ctx) - assert.NoError(t, err, "Failed to get traffic.") - assert.NotEmpty(t, querySet, "Traffic queryset should not be empty") + querySet, err := suite.check.getTraffic(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get traffic.") + assert.NotEmpty(suite.T(), querySet, "Traffic queryset should not be empty") } -func TestSaveHourlyTraffic(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *HourlyNetCheckSuite) TestSaveHourlyTraffic() { data := []base.CheckResult{ { Timestamp: time.Now(), @@ -75,23 +84,24 @@ func TestSaveHourlyTraffic(t *testing.T) { }, } - err := check.saveHourlyTraffic(data, ctx) - assert.NoError(t, err, "Failed to save houlry traffic.") + err := suite.check.saveHourlyTraffic(data, suite.ctx) + assert.NoError(suite.T(), err, "Failed to save houlry traffic.") } -func TestDeleteTraffic(t *testing.T) { - check := setUp() - ctx := context.Background() - - err := check.GetClient().Traffic.Create(). +func (suite *HourlyNetCheckSuite) TestDeleteTraffic() { + err := suite.check.GetClient().Traffic.Create(). SetTimestamp(time.Now().Add(-2 * time.Hour)). SetName(uuid.NewString()). SetInputPps(rand.Float64()). SetInputBps(rand.Float64()). SetOutputPps(rand.Float64()). 
- SetOutputBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create traffic.") + SetOutputBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create traffic.") + + err = suite.check.deleteTraffic(suite.ctx) + assert.NoError(suite.T(), err, "Failed to delete traffic.") +} - err = check.deleteTraffic(ctx) - assert.NoError(t, err, "Failed to delete traffic.") +func TestHourlyNetCheckSuite(t *testing.T) { + suite.Run(t, new(HourlyNetCheckSuite)) } diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go index 2be1df1..c765fdc 100644 --- a/pkg/collector/check/realtime/cpu/cpu_test.go +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -2,49 +2,63 @@ package cpu import ( "context" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type CPUCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *CPUCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.CPU, Name: string(base.CPU) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestCollectCPUUsage(t *testing.T) { - check := setUp() +func (suite *CPUCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - usage, err := check.collectCPUUsage() +func (suite *CPUCheckSuite) TestCollectCPUUsage() { + usage, err := suite.check.collectCPUUsage() - 
assert.NoError(t, err, "Failed to get cpu usage.") - assert.GreaterOrEqual(t, usage, 0.0, "CPU usage should be non-negative.") - assert.LessOrEqual(t, usage, 100.0, "CPU usage should not exceed 100%.") + assert.NoError(suite.T(), err, "Failed to get cpu usage.") + assert.GreaterOrEqual(suite.T(), usage, 0.0, "CPU usage should be non-negative.") + assert.LessOrEqual(suite.T(), usage, 100.0, "CPU usage should not exceed 100%.") } -func TestSaveCPUUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *CPUCheckSuite) TestSaveCPUUsage() { data := base.CheckResult{ Timestamp: time.Now(), Usage: 50.0, } - err := check.saveCPUUsage(data, ctx) + err := suite.check.saveCPUUsage(data, suite.ctx) + + assert.NoError(suite.T(), err, "Failed to save cpu usage.") +} - assert.NoError(t, err, "Failed to save cpu usage.") +func TestCPUCheckSuite(t *testing.T) { + suite.Run(t, new(CPUCheckSuite)) } diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index 5f3954e..ce5d512 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -3,69 +3,88 @@ package diskio import ( "context" "math/rand" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp(checkType base.CheckType) base.CheckStrategy { +type DiskIOCheckSuite struct { + suite.Suite + client *ent.Client + collectCheck *CollectCheck + sendCheck *SendCheck + ctx context.Context +} + +func (suite *DiskIOCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) - args := &base.CheckArgs{ - Type: checkType, - Name: string(checkType) + "_" + uuid.NewString(), + collect_args := &base.CheckArgs{ + Type: 
base.DISK_IO_COLLECTOR, + Name: string(base.DISK_IO_COLLECTOR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args) - - return check + send_args := &base.CheckArgs{ + Type: base.DISK_IO, + Name: string(base.DISK_IO) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: suite.client, + } + suite.collectCheck = NewCheck(collect_args).(*CollectCheck) + suite.sendCheck = NewCheck(send_args).(*SendCheck) + suite.ctx = context.Background() } -func TestCollectDiskIO(t *testing.T) { - check := setUp(base.DISK_IO_COLLECTOR).(*CollectCheck) +func (suite *DiskIOCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - ioCounters, err := check.collectDiskIO() - assert.NoError(t, err, "Failed to get disk io.") +func (suite *DiskIOCheckSuite) TestCollectDiskIO() { + ioCounters, err := suite.collectCheck.collectDiskIO() + assert.NoError(suite.T(), err, "Failed to get disk io.") - assert.NotEmpty(t, ioCounters, "Disk IO should not be empty") + assert.NotEmpty(suite.T(), ioCounters, "Disk IO should not be empty") for name, ioCounter := range ioCounters { - assert.NotEmpty(t, name, "Device name should not be empty") - assert.True(t, ioCounter.ReadBytes > 0, "Read bytes should be non-negative.") - assert.True(t, ioCounter.WriteBytes > 0, "Write bytes should be non-negative.") + assert.NotEmpty(suite.T(), name, "Device name should not be empty") + assert.True(suite.T(), ioCounter.ReadBytes > 0, "Read bytes should be non-negative.") + assert.True(suite.T(), ioCounter.WriteBytes > 0, "Write bytes should be non-negative.") } } -func TestSaveDiskIO(t *testing.T) { - check := setUp(base.DISK_IO_COLLECTOR).(*CollectCheck) - ctx := context.Background() - - ioCounters, err := check.collectDiskIO() - assert.NoError(t, err, "Failed to get disk io.") +func (suite 
*DiskIOCheckSuite) TestSaveDiskIO() { + ioCounters, err := suite.collectCheck.collectDiskIO() + assert.NoError(suite.T(), err, "Failed to get disk io.") - data := check.parseDiskIO(ioCounters) + data := suite.collectCheck.parseDiskIO(ioCounters) - err = check.saveDiskIO(data, ctx) - assert.NoError(t, err, "Failed to save cpu usage.") + err = suite.collectCheck.saveDiskIO(data, suite.ctx) + assert.NoError(suite.T(), err, "Failed to save cpu usage.") } -func TestGetDiskIO(t *testing.T) { - check := setUp(base.DISK_IO).(*SendCheck) - ctx := context.Background() - - err := check.GetClient().DiskIO.Create(). +func (suite *DiskIOCheckSuite) TestGetDiskIO() { + err := suite.collectCheck.GetClient().DiskIO.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). SetReadBps(rand.Float64()). - SetWriteBps(rand.Float64()).Exec(ctx) - assert.NoError(t, err, "Failed to create disk io.") + SetWriteBps(rand.Float64()).Exec(suite.ctx) + assert.NoError(suite.T(), err, "Failed to create disk io.") + + querySet, err := suite.sendCheck.getDiskIO(suite.ctx) + assert.NoError(suite.T(), err, "Failed to get disk io queryset.") + assert.NotEmpty(suite.T(), querySet, "Disk IO queryset should not be empty") +} - querySet, err := check.getDiskIO(ctx) - assert.NoError(t, err, "Failed to get disk io queryset.") - assert.NotEmpty(t, querySet, "Disk IO queryset should not be empty") +func TestDiskIOCheckSuite(t *testing.T) { + suite.Run(t, new(DiskIOCheckSuite)) } diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go index a9e0c8d..f75e7bd 100644 --- a/pkg/collector/check/realtime/disk/usage/usage_test.go +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -2,61 +2,72 @@ package diskusage import ( "context" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" 
"github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type DiskUsageCheckSuite struct { + suite.Suite + client *ent.Client + check *Check + ctx context.Context +} + +func (suite *DiskUsageCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DISK_USAGE, Name: string(base.DISK_USAGE) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestCollectDiskPartitions(t *testing.T) { - check := setUp() +func (suite *DiskUsageCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - partitions, err := check.collectDiskPartitions() +func (suite *DiskUsageCheckSuite) TestCollectDiskPartitions() { + partitions, err := suite.check.collectDiskPartitions() - assert.NoError(t, err, "Failed to get disk partitions.") - assert.NotEmpty(t, partitions, "Disk partitions should not be empty") + assert.NoError(suite.T(), err, "Failed to get disk partitions.") + assert.NotEmpty(suite.T(), partitions, "Disk partitions should not be empty") } -func TestCollectDiskUsage(t *testing.T) { - check := setUp() - - partitions, err := check.collectDiskPartitions() - assert.NoError(t, err, "Failed to get disk partitions.") +func (suite *DiskUsageCheckSuite) TestCollectDiskUsage() { + partitions, err := suite.check.collectDiskPartitions() + assert.NoError(suite.T(), err, "Failed to get disk partitions.") - assert.NotEmpty(t, partitions, "Disk partitions should not be empty") + assert.NotEmpty(suite.T(), partitions, "Disk partitions should not be empty") for _, partition := range partitions { - usage, err := check.collectDiskUsage(partition.Mountpoint) - 
assert.NoError(t, err, "Failed to get disk usage.") - assert.GreaterOrEqual(t, usage.UsedPercent, 0.0, "Disk usage should be non-negative.") - assert.LessOrEqual(t, usage.UsedPercent, 100.0, "Disk usage should not exceed 100%.") + usage, err := suite.check.collectDiskUsage(partition.Mountpoint) + assert.NoError(suite.T(), err, "Failed to get disk usage.") + assert.GreaterOrEqual(suite.T(), usage.UsedPercent, 0.0, "Disk usage should be non-negative.") + assert.LessOrEqual(suite.T(), usage.UsedPercent, 100.0, "Disk usage should not exceed 100%.") } } -func TestSaveDiskUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *DiskUsageCheckSuite) TestSaveDiskUsage() { + partitions, err := suite.check.collectDiskPartitions() + assert.NoError(suite.T(), err, "Failed to get disk partitions.") - partitions, err := check.collectDiskPartitions() - assert.NoError(t, err, "Failed to get disk partitions.") + err = suite.check.saveDiskUsage(suite.check.parseDiskUsage(partitions), suite.ctx) + assert.NoError(suite.T(), err, "Failed to save disk usage.") +} - err = check.saveDiskUsage(check.parseDiskUsage(partitions), ctx) - assert.NoError(t, err, "Failed to save disk usage.") +func TestDiskUsageCheckSuite(t *testing.T) { + suite.Run(t, new(DiskUsageCheckSuite)) } diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go index fc88b9f..bb877b7 100644 --- a/pkg/collector/check/realtime/memory/memory_test.go +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -2,49 +2,63 @@ package memory import ( "context" + "os" "testing" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db" + "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" ) -func setUp() *Check { +type MemoryCheckSuite struct { + suite.Suite + client *ent.Client + check 
*Check + ctx context.Context +} + +func (suite *MemoryCheckSuite) SetupSuite() { + suite.client = db.InitTestDB() buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.MEM, Name: string(base.MEM) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: db.InitTestDB(), + Client: suite.client, } - - check := NewCheck(args).(*Check) - - return check + suite.check = NewCheck(args).(*Check) + suite.ctx = context.Background() } -func TestCollectMemoryUsage(t *testing.T) { - check := setUp() +func (suite *MemoryCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") +} - usage, err := check.collectMemoryUsage() +func (suite *MemoryCheckSuite) TestCollectMemoryUsage() { + usage, err := suite.check.collectMemoryUsage() - assert.NoError(t, err, "Failed to get memory usage.") - assert.GreaterOrEqual(t, usage, 0.0, "Memory usage should be non-negative.") - assert.LessOrEqual(t, usage, 100.0, "Memory usage should not exceed 100%.") + assert.NoError(suite.T(), err, "Failed to get memory usage.") + assert.GreaterOrEqual(suite.T(), usage, 0.0, "Memory usage should be non-negative.") + assert.LessOrEqual(suite.T(), usage, 100.0, "Memory usage should not exceed 100%.") } -func TestSaveMemoryUsage(t *testing.T) { - check := setUp() - ctx := context.Background() +func (suite *MemoryCheckSuite) TestSaveMemoryUsage() { data := base.CheckResult{ Timestamp: time.Now(), Usage: 50.0, } - err := check.saveMemoryUsage(data, ctx) + err := suite.check.saveMemoryUsage(data, suite.ctx) + + assert.NoError(suite.T(), err, "Failed to save memory usage.") +} - assert.NoError(t, err, "Failed to save memory usage.") +func TestMemoryCheckSuite(t *testing.T) { + suite.Run(t, new(MemoryCheckSuite)) } diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index d85a36b..10d1156 100644 --- a/pkg/collector/check/realtime/net/net_test.go 
+++ b/pkg/collector/check/realtime/net/net_test.go @@ -3,6 +3,7 @@ package net import ( "context" "math/rand" + "os" "testing" "time" @@ -16,73 +17,74 @@ import ( type NetCheckSuite struct { suite.Suite - client *ent.Client + client *ent.Client + collectCheck *CollectCheck + sendCheck *SendCheck + ctx context.Context } func (suite *NetCheckSuite) SetupSuite() { suite.client = db.InitTestDB() -} - -func setUp(checkType base.CheckType, client *ent.Client) base.CheckStrategy { buffer := base.NewCheckBuffer(10) - args := &base.CheckArgs{ - Type: checkType, - Name: string(checkType) + "_" + uuid.NewString(), + collect_args := &base.CheckArgs{ + Type: base.NET_COLLECTOR, + Name: string(base.NET_COLLECTOR) + "_" + uuid.NewString(), Interval: time.Duration(1 * time.Second), Buffer: buffer, - Client: client, + Client: suite.client, } + send_args := &base.CheckArgs{ + Type: base.NET, + Name: string(base.NET) + "_" + uuid.NewString(), + Interval: time.Duration(1 * time.Second), + Buffer: buffer, + Client: suite.client, + } + suite.collectCheck = NewCheck(collect_args).(*CollectCheck) + suite.sendCheck = NewCheck(send_args).(*SendCheck) + suite.ctx = context.Background() +} - check := NewCheck(args) - - return check +func (suite *NetCheckSuite) TearDownSuite() { + err := os.Remove("alpamon.db") + suite.Require().NoError(err, "failed to delete test db file") } func (suite *NetCheckSuite) TestCollectIOCounters() { - check := setUp(base.NET_COLLECTOR, suite.client).(*CollectCheck) - - ioCounters, err := check.collectIOCounters() + ioCounters, err := suite.collectCheck.collectIOCounters() assert.NoError(suite.T(), err, "Failed to get network IO.") assert.NotEmpty(suite.T(), ioCounters, "Network IO should not be empty") } func (suite *NetCheckSuite) TestCollectInterfaces() { - check := setUp(base.NET_COLLECTOR, suite.client).(*CollectCheck) - - interfaces, err := check.collectInterfaces() + interfaces, err := suite.collectCheck.collectInterfaces() assert.NoError(suite.T(), err, 
"Failed to get interfaces.") assert.NotEmpty(suite.T(), interfaces, "Interfaces should not be empty") } func (suite *NetCheckSuite) TestSaveTraffic() { - check := setUp(base.NET_COLLECTOR, suite.client).(*CollectCheck) - ctx := context.Background() - - ioCounters, interfaces, err := check.collectTraffic() + ioCounters, interfaces, err := suite.collectCheck.collectTraffic() assert.NoError(suite.T(), err, "Failed to get traffic.") assert.NotEmpty(suite.T(), ioCounters, "Network IO should not be empty") assert.NotEmpty(suite.T(), interfaces, "Interfaces should not be empty") - data := check.parseTraffic(ioCounters, interfaces) + data := suite.collectCheck.parseTraffic(ioCounters, interfaces) - err = check.saveTraffic(data, ctx) + err = suite.collectCheck.saveTraffic(data, suite.ctx) assert.NoError(suite.T(), err, "Failed to save traffic.") } func (suite *NetCheckSuite) TestGetTraffic() { - check := setUp(base.NET, suite.client).(*SendCheck) - ctx := context.Background() - - err := check.GetClient().Traffic.Create(). + err := suite.sendCheck.GetClient().Traffic.Create(). SetTimestamp(time.Now()). SetName(uuid.NewString()). SetInputPps(rand.Float64()). SetInputBps(rand.Float64()). SetOutputPps(rand.Float64()). 
- SetOutputBps(rand.Float64()).Exec(ctx) + SetOutputBps(rand.Float64()).Exec(suite.ctx) assert.NoError(suite.T(), err, "Failed to create traffic.") - querySet, err := check.getTraffic(ctx) + querySet, err := suite.sendCheck.getTraffic(suite.ctx) assert.NoError(suite.T(), err, "Failed to get traffic queryset.") assert.NotEmpty(suite.T(), querySet, "Traffic queryset should not be empty") } From b9227ae82ecd8e698212872317695e26bb30ee25 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 17 Jan 2025 16:29:46 +0900 Subject: [PATCH 169/364] Minor fix Fix assert.True() to assert.GreaterEqual() --- pkg/collector/check/realtime/disk/io/io_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index ce5d512..9e00655 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -57,8 +57,8 @@ func (suite *DiskIOCheckSuite) TestCollectDiskIO() { assert.NotEmpty(suite.T(), ioCounters, "Disk IO should not be empty") for name, ioCounter := range ioCounters { assert.NotEmpty(suite.T(), name, "Device name should not be empty") - assert.True(suite.T(), ioCounter.ReadBytes > 0, "Read bytes should be non-negative.") - assert.True(suite.T(), ioCounter.WriteBytes > 0, "Write bytes should be non-negative.") + assert.GreaterOrEqual(suite.T(), ioCounter.ReadBytes, 0, "Read bytes should be non-negative.") + assert.GreaterOrEqual(suite.T(), ioCounter.WriteBytes, 0, "Write bytes should be non-negative.") } } From 321a7e413821105ed19ba581afb1e14aa7573eb9 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 17 Jan 2025 16:43:24 +0900 Subject: [PATCH 170/364] Minor fix Fix 0 to uint64(0) --- pkg/collector/check/realtime/disk/io/io_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index 
9e00655..906d779 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -57,8 +57,8 @@ func (suite *DiskIOCheckSuite) TestCollectDiskIO() { assert.NotEmpty(suite.T(), ioCounters, "Disk IO should not be empty") for name, ioCounter := range ioCounters { assert.NotEmpty(suite.T(), name, "Device name should not be empty") - assert.GreaterOrEqual(suite.T(), ioCounter.ReadBytes, 0, "Read bytes should be non-negative.") - assert.GreaterOrEqual(suite.T(), ioCounter.WriteBytes, 0, "Write bytes should be non-negative.") + assert.GreaterOrEqual(suite.T(), ioCounter.ReadBytes, uint64(0), "Read bytes should be non-negative.") + assert.GreaterOrEqual(suite.T(), ioCounter.WriteBytes, uint64(0), "Write bytes should be non-negative.") } } From c0b8c963049c1648011ef071be22a41deac31ebb Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 20 Jan 2025 16:05:13 +0900 Subject: [PATCH 171/364] Add AllowOverwrite & AllowUnzip at CommandData To enhance the integrity and reliability of file/folder downloads, update alpacon-server. To accommodate these changes, add AllowOverwrite and AllowUnzip fields to CommandData structure. To accommodate these changes, update fileDownload(). 
--- pkg/runner/command.go | 23 ++++++++++++----- pkg/runner/command_types.go | 50 ++++++++++++++++++++----------------- 2 files changed, 44 insertions(+), 29 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index af3e3b3..c5b9411 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -566,11 +566,13 @@ func (cr *CommandRunner) runFileDownload(fileName string) (exitCode int, result } else { for _, file := range cr.data.Files { cmdData := CommandData{ - Username: file.Username, - Groupname: file.Groupname, - Type: file.Type, - Content: file.Content, - Path: file.Path, + Username: file.Username, + Groupname: file.Groupname, + Type: file.Type, + Content: file.Content, + Path: file.Path, + AllowOverwrite: file.AllowOverwrite, + AllowUnzip: file.AllowUnzip, } code, message = fileDownload(cmdData, sysProcAttr) if code != 0 { @@ -756,8 +758,12 @@ func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode return 1, err.Error() } + if !data.AllowOverwrite && isFileExist(data.Path) { + return 1, fmt.Sprintf("%s already exists.", data.Path) + } + isZip := isZipFile(content, filepath.Ext(data.Path)) - if isZip { + if isZip && data.AllowUnzip { escapePath := utils.Quote(data.Path) escapeDirPath := utils.Quote(filepath.Dir(data.Path)) command := fmt.Sprintf("tee %s > /dev/null && unzip -n %s -d %s; rm %s", @@ -791,3 +797,8 @@ func isZipFile(content []byte, ext string) bool { return err == nil } + +func isFileExist(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 156eb33..04e0b8b 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -19,32 +19,36 @@ type Command struct { } type File struct { - Username string `json:"username"` - Groupname string `json:"groupname"` - Type string `json:"type"` - Content string `json:"content"` - Path string `json:"path"` + Username string `json:"username"` + Groupname 
string `json:"groupname"` + Type string `json:"type"` + Content string `json:"content"` + Path string `json:"path"` + AllowOverwrite bool `json:"allow_overwrite"` + AllowUnzip bool `json:"allow_unzip"` } type CommandData struct { - SessionID string `json:"session_id"` - URL string `json:"url"` - Rows uint16 `json:"rows"` - Cols uint16 `json:"cols"` - Username string `json:"username"` - Groupname string `json:"groupname"` - HomeDirectory string `json:"home_directory"` - UID uint64 `json:"uid"` - GID uint64 `json:"gid"` - Comment string `json:"comment"` - Shell string `json:"shell"` - Groups []uint64 `json:"groups"` - Type string `json:"type"` - Content string `json:"content"` - Path string `json:"path"` - Paths []string `json:"paths"` - Files []File `json:"files,omitempty"` - Keys []string `json:"keys"` + SessionID string `json:"session_id"` + URL string `json:"url"` + Rows uint16 `json:"rows"` + Cols uint16 `json:"cols"` + Username string `json:"username"` + Groupname string `json:"groupname"` + HomeDirectory string `json:"home_directory"` + UID uint64 `json:"uid"` + GID uint64 `json:"gid"` + Comment string `json:"comment"` + Shell string `json:"shell"` + Groups []uint64 `json:"groups"` + Type string `json:"type"` + Content string `json:"content"` + Path string `json:"path"` + Paths []string `json:"paths"` + Files []File `json:"files,omitempty"` + AllowOverwrite bool `json:"allow_overwrite,omitempty"` + AllowUnzip bool `json:"allow_unzip,omitempty"` + Keys []string `json:"keys"` } type CommandRunner struct { From 51051ee93df8b8309e20ef2f15f813d4fd6aa4ea Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 21 Jan 2025 16:28:45 +0900 Subject: [PATCH 172/364] Add statFileTransfer() Add statFileTransfer() to report the progress of file and folder uploads/downloads to alpacon-server. 
--- pkg/runner/command.go | 20 +++++++++++++++++++- pkg/runner/command_types.go | 14 ++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index c5b9411..aaa1fc9 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -122,7 +122,10 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { case "download": return cr.runFileDownload(args[1]) case "upload": - return cr.runFileUpload(args[1]) + code, message := cr.runFileUpload(args[1]) + statFileTransfer(code, DOWNLOAD, message, cr.data) + + return code, message case "openpty": data := openPtyData{ SessionID: cr.data.SessionID, @@ -563,6 +566,7 @@ func (cr *CommandRunner) runFileDownload(fileName string) (exitCode int, result if len(cr.data.Files) == 0 { code, message = fileDownload(cr.data, sysProcAttr) + statFileTransfer(code, UPLOAD, message, cr.data) } else { for _, file := range cr.data.Files { cmdData := CommandData{ @@ -573,8 +577,10 @@ func (cr *CommandRunner) runFileDownload(fileName string) (exitCode int, result Path: file.Path, AllowOverwrite: file.AllowOverwrite, AllowUnzip: file.AllowUnzip, + URL: file.URL, } code, message = fileDownload(cmdData, sysProcAttr) + statFileTransfer(code, UPLOAD, message, cmdData) if code != 0 { break } @@ -802,3 +808,15 @@ func isFileExist(path string) bool { _, err := os.Stat(path) return !os.IsNotExist(err) } + +func statFileTransfer(code int, transferType transferType, message string, data CommandData) { + url := fmt.Sprint(data.URL + "stat/") + isSuccess := code == 0 + + payload := &commandStat{ + Success: isSuccess, + Message: message, + Type: transferType, + } + scheduler.Rqueue.Post(url, payload, 10, time.Time{}) +} diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 04e0b8b..a7a8564 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -26,6 +26,7 @@ type File struct { Path string `json:"path"` AllowOverwrite bool 
`json:"allow_overwrite"` AllowUnzip bool `json:"allow_unzip"` + URL string `json:"url"` } type CommandData struct { @@ -108,6 +109,19 @@ type commandFin struct { ElapsedTime float64 `json:"elapsed_time"` } +type commandStat struct { + Success bool `json:"success"` + Message string `json:"message"` + Type transferType `json:"type"` +} + +type transferType string + +const ( + DOWNLOAD transferType = "download" + UPLOAD transferType = "upload" +) + var nonZipExt = map[string]bool{ ".jar": true, ".war": true, From 9c5d3ed2110c2433fe484269e4346ef72cdb2c44 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 21 Jan 2025 16:49:50 +0900 Subject: [PATCH 173/364] Minor fix Fix the logic to allow bulk uploads to continue even if individual file uploads fail. --- pkg/runner/command.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index aaa1fc9..b6d47bb 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -581,9 +581,6 @@ func (cr *CommandRunner) runFileDownload(fileName string) (exitCode int, result } code, message = fileDownload(cmdData, sysProcAttr) statFileTransfer(code, UPLOAD, message, cmdData) - if code != 0 { - break - } } } From 90f5c925af3b13652f5bc1659f102e8b8c03bca5 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 22 Jan 2025 22:07:19 +0900 Subject: [PATCH 174/364] Update due to changes in alpacon-server Update the upload process to include the name field when uploading folders to alpacon-server. 
--- pkg/runner/command.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index b6d47bb..6218711 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -537,6 +537,13 @@ func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, result st return 1, err.Error() } + if recursive { + err = writer.WriteField("name", filepath.Base(name)) + if err != nil { + return 1, err.Error() + } + } + _ = writer.Close() contentType := writer.FormDataContentType() From fe3421614e6f4c08eaccc53479f416b908095eaf Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 31 Jan 2025 11:45:08 +0900 Subject: [PATCH 175/364] Migrate artifact action to v4 --- .github/workflows/release.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6dd9059..2f9c2f4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -36,13 +36,13 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload AMD64 DEB artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: alpamon_${{ github.ref_name }}_linux_amd64.deb path: dist/alpamon_${{ github.ref_name }}_linux_amd64.deb - name: Upload AMD64 RPM artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: alpamon_${{ github.ref_name }}_linux_amd64.rpm path: dist/alpamon_${{ github.ref_name }}_linux_amd64.rpm @@ -52,12 +52,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Download AMD64 DEB Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: alpamon_${{ github.ref_name }}_linux_amd64.deb - name: Download AMD64 RPM Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: 
alpamon_${{ github.ref_name }}_linux_amd64.rpm From 6f7e0d22a7ed78e6f0ab3714d4b3ef619067d432 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 31 Jan 2025 15:56:49 +0900 Subject: [PATCH 176/364] Fix pty to properly inject environment variables --- pkg/runner/pty.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index ba4dd60..c3d8370 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -223,16 +223,18 @@ func (pc *PtyClient) getPtyUserAndEnv() (uid, gid int, groupIds []string, env ma if err != nil { return 0, 0, nil, env, fmt.Errorf("failed to get current user: %w", err) } + // If Alpamon is not running as root or username is not specified, use the current user + env["USER"] = usr.Username + env["HOME"] = usr.HomeDir } else { // If Alpamon is running as root, get the user by the provided username usr, err = user.Lookup(pc.username) if err != nil { return 0, 0, nil, env, fmt.Errorf("failed to lookup specified user: %w", err) } + env["USER"] = pc.username + env["HOME"] = pc.homeDirectory } - env["USER"] = usr.Username - env["HOME"] = usr.HomeDir - uid, err = strconv.Atoi(usr.Uid) if err != nil { return 0, 0, nil, env, fmt.Errorf("failed to convert UID: %w", err) From ea9e4225b3c83ccbb28c33882c0927390f931b58 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 31 Jan 2025 16:43:38 +0900 Subject: [PATCH 177/364] Ensure working directory is set for all command executions --- pkg/runner/command.go | 8 ++++---- pkg/runner/pty.go | 25 ++++++++++--------------- pkg/runner/shell.go | 8 +++++++- pkg/utils/utils.go | 20 ++++++++++++++++++++ 4 files changed, 41 insertions(+), 20 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 6218711..7fef725 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -64,14 +64,14 @@ func (cr *CommandRunner) Run() { } if result != "" && cr.command.ID != "" { - url := fmt.Sprintf(eventCommandFinURL, cr.command.ID) + finURL := 
fmt.Sprintf(eventCommandFinURL, cr.command.ID) payload := &commandFin{ Success: exitCode == 0, Result: result, ElapsedTime: time.Since(start).Seconds(), } - scheduler.Rqueue.Post(url, payload, 10, time.Time{}) + scheduler.Rqueue.Post(finURL, payload, 10, time.Time{}) } } @@ -814,7 +814,7 @@ func isFileExist(path string) bool { } func statFileTransfer(code int, transferType transferType, message string, data CommandData) { - url := fmt.Sprint(data.URL + "stat/") + statURL := fmt.Sprint(data.URL + "stat/") isSuccess := code == 0 payload := &commandStat{ @@ -822,5 +822,5 @@ func statFileTransfer(code int, transferType transferType, message string, data Message: message, Type: transferType, } - scheduler.Rqueue.Post(url, payload, 10, time.Time{}) + scheduler.Rqueue.Post(statURL, payload, 10, time.Time{}) } diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index c3d8370..435e5bd 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/alpacanetworks/alpamon-go/pkg/config" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/creack/pty" "github.com/gorilla/websocket" "github.com/rs/zerolog/log" @@ -12,7 +13,6 @@ import ( "net/http" "os" "os/exec" - "os/user" "strconv" "strings" ) @@ -214,40 +214,35 @@ func (pc *PtyClient) close() { // getPtyUserAndEnv retrieves user information and sets environment variables. 
func (pc *PtyClient) getPtyUserAndEnv() (uid, gid int, groupIds []string, env map[string]string, err error) { - var usr *user.User env = getDefaultEnv() + usr, err := utils.GetSystemUser(pc.username) + if err != nil { + return 0, 0, nil, nil, err + } + currentUID := os.Geteuid() if currentUID != 0 || pc.username == "" { - usr, err = user.Current() - if err != nil { - return 0, 0, nil, env, fmt.Errorf("failed to get current user: %w", err) - } - // If Alpamon is not running as root or username is not specified, use the current user env["USER"] = usr.Username env["HOME"] = usr.HomeDir - } else { // If Alpamon is running as root, get the user by the provided username - usr, err = user.Lookup(pc.username) - if err != nil { - return 0, 0, nil, env, fmt.Errorf("failed to lookup specified user: %w", err) - } + } else { env["USER"] = pc.username env["HOME"] = pc.homeDirectory } uid, err = strconv.Atoi(usr.Uid) if err != nil { - return 0, 0, nil, env, fmt.Errorf("failed to convert UID: %w", err) + return 0, 0, nil, nil, fmt.Errorf("failed to convert UID: %w", err) } gid, err = strconv.Atoi(usr.Gid) if err != nil { - return 0, 0, nil, env, fmt.Errorf("failed to convert GID: %w", err) + return 0, 0, nil, nil, fmt.Errorf("failed to convert GID: %w", err) } groupIds, err = usr.GroupIds() if err != nil { - return 0, 0, nil, env, fmt.Errorf("failed to get group IDs: %w", err) + return 0, 0, nil, nil, fmt.Errorf("failed to get group IDs: %w", err) } return uid, gid, groupIds, env, nil diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index ad97558..d5cfd2f 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -3,6 +3,7 @@ package runner import ( "context" "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/rs/zerolog/log" "os" "os/exec" @@ -97,7 +98,6 @@ func runCmd(args []string, username, groupname string, env map[string]string, ti } else { cmd = exec.CommandContext(ctx, args[0], args[1:]...) 
} - } else { if containsShellOperator(args) { cmd = exec.CommandContext(ctx, "bash", "-c", strings.Join(args, " ")) @@ -118,6 +118,12 @@ func runCmd(args []string, username, groupname string, env map[string]string, ti cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) } + usr, err := utils.GetSystemUser(username) + if err != nil { + return 1, err.Error() + } + cmd.Dir = usr.HomeDir + output, err := cmd.Output() if err != nil { return 1, err.Error() diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index b492697..9c10c1a 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -5,6 +5,7 @@ import ( "fmt" "net/url" "os" + "os/user" "regexp" "runtime" "strconv" @@ -168,3 +169,22 @@ func Quote(s string) string { return s } + +func GetSystemUser(username string) (*user.User, error) { + currentUID := os.Getuid() + + // If Alpamon is not running as root or username is not specified, use the current user + if currentUID != 0 || username == "" { + usr, err := user.Current() + if err != nil { + return nil, fmt.Errorf("failed to get current user: %w", err) + } + return usr, nil + } + + usr, err := user.Lookup(username) + if err != nil { + return nil, fmt.Errorf("failed to lookup specified user: %w", err) + } + return usr, nil +} From 7a60b7bd7b74d79e3f2e7e399c5db60604b2f335 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 31 Jan 2025 20:37:33 +0900 Subject: [PATCH 178/364] Adjust execution timing for remote notification and fix upgrade command --- go.mod | 2 ++ go.sum | 5 +++++ pkg/runner/command.go | 34 +++++++++++++++++++++++++++------- pkg/utils/utils.go | 14 ++++++++++++++ 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 364746a..b3cf9ad 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 github.com/glebarez/go-sqlite v1.22.0 + github.com/google/go-github v17.0.0+incompatible github.com/google/uuid v1.6.0 github.com/gorilla/websocket 
v1.5.3 github.com/knqyf263/go-rpmdb v0.1.1 @@ -31,6 +32,7 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/hashicorp/hcl/v2 v2.18.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect diff --git a/go.sum b/go.sum index c14f6ac..790bd37 100644 --- a/go.sum +++ b/go.sum @@ -35,9 +35,14 @@ github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 7fef725..e6fb41b 100644 --- 
a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "errors" "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/version" "io" "mime/multipart" "net/http" @@ -91,6 +92,12 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { var cmd string switch args[0] { case "upgrade": + latestVersion := utils.GetLatestVersion() + + if version.Version == latestVersion { + return 0, fmt.Sprintf("Alpamon is already up-to-date (version: %s)", version.Version) + } + if utils.PlatformLike == "debian" { cmd = "apt-get update -y && " + "apt-get install --only-upgrade alpamon" @@ -99,8 +106,13 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { } else { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) } - log.Debug().Msgf("Upgrading Alpamon using command: '%s'...", cmd) - return cr.handleShellCmd(cmd, "root", "root", nil) + log.Debug().Msgf("Upgrading alpamon from %s to %s using command: '%s'...", version.Version, latestVersion, cmd) + + time.AfterFunc(1*time.Second, func() { + cr.handleShellCmd(cmd, "root", "root", nil) + }) + + return 0, fmt.Sprintf("Alpamon will upgrade from %s to %s in 1 second.", version.Version, latestVersion) case "commit": cr.commit() return 0, "Committed system information." @@ -177,6 +189,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { time.AfterFunc(1*time.Second, func() { cr.wsClient.restart() }) + return 0, "Alpamon will restart in 1 second." case "quit": time.AfterFunc(1*time.Second, func() { @@ -184,11 +197,19 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { }) return 0, "Alpamon will quit in 1 second." 
case "reboot": - log.Info().Msg("Reboot requested.") - return cr.handleShellCmd("reboot", "root", "root", nil) + log.Info().Msg("Reboot request received.") + time.AfterFunc(1*time.Second, func() { + cr.handleShellCmd("reboot", "root", "root", nil) + }) + + return 0, "Server will reboot in 1 second" case "shutdown": - log.Info().Msg("Shutdown requested.") - return cr.handleShellCmd("shutdown", "root", "root", nil) + log.Info().Msg("Shutdown request received.") + time.AfterFunc(1*time.Second, func() { + cr.handleShellCmd("shutdown", "root", "root", nil) + }) + + return 0, "Server will shutdown in 1 second" case "update": log.Info().Msg("Upgrade system requested.") if utils.PlatformLike == "debian" { @@ -201,7 +222,6 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) } - log.Debug().Msgf("Running '%s'...", cmd) return cr.handleShellCmd(cmd, "root", "root", nil) case "help": helpMessage := ` diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 9c10c1a..108aeb6 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "bytes" + "context" "fmt" "net/url" "os" @@ -12,6 +13,7 @@ import ( "strings" "time" + "github.com/google/go-github/github" "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/disk" "github.com/shirou/gopsutil/v4/host" @@ -188,3 +190,15 @@ func GetSystemUser(username string) (*user.User, error) { } return usr, nil } + +func GetLatestVersion() string { + client := github.NewClient(nil) + ctx := context.Background() + + release, _, err := client.Repositories.GetLatestRelease(ctx, "alpacanetworks", "alpamon") + if err != nil { + return "" + } + + return release.GetTagName() +} From c13ae86235862026d8714252e5bd6e37eb2f151f Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 31 Jan 2025 20:52:56 +0900 Subject: [PATCH 179/364] Minor fix --- scripts/postremove.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/scripts/postremove.sh b/scripts/postremove.sh index 9d2e350..e92709a 100644 --- a/scripts/postremove.sh +++ b/scripts/postremove.sh @@ -4,7 +4,7 @@ CONF_FILE_PATH="/etc/alpamon/alpamon.conf" TMP_FILE_PATH="/usr/lib/tmpfiles.d/alpamon.conf" SVC_FILE_PATH="/lib/systemd/system/alpamon.service" LOG_FILE_PATH="/var/log/alpamon/alpamon.log" -DB_FILE_PATH="/var/lib/alpamon/alpamon.db +DB_FILE_PATH="/var/lib/alpamon/alpamon.db" if [ "$1" = 'purge' ]; then rm -f "$CONF_FILE_PATH" || true From 388282a4b2eafce48f44da9eecf71714fde11b41 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 4 Feb 2025 13:25:04 +0900 Subject: [PATCH 180/364] Improve setup process with overwrite prompt --- cmd/alpamon/command/install.go | 34 ++++++++++++++++++++-------------- cmd/alpamon/command/root.go | 2 +- go.mod | 6 ++++++ go.sum | 12 ++++++++++++ scripts/postinstall.sh | 6 +++--- 5 files changed, 42 insertions(+), 18 deletions(-) diff --git a/cmd/alpamon/command/install.go b/cmd/alpamon/command/install.go index ce1632b..9afdaba 100644 --- a/cmd/alpamon/command/install.go +++ b/cmd/alpamon/command/install.go @@ -3,6 +3,7 @@ package command import ( "embed" "fmt" + cli "github.com/alpacanetworks/alpacon-cli/utils" "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/spf13/cobra" "os" @@ -34,11 +35,24 @@ type ConfigData struct { //go:embed configs/* var configFiles embed.FS -var installCmd = &cobra.Command{ - Use: "install", - Short: "Install Alpamon agent as a service", +var setupCmd = &cobra.Command{ + Use: "setup", + Short: "Setup and configure the Alpamon ", RunE: func(cmd *cobra.Command, args []string) error { - fmt.Println("Running Alpamon install command...") + fmt.Println("Starting Alpamon setup...") + + configExists := fileExists(configTarget) + isOverwrite := true + + if configExists { + fmt.Println("A configuration file already exists at:", configTarget) + isOverwrite = cli.PromptForBool("Do you want to overwrite it with a new configuration?: ") + } + + if !isOverwrite { + 
fmt.Println("Keeping the existing configuration file. Skipping configuration update.") + return nil + } err := copyEmbeddedFile(tmpFilePath, tmpFileTarget) if err != nil { @@ -60,16 +74,12 @@ var installCmd = &cobra.Command{ return err } - fmt.Println("Alpamon has been successfully installed.") + fmt.Println("Configuration file successfully updated.") return nil }, } func writeConfig() error { - if isConfigValid(configTarget) { - return nil - } - tmplData, err := configFiles.ReadFile(configTemplatePath) if err != nil { return fmt.Errorf("failed to read template file (%s): %v", configTemplatePath, err) @@ -119,10 +129,6 @@ func writeConfig() error { } func writeService() error { - if isConfigValid(serviceTarget) { - return nil - } - err := copyEmbeddedFile(serviceTemplatePath, serviceTarget) if err != nil { return fmt.Errorf("failed to write target file: %v", err) @@ -151,7 +157,7 @@ func copyEmbeddedFile(srcPath, dstPath string) error { return nil } -func isConfigValid(path string) bool { +func fileExists(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 9bd7a9c..f48f6f4 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -27,7 +27,7 @@ var RootCmd = &cobra.Command{ } func init() { - RootCmd.AddCommand(installCmd, ftpCmd) + RootCmd.AddCommand(setupCmd, ftpCmd) } func runAgent() { diff --git a/go.mod b/go.mod index b3cf9ad..30bb29a 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( ariga.io/atlas-go-sdk v0.6.5 entgo.io/ent v0.14.0 github.com/adrianbrad/queue v1.3.0 + github.com/alpacanetworks/alpacon-cli v0.0.3-0.20250109074532-43ca315d9a7a github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 github.com/glebarez/go-sqlite v1.22.0 @@ -33,14 +34,17 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring 
v1.1.0 // indirect + github.com/gookit/color v1.5.4 // indirect github.com/hashicorp/hcl/v2 v2.18.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect @@ -49,10 +53,12 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect + github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zclconf/go-cty v1.14.1 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index 790bd37..3df323d 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,8 @@ github.com/adrianbrad/queue v1.3.0 h1:8FH1N+93HXbqta5+URa1AL+diV7MP3VDXAEnP+DNp4 github.com/adrianbrad/queue v1.3.0/go.mod h1:wYiPC/3MPbyT45QHLrPR4zcqJWPePubM1oEP/xTwhUs= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alpacanetworks/alpacon-cli v0.0.3-0.20250109074532-43ca315d9a7a h1:9XZMcjj8FfItURuZH/y6eeOvpMWyFkROy5l1JKXrHpY= 
+github.com/alpacanetworks/alpacon-cli v0.0.3-0.20250109074532-43ca315d9a7a/go.mod h1:NGHAye9qmT70QD1H3Sd4evpHrTiphqJ6RXKIrp8unRQ= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -47,6 +49,8 @@ github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbu github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= +github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= @@ -71,6 +75,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mitchellh/go-wordwrap v1.0.1 
h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -79,6 +85,8 @@ github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdh github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -110,6 +118,8 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= @@ -127,6 +137,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 
h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 76886b3..23bfd45 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -41,10 +41,10 @@ install_atlas_cli() { fi } -install_alpamon() { - "$ALPAMON_BIN" install +setup_alpamon() { + "$ALPAMON_BIN" setup if [ $? -ne 0 ]; then - echo "Error: Alpamon install command failed." + echo "Error: Alpamon setup command failed." exit 1 fi } From 090ae42418e5289a17e8f4be52fb8aba2050cbf1 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 4 Feb 2025 13:42:55 +0900 Subject: [PATCH 181/364] Modify postinstall.sh to skip setup on upgrade --- cmd/alpamon/command/{install.go => setup.go} | 0 scripts/postinstall.sh | 16 ++++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) rename cmd/alpamon/command/{install.go => setup.go} (100%) diff --git a/cmd/alpamon/command/install.go b/cmd/alpamon/command/setup.go similarity index 100% rename from cmd/alpamon/command/install.go rename to cmd/alpamon/command/setup.go diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 23bfd45..297e958 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -7,7 +7,11 @@ main() { check_systemd_status check_alpamon_binary install_atlas_cli - install_alpamon + + if is_new_installation "$@"; then + setup_alpamon + fi + start_systemd_service } @@ -60,5 +64,13 @@ start_systemd_service() { echo "Alpamon has been installed as a systemd service and will be launched automatically on system boot." 
} +is_new_installation() { + if [ -z "$2" ]; then + return 0 # first install + else + return 1 # upgrade + fi +} + set -ue -main \ No newline at end of file +main "$@" \ No newline at end of file From 0ece8982a464498ca00e660da8d046b67a8c338a Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Feb 2025 11:06:25 +0900 Subject: [PATCH 182/364] Add User-Agent header to requests --- pkg/runner/client.go | 2 ++ pkg/runner/ftp.go | 4 +++- pkg/scheduler/session.go | 2 ++ pkg/utils/utils.go | 5 +++++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index c76726d..d4de8db 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/scheduler" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/cenkalti/backoff" "github.com/gorilla/websocket" "github.com/rs/zerolog/log" @@ -36,6 +37,7 @@ func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { headers := http.Header{ "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key)}, "Origin": {config.GlobalSettings.ServerURL}, + "User-Agent": {utils.GetUserAgent()}, } return &WebsocketClient{ diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 33fced5..43028e1 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "io" "net/http" "os" @@ -25,7 +26,8 @@ type FtpClient struct { func NewFtpClient(data FtpConfigData) *FtpClient { headers := http.Header{ - "Origin": {data.ServerURL}, + "Origin": {data.ServerURL}, + "User-Agent": {utils.GetUserAgent()}, } return &FtpClient{ diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 9b67a2a..0807b86 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -114,6 +114,7 @@ func (session *Session) 
newRequest(method, url string, rawBody interface{}) (*ht func (session *Session) do(req *http.Request, timeout time.Duration) ([]byte, int, error) { session.Client.Timeout = timeout * time.Second req.Header.Set("Authorization", session.authorization) + req.Header.Set("User-Agent", utils.GetUserAgent()) if req.Method == http.MethodPost || req.Method == http.MethodPut || req.Method == http.MethodPatch { req.Header.Set("Content-Type", "application/json") @@ -165,6 +166,7 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT session.Client.Timeout = timeout * time.Second req.Header.Set("Authorization", session.authorization) + req.Header.Set("User-Agent", utils.GetUserAgent()) req.Header.Set("Content-Type", contentType) resp, err := session.Client.Do(req) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 108aeb6..eb4b71a 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/version" "net/url" "os" "os/user" @@ -202,3 +203,7 @@ func GetLatestVersion() string { return release.GetTagName() } + +func GetUserAgent() string { + return fmt.Sprintf("%s/%s", "alpamon", version.Version) +} From f32f1281974928315a97b0de46b7ee95135644cf Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 17 Feb 2025 14:01:19 +0900 Subject: [PATCH 183/364] Update runFileUpload Update runFileUploadas as a result of WebFTP refactoring in Alpacon. Add makeMultiPartBody() to differentiate how the request body is created based on the value of UseBlob. Add fileUpload to differentiate the file upload method based on the value of UseBlob. 
--- pkg/runner/command.go | 89 +++++++++++++++++++++++++++++-------- pkg/runner/command_types.go | 1 + 2 files changed, 71 insertions(+), 19 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index e6fb41b..881a18a 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "errors" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/version" "io" "mime/multipart" "net/http" @@ -23,6 +22,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/config" "github.com/alpacanetworks/alpamon-go/pkg/scheduler" "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon-go/pkg/version" "github.com/google/uuid" "github.com/rs/zerolog/log" "gopkg.in/go-playground/validator.v9" @@ -544,40 +544,58 @@ func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, result st return 1, err.Error() } - var requestBody bytes.Buffer - writer := multipart.NewWriter(&requestBody) - - fileWriter, err := writer.CreateFormFile("content", filepath.Base(name)) + requestBody, contentType, err := makeMultiPartBody(output, filepath.Base(name), cr.data.UseBlob, recursive) if err != nil { + log.Error().Err(err).Msgf("Failed to make request body") return 1, err.Error() } - _, err = fileWriter.Write(output) + _, statusCode, err := cr.fileUpload(requestBody, contentType) if err != nil { + log.Error().Err(err).Msgf("Failed to upload file: %s", fileName) return 1, err.Error() } - if recursive { - err = writer.WriteField("name", filepath.Base(name)) + if statusCode == http.StatusOK { + return 0, fmt.Sprintf("Successfully uploaded %s.", fileName) + } + + return 1, "You do not have permission to read on the directory. 
or directory does not exist" +} + +func (cr *CommandRunner) fileUpload(body bytes.Buffer, contentType string) ([]byte, int, error) { + var responseBody []byte + var statusCode int + var err error + if cr.data.UseBlob { + req, err := http.NewRequest(http.MethodPut, cr.data.Content, &body) if err != nil { - return 1, err.Error() + return nil, 0, err } - } - _ = writer.Close() + req.Header.Set("Content-Type", contentType) - contentType := writer.FormDataContentType() + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, 0, err + } - _, statusCode, err := cr.wsClient.apiSession.MultipartRequest(cr.data.Content, requestBody, contentType, 600) - if err != nil { - return 1, err.Error() - } + defer func() { _ = resp.Body.Close() }() - if statusCode == http.StatusOK { - return 0, fmt.Sprintf("Successfully uploaded %s.", fileName) + statusCode = resp.StatusCode + responseBody, err = io.ReadAll(resp.Body) + if err != nil { + return nil, resp.StatusCode, err + } + } else { + responseBody, statusCode, err = cr.wsClient.apiSession.MultipartRequest(cr.data.Content, body, contentType, 600) + if err != nil { + return responseBody, statusCode, err + } } - return 1, "You do not have permission to read on the directory. 
or directory does not exist" + return responseBody, statusCode, nil } func (cr *CommandRunner) runFileDownload(fileName string) (exitCode int, result string) { @@ -781,6 +799,39 @@ func makeArchive(paths []string, bulk, recursive bool, sysProcAttr *syscall.SysP return archiveName, nil } +func makeMultiPartBody(output []byte, filePath string, useBlob, isRecursive bool) (bytes.Buffer, string, error) { + var requestBody bytes.Buffer + writer := multipart.NewWriter(&requestBody) + + fieldName := "file" + if !useBlob { + fieldName = "content" + } + + fileWriter, err := writer.CreateFormFile(fieldName, filePath) + if err != nil { + return bytes.Buffer{}, "", err + } + + if !useBlob && isRecursive { + err = writer.WriteField("name", filePath) + if err != nil { + return bytes.Buffer{}, "", err + } + } + + _, err = fileWriter.Write(output) + if err != nil { + return bytes.Buffer{}, "", err + } + + _ = writer.Close() + + contentType := writer.FormDataContentType() + + return requestBody, contentType, nil +} + func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode int, result string) { var cmd *exec.Cmd content, err := getFileData(data) diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index a7a8564..9c076e7 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -49,6 +49,7 @@ type CommandData struct { Files []File `json:"files,omitempty"` AllowOverwrite bool `json:"allow_overwrite,omitempty"` AllowUnzip bool `json:"allow_unzip,omitempty"` + UseBlob bool `json:"use_blob,omitempty"` Keys []string `json:"keys"` } From 34cc617ae2c9ec120bf01e4378a2e529d31153e0 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 17 Feb 2025 15:46:25 +0900 Subject: [PATCH 184/364] Rename makeMultiPartBody to makeRequestBody Due to an issue where files were being corrupted when using multipart uploads with S3, fix makeMultiPartBody() to prevent creating a multipart body when useBlob is true. 
Following this change, rename makeMultiPartBody() to makeRequestBody(). --- pkg/runner/command.go | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 881a18a..3184758 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -544,7 +544,7 @@ func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, result st return 1, err.Error() } - requestBody, contentType, err := makeMultiPartBody(output, filepath.Base(name), cr.data.UseBlob, recursive) + requestBody, contentType, err := makeRequestBody(output, filepath.Base(name), cr.data.UseBlob, recursive) if err != nil { log.Error().Err(err).Msgf("Failed to make request body") return 1, err.Error() @@ -573,8 +573,6 @@ func (cr *CommandRunner) fileUpload(body bytes.Buffer, contentType string) ([]by return nil, 0, err } - req.Header.Set("Content-Type", contentType) - client := &http.Client{} resp, err := client.Do(req) if err != nil { @@ -799,37 +797,35 @@ func makeArchive(paths []string, bulk, recursive bool, sysProcAttr *syscall.SysP return archiveName, nil } -func makeMultiPartBody(output []byte, filePath string, useBlob, isRecursive bool) (bytes.Buffer, string, error) { +func makeRequestBody(output []byte, filePath string, useBlob, isRecursive bool) (bytes.Buffer, string, error) { var requestBody bytes.Buffer + + if useBlob { + requestBody = *bytes.NewBuffer(output) + return requestBody, "", nil + } + writer := multipart.NewWriter(&requestBody) + defer func() { _ = writer.Close() }() - fieldName := "file" - if !useBlob { - fieldName = "content" + fileWriter, err := writer.CreateFormFile("content", filePath) + if err != nil { + return bytes.Buffer{}, "", err } - fileWriter, err := writer.CreateFormFile(fieldName, filePath) + _, err = fileWriter.Write(output) if err != nil { return bytes.Buffer{}, "", err } - if !useBlob && isRecursive { + if isRecursive { err = writer.WriteField("name", filePath) if 
err != nil { return bytes.Buffer{}, "", err } } - _, err = fileWriter.Write(output) - if err != nil { - return bytes.Buffer{}, "", err - } - - _ = writer.Close() - - contentType := writer.FormDataContentType() - - return requestBody, contentType, nil + return requestBody, writer.FormDataContentType(), nil } func fileDownload(data CommandData, sysProcAttr *syscall.SysProcAttr) (exitCode int, result string) { From a54d8938a1f8ebb1edd623c4475531c3ab226cef Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 17 Feb 2025 20:13:33 +0900 Subject: [PATCH 185/364] Minor fix Fix to operate correctly even when UseBlob is false. --- pkg/runner/command.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 3184758..172558c 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -806,7 +806,6 @@ func makeRequestBody(output []byte, filePath string, useBlob, isRecursive bool) } writer := multipart.NewWriter(&requestBody) - defer func() { _ = writer.Close() }() fileWriter, err := writer.CreateFormFile("content", filePath) if err != nil { @@ -825,6 +824,8 @@ func makeRequestBody(output []byte, filePath string, useBlob, isRecursive bool) } } + _ = writer.Close() + return requestBody, writer.FormDataContentType(), nil } From 1efd6733bea8910980c9be3cbba8e91202258d8e Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 18 Feb 2025 10:51:00 +0900 Subject: [PATCH 186/364] Fix setup command to apply automatic configuration in non-interactive mode --- cmd/alpamon/command/setup.go | 20 ++++++++++++++------ scripts/postinstall.sh | 3 ++- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/cmd/alpamon/command/setup.go b/cmd/alpamon/command/setup.go index 9afdaba..e5aec46 100644 --- a/cmd/alpamon/command/setup.go +++ b/cmd/alpamon/command/setup.go @@ -6,9 +6,11 @@ import ( cli "github.com/alpacanetworks/alpacon-cli/utils" "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/spf13/cobra" + 
"golang.org/x/term" "os" "os/exec" "path/filepath" + "syscall" "text/template" ) @@ -44,14 +46,20 @@ var setupCmd = &cobra.Command{ configExists := fileExists(configTarget) isOverwrite := true - if configExists { - fmt.Println("A configuration file already exists at:", configTarget) - isOverwrite = cli.PromptForBool("Do you want to overwrite it with a new configuration?: ") + if term.IsTerminal(syscall.Stdin) { + if configExists { + fmt.Println("A configuration file already exists at:", configTarget) + isOverwrite = cli.PromptForBool("Do you want to overwrite it with a new configuration?: ") + } + + if !isOverwrite { + fmt.Println("Keeping the existing configuration file. Skipping configuration update.") + return nil + } } - if !isOverwrite { - fmt.Println("Keeping the existing configuration file. Skipping configuration update.") - return nil + if !configExists || isOverwrite { + fmt.Println("Applying a new configuration automatically.") } err := copyEmbeddedFile(tmpFilePath, tmpFileTarget) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 297e958..26d5bd5 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -72,5 +72,6 @@ is_new_installation() { fi } -set -ue +# Exit on error +set -e main "$@" \ No newline at end of file From f7d5ca1c708aa075ce5c3df59039d1ec3b18caf3 Mon Sep 17 00:00:00 2001 From: Eunyoung Jeong Date: Sun, 23 Feb 2025 00:24:58 +0900 Subject: [PATCH 187/364] Update repository URL --- .github/workflows/release.yml | 5 ++--- README.md | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2f9c2f4..8c6e668 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -67,7 +67,7 @@ jobs: uses: danielmundi/upload-packagecloud@v1 with: package-name: alpamon_${{ github.ref_name }}_linux_amd64.deb - packagecloud-username: alpacanetworks + packagecloud-username: alpacax packagecloud-repo: alpamon packagecloud-distrib: 
any/any packagecloud-token: ${{ secrets.PACKAGECLOUD_TOKEN }} @@ -76,8 +76,7 @@ jobs: uses: danielmundi/upload-packagecloud@v1 with: package-name: alpamon_${{ github.ref_name }}_linux_amd64.rpm - packagecloud-username: alpacanetworks + packagecloud-username: alpacax packagecloud-repo: alpamon packagecloud-distrib: rpm_any/rpm_any packagecloud-token: ${{ secrets.PACKAGECLOUD_TOKEN }} - diff --git a/README.md b/README.md index 59f57da..1296167 100644 --- a/README.md +++ b/README.md @@ -17,14 +17,14 @@ Download the latest `Alpamon-Go` directly from our releases page or install it u #### Debian and Ubuntu ```bash -curl -s https://packagecloud.io/install/repositories/alpacanetworks/alpamon/script.deb.sh?any=true | sudo bash +curl -s https://packagecloud.io/install/repositories/alpacax/alpamon/script.deb.sh?any=true | sudo bash sudo apt-get install alpamon ``` #### CentOS and RHEL ```bash -curl -s https://packagecloud.io/install/repositories/alpacanetworks/alpamon/script.rpm.sh?any=true | sudo bash +curl -s https://packagecloud.io/install/repositories/alpacax/alpamon/script.rpm.sh?any=true | sudo bash sudo yum install alpamon ``` From 844b9e2d7f8b50786275c41bf8afc10c5cef015f Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 24 Feb 2025 16:54:24 +0900 Subject: [PATCH 188/364] Refactor fileUpload method --- pkg/runner/command.go | 33 ++++++--------------------------- pkg/scheduler/session.go | 27 ++++++++++++++++++--------- 2 files changed, 24 insertions(+), 36 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 172558c..a74cee0 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -28,6 +28,10 @@ import ( "gopkg.in/go-playground/validator.v9" ) +const ( + fileUploadTimeout = 60 * 10 * time.Second +) + func NewCommandRunner(wsClient *WebsocketClient, command Command, data CommandData) *CommandRunner { var name string if command.ID != "" { @@ -564,36 +568,11 @@ func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, 
result st } func (cr *CommandRunner) fileUpload(body bytes.Buffer, contentType string) ([]byte, int, error) { - var responseBody []byte - var statusCode int - var err error if cr.data.UseBlob { - req, err := http.NewRequest(http.MethodPut, cr.data.Content, &body) - if err != nil { - return nil, 0, err - } - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return nil, 0, err - } - - defer func() { _ = resp.Body.Close() }() - - statusCode = resp.StatusCode - responseBody, err = io.ReadAll(resp.Body) - if err != nil { - return nil, resp.StatusCode, err - } - } else { - responseBody, statusCode, err = cr.wsClient.apiSession.MultipartRequest(cr.data.Content, body, contentType, 600) - if err != nil { - return responseBody, statusCode, err - } + return cr.wsClient.apiSession.Put(cr.data.Content, body, fileUploadTimeout) } - return responseBody, statusCode, nil + return cr.wsClient.apiSession.MultipartRequest(cr.data.Content, body, contentType, fileUploadTimeout) } func (cr *CommandRunner) runFileDownload(fileName string) (exitCode int, result string) { diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 0807b86..3fac722 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -158,6 +158,24 @@ func (session *Session) Get(url string, timeout time.Duration) ([]byte, int, err return session.do(req, timeout) } +func (session *Session) Post(url string, rawBody interface{}, timeout time.Duration) ([]byte, int, error) { + req, err := session.newRequest(http.MethodPost, url, rawBody) + if err != nil { + return nil, 0, err + } + + return session.do(req, timeout) +} + +func (session *Session) Put(url string, body bytes.Buffer, timeout time.Duration) ([]byte, int, error) { + req, err := session.newRequest(http.MethodPut, url, body) + if err != nil { + return nil, 0, err + } + + return session.do(req, timeout) +} + func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentType string, timeout 
time.Duration) ([]byte, int, error) { req, err := http.NewRequest(http.MethodPost, url, &body) if err != nil { @@ -183,12 +201,3 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT return responseBody, resp.StatusCode, nil } - -func (session *Session) Post(url string, rawBody interface{}, timeout time.Duration) ([]byte, int, error) { - req, err := session.newRequest(http.MethodPost, url, rawBody) - if err != nil { - return nil, 0, err - } - - return session.do(req, timeout) -} From ba959f8ded39d6a9bd45020c65e52f2157448521 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 24 Feb 2025 17:06:37 +0900 Subject: [PATCH 189/364] Minor fix --- pkg/runner/command.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index a74cee0..8407d50 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -548,7 +548,7 @@ func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, result st return 1, err.Error() } - requestBody, contentType, err := makeRequestBody(output, filepath.Base(name), cr.data.UseBlob, recursive) + requestBody, contentType, err := createMultipartBody(output, filepath.Base(name), cr.data.UseBlob, recursive) if err != nil { log.Error().Err(err).Msgf("Failed to make request body") return 1, err.Error() @@ -776,14 +776,12 @@ func makeArchive(paths []string, bulk, recursive bool, sysProcAttr *syscall.SysP return archiveName, nil } -func makeRequestBody(output []byte, filePath string, useBlob, isRecursive bool) (bytes.Buffer, string, error) { - var requestBody bytes.Buffer - +func createMultipartBody(output []byte, filePath string, useBlob, isRecursive bool) (bytes.Buffer, string, error) { if useBlob { - requestBody = *bytes.NewBuffer(output) - return requestBody, "", nil + return *bytes.NewBuffer(output), "", nil } + var requestBody bytes.Buffer writer := multipart.NewWriter(&requestBody) fileWriter, err := writer.CreateFormFile("content", 
filePath) From 81bfe1cf45eac97f289c30422c23af50aa0f633f Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 24 Feb 2025 17:24:00 +0900 Subject: [PATCH 190/364] Minor fix --- pkg/runner/command.go | 2 +- pkg/utils/http_client.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 pkg/utils/http_client.go diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 8407d50..179e60d 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -569,7 +569,7 @@ func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, result st func (cr *CommandRunner) fileUpload(body bytes.Buffer, contentType string) ([]byte, int, error) { if cr.data.UseBlob { - return cr.wsClient.apiSession.Put(cr.data.Content, body, fileUploadTimeout) + return utils.Put(cr.data.Content, body, 0) } return cr.wsClient.apiSession.MultipartRequest(cr.data.Content, body, contentType, fileUploadTimeout) diff --git a/pkg/utils/http_client.go b/pkg/utils/http_client.go new file mode 100644 index 0000000..18f7a3c --- /dev/null +++ b/pkg/utils/http_client.go @@ -0,0 +1,30 @@ +package utils + +import ( + "bytes" + "io" + "net/http" + "time" +) + +func Put(url string, body bytes.Buffer, timeout time.Duration) ([]byte, int, error) { + req, err := http.NewRequest(http.MethodPut, url, &body) + if err != nil { + return nil, 0, err + } + + client := &http.Client{Timeout: timeout} + + resp, err := client.Do(req) + if err != nil { + return nil, 0, err + } + defer func() { _ = resp.Body.Close() }() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, resp.StatusCode, err + } + + return respBody, resp.StatusCode, nil +} From 5976d35080ceb2c6e01fbe336c50ff282fd72529 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 25 Feb 2025 09:48:13 +0900 Subject: [PATCH 191/364] Refactor pidFile --- cmd/alpamon/command/root.go | 2 +- pkg/pidfile/pidfile.go | 26 +------------------------- pkg/pidfile/pidfile_darwin.go | 8 ++++++++ 
pkg/pidfile/pidfile_linux.go | 5 +++++ 4 files changed, 15 insertions(+), 26 deletions(-) create mode 100644 pkg/pidfile/pidfile_darwin.go create mode 100644 pkg/pidfile/pidfile_linux.go diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index f48f6f4..b07b199 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -35,7 +35,7 @@ func runAgent() { utils.InitPlatform() // Pid - pidFilePath, err := pidfile.WritePID() + pidFilePath, err := pidfile.WritePID(pidfile.FilePath("alpamon")) if err != nil { _, _ = fmt.Fprintln(os.Stderr, "Failed to create PID file", err.Error()) os.Exit(1) diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go index 8fa2fc5..045d511 100644 --- a/pkg/pidfile/pidfile.go +++ b/pkg/pidfile/pidfile.go @@ -2,7 +2,6 @@ package pidfile import ( "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/utils" "os" "path/filepath" "strconv" @@ -10,22 +9,12 @@ import ( "syscall" ) -const ( - pidFilePathDarwin = "/tmp/alpamon.pid" - pidFilePathDefault = "/var/run/alpamon.pid" -) - // WritePID writes the current PID to a file, ensuring that the file // doesn't exist or doesn't contain a PID for a running process. // // Based on a function from the Datadog Agent. // Reference : https://github.com/DataDog/datadog-agent -func WritePID() (string, error) { - pidFilePath, err := setupPIDFilePath() - if err != nil { - return "", err - } - +func WritePID(pidFilePath string) (string, error) { // check whether the pidfile exists and contains the PID for a running proc... 
if byteContent, err := os.ReadFile(pidFilePath); err == nil { pidStr := strings.TrimSpace(string(byteContent)) @@ -54,16 +43,3 @@ func WritePID() (string, error) { func isProcess(pid int) bool { return syscall.Kill(pid, 0) == nil } - -func setupPIDFilePath() (string, error) { - var pidFilePath string - - switch utils.PlatformLike { - case "darwin": - pidFilePath = pidFilePathDarwin - default: - pidFilePath = pidFilePathDefault - } - - return pidFilePath, nil -} diff --git a/pkg/pidfile/pidfile_darwin.go b/pkg/pidfile/pidfile_darwin.go new file mode 100644 index 0000000..90c9bf7 --- /dev/null +++ b/pkg/pidfile/pidfile_darwin.go @@ -0,0 +1,8 @@ +package pidfile + +import "fmt" + +// Use /tmp for testing on macOS. +func FilePath(name string) string { + return fmt.Sprintf("/tmp/%s", name) +} diff --git a/pkg/pidfile/pidfile_linux.go b/pkg/pidfile/pidfile_linux.go new file mode 100644 index 0000000..f5126fd --- /dev/null +++ b/pkg/pidfile/pidfile_linux.go @@ -0,0 +1,5 @@ +package pidfile + +func FilePath(name string) string { + return fmt.Sprintf("/var/run/%s", name) +} From 8981b1d97646bb92736dd34d294b007665cc7118 Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 26 Feb 2025 20:18:11 +0900 Subject: [PATCH 192/364] Fix prevent race condition by using context for timeouts --- pkg/scheduler/session.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 3fac722..c496993 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -20,7 +20,7 @@ import ( const ( checkSessionURL = "/api/servers/servers/-/" - maxRetryTimeout = 3 * 24 * time.Hour + MaxRetryTimeout = 3 * 24 * time.Hour ) func InitSession() *Session { @@ -54,7 +54,7 @@ func InitSession() *Session { func (session *Session) CheckSession() bool { timeout := config.MinConnectInterval - ctx, cancel := context.WithTimeout(context.Background(), maxRetryTimeout) + ctx, cancel := 
context.WithTimeout(context.Background(), MaxRetryTimeout) defer cancel() for { @@ -112,7 +112,11 @@ func (session *Session) newRequest(method, url string, rawBody interface{}) (*ht } func (session *Session) do(req *http.Request, timeout time.Duration) ([]byte, int, error) { - session.Client.Timeout = timeout * time.Second + ctx, cancel := context.WithTimeout(req.Context(), timeout*time.Second) + defer cancel() + + req = req.WithContext(ctx) + req.Header.Set("Authorization", session.authorization) req.Header.Set("User-Agent", utils.GetUserAgent()) @@ -167,8 +171,17 @@ func (session *Session) Post(url string, rawBody interface{}, timeout time.Durat return session.do(req, timeout) } -func (session *Session) Put(url string, body bytes.Buffer, timeout time.Duration) ([]byte, int, error) { - req, err := session.newRequest(http.MethodPut, url, body) +func (session *Session) Put(url string, rawBody interface{}, timeout time.Duration) ([]byte, int, error) { + req, err := session.newRequest(http.MethodPut, url, rawBody) + if err != nil { + return nil, 0, err + } + + return session.do(req, timeout) +} + +func (session *Session) Patch(url string, rawBody interface{}, timeout time.Duration) ([]byte, int, error) { + req, err := session.newRequest(http.MethodPatch, url, rawBody) if err != nil { return nil, 0, err } @@ -182,7 +195,11 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT return nil, 0, err } - session.Client.Timeout = timeout * time.Second + ctx, cancel := context.WithTimeout(req.Context(), timeout*time.Second) + defer cancel() + + req = req.WithContext(ctx) + req.Header.Set("Authorization", session.authorization) req.Header.Set("User-Agent", utils.GetUserAgent()) req.Header.Set("Content-Type", contentType) From 3ddfddc91b706f1226fe311863e20172abd87df5 Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 26 Feb 2025 20:20:23 +0900 Subject: [PATCH 193/364] Minor fix --- cmd/alpamon/command/root.go | 6 ++++-- pkg/config/config.go | 15 
+++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index b07b199..3d45c3a 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -26,6 +26,8 @@ var RootCmd = &cobra.Command{ }, } +const name = "alpamon" + func init() { RootCmd.AddCommand(setupCmd, ftpCmd) } @@ -35,7 +37,7 @@ func runAgent() { utils.InitPlatform() // Pid - pidFilePath, err := pidfile.WritePID(pidfile.FilePath("alpamon")) + pidFilePath, err := pidfile.WritePID(pidfile.FilePath(name)) if err != nil { _, _ = fmt.Fprintln(os.Stderr, "Failed to create PID file", err.Error()) os.Exit(1) @@ -45,7 +47,7 @@ func runAgent() { fmt.Printf("alpamon version %s starting.\n", version.Version) // Config & Settings - settings := config.LoadConfig() + settings := config.LoadConfig(config.Files(name)) config.InitSettings(settings) // Session diff --git a/pkg/config/config.go b/pkg/config/config.go index 2d1a90c..1b88ddd 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( "crypto/tls" + "fmt" "os" "path/filepath" "strings" @@ -13,11 +14,6 @@ import ( ) var ( - configFiles = []string{ - "/etc/alpamon/alpamon.conf", - filepath.Join(os.Getenv("HOME"), ".alpamon.conf"), - } - GlobalSettings Settings ) @@ -31,7 +27,7 @@ func InitSettings(settings Settings) { GlobalSettings = settings } -func LoadConfig() Settings { +func LoadConfig(configFiles []string) Settings { var iniData *ini.File var err error var validConfigFile string @@ -139,3 +135,10 @@ func validateConfig(config Config) (bool, Settings) { } return valid, settings } + +func Files(name string) []string { + return []string{ + fmt.Sprintf("/etc/alpamon/%s.conf", name), + filepath.Join(os.Getenv("HOME"), fmt.Sprintf(".%s.conf", name)), + } +} From 108d67e8d27c86d3411487a3820ae1af0163b420 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 4 Mar 2025 09:27:49 +0900 Subject: [PATCH 194/364] Minor fix --- 
pkg/runner/client.go | 62 +++++++++++++++++++++---------------------- pkg/runner/command.go | 2 +- pkg/runner/pty.go | 8 ++---- 3 files changed, 34 insertions(+), 38 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index d4de8db..7d311c2 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -18,7 +18,7 @@ import ( const ( minConnectInterval = 5 * time.Second maxConnectInterval = 60 * time.Second - connectionReadTimeout = 35 * time.Minute + ConnectionReadTimeout = 35 * time.Minute maxRetryTimeout = 3 * 24 * time.Hour eventCommandAckURL = "/api/events/commands/%s/ack/" @@ -26,11 +26,11 @@ const ( ) type WebsocketClient struct { - conn *websocket.Conn + Conn *websocket.Conn requestHeader http.Header apiSession *scheduler.Session RestartRequested bool - quitChan chan struct{} + QuitChan chan struct{} } func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { @@ -44,35 +44,35 @@ func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { requestHeader: headers, apiSession: session, RestartRequested: false, - quitChan: make(chan struct{}), + QuitChan: make(chan struct{}), } } func (wc *WebsocketClient) RunForever() { - wc.connect() - defer wc.close() + wc.Connect() + defer wc.Close() for { select { - case <-wc.quitChan: + case <-wc.QuitChan: return default: - err := wc.conn.SetReadDeadline(time.Now().Add(connectionReadTimeout)) + err := wc.Conn.SetReadDeadline(time.Now().Add(ConnectionReadTimeout)) if err != nil { - wc.closeAndReconnect() + wc.CloseAndReconnect() } - _, message, err := wc.readMessage() + _, message, err := wc.ReadMessage() if err != nil { - wc.closeAndReconnect() + wc.CloseAndReconnect() } // Sends "ping" query for Alpacon to verify WebSocket session status without error handling. 
- _ = wc.sendPingQuery() + _ = wc.SendPingQuery() wc.commandRequestHandler(message) } } } -func (wc *WebsocketClient) sendPingQuery() error { +func (wc *WebsocketClient) SendPingQuery() error { pingQuery := map[string]string{"query": "ping"} err := wc.writeJSON(pingQuery) if err != nil { @@ -82,8 +82,8 @@ func (wc *WebsocketClient) sendPingQuery() error { return nil } -func (wc *WebsocketClient) readMessage() (messageType int, message []byte, err error) { - messageType, message, err = wc.conn.ReadMessage() +func (wc *WebsocketClient) ReadMessage() (messageType int, message []byte, err error) { + messageType, message, err = wc.Conn.ReadMessage() if err != nil { return 0, nil, err } @@ -91,7 +91,7 @@ func (wc *WebsocketClient) readMessage() (messageType int, message []byte, err e return messageType, message, nil } -func (wc *WebsocketClient) connect() { +func (wc *WebsocketClient) Connect() { log.Info().Msgf("Connecting to websocket at %s...", config.GlobalSettings.WSPath) ctx, cancel := context.WithTimeout(context.Background(), maxRetryTimeout) @@ -116,7 +116,7 @@ func (wc *WebsocketClient) connect() { return err } - wc.conn = conn + wc.Conn = conn log.Debug().Msg("Backhaul connection established.") return nil } @@ -129,32 +129,32 @@ func (wc *WebsocketClient) connect() { } } -func (wc *WebsocketClient) closeAndReconnect() { - wc.close() - wc.connect() +func (wc *WebsocketClient) CloseAndReconnect() { + wc.Close() + wc.Connect() } // Cleanly close the websocket connection by sending a close message // Do not close quitChan, as the purpose here is to disconnect the WebSocket, // not to terminate RunForever. 
-func (wc *WebsocketClient) close() { - if wc.conn != nil { - err := wc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) +func (wc *WebsocketClient) Close() { + if wc.Conn != nil { + err := wc.Conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) if err != nil { log.Debug().Err(err).Msg("Failed to write close message to websocket.") } - _ = wc.conn.Close() + _ = wc.Conn.Close() } } -func (wc *WebsocketClient) quit() { - wc.close() - close(wc.quitChan) +func (wc *WebsocketClient) Quit() { + wc.Close() + close(wc.QuitChan) } func (wc *WebsocketClient) restart() { wc.RestartRequested = true - wc.quit() + wc.Quit() } func (wc *WebsocketClient) commandRequestHandler(message []byte) { @@ -190,17 +190,17 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { go runner.Run() case "quit": log.Debug().Msgf("Quit requested for reason: %s", content.Reason) - wc.quit() + wc.Quit() case "reconnect": log.Debug().Msgf("Reconnect requested for reason: %s", content.Reason) - wc.close() + wc.Close() default: log.Warn().Msgf("Not implemented query: %s", content.Query) } } func (wc *WebsocketClient) writeJSON(data interface{}) error { - err := wc.conn.WriteJSON(data) + err := wc.Conn.WriteJSON(data) if err != nil { log.Debug().Err(err).Msgf("Failed to write json data to websocket.") return err diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 179e60d..9ba259f 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -197,7 +197,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 0, "Alpamon will restart in 1 second." case "quit": time.AfterFunc(1*time.Second, func() { - cr.wsClient.quit() + cr.wsClient.Quit() }) return 0, "Alpamon will quit in 1 second." 
case "reboot": diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 435e5bd..26f965b 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -91,12 +91,8 @@ func (pc *PtyClient) RunPtyBackground() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - go func() { - pc.readFromWebsocket(ctx, cancel) - }() - go func() { - pc.readFromPTY(ctx, cancel) - }() + pc.readFromWebsocket(ctx, cancel) + pc.readFromPTY(ctx, cancel) terminals[pc.sessionID] = pc From bce2a0c76247b98eb2c395c3b3d2ededba6dfd4b Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 4 Mar 2025 13:42:23 +0900 Subject: [PATCH 195/364] Return accurate exit code on command execution failure --- pkg/runner/shell.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index d5cfd2f..4034275 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -126,7 +126,10 @@ func runCmd(args []string, username, groupname string, env map[string]string, ti output, err := cmd.Output() if err != nil { - return 1, err.Error() + if exitError, ok := err.(*exec.ExitError); ok { + return exitError.ExitCode(), err.Error() + } + return -1, err.Error() } return 0, string(output) From f6c46807d05b871bf8bf5047333ad9dbe3f96bfc Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 5 Mar 2025 16:58:56 +0900 Subject: [PATCH 196/364] Improve function naming and split utils package --- cmd/alpamon/command/root.go | 4 +- pkg/runner/client.go | 6 +-- pkg/runner/command.go | 28 ++++++------- pkg/runner/ftp.go | 79 +++--------------------------------- pkg/runner/shell.go | 15 ++++++- pkg/scheduler/session.go | 4 +- pkg/utils/fs.go | 80 +++++++++++++++++++++++++++++++++++++ pkg/utils/metrics.go | 52 ++++++++++++++++++++++++ pkg/utils/utils.go | 59 +++------------------------ 9 files changed, 176 insertions(+), 151 deletions(-) create mode 100644 pkg/utils/fs.go create mode 100644 pkg/utils/metrics.go diff --git a/cmd/alpamon/command/root.go 
b/cmd/alpamon/command/root.go index 3d45c3a..d454d66 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -89,9 +89,7 @@ func runAgent() { return } - args := os.Args - - err = syscall.Exec(executable, args, os.Environ()) + err = syscall.Exec(executable, os.Args, os.Environ()) if err != nil { log.Error().Err(err).Msg("Failed to restart the program") } diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 7d311c2..3b4a34d 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -37,7 +37,7 @@ func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { headers := http.Header{ "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key)}, "Origin": {config.GlobalSettings.ServerURL}, - "User-Agent": {utils.GetUserAgent()}, + "User-Agent": {utils.GetUserAgent("alpamon")}, } return &WebsocketClient{ @@ -186,8 +186,8 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { 10, time.Time{}, ) - runner := NewCommandRunner(wc, content.Command, data) - go runner.Run() + commandRunner := NewCommandRunner(wc, content.Command, data) + go commandRunner.Run() case "quit": log.Debug().Msgf("Quit requested for reason: %s", content.Reason) wc.Quit() diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 9ba259f..6be5755 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -257,7 +257,7 @@ func (cr *CommandRunner) handleShellCmd(command, user, group string, env map[str for _, arg := range spl { switch arg { case "&&": - exitCode, result = runCmd(args, user, group, env, 0) + exitCode, result = runCmdWithOutput(args, user, group, env, 0) results += result // stop executing if command fails if exitCode != 0 { @@ -265,7 +265,7 @@ func (cr *CommandRunner) handleShellCmd(command, user, group string, env map[str } args = []string{} case "||": - exitCode, result = runCmd(args, user, group, env, 0) + exitCode, result = runCmdWithOutput(args, user, group, env, 0) 
results += result // execute next only if command fails if exitCode == 0 { @@ -273,13 +273,13 @@ func (cr *CommandRunner) handleShellCmd(command, user, group string, env map[str } args = []string{} case ";": - exitCode, result = runCmd(args, user, group, env, 0) + exitCode, result = runCmdWithOutput(args, user, group, env, 0) results += result args = []string{} default: if strings.HasSuffix(arg, ";") { args = append(args, strings.TrimSuffix(arg, ";")) - exitCode, result = runCmd(args, user, group, env, 0) + exitCode, result = runCmdWithOutput(args, user, group, env, 0) results += result args = []string{} } else { @@ -290,7 +290,7 @@ func (cr *CommandRunner) handleShellCmd(command, user, group string, env map[str if len(args) > 0 { log.Debug().Msgf("Running '%s'", strings.Join(args, " ")) - exitCode, result = runCmd(args, user, group, env, 0) + exitCode, result = runCmdWithOutput(args, user, group, env, 0) results += result } @@ -322,7 +322,7 @@ func (cr *CommandRunner) addUser() (exitCode int, result string) { } if utils.PlatformLike == "debian" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/adduser", "--home", data.HomeDirectory, @@ -350,7 +350,7 @@ func (cr *CommandRunner) addUser() (exitCode int, result string) { } // invoke adduser - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/adduser", data.Username, @@ -363,7 +363,7 @@ func (cr *CommandRunner) addUser() (exitCode int, result string) { } } } else if utils.PlatformLike == "rhel" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/useradd", "--home-dir", data.HomeDirectory, @@ -399,7 +399,7 @@ func (cr *CommandRunner) addGroup() (exitCode int, result string) { } if utils.PlatformLike == "debian" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/addgroup", "--gid", strconv.FormatUint(data.GID, 10), @@ -411,7 +411,7 @@ func (cr *CommandRunner) 
addGroup() (exitCode int, result string) { return exitCode, result } } else if utils.PlatformLike == "rhel" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/groupadd", "--gid", strconv.FormatUint(data.GID, 10), @@ -441,7 +441,7 @@ func (cr *CommandRunner) delUser() (exitCode int, result string) { } if utils.PlatformLike == "debian" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/deluser", data.Username, @@ -452,7 +452,7 @@ func (cr *CommandRunner) delUser() (exitCode int, result string) { return exitCode, result } } else if utils.PlatformLike == "rhel" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/userdel", data.Username, @@ -481,7 +481,7 @@ func (cr *CommandRunner) delGroup() (exitCode int, result string) { } if utils.PlatformLike == "debian" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/delgroup", data.Groupname, @@ -492,7 +492,7 @@ func (cr *CommandRunner) delGroup() (exitCode int, result string) { return exitCode, result } } else if utils.PlatformLike == "rhel" { - exitCode, result = runCmd( + exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/groupdel", data.Groupname, diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 43028e1..9fb29ef 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -4,15 +4,13 @@ import ( "context" "encoding/json" "fmt" + "github.com/alpacanetworks/alpamon-go/pkg/logger" "github.com/alpacanetworks/alpamon-go/pkg/utils" - "io" + "github.com/gorilla/websocket" "net/http" "os" "path/filepath" "strings" - - "github.com/alpacanetworks/alpamon-go/pkg/logger" - "github.com/gorilla/websocket" ) type FtpClient struct { @@ -27,7 +25,7 @@ type FtpClient struct { func NewFtpClient(data FtpConfigData) *FtpClient { headers := http.Header{ "Origin": {data.ServerURL}, - "User-Agent": {utils.GetUserAgent()}, + "User-Agent": {utils.GetUserAgent("alpamon")}, } return 
&FtpClient{ @@ -388,7 +386,7 @@ func (fc *FtpClient) cp(src, dst string) (CommandResult, error) { } func (fc *FtpClient) cpDir(src, dst string) (CommandResult, error) { - err := copyDir(src, dst) + err := utils.CopyDir(src, dst) if err != nil { return CommandResult{ Message: err.Error(), @@ -402,7 +400,7 @@ func (fc *FtpClient) cpDir(src, dst string) (CommandResult, error) { } func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { - err := copyFile(src, dst) + err := utils.CopyFile(src, dst) if err != nil { return CommandResult{ Message: err.Error(), @@ -414,70 +412,3 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { Message: fmt.Sprintf("Copy %s to %s", src, dst), }, nil } - -func copyFile(src, dst string) error { - srcFile, err := os.Open(src) - if err != nil { - return err - } - defer func() { _ = srcFile.Close() }() - - dstFile, err := os.Create(dst) - if err != nil { - return err - } - defer func() { _ = dstFile.Close() }() - - if _, err = io.Copy(dstFile, srcFile); err != nil { - return err - } - - srcInfo, err := os.Stat(src) - if err != nil { - return err - } - - if err = os.Chmod(dst, srcInfo.Mode()); err != nil { - return err - } - - return nil -} - -func copyDir(src, dst string) error { - if strings.HasPrefix(dst, src) { - return fmt.Errorf("%s is inside %s, causing infinite recursion", dst, src) - } - - srcInfo, err := os.Stat(src) - if err != nil { - return err - } - - err = os.MkdirAll(dst, srcInfo.Mode()) - if err != nil { - return err - } - - entries, err := os.ReadDir(src) - if err != nil { - return err - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - if err = copyDir(srcPath, dstPath); err != nil { - return err - } - } else { - if err = copyFile(srcPath, dstPath); err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index 4034275..da191ae 100644 
--- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -57,7 +57,7 @@ func demote(username, groupname string) (*syscall.SysProcAttr, error) { }, nil } -func runCmd(args []string, username, groupname string, env map[string]string, timeout int) (exitCode int, result string) { +func runCmdWithOutput(args []string, username, groupname string, env map[string]string, timeout int) (exitCode int, result string) { if env != nil { defaultEnv := getDefaultEnv() for key, value := range defaultEnv { @@ -135,6 +135,19 @@ func runCmd(args []string, username, groupname string, env map[string]string, ti return 0, string(output) } +func RunCmd(command string, args ...string) int { + cmd := exec.Command(command, args...) + + err := cmd.Run() + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + return exitError.ExitCode() + } + return -1 + } + return 0 +} + // && and || operators are handled separately in handleShellCmd func containsShellOperator(args []string) bool { for _, arg := range args { diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index c496993..93a152b 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -118,7 +118,7 @@ func (session *Session) do(req *http.Request, timeout time.Duration) ([]byte, in req = req.WithContext(ctx) req.Header.Set("Authorization", session.authorization) - req.Header.Set("User-Agent", utils.GetUserAgent()) + req.Header.Set("User-Agent", utils.GetUserAgent("alpamon")) if req.Method == http.MethodPost || req.Method == http.MethodPut || req.Method == http.MethodPatch { req.Header.Set("Content-Type", "application/json") @@ -201,7 +201,7 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT req = req.WithContext(ctx) req.Header.Set("Authorization", session.authorization) - req.Header.Set("User-Agent", utils.GetUserAgent()) + req.Header.Set("User-Agent", utils.GetUserAgent("alpamon")) req.Header.Set("Content-Type", contentType) resp, err := session.Client.Do(req) diff 
--git a/pkg/utils/fs.go b/pkg/utils/fs.go new file mode 100644 index 0000000..a457fe5 --- /dev/null +++ b/pkg/utils/fs.go @@ -0,0 +1,80 @@ +package utils + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +func CopyFile(src, dst string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer func() { _ = srcFile.Close() }() + + dstFile, err := os.Create(dst) + if err != nil { + return err + } + defer func() { _ = dstFile.Close() }() + + _, err = io.Copy(dstFile, srcFile) + if err != nil { + return err + } + + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + err = os.Chmod(dst, srcInfo.Mode()) + if err != nil { + return err + } + + return nil +} + +func CopyDir(src, dst string) error { + if strings.HasPrefix(dst, src) { + return fmt.Errorf("%s is inside %s, causing infinite recursion", dst, src) + } + + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + err = os.MkdirAll(dst, srcInfo.Mode()) + if err != nil { + return err + } + + entries, err := os.ReadDir(src) + if err != nil { + return err + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + err = CopyDir(srcPath, dstPath) + if err != nil { + return err + } + } else { + err = CopyFile(srcPath, dstPath) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go new file mode 100644 index 0000000..143eb93 --- /dev/null +++ b/pkg/utils/metrics.go @@ -0,0 +1,52 @@ +package utils + +import ( + "github.com/shirou/gopsutil/v4/disk" + "github.com/shirou/gopsutil/v4/net" + "time" +) + +func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { + if interval == 0 { + return 0, 0 + } + + inputBytesDiff := float64(current.BytesRecv - last.BytesRecv) + outputBytesDiff := float64(current.BytesSent - 
last.BytesSent) + seconds := interval.Seconds() + + inputBps = (inputBytesDiff * 8) / seconds + outputBps = (outputBytesDiff * 8) / seconds + + return inputBps, outputBps +} + +func CalculateNetworkPps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputPps float64, outputPps float64) { + if interval == 0 { + return 0, 0 + } + + inputPktsDiff := float64(current.PacketsRecv - last.PacketsRecv) + outputPktsDiff := float64(current.PacketsSent - last.PacketsSent) + seconds := interval.Seconds() + + inputPps = inputPktsDiff / seconds + outputPps = outputPktsDiff / seconds + + return inputPps, outputPps +} + +func CalculateDiskIOBps(current disk.IOCountersStat, last disk.IOCountersStat, interval time.Duration) (readBps float64, writeBps float64) { + if interval == 0 { + return 0, 0 + } + + readBytesDiff := float64(current.ReadBytes - last.ReadBytes) + writeBytesDiff := float64(current.WriteBytes - last.WriteBytes) + seconds := interval.Seconds() + + readBps = readBytesDiff / seconds + writeBps = writeBytesDiff / seconds + + return readBps, writeBps +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index eb4b71a..bc46985 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -5,6 +5,9 @@ import ( "context" "fmt" "github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/google/go-github/github" + "github.com/rs/zerolog/log" + "github.com/shirou/gopsutil/v4/host" "net/url" "os" "os/user" @@ -12,13 +15,6 @@ import ( "runtime" "strconv" "strings" - "time" - - "github.com/google/go-github/github" - "github.com/rs/zerolog/log" - "github.com/shirou/gopsutil/v4/disk" - "github.com/shirou/gopsutil/v4/host" - "github.com/shirou/gopsutil/v4/net" ) var ( @@ -116,51 +112,6 @@ func ConvertGroupIds(groupIds []string) []uint32 { return gids } -func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { - if interval == 0 { - return 0, 0 - } - - inputBytesDiff 
:= float64(current.BytesRecv - last.BytesRecv) - outputBytesDiff := float64(current.BytesSent - last.BytesSent) - seconds := interval.Seconds() - - inputBps = (inputBytesDiff * 8) / seconds - outputBps = (outputBytesDiff * 8) / seconds - - return inputBps, outputBps -} - -func CalculateNetworkPps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputPps float64, outputPps float64) { - if interval == 0 { - return 0, 0 - } - - inputPktsDiff := float64(current.PacketsRecv - last.PacketsRecv) - outputPktsDiff := float64(current.PacketsSent - last.PacketsSent) - seconds := interval.Seconds() - - inputPps = inputPktsDiff / seconds - outputPps = outputPktsDiff / seconds - - return inputPps, outputPps -} - -func CalculateDiskIOBps(current disk.IOCountersStat, last disk.IOCountersStat, interval time.Duration) (readBps float64, writeBps float64) { - if interval == 0 { - return 0, 0 - } - - readBytesDiff := float64(current.ReadBytes - last.ReadBytes) - writeBytesDiff := float64(current.WriteBytes - last.WriteBytes) - seconds := interval.Seconds() - - readBps = readBytesDiff / seconds - writeBps = writeBytesDiff / seconds - - return readBps, writeBps -} - func Quote(s string) string { if len(s) == 0 { return "''" @@ -204,6 +155,6 @@ func GetLatestVersion() string { return release.GetTagName() } -func GetUserAgent() string { - return fmt.Sprintf("%s/%s", "alpamon", version.Version) +func GetUserAgent(name string) string { + return fmt.Sprintf("%s/%s", name, version.Version) } From 37c2dd119561496dde1139c7a9a167b487302bb1 Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 5 Mar 2025 17:06:21 +0900 Subject: [PATCH 197/364] Minor fix --- pkg/pidfile/pidfile_linux.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/pidfile/pidfile_linux.go b/pkg/pidfile/pidfile_linux.go index f5126fd..e536db1 100644 --- a/pkg/pidfile/pidfile_linux.go +++ b/pkg/pidfile/pidfile_linux.go @@ -1,5 +1,7 @@ package pidfile +import "fmt" + func FilePath(name string) 
string { return fmt.Sprintf("/var/run/%s", name) } From 2d2731127527057c4c6847c9cb4919ec424e290a Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 5 Mar 2025 17:07:12 +0900 Subject: [PATCH 198/364] Minor fix --- pkg/runner/pty.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 26f965b..06c9681 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -91,8 +91,8 @@ func (pc *PtyClient) RunPtyBackground() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - pc.readFromWebsocket(ctx, cancel) - pc.readFromPTY(ctx, cancel) + go pc.readFromWebsocket(ctx, cancel) + go pc.readFromPTY(ctx, cancel) terminals[pc.sessionID] = pc From 9f4c0413a39b65bcd0525fd99e039ad15980b2da Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 6 Mar 2025 08:41:47 +0900 Subject: [PATCH 199/364] Minor fix --- .goreleaser.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 35676cd..9d047e9 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -33,7 +33,7 @@ nfpms: description: Alpamon homepage: https://github.com/alpacanetworks/alpamon-go license: MIT - vendor: Alpaca Networks + vendor: AlpacaX formats: - deb - rpm From 5aa30b710ae6a6fd7820a54ed2348f5def2ef375 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 6 Mar 2025 15:43:59 +0900 Subject: [PATCH 200/364] Fix setup command --- cmd/alpamon/command/root.go | 4 +--- cmd/alpamon/command/setup.go | 21 ++++++++++++++++----- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index d454d66..cdaf9b8 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -26,10 +26,8 @@ var RootCmd = &cobra.Command{ }, } -const name = "alpamon" - func init() { - RootCmd.AddCommand(setupCmd, ftpCmd) + RootCmd.AddCommand(SetupCmd, ftpCmd) } func runAgent() { diff --git a/cmd/alpamon/command/setup.go b/cmd/alpamon/command/setup.go 
index e5aec46..83ced12 100644 --- a/cmd/alpamon/command/setup.go +++ b/cmd/alpamon/command/setup.go @@ -14,7 +14,9 @@ import ( "text/template" ) -const ( +var ( + name = "alpamon" + configTemplatePath = "configs/alpamon.conf" configTarget = "/etc/alpamon/alpamon.conf" @@ -37,11 +39,11 @@ type ConfigData struct { //go:embed configs/* var configFiles embed.FS -var setupCmd = &cobra.Command{ +var SetupCmd = &cobra.Command{ Use: "setup", Short: "Setup and configure the Alpamon ", RunE: func(cmd *cobra.Command, args []string) error { - fmt.Println("Starting Alpamon setup...") + fmt.Printf("Starting %s setup...", name) configExists := fileExists(configTarget) isOverwrite := true @@ -93,7 +95,7 @@ func writeConfig() error { return fmt.Errorf("failed to read template file (%s): %v", configTemplatePath, err) } - tmpl, err := template.New("alpamon.conf").Parse(string(tmplData)) + tmpl, err := template.New(fmt.Sprintf("%s.conf", name)).Parse(string(tmplData)) if err != nil { return fmt.Errorf("failed to parse template: %v", err) } @@ -111,7 +113,7 @@ func writeConfig() error { return fmt.Errorf("environment variables ALPACON_URL, PLUGIN_ID, PLUGIN_KEY must be set") } - tmpFile, err := os.CreateTemp("", "alpamon.conf") + tmpFile, err := os.CreateTemp("", fmt.Sprintf("%s.conf", name)) if err != nil { return fmt.Errorf("failed to create temp file: %v", err) } @@ -172,3 +174,12 @@ func fileExists(path string) bool { } return fileInfo.Size() > 0 } + +func SetFile(plugin, newConfigTemplatePath, newConfigTargetPath, newTmpFilePath, newServiceTemplatePath, newServiceTargetPath string) { + name = plugin + configTemplatePath = newConfigTemplatePath + configTarget = newConfigTargetPath + tmpFileTarget = newTmpFilePath + serviceTemplatePath = newServiceTemplatePath + serviceTarget = newServiceTargetPath +} From 47eb9fe5be6bb870969e1e505fd6fba0e646fe03 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 6 Mar 2025 15:44:28 +0900 Subject: [PATCH 201/364] Remove darwin binary in goreleaser 
--- .goreleaser.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 9d047e9..8cac16e 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -14,7 +14,6 @@ builds: - CGO_ENABLED=0 goos: - linux - - darwin goarch: - amd64 - arm From 9de75f125789e317d3902775d4a155bdd3898dd7 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 6 Mar 2025 18:03:23 +0900 Subject: [PATCH 202/364] Fix setup cmd --- cmd/alpamon/command/root.go | 2 +- cmd/alpamon/command/setup.go | 11 +---------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index cdaf9b8..becf17a 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -27,7 +27,7 @@ var RootCmd = &cobra.Command{ } func init() { - RootCmd.AddCommand(SetupCmd, ftpCmd) + RootCmd.AddCommand(setupCmd, ftpCmd) } func runAgent() { diff --git a/cmd/alpamon/command/setup.go b/cmd/alpamon/command/setup.go index 83ced12..a8c34f7 100644 --- a/cmd/alpamon/command/setup.go +++ b/cmd/alpamon/command/setup.go @@ -39,7 +39,7 @@ type ConfigData struct { //go:embed configs/* var configFiles embed.FS -var SetupCmd = &cobra.Command{ +var setupCmd = &cobra.Command{ Use: "setup", Short: "Setup and configure the Alpamon ", RunE: func(cmd *cobra.Command, args []string) error { @@ -174,12 +174,3 @@ func fileExists(path string) bool { } return fileInfo.Size() > 0 } - -func SetFile(plugin, newConfigTemplatePath, newConfigTargetPath, newTmpFilePath, newServiceTemplatePath, newServiceTargetPath string) { - name = plugin - configTemplatePath = newConfigTemplatePath - configTarget = newConfigTargetPath - tmpFileTarget = newTmpFilePath - serviceTemplatePath = newServiceTemplatePath - serviceTarget = newServiceTargetPath -} From 320bece018428f637928d1b468d2ca93441906d4 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 6 Mar 2025 18:14:55 +0900 Subject: [PATCH 203/364] Minor fix --- pkg/pidfile/pidfile_darwin.go | 2 +- 
pkg/pidfile/pidfile_linux.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/pidfile/pidfile_darwin.go b/pkg/pidfile/pidfile_darwin.go index 90c9bf7..435079c 100644 --- a/pkg/pidfile/pidfile_darwin.go +++ b/pkg/pidfile/pidfile_darwin.go @@ -4,5 +4,5 @@ import "fmt" // Use /tmp for testing on macOS. func FilePath(name string) string { - return fmt.Sprintf("/tmp/%s", name) + return fmt.Sprintf("/tmp/%s.pid", name) } diff --git a/pkg/pidfile/pidfile_linux.go b/pkg/pidfile/pidfile_linux.go index e536db1..7f2c471 100644 --- a/pkg/pidfile/pidfile_linux.go +++ b/pkg/pidfile/pidfile_linux.go @@ -3,5 +3,5 @@ package pidfile import "fmt" func FilePath(name string) string { - return fmt.Sprintf("/var/run/%s", name) + return fmt.Sprintf("/var/run/%s.pid", name) } From 17f97018e94c2b2c553d4ebc3f0aaf9b8123a975 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 7 Mar 2025 09:39:28 +0900 Subject: [PATCH 204/364] Fix upgrade command --- pkg/runner/command.go | 7 +------ pkg/runner/shell.go | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 6be5755..23e67b0 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -111,12 +111,8 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("Platform '%s' not supported.", utils.PlatformLike) } log.Debug().Msgf("Upgrading alpamon from %s to %s using command: '%s'...", version.Version, latestVersion, cmd) + return cr.handleShellCmd(cmd, "root", "root", nil) - time.AfterFunc(1*time.Second, func() { - cr.handleShellCmd(cmd, "root", "root", nil) - }) - - return 0, fmt.Sprintf("Alpamon will upgrade from %s to %s in 1 second.", version.Version, latestVersion) case "commit": cr.commit() return 0, "Committed system information." 
@@ -289,7 +285,6 @@ func (cr *CommandRunner) handleShellCmd(command, user, group string, env map[str } if len(args) > 0 { - log.Debug().Msgf("Running '%s'", strings.Join(args, " ")) exitCode, result = runCmdWithOutput(args, user, group, env, 0) results += result } diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index da191ae..21bcf5b 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -92,7 +92,6 @@ func runCmdWithOutput(args []string, username, groupname string, env map[string] var cmd *exec.Cmd if username == "root" { - log.Debug().Msg("Executing the command with root privilege.") if containsShellOperator(args) { cmd = exec.CommandContext(ctx, "bash", "-c", strings.Join(args, " ")) } else { @@ -124,6 +123,7 @@ func runCmdWithOutput(args []string, username, groupname string, env map[string] } cmd.Dir = usr.HomeDir + log.Debug().Msgf("Executing command as user '%s' (group: '%s') -> '%s'", username, groupname, strings.Join(args, " ")) output, err := cmd.Output() if err != nil { if exitError, ok := err.(*exec.ExitError); ok { From 0c2d5828fff91685ae2b34d9a11a0d6c9085316d Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 11:19:48 +0900 Subject: [PATCH 205/364] Add exception handling for file systems Add exception handling to prevent the collection of virtual or loop file systems. 
--- .../check/realtime/disk/usage/usage.go | 44 +++++++++++++++---- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index 5e7fc76..80295c3 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -2,22 +2,36 @@ package diskusage import ( "context" + "strings" "time" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/shirou/gopsutil/v4/disk" ) var excludedFileSystems = map[string]bool{ - "tmpfs": true, - "devtmpfs": true, - "proc": true, - "sysfs": true, - "cgroup": true, - "overlay": true, - "autofs": true, - "devfs": true, + "tmpfs": true, + "devtmpfs": true, + "proc": true, + "sysfs": true, + "cgroup": true, + "cgroup2": true, + "overlay": true, + "autofs": true, + "devfs": true, + "securityfs": true, + "fusectl": true, + "hugetlbfs": true, + "debugfs": true, + "pstore": true, + "tracefs": true, + "devpts": true, + "mqueue": true, + "bpf": true, + "configfs": true, + "binfmt_misc": true, } type Check struct { @@ -93,7 +107,19 @@ func (c *Check) collectDiskPartitions() ([]disk.PartitionStat, error) { var filteredPartitions []disk.PartitionStat for _, partition := range partitions { - if !excludedFileSystems[partition.Fstype] { + if excludedFileSystems[partition.Fstype] { + continue + } + + if strings.HasPrefix(partition.Device, "/dev/loop") { + continue + } + + if utils.IsVirtualFileSystem(partition.Mountpoint) { + continue + } + + if strings.HasPrefix(partition.Device, "/dev") { filteredPartitions = append(filteredPartitions, partition) } } From 607eca216f9c7a2d3c7784554cb6a95b5cbddc5b Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 11:21:42 +0900 Subject: [PATCH 206/364] Add IsVirtualFileSystem() Add IsVirtualFileSystem() to determine if a file 
system is virtual based on the mountPoint. --- pkg/utils/metrics.go | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 143eb93..37da177 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -1,9 +1,11 @@ package utils import ( + "regexp" + "time" + "github.com/shirou/gopsutil/v4/disk" "github.com/shirou/gopsutil/v4/net" - "time" ) func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { @@ -50,3 +52,23 @@ func CalculateDiskIOBps(current disk.IOCountersStat, last disk.IOCountersStat, i return readBps, writeBps } + +func IsVirtualFileSystem(mountPoint string) bool { + pattern := "^/(sys|proc|run|dev/)" + matched, _ := regexp.MatchString(pattern, mountPoint) + if matched { + return true + } + + virtualMountpoints := map[string]bool{ + "/sys": true, + "/proc": true, + "/dev": true, + } + + if virtualMountpoints[mountPoint] { + return true + } + + return false +} From f21458b7e044d2b47577cffb3f684c381d31f9f4 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 11:33:22 +0900 Subject: [PATCH 207/364] Minor fix Apply go-lint. 
--- pkg/utils/metrics.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 37da177..876be5a 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -66,9 +66,5 @@ func IsVirtualFileSystem(mountPoint string) bool { "/dev": true, } - if virtualMountpoints[mountPoint] { - return true - } - - return false + return virtualMountpoints[mountPoint] } From f178b1d8eb2c8f0e3958b5ddca52f83a20201d48 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 16:33:31 +0900 Subject: [PATCH 208/364] Refactor command package for external import and plugin support --- cmd/alpamon/command/{ => ftp}/ftp.go | 4 +- cmd/alpamon/command/root.go | 9 +++- .../command/{ => setup}/configs/alpamon.conf | 0 .../{ => setup}/configs/alpamon.service | 0 .../command/{ => setup}/configs/tmpfile.conf | 0 cmd/alpamon/command/{ => setup}/setup.go | 41 +++++++++++-------- 6 files changed, 33 insertions(+), 21 deletions(-) rename cmd/alpamon/command/{ => ftp}/ftp.go (92%) rename cmd/alpamon/command/{ => setup}/configs/alpamon.conf (100%) rename cmd/alpamon/command/{ => setup}/configs/alpamon.service (100%) rename cmd/alpamon/command/{ => setup}/configs/tmpfile.conf (100%) rename cmd/alpamon/command/{ => setup}/setup.go (83%) diff --git a/cmd/alpamon/command/ftp.go b/cmd/alpamon/command/ftp/ftp.go similarity index 92% rename from cmd/alpamon/command/ftp.go rename to cmd/alpamon/command/ftp/ftp.go index e74dede..0898528 100644 --- a/cmd/alpamon/command/ftp.go +++ b/cmd/alpamon/command/ftp/ftp.go @@ -1,4 +1,4 @@ -package command +package ftp import ( "github.com/alpacanetworks/alpamon-go/pkg/logger" @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" ) -var ftpCmd = &cobra.Command{ +var FtpCmd = &cobra.Command{ Use: "ftp ", Short: "Start worker for Web FTP", Args: cobra.ExactArgs(3), diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index becf17a..1e338f2 100644 --- a/cmd/alpamon/command/root.go +++ 
b/cmd/alpamon/command/root.go @@ -2,6 +2,8 @@ package command import ( "fmt" + "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/ftp" + "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/setup" "os" "syscall" @@ -18,6 +20,10 @@ import ( "github.com/spf13/cobra" ) +const ( + name = "alpamon" +) + var RootCmd = &cobra.Command{ Use: "alpamon", Short: "Secure Server Agent for Alpacon", @@ -27,7 +33,8 @@ var RootCmd = &cobra.Command{ } func init() { - RootCmd.AddCommand(setupCmd, ftpCmd) + setup.SetConfigPaths(name) + RootCmd.AddCommand(setup.SetupCmd, ftp.FtpCmd) } func runAgent() { diff --git a/cmd/alpamon/command/configs/alpamon.conf b/cmd/alpamon/command/setup/configs/alpamon.conf similarity index 100% rename from cmd/alpamon/command/configs/alpamon.conf rename to cmd/alpamon/command/setup/configs/alpamon.conf diff --git a/cmd/alpamon/command/configs/alpamon.service b/cmd/alpamon/command/setup/configs/alpamon.service similarity index 100% rename from cmd/alpamon/command/configs/alpamon.service rename to cmd/alpamon/command/setup/configs/alpamon.service diff --git a/cmd/alpamon/command/configs/tmpfile.conf b/cmd/alpamon/command/setup/configs/tmpfile.conf similarity index 100% rename from cmd/alpamon/command/configs/tmpfile.conf rename to cmd/alpamon/command/setup/configs/tmpfile.conf diff --git a/cmd/alpamon/command/setup.go b/cmd/alpamon/command/setup/setup.go similarity index 83% rename from cmd/alpamon/command/setup.go rename to cmd/alpamon/command/setup/setup.go index a8c34f7..9b73586 100644 --- a/cmd/alpamon/command/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -1,4 +1,4 @@ -package command +package setup import ( "embed" @@ -14,19 +14,28 @@ import ( "text/template" ) -var ( - name = "alpamon" - - configTemplatePath = "configs/alpamon.conf" - configTarget = "/etc/alpamon/alpamon.conf" - - tmpFilePath = "configs/tmpfile.conf" - tmpFileTarget = "/usr/lib/tmpfiles.d/alpamon.conf" +//go:embed configs/* +var configFiles embed.FS - 
serviceTemplatePath = "configs/alpamon.service" - serviceTarget = "/lib/systemd/system/alpamon.service" +var ( + name string + configTemplatePath string + configTarget string + tmpFilePath = "configs/tmpfile.conf" + tmpFileTarget string + serviceTemplatePath string + serviceTarget string ) +func SetConfigPaths(serviceName string) { + name = serviceName + configTemplatePath = fmt.Sprintf("configs/%s.conf", name) + configTarget = fmt.Sprintf("/etc/%s/%s.conf", name, name) + tmpFileTarget = fmt.Sprintf("/usr/lib/tmpfiles.d/%s.conf", name) + serviceTemplatePath = fmt.Sprintf("configs/%s.service", name) + serviceTarget = fmt.Sprintf("/lib/systemd/system/%s.service", name) +} + type ConfigData struct { URL string ID string @@ -36,14 +45,11 @@ type ConfigData struct { Debug string } -//go:embed configs/* -var configFiles embed.FS - -var setupCmd = &cobra.Command{ +var SetupCmd = &cobra.Command{ Use: "setup", - Short: "Setup and configure the Alpamon ", + Short: "Setup and configure the application", RunE: func(cmd *cobra.Command, args []string) error { - fmt.Printf("Starting %s setup...", name) + fmt.Printf("Starting %s setup...\n", name) configExists := fileExists(configTarget) isOverwrite := true @@ -143,7 +149,6 @@ func writeService() error { if err != nil { return fmt.Errorf("failed to write target file: %v", err) } - return nil } From dd098b15e8722cc7a0d2d405bb86a9a8ef100dd5 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 16:35:09 +0900 Subject: [PATCH 209/364] Minor fix --- cmd/alpamon/command/setup/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 9b73586..a519854 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -47,7 +47,7 @@ type ConfigData struct { var SetupCmd = &cobra.Command{ Use: "setup", - Short: "Setup and configure the application", + Short: "Setup and configure the alpamon", RunE: func(cmd 
*cobra.Command, args []string) error { fmt.Printf("Starting %s setup...\n", name) From 200f5826c57199c10bc6584b2be41980ac37a1e2 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 16:38:42 +0900 Subject: [PATCH 210/364] Fix first install detection by checking binary existence instead of arguments --- scripts/postinstall.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 26d5bd5..852a23f 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -65,7 +65,7 @@ start_systemd_service() { } is_new_installation() { - if [ -z "$2" ]; then + if [ ! -f "$ALPAMON_BIN" ]; then return 0 # first install else return 1 # upgrade From 87b3109b45e4cd42fe945511e994d08589c00bac Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 16:58:20 +0900 Subject: [PATCH 211/364] Minor fix --- cmd/alpamon/command/setup/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index a519854..8fff56d 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -47,7 +47,7 @@ type ConfigData struct { var SetupCmd = &cobra.Command{ Use: "setup", - Short: "Setup and configure the alpamon", + Short: fmt.Sprintf("Setup and configure the %s", name), RunE: func(cmd *cobra.Command, args []string) error { fmt.Printf("Starting %s setup...\n", name) From e7fcb3bcb4eaaf7042368b01c159b5a6bb1ad30d Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 17:31:57 +0900 Subject: [PATCH 212/364] Minor fix --- scripts/postinstall.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 852a23f..26d5bd5 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -65,7 +65,7 @@ start_systemd_service() { } is_new_installation() { - if [ ! 
-f "$ALPAMON_BIN" ]; then + if [ -z "$2" ]; then return 0 # first install else return 1 # upgrade From d2b032b9fcc8ff7a7cdf8860866d5fcd00b4e4b3 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 18:03:54 +0900 Subject: [PATCH 213/364] Minor fix --- cmd/alpamon/command/setup/setup.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 8fff56d..3e0afa7 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -27,8 +27,9 @@ var ( serviceTarget string ) -func SetConfigPaths(serviceName string) { +func SetConfigPaths(serviceName string, fs embed.FS) { name = serviceName + configFiles = fs configTemplatePath = fmt.Sprintf("configs/%s.conf", name) configTarget = fmt.Sprintf("/etc/%s/%s.conf", name, name) tmpFileTarget = fmt.Sprintf("/usr/lib/tmpfiles.d/%s.conf", name) From b153e6ccbb9bca56a04ea9c07c992d4d63ba1eee Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 10 Mar 2025 18:07:33 +0900 Subject: [PATCH 214/364] Minor fix --- cmd/alpamon/command/{setup => }/configs/alpamon.conf | 0 cmd/alpamon/command/{setup => }/configs/alpamon.service | 0 cmd/alpamon/command/{setup => }/configs/tmpfile.conf | 0 cmd/alpamon/command/root.go | 6 +++++- cmd/alpamon/command/setup/setup.go | 4 +--- 5 files changed, 6 insertions(+), 4 deletions(-) rename cmd/alpamon/command/{setup => }/configs/alpamon.conf (100%) rename cmd/alpamon/command/{setup => }/configs/alpamon.service (100%) rename cmd/alpamon/command/{setup => }/configs/tmpfile.conf (100%) diff --git a/cmd/alpamon/command/setup/configs/alpamon.conf b/cmd/alpamon/command/configs/alpamon.conf similarity index 100% rename from cmd/alpamon/command/setup/configs/alpamon.conf rename to cmd/alpamon/command/configs/alpamon.conf diff --git a/cmd/alpamon/command/setup/configs/alpamon.service b/cmd/alpamon/command/configs/alpamon.service similarity index 100% rename from 
cmd/alpamon/command/setup/configs/alpamon.service rename to cmd/alpamon/command/configs/alpamon.service diff --git a/cmd/alpamon/command/setup/configs/tmpfile.conf b/cmd/alpamon/command/configs/tmpfile.conf similarity index 100% rename from cmd/alpamon/command/setup/configs/tmpfile.conf rename to cmd/alpamon/command/configs/tmpfile.conf diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 1e338f2..4344223 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -1,6 +1,7 @@ package command import ( + "embed" "fmt" "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/ftp" "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/setup" @@ -32,8 +33,11 @@ var RootCmd = &cobra.Command{ }, } +//go:embed configs/* +var configFiles embed.FS + func init() { - setup.SetConfigPaths(name) + setup.SetConfigPaths(name, configFiles) RootCmd.AddCommand(setup.SetupCmd, ftp.FtpCmd) } diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 3e0afa7..80d01b4 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -14,11 +14,9 @@ import ( "text/template" ) -//go:embed configs/* -var configFiles embed.FS - var ( name string + configFiles embed.FS configTemplatePath string configTarget string tmpFilePath = "configs/tmpfile.conf" From 73717321568236399c435640c6aafa3fac56a136 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 19:36:15 +0900 Subject: [PATCH 215/364] Add Disk & Partition type Define Disk and Partition types to collect disk-related system information. 
--- pkg/runner/commit_types.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index 02c4308..0344750 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -52,6 +52,16 @@ var commitDefs = map[string]commitDef{ URL: "/api/proc/packages/", URLSuffix: "sync/", }, + "disks": { + MultiRow: true, + URL: "/api/proc/disks/", + URLSuffix: "sync", + }, + "partitions": { + MultiRow: true, + URL: "/api/proc/partitions/", + URLSuffix: "sync", + }, } type ServerData struct { @@ -136,6 +146,23 @@ type Address struct { Mask string `json:"mask"` } +type Disk struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + SerialNumber string `json:"serial_number"` + Label string `json:"label"` +} + +type Partition struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + DiskName string `json:"disk_name"` + Mountpoint string `json:"mount_point"` + Fstype string `json:"fs_type"` + Opts string `json:"options"` + IsVirtual bool `json:"is_virtual"` +} + type commitData struct { Version string `json:"version"` Load float64 `json:"load"` @@ -147,6 +174,8 @@ type commitData struct { Interfaces []Interface `json:"interfaces"` Addresses []Address `json:"addresses"` Packages []SystemPackageData `json:"packages"` + Disks []Disk `json:"disks"` + Partitions []Partition `json:"partitions"` } // Defines the ComparableData interface for comparing different types. From 12b2114b1d711b5b0e979728af608c6dc8be3061 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 19:38:55 +0900 Subject: [PATCH 216/364] Add getDisks() and getPartitions() Add functions to collect disk-related system information. getDisks() is a function for collecting Disk information. getPartitions() is a function for collecting Partition information. 
--- pkg/runner/commit.go | 68 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 58 insertions(+), 10 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 73507d0..c7233fa 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -6,6 +6,16 @@ import ( "encoding/json" "errors" "fmt" + "io" + "net" + "net/http" + "net/textproto" + "os" + "strconv" + "strings" + "sync" + "time" + "github.com/alpacanetworks/alpamon-go/pkg/scheduler" "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/alpacanetworks/alpamon-go/pkg/version" @@ -13,19 +23,10 @@ import ( rpmdb "github.com/knqyf263/go-rpmdb/pkg" "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/disk" "github.com/shirou/gopsutil/v4/host" "github.com/shirou/gopsutil/v4/load" "github.com/shirou/gopsutil/v4/mem" - - "io" - "net" - "net/http" - "net/textproto" - "os" - "strconv" - "strings" - "sync" - "time" ) const ( @@ -253,6 +254,12 @@ func collectData() *commitData { if data.Packages, err = getSystemPackages(); err != nil { log.Debug().Err(err).Msg("Failed to retrieve system packages") } + if data.Disks, err = getDisks(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve disks") + } + if data.Partitions, err = getPartitions(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve disk partitions") + } return data } @@ -630,6 +637,47 @@ func getRpmPackage(path string) ([]SystemPackageData, error) { return packages, nil } +func getDisks() ([]Disk, error) { + ioCounters, err := disk.IOCounters() + if err != nil { + return []Disk{}, err + } + + disks := []Disk{} + for name, ioCounter := range ioCounters { + disks = append(disks, Disk{ + Name: name, + SerialNumber: ioCounter.SerialNumber, + Label: ioCounter.Label, + }) + } + + return disks, nil +} + +func getPartitions() ([]Partition, error) { + partitions, err := disk.Partitions(true) + if err != nil { + return []Partition{}, nil + } + + var partitionList []Partition + 
for _, partition := range partitions { + disk := utils.ParseDiskName(partition.Device) + + partitionList = append(partitionList, Partition{ + Name: partition.Device, + DiskName: disk, + Mountpoint: partition.Mountpoint, + Fstype: partition.Fstype, + Opts: strings.Join(partition.Opts, ","), + IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), + }) + } + + return partitionList, nil +} + func dispatchComparison(entry commitDef, currentData, remoteData any) { switch v := remoteData.(type) { case *[]GroupData: From 55545d47270fe371a048e054043e85cc2609ad98 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 19:59:40 +0900 Subject: [PATCH 217/364] Add utilities for collecting disk-related system information Add utilities for collecting disk-related system information. Implement ParseDiskName() to map disk partitions to disks. Fix IsVirtualFileSystem() to determine virtual file systems in various ways. --- pkg/utils/metrics.go | 71 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 8 deletions(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 876be5a..1f95df7 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -2,12 +2,45 @@ package utils import ( "regexp" + "strings" "time" "github.com/shirou/gopsutil/v4/disk" "github.com/shirou/gopsutil/v4/net" ) +var ( + virtualFileSystems = map[string]bool{ + "tmpfs": true, + "devtmpfs": true, + "proc": true, + "sysfs": true, + "cgroup": true, + "cgroup2": true, + "overlay": true, + "autofs": true, + "devfs": true, + "securityfs": true, + "fusectl": true, + "hugetlbfs": true, + "debugfs": true, + "pstore": true, + "tracefs": true, + "devpts": true, + "mqueue": true, + "bpf": true, + "configfs": true, + "binfmt_misc": true, + } + virtualMountpoints = map[string]bool{ + "/sys": true, + "/proc": true, + "/dev": true, + } + virtualMountPointPattern = "^/(sys|proc|run|dev/)" + loopFileSystemPrefix = "/dev/loop" +) + func 
CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { if interval == 0 { return 0, 0 @@ -53,18 +86,40 @@ func CalculateDiskIOBps(current disk.IOCountersStat, last disk.IOCountersStat, i return readBps, writeBps } -func IsVirtualFileSystem(mountPoint string) bool { - pattern := "^/(sys|proc|run|dev/)" - matched, _ := regexp.MatchString(pattern, mountPoint) +func IsVirtualFileSystem(device string, fstype string, mountPoint string) bool { + if strings.HasPrefix(device, loopFileSystemPrefix) { + return true + } + + matched, _ := regexp.MatchString(virtualMountPointPattern, mountPoint) if matched { return true } - virtualMountpoints := map[string]bool{ - "/sys": true, - "/proc": true, - "/dev": true, + if virtualFileSystems[fstype] { + return true + } + + if virtualMountpoints[mountPoint] { + return true + } + + return false +} + +func ParseDiskName(device string) string { + device = strings.TrimPrefix(device, "/dev/") + + re := regexp.MustCompile(`^disk\d+`) + if match := re.FindString(device); match != "" { + return match + } + + for i := len(device) - 1; i >= 0; i-- { + if device[i] < '0' || device[i] > '9' { + return device[:i+1] + } } - return virtualMountpoints[mountPoint] + return device } From cd4f951e3abc4be34936628b3ac262e7b9b3eef8 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 20:01:42 +0900 Subject: [PATCH 218/364] Update Disk partition collection logic to apply changes Updated Disk partition collection logic to apply IsVirtualFileSystem() changes. 
--- .../check/realtime/disk/usage/usage.go | 33 +------------------ 1 file changed, 1 insertion(+), 32 deletions(-) diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index 80295c3..831cbc6 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -11,29 +11,6 @@ import ( "github.com/shirou/gopsutil/v4/disk" ) -var excludedFileSystems = map[string]bool{ - "tmpfs": true, - "devtmpfs": true, - "proc": true, - "sysfs": true, - "cgroup": true, - "cgroup2": true, - "overlay": true, - "autofs": true, - "devfs": true, - "securityfs": true, - "fusectl": true, - "hugetlbfs": true, - "debugfs": true, - "pstore": true, - "tracefs": true, - "devpts": true, - "mqueue": true, - "bpf": true, - "configfs": true, - "binfmt_misc": true, -} - type Check struct { base.BaseCheck } @@ -107,15 +84,7 @@ func (c *Check) collectDiskPartitions() ([]disk.PartitionStat, error) { var filteredPartitions []disk.PartitionStat for _, partition := range partitions { - if excludedFileSystems[partition.Fstype] { - continue - } - - if strings.HasPrefix(partition.Device, "/dev/loop") { - continue - } - - if utils.IsVirtualFileSystem(partition.Mountpoint) { + if utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint) { continue } From 03f53f04b0756d84676b665ea2419dbdd4f6cd0b Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 10 Mar 2025 21:05:14 +0900 Subject: [PATCH 219/364] Fix regexp Fix regexp for linux file system. 
--- pkg/utils/metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 1f95df7..3c97e88 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -110,7 +110,7 @@ func IsVirtualFileSystem(device string, fstype string, mountPoint string) bool { func ParseDiskName(device string) string { device = strings.TrimPrefix(device, "/dev/") - re := regexp.MustCompile(`^disk\d+`) + re := regexp.MustCompile(`^[a-zA-Z]+\d*`) if match := re.FindString(device); match != "" { return match } From a0d52ede6e850827d50c96e095f1e55bde345719 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 11 Mar 2025 09:41:28 +0900 Subject: [PATCH 220/364] Fix setup command --- cmd/alpamon/command/setup/setup.go | 33 +++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 80d01b4..9616eaf 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -25,16 +25,6 @@ var ( serviceTarget string ) -func SetConfigPaths(serviceName string, fs embed.FS) { - name = serviceName - configFiles = fs - configTemplatePath = fmt.Sprintf("configs/%s.conf", name) - configTarget = fmt.Sprintf("/etc/%s/%s.conf", name, name) - tmpFileTarget = fmt.Sprintf("/usr/lib/tmpfiles.d/%s.conf", name) - serviceTemplatePath = fmt.Sprintf("configs/%s.service", name) - serviceTarget = fmt.Sprintf("/lib/systemd/system/%s.service", name) -} - type ConfigData struct { URL string ID string @@ -44,6 +34,16 @@ type ConfigData struct { Debug string } +func SetConfigPaths(serviceName string, fs embed.FS) { + name = serviceName + configFiles = fs + configTemplatePath = fmt.Sprintf("configs/%s.conf", name) + configTarget = fmt.Sprintf("/etc/alpamon/%s.conf", name) + tmpFileTarget = fmt.Sprintf("/usr/lib/tmpfiles.d/%s.conf", name) + serviceTemplatePath = fmt.Sprintf("configs/%s.service", name) + serviceTarget = 
fmt.Sprintf("/lib/systemd/system/%s.service", name) +} + var SetupCmd = &cobra.Command{ Use: "setup", Short: fmt.Sprintf("Setup and configure the %s", name), @@ -53,21 +53,16 @@ var SetupCmd = &cobra.Command{ configExists := fileExists(configTarget) isOverwrite := true - if term.IsTerminal(syscall.Stdin) { - if configExists { - fmt.Println("A configuration file already exists at:", configTarget) - isOverwrite = cli.PromptForBool("Do you want to overwrite it with a new configuration?: ") - } - + if configExists && term.IsTerminal(syscall.Stdin) { + fmt.Println("A configuration file already exists at:", configTarget) + isOverwrite = cli.PromptForBool("Do you want to overwrite it with a new configuration?: ") if !isOverwrite { fmt.Println("Keeping the existing configuration file. Skipping configuration update.") return nil } } - if !configExists || isOverwrite { - fmt.Println("Applying a new configuration automatically.") - } + fmt.Println("Applying a new configuration automatically.") err := copyEmbeddedFile(tmpFilePath, tmpFileTarget) if err != nil { From 376b582719e0167440ecee583969ad1f34b6a7d5 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 11 Mar 2025 09:49:14 +0900 Subject: [PATCH 221/364] Minor fix --- cmd/alpamon/command/setup/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 9616eaf..ef4ace4 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -50,8 +50,8 @@ var SetupCmd = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { fmt.Printf("Starting %s setup...\n", name) + var isOverwrite bool configExists := fileExists(configTarget) - isOverwrite := true if configExists && term.IsTerminal(syscall.Stdin) { fmt.Println("A configuration file already exists at:", configTarget) From dbbf2309019a6b7aea5b8428045ce7d69c7a8382 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 11 Mar 2025 13:23:55 +0900 Subject: 
[PATCH 222/364] Minor fix --- cmd/alpamon/command/setup/setup.go | 12 ++++++++++-- pkg/runner/shell.go | 13 ------------- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index ef4ace4..320e79a 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -62,14 +62,22 @@ var SetupCmd = &cobra.Command{ } } - fmt.Println("Applying a new configuration automatically.") + fmt.Println("Applying a new configuration automatically...") err := copyEmbeddedFile(tmpFilePath, tmpFileTarget) if err != nil { return err } - output, err := exec.Command("systemd-tmpfiles", "--create").CombinedOutput() + command := exec.Command("systemd-tmpfiles", "--create") + command.SysProcAttr = &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: 0, + Gid: 0, + }, + } + + output, err := command.CombinedOutput() if err != nil { return fmt.Errorf("%w\n%s", err, string(output)) } diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index 21bcf5b..54f9c40 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -135,19 +135,6 @@ func runCmdWithOutput(args []string, username, groupname string, env map[string] return 0, string(output) } -func RunCmd(command string, args ...string) int { - cmd := exec.Command(command, args...) - - err := cmd.Run() - if err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - return exitError.ExitCode() - } - return -1 - } - return 0 -} - // && and || operators are handled separately in handleShellCmd func containsShellOperator(args []string) bool { for _, arg := range args { From 00cd17b82d6694fe97a58938832295f9fc0cc374 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 11 Mar 2025 13:53:45 +0900 Subject: [PATCH 223/364] Add disks and partitions cases to syncSystemInfo() Add disks and partitions cases to syncSystemInfo() to synchronize disk-related system information, which is now being collected. 
--- pkg/runner/commit.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index c7233fa..e48745b 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -150,6 +150,16 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { log.Debug().Err(err).Msg("Failed to retrieve system packages") } remoteData = &[]SystemPackageData{} + case "disks": + if currentData, err = getDisks(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve disks") + } + remoteData = &[]Disk{} + case "partitions": + if currentData, err = getPartitions(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve disks") + } + remoteData = &[]Partition{} default: log.Warn().Msgf("Unknown key: %s", key) continue @@ -690,5 +700,9 @@ func dispatchComparison(entry commitDef, currentData, remoteData any) { compareListData(entry, currentData.([]Address), *v) case *[]SystemPackageData: compareListData(entry, currentData.([]SystemPackageData), *v) + case *[]Disk: + compareListData(entry, currentData.([]Disk), *v) + case *[]Partition: + compareListData(entry, currentData.([]Partition), *v) } } From 91137f2599721a1d32ce9b38e9b531760319e8cc Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 11 Mar 2025 13:56:50 +0900 Subject: [PATCH 224/364] Update commit types according to changes Update types to reflect the addition of cases for synchronizing disk-related system information --- pkg/runner/commit_types.go | 39 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index 0344750..0d7af33 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -55,12 +55,12 @@ var commitDefs = map[string]commitDef{ "disks": { MultiRow: true, URL: "/api/proc/disks/", - URLSuffix: "sync", + URLSuffix: "sync/", }, "partitions": { MultiRow: true, URL: "/api/proc/partitions/", - URLSuffix: "sync", + URLSuffix: "sync/", }, 
} @@ -332,3 +332,38 @@ func (sp SystemPackageData) GetData() ComparableData { Arch: sp.Arch, } } + +func (d Disk) GetID() string { + return d.ID +} + +func (d Disk) GetKey() interface{} { + return d.Name +} + +func (d Disk) GetData() ComparableData { + return Disk{ + Name: d.Name, + SerialNumber: d.SerialNumber, + Label: d.Label, + } +} + +func (p Partition) GetID() string { + return p.ID +} + +func (p Partition) GetKey() interface{} { + return p.Name +} + +func (p Partition) GetData() ComparableData { + return Partition{ + Name: p.Name, + DiskName: p.DiskName, + Mountpoint: p.Mountpoint, + Fstype: p.Fstype, + Opts: p.Opts, + IsVirtual: p.IsVirtual, + } +} From 777dbc8210fd8e74c6f70de40e2c9585ef14eafc Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 11 Mar 2025 14:33:58 +0900 Subject: [PATCH 225/364] Replace syscall.Credential with sudo command execution in setup --- cmd/alpamon/command/setup/setup.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 320e79a..fbfb72c 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -69,15 +69,7 @@ var SetupCmd = &cobra.Command{ return err } - command := exec.Command("systemd-tmpfiles", "--create") - command.SysProcAttr = &syscall.SysProcAttr{ - Credential: &syscall.Credential{ - Uid: 0, - Gid: 0, - }, - } - - output, err := command.CombinedOutput() + output, err := exec.Command("sudo", "systemd-tmpfiles", "--create").CombinedOutput() if err != nil { return fmt.Errorf("%w\n%s", err, string(output)) } From 4158e06f985c7713a05081c0f20ceb83ee495eed Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 11 Mar 2025 14:55:11 +0900 Subject: [PATCH 226/364] Revert to syscall.Credential in setup --- cmd/alpamon/command/setup/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index fbfb72c..ee9894b 100644 
--- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -69,7 +69,7 @@ var SetupCmd = &cobra.Command{ return err } - output, err := exec.Command("sudo", "systemd-tmpfiles", "--create").CombinedOutput() + output, err := exec.Command("systemd-tmpfiles", "--create").CombinedOutput() if err != nil { return fmt.Errorf("%w\n%s", err, string(output)) } From 21777951a7c1b839807201a31c85c9e87e6ad92e Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 11 Mar 2025 16:57:45 +0900 Subject: [PATCH 227/364] Minor fix Fix Partition's GetKey(). --- pkg/runner/commit_types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index 0d7af33..4919527 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -354,7 +354,7 @@ func (p Partition) GetID() string { } func (p Partition) GetKey() interface{} { - return p.Name + return p.Mountpoint } func (p Partition) GetData() ComparableData { From a5a76ad8b018ca594651f7abe461917f71667c28 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 11 Mar 2025 19:30:14 +0900 Subject: [PATCH 228/364] Add MountPoint type Define MountPoint type to collect mount point value, which was previously collected in Partition. 
--- pkg/runner/commit.go | 40 +++++++++++++++++++++++----- pkg/runner/commit_types.go | 53 ++++++++++++++++++++++++++++---------- 2 files changed, 72 insertions(+), 21 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index e48745b..bd0b65e 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -157,9 +157,14 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { remoteData = &[]Disk{} case "partitions": if currentData, err = getPartitions(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve disks") + log.Debug().Err(err).Msg("Failed to retrieve partitions") } remoteData = &[]Partition{} + case "mounts": + if currentData, err = getMountPoints(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve mount points") + } + remoteData = &[]MountPoint{} default: log.Warn().Msgf("Unknown key: %s", key) continue @@ -270,6 +275,9 @@ func collectData() *commitData { if data.Partitions, err = getPartitions(); err != nil { log.Debug().Err(err).Msg("Failed to retrieve disk partitions") } + if data.Mounts, err = getMountPoints(); err != nil { + log.Debug().Err(err).Msg("Failed to retrieve mount points") + } return data } @@ -676,18 +684,34 @@ func getPartitions() ([]Partition, error) { disk := utils.ParseDiskName(partition.Device) partitionList = append(partitionList, Partition{ - Name: partition.Device, - DiskName: disk, - Mountpoint: partition.Mountpoint, - Fstype: partition.Fstype, - Opts: strings.Join(partition.Opts, ","), - IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), + Name: partition.Device, + DiskName: disk, + Fstype: partition.Fstype, + IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), }) } return partitionList, nil } +func getMountPoints() ([]MountPoint, error) { + partitions, err := disk.Partitions(true) + if err != nil { + return []MountPoint{}, nil + } + + var mountPoints []MountPoint + for _, partition := range 
partitions { + mountPoints = append(mountPoints, MountPoint{ + MountPoint: partition.Mountpoint, + PartitionName: partition.Device, + Opts: strings.Join(partition.Opts, ","), + }) + } + + return mountPoints, nil +} + func dispatchComparison(entry commitDef, currentData, remoteData any) { switch v := remoteData.(type) { case *[]GroupData: @@ -704,5 +728,7 @@ func dispatchComparison(entry commitDef, currentData, remoteData any) { compareListData(entry, currentData.([]Disk), *v) case *[]Partition: compareListData(entry, currentData.([]Partition), *v) + case *[]MountPoint: + compareListData(entry, currentData.([]MountPoint), *v) } } diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index 4919527..e87b2e2 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -62,6 +62,11 @@ var commitDefs = map[string]commitDef{ URL: "/api/proc/partitions/", URLSuffix: "sync/", }, + "mounts": { + MultiRow: true, + URL: "/api/proc/mounts/", + URLSuffix: "sync/", + }, } type ServerData struct { @@ -154,13 +159,18 @@ type Disk struct { } type Partition struct { - ID string `json:"id,omitempty"` - Name string `json:"name"` - DiskName string `json:"disk_name"` - Mountpoint string `json:"mount_point"` - Fstype string `json:"fs_type"` - Opts string `json:"options"` - IsVirtual bool `json:"is_virtual"` + ID string `json:"id,omitempty"` + Name string `json:"name"` + DiskName string `json:"disk_name"` + Fstype string `json:"fs_type"` + IsVirtual bool `json:"is_virtual"` +} + +type MountPoint struct { + ID string `json:"id,omitempty"` + MountPoint string `json:"mount_point"` + PartitionName string `json:"partition_name"` + Opts string `json:"options"` } type commitData struct { @@ -176,6 +186,7 @@ type commitData struct { Packages []SystemPackageData `json:"packages"` Disks []Disk `json:"disks"` Partitions []Partition `json:"partitions"` + Mounts []MountPoint `json:"mounts"` } // Defines the ComparableData interface for comparing different types. 
@@ -354,16 +365,30 @@ func (p Partition) GetID() string { } func (p Partition) GetKey() interface{} { - return p.Mountpoint + return p.Name } func (p Partition) GetData() ComparableData { return Partition{ - Name: p.Name, - DiskName: p.DiskName, - Mountpoint: p.Mountpoint, - Fstype: p.Fstype, - Opts: p.Opts, - IsVirtual: p.IsVirtual, + Name: p.Name, + DiskName: p.DiskName, + Fstype: p.Fstype, + IsVirtual: p.IsVirtual, + } +} + +func (m MountPoint) GetID() string { + return m.ID +} + +func (m MountPoint) GetKey() interface{} { + return m.MountPoint +} + +func (m MountPoint) GetData() ComparableData { + return MountPoint{ + MountPoint: m.MountPoint, + Opts: m.Opts, + PartitionName: m.PartitionName, } } From 5b4941ce03d411b7489ce20a49c6586b620ab55c Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 12 Mar 2025 20:36:12 +0900 Subject: [PATCH 229/364] Revert to previous commit Revert to previous commit. --- pkg/runner/commit.go | 51 +++++++++++++------------------------- pkg/runner/commit_types.go | 49 ++++++++---------------------------- 2 files changed, 28 insertions(+), 72 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index bd0b65e..70219b5 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -160,11 +160,6 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { log.Debug().Err(err).Msg("Failed to retrieve partitions") } remoteData = &[]Partition{} - case "mounts": - if currentData, err = getMountPoints(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve mount points") - } - remoteData = &[]MountPoint{} default: log.Warn().Msgf("Unknown key: %s", key) continue @@ -275,9 +270,6 @@ func collectData() *commitData { if data.Partitions, err = getPartitions(); err != nil { log.Debug().Err(err).Msg("Failed to retrieve disk partitions") } - if data.Mounts, err = getMountPoints(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve mount points") - } return data } @@ -674,6 +666,7 @@ func getDisks() ([]Disk, 
error) { } func getPartitions() ([]Partition, error) { + seen := make(map[string]Partition) partitions, err := disk.Partitions(true) if err != nil { return []Partition{}, nil @@ -681,35 +674,27 @@ func getPartitions() ([]Partition, error) { var partitionList []Partition for _, partition := range partitions { + if value, exists := seen[partition.Device]; exists { + value.MountPoint = append(value.MountPoint, partition.Mountpoint) + seen[partition.Device] = value + continue + } disk := utils.ParseDiskName(partition.Device) - - partitionList = append(partitionList, Partition{ - Name: partition.Device, - DiskName: disk, - Fstype: partition.Fstype, - IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), - }) - } - - return partitionList, nil -} - -func getMountPoints() ([]MountPoint, error) { - partitions, err := disk.Partitions(true) - if err != nil { - return []MountPoint{}, nil + seen[partition.Device] = Partition{ + Name: partition.Device, + MountPoint: []string{partition.Mountpoint}, + DiskName: disk, + Fstype: partition.Fstype, + IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), + } } - var mountPoints []MountPoint - for _, partition := range partitions { - mountPoints = append(mountPoints, MountPoint{ - MountPoint: partition.Mountpoint, - PartitionName: partition.Device, - Opts: strings.Join(partition.Opts, ","), - }) + for _, partition := range seen { + partitionList = append(partitionList, partition) } + fmt.Println(partitionList) - return mountPoints, nil + return partitionList, nil } func dispatchComparison(entry commitDef, currentData, remoteData any) { @@ -728,7 +713,5 @@ func dispatchComparison(entry commitDef, currentData, remoteData any) { compareListData(entry, currentData.([]Disk), *v) case *[]Partition: compareListData(entry, currentData.([]Partition), *v) - case *[]MountPoint: - compareListData(entry, currentData.([]MountPoint), *v) } } diff --git 
a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index e87b2e2..e209a97 100644 --- a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -62,11 +62,6 @@ var commitDefs = map[string]commitDef{ URL: "/api/proc/partitions/", URLSuffix: "sync/", }, - "mounts": { - MultiRow: true, - URL: "/api/proc/mounts/", - URLSuffix: "sync/", - }, } type ServerData struct { @@ -159,18 +154,12 @@ type Disk struct { } type Partition struct { - ID string `json:"id,omitempty"` - Name string `json:"name"` - DiskName string `json:"disk_name"` - Fstype string `json:"fs_type"` - IsVirtual bool `json:"is_virtual"` -} - -type MountPoint struct { - ID string `json:"id,omitempty"` - MountPoint string `json:"mount_point"` - PartitionName string `json:"partition_name"` - Opts string `json:"options"` + ID string `json:"id,omitempty"` + MountPoint []string `json:"mount_point"` + Name string `json:"name"` + DiskName string `json:"disk_name"` + Fstype string `json:"fs_type"` + IsVirtual bool `json:"is_virtual"` } type commitData struct { @@ -186,7 +175,6 @@ type commitData struct { Packages []SystemPackageData `json:"packages"` Disks []Disk `json:"disks"` Partitions []Partition `json:"partitions"` - Mounts []MountPoint `json:"mounts"` } // Defines the ComparableData interface for comparing different types. 
@@ -370,25 +358,10 @@ func (p Partition) GetKey() interface{} { func (p Partition) GetData() ComparableData { return Partition{ - Name: p.Name, - DiskName: p.DiskName, - Fstype: p.Fstype, - IsVirtual: p.IsVirtual, - } -} - -func (m MountPoint) GetID() string { - return m.ID -} - -func (m MountPoint) GetKey() interface{} { - return m.MountPoint -} - -func (m MountPoint) GetData() ComparableData { - return MountPoint{ - MountPoint: m.MountPoint, - Opts: m.Opts, - PartitionName: m.PartitionName, + Name: p.Name, + MountPoint: p.MountPoint, + DiskName: p.DiskName, + Fstype: p.Fstype, + IsVirtual: p.IsVirtual, } } From 64addf095d4889d6360acb4bf167da83428eda48 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 12 Mar 2025 20:36:59 +0900 Subject: [PATCH 230/364] Minor fix Delete fmt.Println() --- pkg/runner/commit.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 70219b5..42c23e7 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -672,7 +672,6 @@ func getPartitions() ([]Partition, error) { return []Partition{}, nil } - var partitionList []Partition for _, partition := range partitions { if value, exists := seen[partition.Device]; exists { value.MountPoint = append(value.MountPoint, partition.Mountpoint) @@ -689,10 +688,10 @@ func getPartitions() ([]Partition, error) { } } + var partitionList []Partition for _, partition := range seen { partitionList = append(partitionList, partition) } - fmt.Println(partitionList) return partitionList, nil } From 068414563891bafb50401a9697e2addf3c565bb8 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 12 Mar 2025 22:24:43 +0900 Subject: [PATCH 231/364] Fix the logic for comparing system info Due to the change of MountPoint from string to []string in the Partition, a panic occurred in the existing comparison logic. Therefore, the system information comparison was modified to use go-cmp package to resolve this issue. 
--- go.mod | 2 +- go.sum | 2 ++ pkg/runner/commit.go | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 30bb29a..9e3add4 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/go-openapi/inflect v0.19.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/hashicorp/hcl/v2 v2.18.1 // indirect diff --git a/go.sum b/go.sum index 3df323d..7cd45eb 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 42c23e7..39a6aac 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -20,6 +20,7 @@ import ( "github.com/alpacanetworks/alpamon-go/pkg/utils" "github.com/alpacanetworks/alpamon-go/pkg/version" _ "github.com/glebarez/go-sqlite" + "github.com/google/go-cmp/cmp" rpmdb "github.com/knqyf263/go-rpmdb/pkg" "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v4/cpu" @@ -213,7 +214,7 @@ func 
compareListData[T ComparableData](entry commitDef, currentData, remoteData for _, remoteItem := range remoteData { if currentItem, exists := currentMap[remoteItem.GetKey()]; exists { - if currentItem != remoteItem.GetData() { + if !cmp.Equal(currentItem, remoteItem.GetData()) { scheduler.Rqueue.Patch(entry.URL+remoteItem.GetID()+"/", currentItem.GetData(), 80, time.Time{}) } delete(currentMap, currentItem.GetKey()) From 28ed5c48f2a8bf44feed67e0b2b0592b50ea84b5 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 13 Mar 2025 11:41:01 +0900 Subject: [PATCH 232/364] Improve postinstall script to prevent dpkg errors during upgrade --- .../command/configs/alpamon-restart.service | 6 +++++ .../command/configs/alpamon-restart.timer | 10 ++++++++ pkg/runner/command.go | 1 - scripts/postinstall.sh | 24 ++++++++++++++----- scripts/postremove.sh | 22 +++++++++-------- 5 files changed, 46 insertions(+), 17 deletions(-) create mode 100644 cmd/alpamon/command/configs/alpamon-restart.service create mode 100644 cmd/alpamon/command/configs/alpamon-restart.timer diff --git a/cmd/alpamon/command/configs/alpamon-restart.service b/cmd/alpamon/command/configs/alpamon-restart.service new file mode 100644 index 0000000..b86eb3c --- /dev/null +++ b/cmd/alpamon/command/configs/alpamon-restart.service @@ -0,0 +1,6 @@ +[Unit] +Description= Restart Alpamon service + +[Service] +Type=oneshot +ExecStart=/bin/systemctl restart alpamon.service \ No newline at end of file diff --git a/cmd/alpamon/command/configs/alpamon-restart.timer b/cmd/alpamon/command/configs/alpamon-restart.timer new file mode 100644 index 0000000..3dfdd48 --- /dev/null +++ b/cmd/alpamon/command/configs/alpamon-restart.timer @@ -0,0 +1,10 @@ +[Unit] +Description= Timer to restart Alpamon service 5 minutes after activation + +[Timer] +OnActiveSec=5min +Unit=alpamon-restart.service +Persistent=false + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 
23e67b0..627be61 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -112,7 +112,6 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { } log.Debug().Msgf("Upgrading alpamon from %s to %s using command: '%s'...", version.Version, latestVersion, cmd) return cr.handleShellCmd(cmd, "root", "root", nil) - case "commit": cr.commit() return 0, "Committed system information." diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 26d5bd5..25b89e6 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -6,9 +6,11 @@ main() { check_root_permission check_systemd_status check_alpamon_binary - install_atlas_cli - if is_new_installation "$@"; then + if is_upgrade "$@"; then + restart_alpamon_by_timer + else + install_atlas_cli setup_alpamon fi @@ -64,11 +66,21 @@ start_systemd_service() { echo "Alpamon has been installed as a systemd service and will be launched automatically on system boot." } -is_new_installation() { - if [ -z "$2" ]; then - return 0 # first install +restart_alpamon_by_timer() { + echo "Setting up systemd timer to restart Alpamon..." + + systemctl daemon-reload || true + systemctl reset-failed alpamon-restart.timer || true + systemctl restart alpamon-restart.timer || true + + echo "Systemd timer to restart Alpamon has been set. It will restart the service in 5 minutes." 
+} + +is_upgrade() { + if [ -n "$2" ]; then + return 0 # Upgrade else - return 1 # upgrade + return 1 # First install fi } diff --git a/scripts/postremove.sh b/scripts/postremove.sh index e92709a..019ca7a 100644 --- a/scripts/postremove.sh +++ b/scripts/postremove.sh @@ -1,17 +1,19 @@ #!/bin/sh -CONF_FILE_PATH="/etc/alpamon/alpamon.conf" -TMP_FILE_PATH="/usr/lib/tmpfiles.d/alpamon.conf" -SVC_FILE_PATH="/lib/systemd/system/alpamon.service" -LOG_FILE_PATH="/var/log/alpamon/alpamon.log" -DB_FILE_PATH="/var/lib/alpamon/alpamon.db" +FILES_TO_REMOVE=" + /etc/alpamon/alpamon.conf + /usr/lib/tmpfiles.d/alpamon.conf + /lib/systemd/system/alpamon.service + /lib/systemd/system/alpamon-restart.service + /lib/systemd/system/alpamon-restart.timer + /var/log/alpamon/alpamon.log + /var/lib/alpamon/alpamon.db +" if [ "$1" = 'purge' ]; then - rm -f "$CONF_FILE_PATH" || true - rm -f "$TMP_FILE_PATH" || true - rm -f "$SVC_FILE_PATH" || true - rm -f "$LOG_FILE_PATH" || true - rm -f "$DB_FILE_PATH" || true + for file in $FILES_TO_REMOVE; do + rm -f "$file" || true + done echo "All related configuration, service, and log files have been deleted." 
fi \ No newline at end of file From 01e6ea02c152e5c76abbee31533c01a50426c304 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 13 Mar 2025 11:50:46 +0900 Subject: [PATCH 233/364] Modified setup command to generate additional required files --- cmd/alpamon/command/setup/setup.go | 49 +++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index ee9894b..54f076d 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -15,14 +15,18 @@ import ( ) var ( - name string - configFiles embed.FS - configTemplatePath string - configTarget string - tmpFilePath = "configs/tmpfile.conf" - tmpFileTarget string - serviceTemplatePath string - serviceTarget string + name string + configFiles embed.FS + configPath string + configTarget string + tmpFilePath = "configs/tmpfile.conf" + tmpFileTarget string + servicePath string + restartServicePath string + serviceTarget string + restartServiceTarget string + timerPath string + timerTarget string ) type ConfigData struct { @@ -37,11 +41,15 @@ type ConfigData struct { func SetConfigPaths(serviceName string, fs embed.FS) { name = serviceName configFiles = fs - configTemplatePath = fmt.Sprintf("configs/%s.conf", name) + configPath = fmt.Sprintf("configs/%s.conf", name) configTarget = fmt.Sprintf("/etc/alpamon/%s.conf", name) tmpFileTarget = fmt.Sprintf("/usr/lib/tmpfiles.d/%s.conf", name) - serviceTemplatePath = fmt.Sprintf("configs/%s.service", name) + servicePath = fmt.Sprintf("configs/%s.service", name) serviceTarget = fmt.Sprintf("/lib/systemd/system/%s.service", name) + restartServicePath = fmt.Sprintf("configs/%s-restart.service", name) + restartServiceTarget = fmt.Sprintf("lib/systemd/system/%s-restart.service", name) + timerPath = fmt.Sprintf("configs/%s-restart.timer", name) + timerTarget = fmt.Sprintf("/lib/systemd/system/%s-restart.timer", name) } var SetupCmd = &cobra.Command{ @@ -79,7 
+87,7 @@ var SetupCmd = &cobra.Command{ return err } - err = writeService() + err = writeSystemdFiles() if err != nil { return err } @@ -90,9 +98,9 @@ var SetupCmd = &cobra.Command{ } func writeConfig() error { - tmplData, err := configFiles.ReadFile(configTemplatePath) + tmplData, err := configFiles.ReadFile(configPath) if err != nil { - return fmt.Errorf("failed to read template file (%s): %v", configTemplatePath, err) + return fmt.Errorf("failed to read template file (%s): %v", configPath, err) } tmpl, err := template.New(fmt.Sprintf("%s.conf", name)).Parse(string(tmplData)) @@ -138,11 +146,22 @@ func writeConfig() error { return nil } -func writeService() error { - err := copyEmbeddedFile(serviceTemplatePath, serviceTarget) +func writeSystemdFiles() error { + err := copyEmbeddedFile(servicePath, serviceTarget) if err != nil { return fmt.Errorf("failed to write target file: %v", err) } + + err = copyEmbeddedFile(restartServicePath, restartServiceTarget) + if err != nil { + return fmt.Errorf("failed to write target file: %v", err) + } + + err = copyEmbeddedFile(timerPath, timerTarget) + if err != nil { + return fmt.Errorf("failed to write target file: %v", err) + } + return nil } From cc27badccd8347cae212f6b7c32ea967c236ed9d Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 13 Mar 2025 11:58:28 +0900 Subject: [PATCH 234/364] Minor fix --- cmd/alpamon/command/setup/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 54f076d..4e89dfc 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -47,7 +47,7 @@ func SetConfigPaths(serviceName string, fs embed.FS) { servicePath = fmt.Sprintf("configs/%s.service", name) serviceTarget = fmt.Sprintf("/lib/systemd/system/%s.service", name) restartServicePath = fmt.Sprintf("configs/%s-restart.service", name) - restartServiceTarget = fmt.Sprintf("lib/systemd/system/%s-restart.service", name) + 
restartServiceTarget = fmt.Sprintf("/lib/systemd/system/%s-restart.service", name) timerPath = fmt.Sprintf("configs/%s-restart.timer", name) timerTarget = fmt.Sprintf("/lib/systemd/system/%s-restart.timer", name) } From 26344e80e6cdb4e92788e18de460b5ecfd212e8e Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 13 Mar 2025 17:00:52 +0900 Subject: [PATCH 235/364] Update schemas to reflect alpacon-server changes Update DiskUsage and HourlyDiskUsage to reflect changes in alpacon-server. --- ...20250313040545_alter_disk_usage_fields.sql | 20 +++++++++++++++++++ pkg/db/migration/atlas.sum | 3 ++- pkg/db/schema/diskusage.go | 1 - pkg/db/schema/hourly_disk_usage.go | 3 +++ 4 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 pkg/db/migration/20250313040545_alter_disk_usage_fields.sql diff --git a/pkg/db/migration/20250313040545_alter_disk_usage_fields.sql b/pkg/db/migration/20250313040545_alter_disk_usage_fields.sql new file mode 100644 index 0000000..84c267e --- /dev/null +++ b/pkg/db/migration/20250313040545_alter_disk_usage_fields.sql @@ -0,0 +1,20 @@ +-- Disable the enforcement of foreign-keys constraints +PRAGMA foreign_keys = off; +-- Create "new_disk_usages" table +CREATE TABLE `new_disk_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `usage` real NOT NULL, `total` integer NOT NULL, `free` integer NOT NULL, `used` integer NOT NULL); +-- Copy rows from old table "disk_usages" to new temporary table "new_disk_usages" +INSERT INTO `new_disk_usages` (`id`, `timestamp`, `device`, `usage`, `total`, `free`, `used`) SELECT `id`, `timestamp`, `device`, `usage`, `total`, `free`, `used` FROM `disk_usages`; +-- Drop "disk_usages" table after copying rows +DROP TABLE `disk_usages`; +-- Rename temporary table "new_disk_usages" to "disk_usages" +ALTER TABLE `new_disk_usages` RENAME TO `disk_usages`; +-- Create index "diskusage_timestamp" to table: "disk_usages" +CREATE INDEX `diskusage_timestamp` ON 
`disk_usages` (`timestamp`); +-- Add column "total" to table: "hourly_disk_usages" +ALTER TABLE `hourly_disk_usages` ADD COLUMN `total` integer NOT NULL; +-- Add column "free" to table: "hourly_disk_usages" +ALTER TABLE `hourly_disk_usages` ADD COLUMN `free` integer NOT NULL; +-- Add column "used" to table: "hourly_disk_usages" +ALTER TABLE `hourly_disk_usages` ADD COLUMN `used` integer NOT NULL; +-- Enable back the enforcement of foreign-keys constraints +PRAGMA foreign_keys = on; diff --git a/pkg/db/migration/atlas.sum b/pkg/db/migration/atlas.sum index 7e755fd..8d4c177 100644 --- a/pkg/db/migration/atlas.sum +++ b/pkg/db/migration/atlas.sum @@ -1,2 +1,3 @@ -h1:LXufv4SaVfmeeKLJ1RW1XrGphNbplJ1fd7m8GOUFhhk= +h1:hcE+sSVzP5X1RUGR7XOPRQh2d81bjU+z80imlium2PY= 20250116061438_init_schemas.sql h1:/JHZWxaROODWtCQJJ9qOVEsCWR2xt3dnOH+0KrRZInw= +20250313040545_alter_disk_usage_fields.sql h1:cbWRdbaNoyS3S9stNC6291QzPmwufPrGj9NvE4fyMrg= diff --git a/pkg/db/schema/diskusage.go b/pkg/db/schema/diskusage.go index 5d26a66..def7f21 100644 --- a/pkg/db/schema/diskusage.go +++ b/pkg/db/schema/diskusage.go @@ -18,7 +18,6 @@ func (DiskUsage) Fields() []ent.Field { return []ent.Field{ field.Time("timestamp").Default(time.Now()), field.String("device"), - field.String("mount_point"), field.Float("usage"), field.Int64("total"), field.Int64("free"), diff --git a/pkg/db/schema/hourly_disk_usage.go b/pkg/db/schema/hourly_disk_usage.go index 6d7170b..2043b8e 100644 --- a/pkg/db/schema/hourly_disk_usage.go +++ b/pkg/db/schema/hourly_disk_usage.go @@ -20,6 +20,9 @@ func (HourlyDiskUsage) Fields() []ent.Field { field.String("device"), field.Float("peak"), field.Float("avg"), + field.Int64("total"), + field.Int64("free"), + field.Int64("used"), } } From 80e0289113a0ec01f1bebce3687cb03aeaf1e2ff Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 13 Mar 2025 17:05:16 +0900 Subject: [PATCH 236/364] Update due to schemas changes Update the collector code to reflect changes in DiskUsage and 
HourlyDiskUsage. When collecting real-time disk usage, there was an issue where unnecessary data was stored due to the same partition having different mount points. To resolve this, fix parseDiskUsage to store data only once per partition. --- pkg/collector/check/base/types.go | 4 +- .../check/batch/daily/disk/usage/usage.go | 61 ++++++++++++++++--- .../hourly/disk/usage/hourly_usage_test.go | 2 - .../check/batch/hourly/disk/usage/usage.go | 40 ++++++++++-- .../check/realtime/disk/usage/usage.go | 20 +++--- 5 files changed, 101 insertions(+), 26 deletions(-) diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 3268fb2..2a51d46 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -59,6 +59,9 @@ type DiskUsageQuerySet struct { Device string `json:"device"` Max float64 `json:"max"` AVG float64 `json:"avg"` + Total uint64 `json:"total"` + Free uint64 `json:"free"` + Used uint64 `json:"used"` } type TrafficQuerySet struct { @@ -78,7 +81,6 @@ type CheckResult struct { Usage float64 `json:"usage,omitempty"` Name string `json:"name,omitempty"` Device string `json:"device,omitempty"` - MountPoint string `json:"mount_point,omitempty"` Total uint64 `json:"total,omitempty"` Free uint64 `json:"free,omitempty"` Used uint64 `json:"used,omitempty"` diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index a989625..e403d96 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -4,8 +4,8 @@ import ( "context" "time" + "entgo.io/ent/dialect/sql" "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskusage" ) @@ -48,6 +48,9 @@ func (c *Check) queryHourlyDiskUsage(ctx context.Context) (base.MetricData, erro Device: row.Device, Peak: row.Max, Avg: row.AVG, + 
Total: row.Total, + Free: row.Free, + Used: row.Used, }) } metric := base.MetricData{ @@ -65,17 +68,57 @@ func (c *Check) queryHourlyDiskUsage(ctx context.Context) (base.MetricData, erro func (c *Check) getHourlyDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, error) { client := c.GetClient() - now := time.Now() - from := now.Add(-24 * time.Hour) var querySet []base.DiskUsageQuerySet err := client.HourlyDiskUsage.Query(). - Where(hourlydiskusage.TimestampGTE(from), hourlydiskusage.TimestampLTE(now)). - GroupBy(hourlydiskusage.FieldDevice). - Aggregate( - ent.Max(hourlydiskusage.FieldPeak), - ent.Mean(hourlydiskusage.FieldAvg), - ).Scan(ctx, &querySet) + Modify(func(s *sql.Selector) { + now := time.Now() + from := now.Add(-24 * time.Hour) + t := sql.Table(hourlydiskusage.Table) + + latestSubq := sql.Select( + sql.As("device", "l.device"), + sql.As("used", "l.used"), + sql.As("total", "l.total"), + sql.As("free", "l.free"), + ). + From(t). + Where( + sql.In("timestamp", + sql.Select(sql.Max("timestamp")).From(t).GroupBy("device"), + ), + ). + As("l") + + usageSubq := sql.Select( + sql.As("device", "u.device"), + "timestamp", + sql.As("peak", "u.peak"), + sql.As("avg", "u.avg"), + ). + From(t). + Where( + sql.And( + sql.GTE(t.C(hourlydiskusage.FieldTimestamp), from), + sql.LTE(t.C(hourlydiskusage.FieldTimestamp), now), + ), + ). + GroupBy("device", "timestamp"). + As("u") + + *s = *sql.Select( + sql.As("u.device", "device"), + sql.As(sql.Max("u.peak"), "max"), + sql.As(sql.Avg("u.avg"), "avg"), + sql.As("l.used", "used"), + sql.As("l.total", "total"), + sql.As("l.free", "free"), + ). + From(usageSubq). + Join(latestSubq). + On("u.device", "l.device"). 
+ GroupBy("u.device") + }).Scan(ctx, &querySet) if err != nil { return querySet, err } diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index bdfa30d..ec3e94e 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -45,7 +45,6 @@ func (suite *HourlyDiskUsageCheckSuite) TestGetDiskUsage() { err := suite.check.GetClient().DiskUsage.Create(). SetTimestamp(time.Now()). SetDevice(uuid.NewString()). - SetMountPoint(uuid.NewString()). SetUsage(rand.Float64()). SetTotal(int64(rand.Int())). SetFree(int64(rand.Int())). @@ -81,7 +80,6 @@ func (suite *HourlyDiskUsageCheckSuite) TestDeleteDiskUsage() { err := suite.check.GetClient().DiskUsage.Create(). SetTimestamp(time.Now().Add(-2 * time.Hour)). SetDevice(uuid.NewString()). - SetMountPoint(uuid.NewString()). SetUsage(rand.Float64()). SetTotal(int64(rand.Int())). SetFree(int64(rand.Int())). diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index beb06bb..e34862d 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -49,6 +49,9 @@ func (c *Check) queryDiskUsage(ctx context.Context) (base.MetricData, error) { Device: row.Device, Peak: row.Max, Avg: row.AVG, + Total: row.Total, + Free: row.Free, + Used: row.Used, }) } metric := base.MetricData{ @@ -80,8 +83,22 @@ func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, err usageExpr := "(CAST(SUM(used) AS FLOAT) * 100.0) / NULLIF(SUM(total), 0)" t := sql.Table(diskusage.Table) - subq := sql.Select( - "device", + latestSubq := sql.Select( + sql.As("device", "l.device"), + sql.As("used", "l.used"), + sql.As("total", "l.total"), + sql.As("free", "l.free"), + ). + From(t). 
+ Where( + sql.In("timestamp", + sql.Select(sql.Max("timestamp")).From(t).GroupBy("device"), + ), + ). + As("l") + + usageSubq := sql.Select( + sql.As("device", "u.device"), "timestamp", sql.As(usageExpr, "usage"), ). @@ -92,13 +109,21 @@ func (c *Check) getDiskUsage(ctx context.Context) ([]base.DiskUsageQuerySet, err sql.LTE(t.C(diskusage.FieldTimestamp), now), ), ). - GroupBy("device", "timestamp") + GroupBy("device", "timestamp"). + As("u") *s = *sql.Select( - "device", + sql.As("u.device", "device"), sql.As(sql.Max("usage"), "max"), sql.As(sql.Avg("usage"), "avg"), - ).From(subq).GroupBy("device") + sql.As("l.used", "used"), + sql.As("l.total", "total"), + sql.As("l.free", "free"), + ). + From(usageSubq). + Join(latestSubq). + On("u.device", "l.device"). + GroupBy("u.device") }).Scan(ctx, &querySet) if err != nil { return querySet, err @@ -118,7 +143,10 @@ func (c *Check) saveHourlyDiskUsage(data []base.CheckResult, ctx context.Context q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). SetPeak(data[i].Peak). - SetAvg(data[i].Avg) + SetAvg(data[i].Avg). + SetTotal(int64(data[i].Total)). + SetFree(int64(data[i].Free)). 
+ SetUsed(int64(data[i].Used)) }).Exec(ctx) if err != nil { return err diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index 831cbc6..cf486ea 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -58,17 +58,22 @@ func (c *Check) collectAndSaveDiskUsage(ctx context.Context) (base.MetricData, e func (c *Check) parseDiskUsage(partitions []disk.PartitionStat) []base.CheckResult { var data []base.CheckResult + seen := make(map[string]bool) for _, partition := range partitions { + if seen[partition.Device] { + continue + } + seen[partition.Device] = true + usage, err := c.collectDiskUsage(partition.Mountpoint) if err == nil { data = append(data, base.CheckResult{ - Timestamp: time.Now(), - Device: partition.Device, - MountPoint: partition.Mountpoint, - Usage: usage.UsedPercent, - Total: usage.Total, - Free: usage.Free, - Used: usage.Used, + Timestamp: time.Now(), + Device: partition.Device, + Usage: usage.UsedPercent, + Total: usage.Total, + Free: usage.Free, + Used: usage.Used, }) } } @@ -110,7 +115,6 @@ func (c *Check) saveDiskUsage(data []base.CheckResult, ctx context.Context) erro err := client.DiskUsage.MapCreateBulk(data, func(q *ent.DiskUsageCreate, i int) { q.SetTimestamp(data[i].Timestamp). SetDevice(data[i].Device). - SetMountPoint(data[i].MountPoint). SetUsage(data[i].Usage). SetTotal(int64(data[i].Total)). SetFree(int64(data[i].Free)). From 90fc1c34ca25ead249ef19922b501260d7bbb076 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 13 Mar 2025 17:25:54 +0900 Subject: [PATCH 237/364] Add default value to HourlyDiskUsage Due to the absence of total, free, and used fields in the previous HourlyDiskUsage, NOT NULL constraint related error occurred during migration. To resolve this, add 0 as the default value.
--- ...> 20250313082232_alter_disk_usage_fields.sql} | 16 ++++++++++------ pkg/db/migration/atlas.sum | 4 ++-- pkg/db/schema/hourly_disk_usage.go | 6 +++--- 3 files changed, 15 insertions(+), 11 deletions(-) rename pkg/db/migration/{20250313040545_alter_disk_usage_fields.sql => 20250313082232_alter_disk_usage_fields.sql} (50%) diff --git a/pkg/db/migration/20250313040545_alter_disk_usage_fields.sql b/pkg/db/migration/20250313082232_alter_disk_usage_fields.sql similarity index 50% rename from pkg/db/migration/20250313040545_alter_disk_usage_fields.sql rename to pkg/db/migration/20250313082232_alter_disk_usage_fields.sql index 84c267e..ffd7dbc 100644 --- a/pkg/db/migration/20250313040545_alter_disk_usage_fields.sql +++ b/pkg/db/migration/20250313082232_alter_disk_usage_fields.sql @@ -10,11 +10,15 @@ DROP TABLE `disk_usages`; ALTER TABLE `new_disk_usages` RENAME TO `disk_usages`; -- Create index "diskusage_timestamp" to table: "disk_usages" CREATE INDEX `diskusage_timestamp` ON `disk_usages` (`timestamp`); --- Add column "total" to table: "hourly_disk_usages" -ALTER TABLE `hourly_disk_usages` ADD COLUMN `total` integer NOT NULL; --- Add column "free" to table: "hourly_disk_usages" -ALTER TABLE `hourly_disk_usages` ADD COLUMN `free` integer NOT NULL; --- Add column "used" to table: "hourly_disk_usages" -ALTER TABLE `hourly_disk_usages` ADD COLUMN `used` integer NOT NULL; +-- Create "new_hourly_disk_usages" table +CREATE TABLE `new_hourly_disk_usages` (`id` integer NOT NULL PRIMARY KEY AUTOINCREMENT, `timestamp` datetime NOT NULL, `device` text NOT NULL, `peak` real NOT NULL, `avg` real NOT NULL, `total` integer NOT NULL DEFAULT (0), `free` integer NOT NULL DEFAULT (0), `used` integer NOT NULL DEFAULT (0)); +-- Copy rows from old table "hourly_disk_usages" to new temporary table "new_hourly_disk_usages" +INSERT INTO `new_hourly_disk_usages` (`id`, `timestamp`, `device`, `peak`, `avg`) SELECT `id`, `timestamp`, `device`, `peak`, `avg` FROM `hourly_disk_usages`; +-- Drop 
"hourly_disk_usages" table after copying rows +DROP TABLE `hourly_disk_usages`; +-- Rename temporary table "new_hourly_disk_usages" to "hourly_disk_usages" +ALTER TABLE `new_hourly_disk_usages` RENAME TO `hourly_disk_usages`; +-- Create index "hourlydiskusage_timestamp" to table: "hourly_disk_usages" +CREATE INDEX `hourlydiskusage_timestamp` ON `hourly_disk_usages` (`timestamp`); -- Enable back the enforcement of foreign-keys constraints PRAGMA foreign_keys = on; diff --git a/pkg/db/migration/atlas.sum b/pkg/db/migration/atlas.sum index 8d4c177..43e0f11 100644 --- a/pkg/db/migration/atlas.sum +++ b/pkg/db/migration/atlas.sum @@ -1,3 +1,3 @@ -h1:hcE+sSVzP5X1RUGR7XOPRQh2d81bjU+z80imlium2PY= +h1:tsclYU4OpgYf8cgEIz00u2FL2lJ4n+2k6qtNRKErUPI= 20250116061438_init_schemas.sql h1:/JHZWxaROODWtCQJJ9qOVEsCWR2xt3dnOH+0KrRZInw= -20250313040545_alter_disk_usage_fields.sql h1:cbWRdbaNoyS3S9stNC6291QzPmwufPrGj9NvE4fyMrg= +20250313082232_alter_disk_usage_fields.sql h1:ojWzahPUgpQVscOC8acU7FWUJPLLUK9mvvg7ZrZOPEI= diff --git a/pkg/db/schema/hourly_disk_usage.go b/pkg/db/schema/hourly_disk_usage.go index 2043b8e..c3dc419 100644 --- a/pkg/db/schema/hourly_disk_usage.go +++ b/pkg/db/schema/hourly_disk_usage.go @@ -20,9 +20,9 @@ func (HourlyDiskUsage) Fields() []ent.Field { field.String("device"), field.Float("peak"), field.Float("avg"), - field.Int64("total"), - field.Int64("free"), - field.Int64("used"), + field.Int64("total").Default(0), + field.Int64("free").Default(0), + field.Int64("used").Default(0), } } From 2f38dc58123ce0a20672bcac43f63c37e910b37a Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 14 Mar 2025 09:50:14 +0900 Subject: [PATCH 238/364] Minor fix --- pkg/runner/client.go | 2 +- pkg/runner/command.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 3b4a34d..7add2a3 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -152,7 +152,7 @@ func (wc *WebsocketClient) Quit() { close(wc.QuitChan) 
} -func (wc *WebsocketClient) restart() { +func (wc *WebsocketClient) Restart() { wc.RestartRequested = true wc.Quit() } diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 627be61..185716d 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -186,7 +186,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, "Invalid session ID" case "restart": time.AfterFunc(1*time.Second, func() { - cr.wsClient.restart() + cr.wsClient.Restart() }) return 0, "Alpamon will restart in 1 second." From 7ed403f4f0e9f491b24b9fefd2f0830b7038aa58 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 14 Mar 2025 14:58:07 +0900 Subject: [PATCH 239/364] Minor fix Rename MountPoint to MountPoints in Partition. --- pkg/runner/commit.go | 12 ++++++------ pkg/runner/commit_types.go | 22 +++++++++++----------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 39a6aac..4d7f898 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -675,17 +675,17 @@ func getPartitions() ([]Partition, error) { for _, partition := range partitions { if value, exists := seen[partition.Device]; exists { - value.MountPoint = append(value.MountPoint, partition.Mountpoint) + value.MountPoints = append(value.MountPoints, partition.Mountpoint) seen[partition.Device] = value continue } disk := utils.ParseDiskName(partition.Device) seen[partition.Device] = Partition{ - Name: partition.Device, - MountPoint: []string{partition.Mountpoint}, - DiskName: disk, - Fstype: partition.Fstype, - IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), + Name: partition.Device, + MountPoints: []string{partition.Mountpoint}, + DiskName: disk, + Fstype: partition.Fstype, + IsVirtual: utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint), } } diff --git a/pkg/runner/commit_types.go b/pkg/runner/commit_types.go index e209a97..a60d560 100644 --- 
a/pkg/runner/commit_types.go +++ b/pkg/runner/commit_types.go @@ -154,12 +154,12 @@ type Disk struct { } type Partition struct { - ID string `json:"id,omitempty"` - MountPoint []string `json:"mount_point"` - Name string `json:"name"` - DiskName string `json:"disk_name"` - Fstype string `json:"fs_type"` - IsVirtual bool `json:"is_virtual"` + ID string `json:"id,omitempty"` + MountPoints []string `json:"mount_points"` + Name string `json:"name"` + DiskName string `json:"disk_name"` + Fstype string `json:"fs_type"` + IsVirtual bool `json:"is_virtual"` } type commitData struct { @@ -358,10 +358,10 @@ func (p Partition) GetKey() interface{} { func (p Partition) GetData() ComparableData { return Partition{ - Name: p.Name, - MountPoint: p.MountPoint, - DiskName: p.DiskName, - Fstype: p.Fstype, - IsVirtual: p.IsVirtual, + Name: p.Name, + MountPoints: p.MountPoints, + DiskName: p.DiskName, + Fstype: p.Fstype, + IsVirtual: p.IsVirtual, } } From 89065f47c622deb624632bbb2548ab74c9ed6116 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 17 Mar 2025 12:51:14 +0900 Subject: [PATCH 240/364] Replaced manual file handling in the code with GoReleaser configuration --- .goreleaser.yaml | 16 ++++ cmd/alpamon/command/root.go | 6 +- cmd/alpamon/command/setup/setup.go | 81 ++----------------- .../alpamon-restart.service | 0 .../configs => configs}/alpamon-restart.timer | 0 .../command/configs => configs}/alpamon.conf | 0 .../configs => configs}/alpamon.service | 2 +- .../command/configs => configs}/tmpfile.conf | 2 +- scripts/postinstall.sh | 10 +++ 9 files changed, 36 insertions(+), 81 deletions(-) rename {cmd/alpamon/command/configs => configs}/alpamon-restart.service (100%) rename {cmd/alpamon/command/configs => configs}/alpamon-restart.timer (100%) rename {cmd/alpamon/command/configs => configs}/alpamon.conf (100%) rename {cmd/alpamon/command/configs => configs}/alpamon.service (89%) rename {cmd/alpamon/command/configs => configs}/tmpfile.conf (80%) diff --git a/.goreleaser.yaml 
b/.goreleaser.yaml index 8cac16e..3fed8b6 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -40,6 +40,22 @@ nfpms: - zip bindir: /usr/local/bin/ + contents: + - src: "configs/tmpfile.conf" + dst: "/usr/lib/tmpfiles.d/{{ .ProjectName }}.conf" + + - src: "configs/alpamon.conf" + dst: "/etc/alpamon/{{ .ProjectName }}.config.tmpl" + + - src: "configs/{{ .ProjectName }}.service" + dst: "/lib/systemd/system/{{ .ProjectName }}.service" + + - src: "configs/{{ .ProjectName }}-restart.service" + dst: "/lib/systemd/system/{{ .ProjectName }}-restart.service" + + - src: "configs/{{ .ProjectName }}-restart.timer" + dst: "/lib/systemd/system/{{ .ProjectName }}-restart.timer" + scripts: postinstall: "scripts/postinstall.sh" preremove: "scripts/preremove.sh" diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 4344223..1e338f2 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -1,7 +1,6 @@ package command import ( - "embed" "fmt" "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/ftp" "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/setup" @@ -33,11 +32,8 @@ var RootCmd = &cobra.Command{ }, } -//go:embed configs/* -var configFiles embed.FS - func init() { - setup.SetConfigPaths(name, configFiles) + setup.SetConfigPaths(name) RootCmd.AddCommand(setup.SetupCmd, ftp.FtpCmd) } diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 4e89dfc..3455b41 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -1,7 +1,6 @@ package setup import ( - "embed" "fmt" cli "github.com/alpacanetworks/alpacon-cli/utils" "github.com/alpacanetworks/alpamon-go/pkg/utils" @@ -15,18 +14,9 @@ import ( ) var ( - name string - configFiles embed.FS - configPath string - configTarget string - tmpFilePath = "configs/tmpfile.conf" - tmpFileTarget string - servicePath string - restartServicePath string - serviceTarget string - restartServiceTarget string - timerPath string 
- timerTarget string + name string + configTarget string + templateFilePath string ) type ConfigData struct { @@ -38,18 +28,10 @@ type ConfigData struct { Debug string } -func SetConfigPaths(serviceName string, fs embed.FS) { +func SetConfigPaths(serviceName string) { name = serviceName - configFiles = fs - configPath = fmt.Sprintf("configs/%s.conf", name) configTarget = fmt.Sprintf("/etc/alpamon/%s.conf", name) - tmpFileTarget = fmt.Sprintf("/usr/lib/tmpfiles.d/%s.conf", name) - servicePath = fmt.Sprintf("configs/%s.service", name) - serviceTarget = fmt.Sprintf("/lib/systemd/system/%s.service", name) - restartServicePath = fmt.Sprintf("configs/%s-restart.service", name) - restartServiceTarget = fmt.Sprintf("/lib/systemd/system/%s-restart.service", name) - timerPath = fmt.Sprintf("configs/%s-restart.timer", name) - timerTarget = fmt.Sprintf("/lib/systemd/system/%s-restart.timer", name) + templateFilePath = fmt.Sprintf("/etc/alpamon/%s.config.tmpl", name) } var SetupCmd = &cobra.Command{ @@ -72,11 +54,6 @@ var SetupCmd = &cobra.Command{ fmt.Println("Applying a new configuration automatically...") - err := copyEmbeddedFile(tmpFilePath, tmpFileTarget) - if err != nil { - return err - } - output, err := exec.Command("systemd-tmpfiles", "--create").CombinedOutput() if err != nil { return fmt.Errorf("%w\n%s", err, string(output)) @@ -87,20 +64,15 @@ var SetupCmd = &cobra.Command{ return err } - err = writeSystemdFiles() - if err != nil { - return err - } - fmt.Println("Configuration file successfully updated.") return nil }, } func writeConfig() error { - tmplData, err := configFiles.ReadFile(configPath) + tmplData, err := os.ReadFile(templateFilePath) if err != nil { - return fmt.Errorf("failed to read template file (%s): %v", configPath, err) + return fmt.Errorf("failed to read template config (%s): %v", templateFilePath, err) } tmpl, err := template.New(fmt.Sprintf("%s.conf", name)).Parse(string(tmplData)) @@ -146,45 +118,6 @@ func writeConfig() error { return nil } 
-func writeSystemdFiles() error { - err := copyEmbeddedFile(servicePath, serviceTarget) - if err != nil { - return fmt.Errorf("failed to write target file: %v", err) - } - - err = copyEmbeddedFile(restartServicePath, restartServiceTarget) - if err != nil { - return fmt.Errorf("failed to write target file: %v", err) - } - - err = copyEmbeddedFile(timerPath, timerTarget) - if err != nil { - return fmt.Errorf("failed to write target file: %v", err) - } - - return nil -} - -func copyEmbeddedFile(srcPath, dstPath string) error { - fileData, err := configFiles.ReadFile(srcPath) - if err != nil { - return fmt.Errorf("failed to read embedded file: %v", err) - } - - outFile, err := os.Create(dstPath) - if err != nil { - return fmt.Errorf("failed to create destination file: %v", err) - } - defer func() { _ = outFile.Close() }() - - _, err = outFile.Write(fileData) - if err != nil { - return fmt.Errorf("failed to write to destination file: %v", err) - } - - return nil -} - func fileExists(path string) bool { fileInfo, err := os.Stat(path) if err != nil { diff --git a/cmd/alpamon/command/configs/alpamon-restart.service b/configs/alpamon-restart.service similarity index 100% rename from cmd/alpamon/command/configs/alpamon-restart.service rename to configs/alpamon-restart.service diff --git a/cmd/alpamon/command/configs/alpamon-restart.timer b/configs/alpamon-restart.timer similarity index 100% rename from cmd/alpamon/command/configs/alpamon-restart.timer rename to configs/alpamon-restart.timer diff --git a/cmd/alpamon/command/configs/alpamon.conf b/configs/alpamon.conf similarity index 100% rename from cmd/alpamon/command/configs/alpamon.conf rename to configs/alpamon.conf diff --git a/cmd/alpamon/command/configs/alpamon.service b/configs/alpamon.service similarity index 89% rename from cmd/alpamon/command/configs/alpamon.service rename to configs/alpamon.service index ebc6713..ba55e4c 100644 --- a/cmd/alpamon/command/configs/alpamon.service +++ b/configs/alpamon.service @@ 
-11,4 +11,4 @@ StandardOutput=null StandardError=null [Install] -WantedBy=multi-user.target +WantedBy=multi-user.target \ No newline at end of file diff --git a/cmd/alpamon/command/configs/tmpfile.conf b/configs/tmpfile.conf similarity index 80% rename from cmd/alpamon/command/configs/tmpfile.conf rename to configs/tmpfile.conf index 8c3fbf7..1a9901a 100644 --- a/cmd/alpamon/command/configs/tmpfile.conf +++ b/configs/tmpfile.conf @@ -2,4 +2,4 @@ d /etc/alpamon 0700 root root - - f /etc/alpamon/alpamon.conf 0600 root root - - d /var/lib/alpamon 0750 root root - - f /var/lib/alpamon/alpamon.db 0750 root root - - -d /var/log/alpamon 0750 root root - - +d /var/log/alpamon 0750 root root - - \ No newline at end of file diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 25b89e6..8a6019a 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -1,6 +1,7 @@ #!/bin/bash ALPAMON_BIN="/usr/local/bin/alpamon" +TEMPLATE_FILE="/etc/alpamon/alpamon.config.tmpl" main() { check_root_permission @@ -14,6 +15,7 @@ main() { setup_alpamon fi + cleanup_tmpl_files start_systemd_service } @@ -76,6 +78,14 @@ restart_alpamon_by_timer() { echo "Systemd timer to restart Alpamon has been set. It will restart the service in 5 minutes." 
} +cleanup_tmpl_files() { + if [ -f "$TEMPLATE_FILE" ]; then + echo "Removing template file: $TEMPLATE_FILE" + rm -f "$TEMPLATE_FILE" || true + fi +} + + is_upgrade() { if [ -n "$2" ]; then return 0 # Upgrade From e9f5b2bef0a9b46d27946d42fd49865a0451ce4c Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 17 Mar 2025 13:01:49 +0900 Subject: [PATCH 241/364] Minor fix --- .goreleaser.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 3fed8b6..440b5f0 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -44,7 +44,7 @@ nfpms: - src: "configs/tmpfile.conf" dst: "/usr/lib/tmpfiles.d/{{ .ProjectName }}.conf" - - src: "configs/alpamon.conf" + - src: "configs/{{ .ProjectName }}.conf" dst: "/etc/alpamon/{{ .ProjectName }}.config.tmpl" - src: "configs/{{ .ProjectName }}.service" From 008b1cc42632f6f68c7cc8e8a66c3611fa54bc2c Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 17 Mar 2025 13:10:17 +0900 Subject: [PATCH 242/364] Minor fix --- scripts/postinstall.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 8a6019a..c7c1345 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -13,10 +13,10 @@ main() { else install_atlas_cli setup_alpamon + start_systemd_service fi cleanup_tmpl_files - start_systemd_service } check_root_permission() { From 98caf64f37406af3483aacc739aba29552580c6d Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 17 Mar 2025 13:20:09 +0900 Subject: [PATCH 243/364] Minor fix --- scripts/postinstall.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index c7c1345..92f8f0f 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -72,6 +72,7 @@ restart_alpamon_by_timer() { echo "Setting up systemd timer to restart Alpamon..." 
systemctl daemon-reload || true + systemctl enable alpamon-restart.timer || true systemctl reset-failed alpamon-restart.timer || true systemctl restart alpamon-restart.timer || true From 5e49fbd786aa09254578994cadc4c9e72a77e32e Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 18 Mar 2025 12:00:52 +0900 Subject: [PATCH 244/364] Add ldflags for smaller production builds --- .goreleaser.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 440b5f0..be05f47 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -9,7 +9,7 @@ builds: - main: ./cmd/alpamon binary: alpamon ldflags: - - -X github.com/alpacanetworks/alpamon-go/pkg/version.Version={{.Version}} + - -s -w -X github.com/alpacanetworks/alpamon-go/pkg/version.Version={{.Version}} env: - CGO_ENABLED=0 goos: From 7740054662a9ccd81eac808d146799e25c931706 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 18 Mar 2025 15:25:47 +0900 Subject: [PATCH 245/364] Improve logger to use pretty output for both console and file --- pkg/logger/logger.go | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 2cf5ebc..3d285f5 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -33,38 +33,40 @@ func InitLogger() *os.File { os.Exit(1) } - var output io.Writer recordWriter := &logRecordWriter{} + var output io.Writer // In development, log to console; in production, log to file if version.Version == "dev" { - consoleWriter := zerolog.ConsoleWriter{ - Out: os.Stderr, - TimeFormat: time.RFC3339, - TimeLocation: time.Local, - FormatLevel: func(i interface{}) string { - return "[" + strings.ToUpper(i.(string)) + "]" - }, - FormatMessage: func(i interface{}) string { - return " " + i.(string) - }, - FormatFieldName: func(i interface{}) string { - return "(" + i.(string) + ")" - }, - FormatFieldValue: func(i interface{}) string { - return i.(string) - }, - } - 
output = zerolog.MultiLevelWriter(consoleWriter, recordWriter) + output = zerolog.MultiLevelWriter(newPrettyWriter(os.Stderr), recordWriter) } else { - output = zerolog.MultiLevelWriter(logFile, recordWriter) + output = zerolog.MultiLevelWriter(newPrettyWriter(logFile), recordWriter) } log.Logger = zerolog.New(output).With().Timestamp().Caller().Logger() - return logFile } +func newPrettyWriter(out io.Writer) zerolog.ConsoleWriter { + return zerolog.ConsoleWriter{ + Out: out, + TimeFormat: time.RFC3339, + TimeLocation: time.Local, + FormatLevel: func(i interface{}) string { + return "[" + strings.ToUpper(i.(string)) + "]" + }, + FormatMessage: func(i interface{}) string { + return " " + i.(string) + }, + FormatFieldName: func(i interface{}) string { + return "(" + i.(string) + ")" + }, + FormatFieldValue: func(i interface{}) string { + return i.(string) + }, + } +} + type logRecord struct { Date string `json:"date"` Level int `json:"level"` From 5fe5d0e21c93b2c73ec8152031fecc3bdcd4a82a Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 18 Mar 2025 17:14:45 +0900 Subject: [PATCH 246/364] Minor fix --- pkg/runner/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 185716d..f67f363 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -29,7 +29,7 @@ import ( ) const ( - fileUploadTimeout = 60 * 10 * time.Second + fileUploadTimeout = 60 * 10 ) func NewCommandRunner(wsClient *WebsocketClient, command Command, data CommandData) *CommandRunner { From 4dc10c1b5a8bcf937fea654242d35c8f4a672efa Mon Sep 17 00:00:00 2001 From: junho98 Date: Wed, 19 Mar 2025 12:22:01 +0900 Subject: [PATCH 247/364] Update dockerfile to install atlas cli --- Dockerfiles/centos/7/Dockerfile | 3 +++ Dockerfiles/debian/10/Dockerfile | 2 ++ Dockerfiles/debian/11/Dockerfile | 2 ++ Dockerfiles/redhat/8/Dockerfile | 3 +++ Dockerfiles/redhat/9/Dockerfile | 3 +++ Dockerfiles/ubuntu/18.04/Dockerfile | 2 ++ 
Dockerfiles/ubuntu/20.04/Dockerfile | 2 ++ Dockerfiles/ubuntu/22.04/Dockerfile | 2 ++ 8 files changed, 19 insertions(+) diff --git a/Dockerfiles/centos/7/Dockerfile b/Dockerfiles/centos/7/Dockerfile index bdedd19..a5ebf19 100644 --- a/Dockerfiles/centos/7/Dockerfile +++ b/Dockerfiles/centos/7/Dockerfile @@ -18,6 +18,9 @@ RUN go build -o alpamon ./cmd/alpamon/main.go FROM centos:centos7 +# RUN yum install -y curl +# RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/debian/10/Dockerfile b/Dockerfiles/debian/10/Dockerfile index 7cfe59f..7ef3f37 100644 --- a/Dockerfiles/debian/10/Dockerfile +++ b/Dockerfiles/debian/10/Dockerfile @@ -20,6 +20,8 @@ FROM debian:10 RUN apt-get update && apt-get install -y --no-install-recommends curl systemd ca-certificates +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/debian/11/Dockerfile b/Dockerfiles/debian/11/Dockerfile index 6e14bc6..47a0b23 100644 --- a/Dockerfiles/debian/11/Dockerfile +++ b/Dockerfiles/debian/11/Dockerfile @@ -20,6 +20,8 @@ FROM debian:11 RUN apt-get update && apt-get install -y --no-install-recommends curl systemd ca-certificates +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/redhat/8/Dockerfile b/Dockerfiles/redhat/8/Dockerfile index 4247cac..0e53505 100644 --- a/Dockerfiles/redhat/8/Dockerfile +++ b/Dockerfiles/redhat/8/Dockerfile @@ -18,6 +18,9 @@ RUN go build -o alpamon ./cmd/alpamon/main.go FROM redhat/ubi8 +RUN dnf install -y curl +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/redhat/9/Dockerfile b/Dockerfiles/redhat/9/Dockerfile index f3bd2fa..de1f7dc 100644 --- a/Dockerfiles/redhat/9/Dockerfile +++ b/Dockerfiles/redhat/9/Dockerfile @@ -18,6 +18,9 @@ RUN 
go build -o alpamon ./cmd/alpamon/main.go FROM redhat/ubi9 +RUN dnf install -y curl +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/ubuntu/18.04/Dockerfile b/Dockerfiles/ubuntu/18.04/Dockerfile index ea60054..6475899 100644 --- a/Dockerfiles/ubuntu/18.04/Dockerfile +++ b/Dockerfiles/ubuntu/18.04/Dockerfile @@ -20,6 +20,8 @@ FROM ubuntu:18.04 RUN apt-get update && apt-get install -y --no-install-recommends curl systemd ca-certificates +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/ubuntu/20.04/Dockerfile b/Dockerfiles/ubuntu/20.04/Dockerfile index cc14633..fbe4920 100644 --- a/Dockerfiles/ubuntu/20.04/Dockerfile +++ b/Dockerfiles/ubuntu/20.04/Dockerfile @@ -20,6 +20,8 @@ FROM ubuntu:20.04 RUN apt-get update && apt-get install -y --no-install-recommends curl systemd ca-certificates +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon diff --git a/Dockerfiles/ubuntu/22.04/Dockerfile b/Dockerfiles/ubuntu/22.04/Dockerfile index 80c3ea0..f9293c1 100644 --- a/Dockerfiles/ubuntu/22.04/Dockerfile +++ b/Dockerfiles/ubuntu/22.04/Dockerfile @@ -20,6 +20,8 @@ FROM ubuntu:22.04 RUN apt-get update && apt-get install -y --no-install-recommends curl systemd ca-certificates +RUN curl -sSf https://atlasgo.sh | sh + WORKDIR /usr/local/alpamon COPY --from=builder /build/alpamon ./alpamon From c1a1ccbc14f21af5fc1c0f967ee7b38b04131932 Mon Sep 17 00:00:00 2001 From: junho98 Date: Wed, 19 Mar 2025 12:26:05 +0900 Subject: [PATCH 248/364] Fix minor --- Dockerfiles/centos/7/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfiles/centos/7/Dockerfile b/Dockerfiles/centos/7/Dockerfile index a5ebf19..3d70699 100644 --- a/Dockerfiles/centos/7/Dockerfile +++ b/Dockerfiles/centos/7/Dockerfile @@ -18,8 +18,8 @@ RUN go build -o 
alpamon ./cmd/alpamon/main.go FROM centos:centos7 -# RUN yum install -y curl -# RUN curl -sSf https://atlasgo.sh | sh +RUN yum install -y curl +RUN curl -sSf https://atlasgo.sh | sh WORKDIR /usr/local/alpamon From 394a5a0f441867b5126b8143945deaa19c98119b Mon Sep 17 00:00:00 2001 From: junho98 Date: Wed, 19 Mar 2025 14:12:51 +0900 Subject: [PATCH 249/364] Fix minor --- Dockerfiles/centos/7/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfiles/centos/7/Dockerfile b/Dockerfiles/centos/7/Dockerfile index 3d70699..08e9e3c 100644 --- a/Dockerfiles/centos/7/Dockerfile +++ b/Dockerfiles/centos/7/Dockerfile @@ -18,7 +18,6 @@ RUN go build -o alpamon ./cmd/alpamon/main.go FROM centos:centos7 -RUN yum install -y curl RUN curl -sSf https://atlasgo.sh | sh WORKDIR /usr/local/alpamon From 108714a7172295cf526169f15609470e69541121 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 20 Mar 2025 14:17:23 +0900 Subject: [PATCH 250/364] Fix Atlas CLI installation Update the Atlas CLI installation procedure to integrate the binary file during Linux packaging, replacing the postinstall.sh script. 
--- .goreleaser.yaml | 7 +++++++ scripts/install_atlas.sh | 13 +++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 scripts/install_atlas.sh diff --git a/.goreleaser.yaml b/.goreleaser.yaml index be05f47..b08229b 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -4,6 +4,7 @@ before: hooks: - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema - go mod tidy + - ./scripts/install_atlas.sh builds: - main: ./cmd/alpamon @@ -25,6 +26,9 @@ checksum: archives: - id: alpamon name_template: "{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}" + extra_files: + - src: bin/atlas + dst: usr/local/bin/atlas nfpms: - package_name: alpamon @@ -55,6 +59,9 @@ nfpms: - src: "configs/{{ .ProjectName }}-restart.timer" dst: "/lib/systemd/system/{{ .ProjectName }}-restart.timer" + + - src: "bin/atlas" + dst: "/usr/local/bin/atlas" scripts: postinstall: "scripts/postinstall.sh" diff --git a/scripts/install_atlas.sh b/scripts/install_atlas.sh new file mode 100644 index 0000000..614f91a --- /dev/null +++ b/scripts/install_atlas.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +mkdir -p bin + +ARCH=$(uname -m) + +if [ "$ARCH" = "x86_64" ]; then + curl -L -o bin/atlas https://release.ariga.io/atlas/atlas-community-linux-amd64-latest +elif [ "$ARCH" = "aarch64" ]; then + curl -L -o bin/atlas https://release.ariga.io/atlas/atlas-community-linux-arm64-latest +fi + +chmod +x bin/atlas From dc413597f23091543c316610951d0c47fa674d97 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 20 Mar 2025 15:07:30 +0900 Subject: [PATCH 251/364] Upgrade goreleaser version from 0 to 2 Upgrade goreleaser version from 0 to 2 --- .goreleaser.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index b08229b..851d29c 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,9 +1,12 @@ +version: 2 + project_name: alpamon before: hooks: - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 
generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema - go mod tidy + - chmod +x ./scripts/install_atlas.sh - ./scripts/install_atlas.sh builds: @@ -26,7 +29,7 @@ checksum: archives: - id: alpamon name_template: "{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}" - extra_files: + files: - src: bin/atlas dst: usr/local/bin/atlas From 0cdd6efe5fabb8a04e700c23fc15f4de6962b05b Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 20 Mar 2025 15:08:26 +0900 Subject: [PATCH 252/364] Remove install_atlas_cli() Removed the unnecessary install_atlas_cli(). --- scripts/postinstall.sh | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 92f8f0f..900debd 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -11,7 +11,6 @@ main() { if is_upgrade "$@"; then restart_alpamon_by_timer else - install_atlas_cli setup_alpamon start_systemd_service fi @@ -40,15 +39,6 @@ check_alpamon_binary() { fi } -install_atlas_cli() { - echo "Installing Atlas CLI..." - curl -sSf https://atlasgo.sh | sh -s -- -y - if [ $? -ne 0 ]; then - echo "Error: Failed to install Atlas CLI." - exit 1 - fi -} - setup_alpamon() { "$ALPAMON_BIN" setup if [ $? -ne 0 ]; then From 9ee4dc89425101e794ce119e65ee29a9cbc63f6a Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 20 Mar 2025 18:13:56 +0900 Subject: [PATCH 253/364] Fix test cases. Fix test cases. 
--- pkg/collector/check/batch/daily/cpu/daily_cpu_test.go | 1 + pkg/collector/check/batch/daily/disk/io/daily_io_test.go | 1 + pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go | 1 + pkg/collector/check/batch/daily/memory/daily_memory_test.go | 1 + pkg/collector/check/batch/daily/net/daily_net_test.go | 1 + pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go | 1 + pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go | 1 + pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go | 1 + pkg/collector/check/batch/hourly/memory/hourly_memory_test.go | 1 + pkg/collector/check/batch/hourly/net/hourly_net_test.go | 1 + pkg/collector/check/realtime/cpu/cpu_test.go | 1 + pkg/collector/check/realtime/disk/io/io_test.go | 1 + pkg/collector/check/realtime/disk/usage/usage_test.go | 1 + pkg/collector/check/realtime/memory/memory_test.go | 1 + pkg/collector/check/realtime/net/net_test.go | 1 + 15 files changed, 15 insertions(+) diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index 29136f4..3820ee8 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -64,5 +64,6 @@ func (suite *DailyCPUUsageCheckSuite) TestDeleteHourlyCPUUsage() { } func TestDailyCPUUsageCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyCPUUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index 05d2888..e2d0c2d 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -71,5 +71,6 @@ func (suite *DailyDiskIOCheckSuite) TestDeleteHourlyDiskIO() { } func TestDailyDiskIOCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyDiskIOCheckSuite)) } diff --git 
a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index c560a14..403abfd 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -66,5 +66,6 @@ func (suite *DailyDiskUsageCheckSuite) TestDeleteHourlyDiskUsage() { } func TestDailyDiskUsageCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyDiskUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index d6478c6..76ed120 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -64,5 +64,6 @@ func (suite *DailyMemoryUsageCheckSuite) TestDeleteHourlyMemoryUsage() { } func TestDailyMemoryUsageCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyMemoryUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index fe7eb56..1849dfe 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -79,5 +79,6 @@ func (suite *DailyNetCheckSuite) TestDeleteHourlyTraffic() { } func TestDailyNetCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyNetCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index 022e07d..933d384 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -74,5 +74,6 @@ func (suite *HourlyCPUUsageCheckSuite) TestDeleteCPU() { } func TestHourlyCPUUsageCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyCPUUsageCheckSuite)) } diff --git 
a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index 472f4b5..5aea7e8 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -91,5 +91,6 @@ func (suite *HourlyDiskIOCheckSuite) TestDeleteDiskIO() { } func TestHourlyDiskIOCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyDiskIOCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index ec3e94e..b2815e8 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -91,5 +91,6 @@ func (suite *HourlyDiskUsageCheckSuite) TestDeleteDiskUsage() { } func TestHourlyDiskUsageCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyDiskUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index e3ce3f5..5a40713 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -74,5 +74,6 @@ func (suite *HourlyMemoryUsageCheckSuite) TestDeleteMemory() { } func TestHourlyMemoryCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyMemoryUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index 0e24180..d9f566d 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -103,5 +103,6 @@ func (suite *HourlyNetCheckSuite) TestDeleteTraffic() { } func TestHourlyNetCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyNetCheckSuite)) 
} diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go index c765fdc..f788bf0 100644 --- a/pkg/collector/check/realtime/cpu/cpu_test.go +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -60,5 +60,6 @@ func (suite *CPUCheckSuite) TestSaveCPUUsage() { } func TestCPUCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(CPUCheckSuite)) } diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index 906d779..9d10495 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -86,5 +86,6 @@ func (suite *DiskIOCheckSuite) TestGetDiskIO() { } func TestDiskIOCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DiskIOCheckSuite)) } diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go index f75e7bd..23b10d8 100644 --- a/pkg/collector/check/realtime/disk/usage/usage_test.go +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -69,5 +69,6 @@ func (suite *DiskUsageCheckSuite) TestSaveDiskUsage() { } func TestDiskUsageCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DiskUsageCheckSuite)) } diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go index bb877b7..d0d441a 100644 --- a/pkg/collector/check/realtime/memory/memory_test.go +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -60,5 +60,6 @@ func (suite *MemoryCheckSuite) TestSaveMemoryUsage() { } func TestMemoryCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(MemoryCheckSuite)) } diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index 10d1156..8000c34 100644 --- a/pkg/collector/check/realtime/net/net_test.go +++ b/pkg/collector/check/realtime/net/net_test.go @@ -90,5 
+90,6 @@ func (suite *NetCheckSuite) TestGetTraffic() { } func TestNetCheckSuite(t *testing.T) { + t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(NetCheckSuite)) } From b5bdfa945502e981184d2fc199b7c90dc049b684 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 21 Mar 2025 11:48:19 +0900 Subject: [PATCH 254/364] Add wspath as configurable parameter in settings --- cmd/alpamon/command/root.go | 6 +++--- pkg/config/config.go | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 1e338f2..6b63f4e 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -21,7 +21,8 @@ import ( ) const ( - name = "alpamon" + name = "alpamon" + wsPath = "/ws/servers/backhaul/" ) var RootCmd = &cobra.Command{ @@ -52,7 +53,7 @@ func runAgent() { fmt.Printf("alpamon version %s starting.\n", version.Version) // Config & Settings - settings := config.LoadConfig(config.Files(name)) + settings := config.LoadConfig(config.Files(name), wsPath) config.InitSettings(settings) // Session @@ -99,6 +100,5 @@ func runAgent() { log.Error().Err(err).Msg("Failed to restart the program") } } - log.Debug().Msg("Bye.") } diff --git a/pkg/config/config.go b/pkg/config/config.go index 1b88ddd..bb70c70 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -18,7 +18,6 @@ var ( ) const ( - wsPath = "/ws/servers/backhaul/" MinConnectInterval = 5 * time.Second MaxConnectInterval = 300 * time.Second ) @@ -27,7 +26,7 @@ func InitSettings(settings Settings) { GlobalSettings = settings } -func LoadConfig(configFiles []string) Settings { +func LoadConfig(configFiles []string, wsPath string) Settings { var iniData *ini.File var err error var validConfigFile string @@ -74,7 +73,7 @@ func LoadConfig(configFiles []string) Settings { zerolog.SetGlobalLevel(zerolog.InfoLevel) } - isValid, settings := validateConfig(config) + isValid, settings := validateConfig(config, wsPath) if !isValid { 
log.Fatal().Msg("Aborting...") @@ -83,7 +82,7 @@ func LoadConfig(configFiles []string) Settings { return settings } -func validateConfig(config Config) (bool, Settings) { +func validateConfig(config Config, wsPath string) (bool, Settings) { log.Debug().Msg("Validating configuration fields...") settings := Settings{ From fa99ce4acb1e80d13b7107920685eb59e9af3924 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 21 Mar 2025 13:07:03 +0900 Subject: [PATCH 255/364] Fix release.yaml --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8c6e668..1b66181 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,7 +27,7 @@ jobs: go-version: 1.22.x - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: latest From b86c748f9a141bff67b599918d5d0bd7c6b59f0d Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 24 Mar 2025 15:30:26 +0900 Subject: [PATCH 256/364] Export Authorization field in Session --- pkg/scheduler/session.go | 6 +++--- pkg/scheduler/types.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 93a152b..92c44e1 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -47,7 +47,7 @@ func InitSession() *Session { } session.Client = &client - session.authorization = fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key) + session.Authorization = fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key) return session } @@ -117,7 +117,7 @@ func (session *Session) do(req *http.Request, timeout time.Duration) ([]byte, in req = req.WithContext(ctx) - req.Header.Set("Authorization", session.authorization) + req.Header.Set("Authorization", session.Authorization) req.Header.Set("User-Agent", 
utils.GetUserAgent("alpamon")) if req.Method == http.MethodPost || req.Method == http.MethodPut || req.Method == http.MethodPatch { @@ -200,7 +200,7 @@ func (session *Session) MultipartRequest(url string, body bytes.Buffer, contentT req = req.WithContext(ctx) - req.Header.Set("Authorization", session.authorization) + req.Header.Set("Authorization", session.Authorization) req.Header.Set("User-Agent", utils.GetUserAgent("alpamon")) req.Header.Set("Content-Type", contentType) diff --git a/pkg/scheduler/types.go b/pkg/scheduler/types.go index e96dc52..5f1e54e 100644 --- a/pkg/scheduler/types.go +++ b/pkg/scheduler/types.go @@ -10,7 +10,7 @@ import ( type Session struct { BaseURL string Client *http.Client - authorization string + Authorization string } // queue // From 70701a412dfaf1c3139ffbded4ea8c03e4c70618 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 24 Mar 2025 17:34:57 +0900 Subject: [PATCH 257/364] Rename all package references from alpamon-go to alpamon --- .goreleaser.yaml | 4 +-- README.md | 2 +- cmd/alpamon/command/ftp/ftp.go | 4 +-- cmd/alpamon/command/root.go | 22 ++++++------ cmd/alpamon/command/setup/setup.go | 2 +- cmd/alpamon/main.go | 2 +- go.mod | 6 ++-- go.sum | 2 -- pkg/collector/check/base/base.go | 2 +- pkg/collector/check/base/types.go | 2 +- .../check/batch/daily/cleanup/cleanup.go | 24 ++++++------- pkg/collector/check/batch/daily/cpu/cpu.go | 6 ++-- .../check/batch/daily/cpu/daily_cpu_test.go | 6 ++-- .../batch/daily/disk/io/daily_io_test.go | 6 ++-- pkg/collector/check/batch/daily/disk/io/io.go | 6 ++-- .../daily/disk/usage/daily_usage_test.go | 6 ++-- .../check/batch/daily/disk/usage/usage.go | 4 +-- .../batch/daily/memory/daily_memory_test.go | 6 ++-- .../check/batch/daily/memory/memory.go | 6 ++-- .../check/batch/daily/net/daily_net_test.go | 6 ++-- pkg/collector/check/batch/daily/net/net.go | 6 ++-- pkg/collector/check/batch/hourly/cpu/cpu.go | 6 ++-- .../check/batch/hourly/cpu/hourly_cpu_test.go | 6 ++-- 
.../batch/hourly/disk/io/hourly_io_test.go | 6 ++-- .../check/batch/hourly/disk/io/io.go | 6 ++-- .../hourly/disk/usage/hourly_usage_test.go | 6 ++-- .../check/batch/hourly/disk/usage/usage.go | 6 ++-- .../batch/hourly/memory/hourly_memory_test.go | 6 ++-- .../check/batch/hourly/memory/memory.go | 6 ++-- .../check/batch/hourly/net/hourly_net_test.go | 6 ++-- pkg/collector/check/batch/hourly/net/net.go | 6 ++-- pkg/collector/check/check.go | 34 +++++++++---------- pkg/collector/check/realtime/cpu/cpu.go | 2 +- pkg/collector/check/realtime/cpu/cpu_test.go | 6 ++-- pkg/collector/check/realtime/disk/io/base.go | 2 +- .../check/realtime/disk/io/io_collect.go | 6 ++-- .../check/realtime/disk/io/io_send.go | 6 ++-- .../check/realtime/disk/io/io_test.go | 6 ++-- .../check/realtime/disk/usage/usage.go | 6 ++-- .../check/realtime/disk/usage/usage_test.go | 6 ++-- pkg/collector/check/realtime/memory/memory.go | 2 +- .../check/realtime/memory/memory_test.go | 6 ++-- pkg/collector/check/realtime/net/base.go | 2 +- .../check/realtime/net/net_collect.go | 6 ++-- pkg/collector/check/realtime/net/net_send.go | 6 ++-- pkg/collector/check/realtime/net/net_test.go | 6 ++-- pkg/collector/collector.go | 12 +++---- pkg/collector/scheduler/scheduler.go | 2 +- pkg/collector/transporter/transporter.go | 6 ++-- pkg/collector/transporter/utils.go | 2 +- pkg/db/client.go | 2 +- pkg/db/db.go | 2 +- pkg/logger/logger.go | 4 +-- pkg/runner/client.go | 6 ++-- pkg/runner/command.go | 10 +++--- pkg/runner/commit.go | 6 ++-- pkg/runner/ftp.go | 4 +-- pkg/runner/ftp_types.go | 2 +- pkg/runner/pty.go | 4 +-- pkg/runner/pty_linux.go | 2 +- pkg/runner/shell.go | 2 +- pkg/scheduler/reporter.go | 6 ++-- pkg/scheduler/session.go | 4 +-- pkg/utils/utils.go | 2 +- 64 files changed, 184 insertions(+), 186 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 851d29c..b719ffa 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -13,7 +13,7 @@ builds: - main: ./cmd/alpamon binary: alpamon 
ldflags: - - -s -w -X github.com/alpacanetworks/alpamon-go/pkg/version.Version={{.Version}} + - -s -w -X github.com/alpacanetworks/alpamon/pkg/version.Version={{.Version}} env: - CGO_ENABLED=0 goos: @@ -37,7 +37,7 @@ nfpms: - package_name: alpamon maintainer: Younghwan Kim description: Alpamon - homepage: https://github.com/alpacanetworks/alpamon-go + homepage: https://github.com/alpacanetworks/alpamon license: MIT vendor: AlpacaX formats: diff --git a/README.md b/README.md index 1296167..ecd7dc3 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ To build Alpamon, ensure you have: - Make sure `$GOPATH` is set and `$GOPATH/bin` is added to your system’s `PATH`. ## Installation -Download the latest `Alpamon-Go` directly from our releases page or install it using package managers on Linux. +Download the latest `alpamon` directly from our releases page or install it using package managers on Linux. ### Linux diff --git a/cmd/alpamon/command/ftp/ftp.go b/cmd/alpamon/command/ftp/ftp.go index 0898528..4ee37fe 100644 --- a/cmd/alpamon/command/ftp/ftp.go +++ b/cmd/alpamon/command/ftp/ftp.go @@ -1,8 +1,8 @@ package ftp import ( - "github.com/alpacanetworks/alpamon-go/pkg/logger" - "github.com/alpacanetworks/alpamon-go/pkg/runner" + "github.com/alpacanetworks/alpamon/pkg/logger" + "github.com/alpacanetworks/alpamon/pkg/runner" "github.com/spf13/cobra" ) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 6b63f4e..9e11831 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -2,20 +2,20 @@ package command import ( "fmt" - "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/ftp" - "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command/setup" + "github.com/alpacanetworks/alpamon/cmd/alpamon/command/ftp" + "github.com/alpacanetworks/alpamon/cmd/alpamon/command/setup" "os" "syscall" - "github.com/alpacanetworks/alpamon-go/pkg/collector" - "github.com/alpacanetworks/alpamon-go/pkg/config" - 
"github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/logger" - "github.com/alpacanetworks/alpamon-go/pkg/pidfile" - "github.com/alpacanetworks/alpamon-go/pkg/runner" - "github.com/alpacanetworks/alpamon-go/pkg/scheduler" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/alpacanetworks/alpamon/pkg/collector" + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/logger" + "github.com/alpacanetworks/alpamon/pkg/pidfile" + "github.com/alpacanetworks/alpamon/pkg/runner" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/version" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 3455b41..882bfdb 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -3,7 +3,7 @@ package setup import ( "fmt" cli "github.com/alpacanetworks/alpacon-cli/utils" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/spf13/cobra" "golang.org/x/term" "os" diff --git a/cmd/alpamon/main.go b/cmd/alpamon/main.go index 320dad2..80b0af7 100644 --- a/cmd/alpamon/main.go +++ b/cmd/alpamon/main.go @@ -3,7 +3,7 @@ package main import ( "os" - "github.com/alpacanetworks/alpamon-go/cmd/alpamon/command" + "github.com/alpacanetworks/alpamon/cmd/alpamon/command" ) func main() { diff --git a/go.mod b/go.mod index 9e3add4..e51342b 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/alpacanetworks/alpamon-go +module github.com/alpacanetworks/alpamon go 1.22.5 @@ -10,6 +10,7 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible github.com/creack/pty v1.1.23 github.com/glebarez/go-sqlite v1.22.0 + github.com/google/go-cmp v0.7.0 
github.com/google/go-github v17.0.0+incompatible github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 @@ -18,6 +19,7 @@ require ( github.com/shirou/gopsutil/v4 v4.24.8 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 + golang.org/x/term v0.14.0 gopkg.in/go-playground/validator.v9 v9.31.0 gopkg.in/ini.v1 v1.67.0 ) @@ -32,7 +34,6 @@ require ( github.com/go-openapi/inflect v0.19.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/hashicorp/hcl/v2 v2.18.1 // indirect @@ -58,7 +59,6 @@ require ( github.com/zclconf/go-cty v1.14.1 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index 7cd45eb..b45f7c0 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= diff --git a/pkg/collector/check/base/base.go 
b/pkg/collector/check/base/base.go index 99bb6cb..b8ba04e 100644 --- a/pkg/collector/check/base/base.go +++ b/pkg/collector/check/base/base.go @@ -4,7 +4,7 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent" ) type CheckStrategy interface { diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 2a51d46..5f9b75b 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -3,7 +3,7 @@ package base import ( "time" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent" ) const ( diff --git a/pkg/collector/check/batch/daily/cleanup/cleanup.go b/pkg/collector/check/batch/daily/cleanup/cleanup.go index c382fcc..d66ff94 100644 --- a/pkg/collector/check/batch/daily/cleanup/cleanup.go +++ b/pkg/collector/check/batch/daily/cleanup/cleanup.go @@ -4,18 +4,18 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlycpuusage" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskio" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskusage" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlymemoryusage" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlytraffic" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/cpu" + "github.com/alpacanetworks/alpamon/pkg/db/ent/diskio" + 
"github.com/alpacanetworks/alpamon/pkg/db/ent/diskusage" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlycpuusage" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlydiskio" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlydiskusage" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlymemoryusage" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlytraffic" + "github.com/alpacanetworks/alpamon/pkg/db/ent/memory" + "github.com/alpacanetworks/alpamon/pkg/db/ent/traffic" ) var ( diff --git a/pkg/collector/check/batch/daily/cpu/cpu.go b/pkg/collector/check/batch/daily/cpu/cpu.go index bbe0960..6c88717 100644 --- a/pkg/collector/check/batch/daily/cpu/cpu.go +++ b/pkg/collector/check/batch/daily/cpu/cpu.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlycpuusage" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlycpuusage" ) type Check struct { diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index 3820ee8..bf0d34d 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go 
b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index e2d0c2d..7d4fdf4 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/daily/disk/io/io.go b/pkg/collector/check/batch/daily/disk/io/io.go index b8ff337..dbcb197 100644 --- a/pkg/collector/check/batch/daily/disk/io/io.go +++ b/pkg/collector/check/batch/daily/disk/io/io.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskio" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlydiskio" ) type Check struct { diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index 403abfd..d8d68f8 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + 
"github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/daily/disk/usage/usage.go b/pkg/collector/check/batch/daily/disk/usage/usage.go index e403d96..99db36d 100644 --- a/pkg/collector/check/batch/daily/disk/usage/usage.go +++ b/pkg/collector/check/batch/daily/disk/usage/usage.go @@ -5,8 +5,8 @@ import ( "time" "entgo.io/ent/dialect/sql" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlydiskusage" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlydiskusage" ) type Check struct { diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index 76ed120..afe1b34 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/daily/memory/memory.go b/pkg/collector/check/batch/daily/memory/memory.go index 088bfb1..d9ede7f 100644 --- a/pkg/collector/check/batch/daily/memory/memory.go +++ b/pkg/collector/check/batch/daily/memory/memory.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlymemoryusage" + 
"github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlymemoryusage" ) type Check struct { diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index 1849dfe..b29f792 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/daily/net/net.go b/pkg/collector/check/batch/daily/net/net.go index 3e7b5b2..5fd714b 100644 --- a/pkg/collector/check/batch/daily/net/net.go +++ b/pkg/collector/check/batch/daily/net/net.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/hourlytraffic" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/hourlytraffic" ) type Check struct { diff --git a/pkg/collector/check/batch/hourly/cpu/cpu.go b/pkg/collector/check/batch/hourly/cpu/cpu.go index cbc050a..23dfb19 100644 --- a/pkg/collector/check/batch/hourly/cpu/cpu.go +++ b/pkg/collector/check/batch/hourly/cpu/cpu.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - 
"github.com/alpacanetworks/alpamon-go/pkg/db/ent/cpu" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/cpu" ) type Check struct { diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index 933d384..841de28 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index 5aea7e8..bf1bd46 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/hourly/disk/io/io.go b/pkg/collector/check/batch/hourly/disk/io/io.go index e2e03c5..a115107 100644 --- a/pkg/collector/check/batch/hourly/disk/io/io.go +++ b/pkg/collector/check/batch/hourly/disk/io/io.go @@ -4,9 +4,9 
@@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/diskio" ) type Check struct { diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index b2815e8..dc116c7 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/hourly/disk/usage/usage.go b/pkg/collector/check/batch/hourly/disk/usage/usage.go index e34862d..e978ade 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/usage.go +++ b/pkg/collector/check/batch/hourly/disk/usage/usage.go @@ -5,9 +5,9 @@ import ( "time" "entgo.io/ent/dialect/sql" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskusage" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/diskusage" ) type Check struct { diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go 
b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index 5a40713..e896941 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/hourly/memory/memory.go b/pkg/collector/check/batch/hourly/memory/memory.go index 12b88f6..ec890ff 100644 --- a/pkg/collector/check/batch/hourly/memory/memory.go +++ b/pkg/collector/check/batch/hourly/memory/memory.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/memory" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/memory" ) type Check struct { diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index d9f566d..27dafbd 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + 
"github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/batch/hourly/net/net.go b/pkg/collector/check/batch/hourly/net/net.go index 10c7a24..a56e789 100644 --- a/pkg/collector/check/batch/hourly/net/net.go +++ b/pkg/collector/check/batch/hourly/net/net.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/traffic" ) type Check struct { diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 2fee31f..24611c0 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -4,23 +4,23 @@ import ( "context" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - cleanup "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cleanup" - dailycpu "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/cpu" - dailydiskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/io" - dailydiskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/disk/usage" - dailymemory "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/memory" - dailynet "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/daily/net" - hourlycpu "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/cpu" - hourlydiskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/io" - hourlydiskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/disk/usage" - hourlymemory "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/memory" - 
hourlynet "github.com/alpacanetworks/alpamon-go/pkg/collector/check/batch/hourly/net" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/cpu" - diskio "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/io" - diskusage "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/disk/usage" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/memory" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/realtime/net" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + cleanup "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/daily/cleanup" + dailycpu "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/daily/cpu" + dailydiskio "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/daily/disk/io" + dailydiskusage "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/daily/disk/usage" + dailymemory "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/daily/memory" + dailynet "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/daily/net" + hourlycpu "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/cpu" + hourlydiskio "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/disk/io" + hourlydiskusage "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/disk/usage" + hourlymemory "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/memory" + hourlynet "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/net" + "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/cpu" + diskio "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/disk/io" + diskusage "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/disk/usage" + "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/memory" + "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/net" ) var checkFactories = map[base.CheckType]newCheck{ diff --git 
a/pkg/collector/check/realtime/cpu/cpu.go b/pkg/collector/check/realtime/cpu/cpu.go index e3238c7..f57efec 100644 --- a/pkg/collector/check/realtime/cpu/cpu.go +++ b/pkg/collector/check/realtime/cpu/cpu.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" "github.com/shirou/gopsutil/v4/cpu" ) diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go index f788bf0..75a17a0 100644 --- a/pkg/collector/check/realtime/cpu/cpu_test.go +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/realtime/disk/io/base.go b/pkg/collector/check/realtime/disk/io/base.go index 0787923..2c5f4b8 100644 --- a/pkg/collector/check/realtime/disk/io/base.go +++ b/pkg/collector/check/realtime/disk/io/base.go @@ -1,7 +1,7 @@ package diskio import ( - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" "github.com/shirou/gopsutil/v4/disk" ) diff --git a/pkg/collector/check/realtime/disk/io/io_collect.go b/pkg/collector/check/realtime/disk/io/io_collect.go index 415e156..0171c05 100644 --- a/pkg/collector/check/realtime/disk/io/io_collect.go +++ b/pkg/collector/check/realtime/disk/io/io_collect.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - 
"github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/shirou/gopsutil/v4/disk" ) diff --git a/pkg/collector/check/realtime/disk/io/io_send.go b/pkg/collector/check/realtime/disk/io/io_send.go index dadb885..c744225 100644 --- a/pkg/collector/check/realtime/disk/io/io_send.go +++ b/pkg/collector/check/realtime/disk/io/io_send.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/diskio" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/diskio" ) type SendCheck struct { diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index 9d10495..b454b97 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/realtime/disk/usage/usage.go b/pkg/collector/check/realtime/disk/usage/usage.go index cf486ea..d8289e9 100644 --- a/pkg/collector/check/realtime/disk/usage/usage.go +++ b/pkg/collector/check/realtime/disk/usage/usage.go @@ -5,9 +5,9 @@ import ( "strings" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - 
"github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/shirou/gopsutil/v4/disk" ) diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go index 23b10d8..ff9ea08 100644 --- a/pkg/collector/check/realtime/disk/usage/usage_test.go +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/realtime/memory/memory.go b/pkg/collector/check/realtime/memory/memory.go index 640c596..47a36fa 100644 --- a/pkg/collector/check/realtime/memory/memory.go +++ b/pkg/collector/check/realtime/memory/memory.go @@ -4,7 +4,7 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" "github.com/shirou/gopsutil/v4/mem" ) diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go index d0d441a..474a072 100644 --- a/pkg/collector/check/realtime/memory/memory_test.go +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + 
"github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/check/realtime/net/base.go b/pkg/collector/check/realtime/net/base.go index cc5d35f..cf07720 100644 --- a/pkg/collector/check/realtime/net/base.go +++ b/pkg/collector/check/realtime/net/base.go @@ -1,7 +1,7 @@ package net import ( - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" "github.com/shirou/gopsutil/v4/net" ) diff --git a/pkg/collector/check/realtime/net/net_collect.go b/pkg/collector/check/realtime/net/net_collect.go index d93bdd9..c9507d6 100644 --- a/pkg/collector/check/realtime/net/net_collect.go +++ b/pkg/collector/check/realtime/net/net_collect.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/shirou/gopsutil/v4/net" ) diff --git a/pkg/collector/check/realtime/net/net_send.go b/pkg/collector/check/realtime/net/net_send.go index 1e09999..5f867a6 100644 --- a/pkg/collector/check/realtime/net/net_send.go +++ b/pkg/collector/check/realtime/net/net_send.go @@ -4,9 +4,9 @@ import ( "context" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent/traffic" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent/traffic" ) type SendCheck struct { diff --git 
a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index 8000c34..9086050 100644 --- a/pkg/collector/check/realtime/net/net_test.go +++ b/pkg/collector/check/realtime/net/net_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/db" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/db" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index b0ad4bf..45b4486 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -9,12 +9,12 @@ import ( "sync" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/collector/scheduler" - "github.com/alpacanetworks/alpamon-go/pkg/collector/transporter" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" - session "github.com/alpacanetworks/alpamon-go/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/collector/check" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/collector/scheduler" + "github.com/alpacanetworks/alpamon/pkg/collector/transporter" + "github.com/alpacanetworks/alpamon/pkg/db/ent" + session "github.com/alpacanetworks/alpamon/pkg/scheduler" "github.com/google/uuid" "github.com/rs/zerolog/log" ) diff --git a/pkg/collector/scheduler/scheduler.go b/pkg/collector/scheduler/scheduler.go index 0c8e9af..201e60d 100644 --- a/pkg/collector/scheduler/scheduler.go +++ b/pkg/collector/scheduler/scheduler.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + 
"github.com/alpacanetworks/alpamon/pkg/collector/check/base" "github.com/rs/zerolog/log" ) diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index ee6d38f..7de2c5f 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -4,9 +4,9 @@ import ( "fmt" "net/http" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" - "github.com/alpacanetworks/alpamon-go/pkg/scheduler" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/utils" ) type TransportStrategy interface { diff --git a/pkg/collector/transporter/utils.go b/pkg/collector/transporter/utils.go index 14c2df0..4c9bdc0 100644 --- a/pkg/collector/transporter/utils.go +++ b/pkg/collector/transporter/utils.go @@ -3,7 +3,7 @@ package transporter import ( "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" ) const ( diff --git a/pkg/db/client.go b/pkg/db/client.go index 034351c..5b57686 100644 --- a/pkg/db/client.go +++ b/pkg/db/client.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent" _ "github.com/glebarez/go-sqlite" ) diff --git a/pkg/db/db.go b/pkg/db/db.go index 59506f5..432bcc0 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/alpacanetworks/alpamon-go/pkg/db/ent" + "github.com/alpacanetworks/alpamon/pkg/db/ent" "github.com/glebarez/go-sqlite" "github.com/rs/zerolog/log" ) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 3d285f5..b33984a 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -9,8 +9,8 @@ import ( "strings" "time" - "github.com/alpacanetworks/alpamon-go/pkg/scheduler" - 
"github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/version" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 7add2a3..ffd6129 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -4,9 +4,9 @@ import ( "context" "encoding/json" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/scheduler" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/cenkalti/backoff" "github.com/gorilla/websocket" "github.com/rs/zerolog/log" diff --git a/pkg/runner/command.go b/pkg/runner/command.go index f67f363..66300dd 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -19,10 +19,10 @@ import ( "syscall" "time" - "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/scheduler" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/version" "github.com/google/uuid" "github.com/rs/zerolog/log" "gopkg.in/go-playground/validator.v9" @@ -61,7 +61,7 @@ func (cr *CommandRunner) Run() { exitCode, result = cr.handleShellCmd(cr.command.Line, cr.command.User, cr.command.Group, cr.command.Env) case "osquery": // TODO DEPRECATED: This case will be removed in a future release. exitCode = 1 - result = "alpamon-go does not use osquery. Please update alpacon-server." + result = "alpamon does not use osquery. Please update alpacon-server." 
log.Warn().Msg(result) default: exitCode = 1 diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 4d7f898..13f2c79 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -16,9 +16,9 @@ import ( "sync" "time" - "github.com/alpacanetworks/alpamon-go/pkg/scheduler" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/version" _ "github.com/glebarez/go-sqlite" "github.com/google/go-cmp/cmp" rpmdb "github.com/knqyf263/go-rpmdb/pkg" diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 9fb29ef..22b24f7 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/logger" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/logger" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/gorilla/websocket" "net/http" "os" diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 52c816b..27e1812 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -4,7 +4,7 @@ import ( "strings" "time" - "github.com/alpacanetworks/alpamon-go/pkg/logger" + "github.com/alpacanetworks/alpamon/pkg/logger" ) type FtpCommand string diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 06c9681..5317183 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -4,8 +4,8 @@ import ( "context" "errors" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/creack/pty" "github.com/gorilla/websocket" "github.com/rs/zerolog/log" diff --git a/pkg/runner/pty_linux.go b/pkg/runner/pty_linux.go index c30cfe9..16b5aaf 100644 --- a/pkg/runner/pty_linux.go +++ 
b/pkg/runner/pty_linux.go @@ -4,7 +4,7 @@ import ( "fmt" "syscall" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/utils" ) func (pc *PtyClient) setPtyCmdSysProcAttrAndEnv(uid, gid int, groupIds []string, env map[string]string) { diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index 54f9c40..015841d 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -3,7 +3,7 @@ package runner import ( "context" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/rs/zerolog/log" "os" "os/exec" diff --git a/pkg/scheduler/reporter.go b/pkg/scheduler/reporter.go index e9db9ff..8f2d6a1 100644 --- a/pkg/scheduler/reporter.go +++ b/pkg/scheduler/reporter.go @@ -3,9 +3,9 @@ package scheduler import ( "encoding/json" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/utils" - "github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/version" "github.com/rs/zerolog/log" "math" "net/http" diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 92c44e1..5ab4995 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -13,8 +13,8 @@ import ( "strings" "time" - "github.com/alpacanetworks/alpamon-go/pkg/config" - "github.com/alpacanetworks/alpamon-go/pkg/utils" + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/rs/zerolog/log" ) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index bc46985..32aa46c 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "github.com/alpacanetworks/alpamon-go/pkg/version" + "github.com/alpacanetworks/alpamon/pkg/version" "github.com/google/go-github/github" "github.com/rs/zerolog/log" 
"github.com/shirou/gopsutil/v4/host" From 7615287a149d7b9640bda4d6bd877550b64f9647 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 25 Mar 2025 10:54:35 +0900 Subject: [PATCH 258/364] Export client function --- pkg/runner/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 7add2a3..bb935ca 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -74,7 +74,7 @@ func (wc *WebsocketClient) RunForever() { func (wc *WebsocketClient) SendPingQuery() error { pingQuery := map[string]string{"query": "ping"} - err := wc.writeJSON(pingQuery) + err := wc.WriteJSON(pingQuery) if err != nil { return err } @@ -199,7 +199,7 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { } } -func (wc *WebsocketClient) writeJSON(data interface{}) error { +func (wc *WebsocketClient) WriteJSON(data interface{}) error { err := wc.Conn.WriteJSON(data) if err != nil { log.Debug().Err(err).Msgf("Failed to write json data to websocket.") From 741cb10d8ddaa66d46fbb60633aa86268ace7270 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 26 Mar 2025 15:43:43 +0900 Subject: [PATCH 259/364] Add alert check Add alert check to periodically send requests to alpacon-server to verify if any collected metrics exceed the threshold. 
--- pkg/collector/check/base/types.go | 8 +++++ pkg/collector/check/check.go | 2 ++ pkg/collector/check/realtime/alert/alert.go | 35 +++++++++++++++++++++ 3 files changed, 45 insertions(+) create mode 100644 pkg/collector/check/realtime/alert/alert.go diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 5f9b75b..3945a79 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -25,6 +25,7 @@ const ( HOURLY_NET CheckType = "hourly-net" DAILY_NET CheckType = "daily-net" CLEANUP CheckType = "cleanup" + ALERT CheckType = "alert" ) type CheckType string @@ -37,6 +38,13 @@ type CheckArgs struct { Client *ent.Client } +type AlertData struct { + Timestamp time.Time `json:"timestamp"` + Reporter string `json:"reporter"` + Record string `json:"record"` + Description string `json:"description"` +} + type CPUQuerySet struct { Max float64 AVG float64 diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 24611c0..5297991 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -16,6 +16,7 @@ import ( hourlydiskusage "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/disk/usage" hourlymemory "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/memory" hourlynet "github.com/alpacanetworks/alpamon/pkg/collector/check/batch/hourly/net" + "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/alert" "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/cpu" diskio "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/disk/io" diskusage "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/disk/usage" @@ -42,6 +43,7 @@ var checkFactories = map[base.CheckType]newCheck{ base.HOURLY_NET: hourlynet.NewCheck, base.DAILY_NET: dailynet.NewCheck, base.CLEANUP: cleanup.NewCheck, + base.ALERT: alert.NewCheck, } type Check interface { diff --git a/pkg/collector/check/realtime/alert/alert.go 
b/pkg/collector/check/realtime/alert/alert.go new file mode 100644 index 0000000..ecf538a --- /dev/null +++ b/pkg/collector/check/realtime/alert/alert.go @@ -0,0 +1,35 @@ +package alert + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/scheduler" +) + +const ( + alertURL = "/api/metrics/alert-rules/check/" +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(args *base.CheckArgs) base.CheckStrategy { + return &Check{ + BaseCheck: base.NewBaseCheck(args), + } +} + +func (c *Check) Execute(ctx context.Context) error { + data := base.AlertData{ + Timestamp: time.Now().Add(-1 * c.GetInterval()), + Reporter: "alpamon", + Record: "alert", + Description: "Alert: detected anomaly", + } + scheduler.Rqueue.Post(alertURL, data, 80, time.Time{}) + + return nil +} From 55ebfba194a9afab56dd6c23c047cbbe4b16e8dd Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 27 Mar 2025 17:51:10 +0900 Subject: [PATCH 260/364] Add status check Add status check to periodically send requests to alpacon-server to refresh server's status. 
--- pkg/collector/check/base/types.go | 1 + pkg/collector/check/check.go | 2 ++ pkg/collector/check/realtime/status/status.go | 29 +++++++++++++++++++ 3 files changed, 32 insertions(+) create mode 100644 pkg/collector/check/realtime/status/status.go diff --git a/pkg/collector/check/base/types.go b/pkg/collector/check/base/types.go index 3945a79..1b0a9c3 100644 --- a/pkg/collector/check/base/types.go +++ b/pkg/collector/check/base/types.go @@ -26,6 +26,7 @@ const ( DAILY_NET CheckType = "daily-net" CLEANUP CheckType = "cleanup" ALERT CheckType = "alert" + STATUS CheckType = "status" ) type CheckType string diff --git a/pkg/collector/check/check.go b/pkg/collector/check/check.go index 5297991..1f315d1 100644 --- a/pkg/collector/check/check.go +++ b/pkg/collector/check/check.go @@ -22,6 +22,7 @@ import ( diskusage "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/disk/usage" "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/memory" "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/net" + "github.com/alpacanetworks/alpamon/pkg/collector/check/realtime/status" ) var checkFactories = map[base.CheckType]newCheck{ @@ -44,6 +45,7 @@ var checkFactories = map[base.CheckType]newCheck{ base.DAILY_NET: dailynet.NewCheck, base.CLEANUP: cleanup.NewCheck, base.ALERT: alert.NewCheck, + base.STATUS: status.NewCheck, } type Check interface { diff --git a/pkg/collector/check/realtime/status/status.go b/pkg/collector/check/realtime/status/status.go new file mode 100644 index 0000000..b2e1f93 --- /dev/null +++ b/pkg/collector/check/realtime/status/status.go @@ -0,0 +1,29 @@ +package status + +import ( + "context" + "time" + + "github.com/alpacanetworks/alpamon/pkg/collector/check/base" + "github.com/alpacanetworks/alpamon/pkg/scheduler" +) + +const ( + statusURL = "/api/servers/servers/-/status/" +) + +type Check struct { + base.BaseCheck +} + +func NewCheck(args *base.CheckArgs) base.CheckStrategy { + return &Check{ + BaseCheck: 
base.NewBaseCheck(args), + } +} + +func (c *Check) Execute(ctx context.Context) error { + scheduler.Rqueue.Post(statusURL, nil, 80, time.Time{}) + + return nil +} From 2f6c867114b62ba0a85e9577acd5c16d39fd72b4 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 28 Mar 2025 12:15:12 +0900 Subject: [PATCH 261/364] Minor fix Fix http method. --- pkg/collector/check/realtime/status/status.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/collector/check/realtime/status/status.go b/pkg/collector/check/realtime/status/status.go index b2e1f93..3062661 100644 --- a/pkg/collector/check/realtime/status/status.go +++ b/pkg/collector/check/realtime/status/status.go @@ -23,7 +23,7 @@ func NewCheck(args *base.CheckArgs) base.CheckStrategy { } func (c *Check) Execute(ctx context.Context) error { - scheduler.Rqueue.Post(statusURL, nil, 80, time.Time{}) + scheduler.Rqueue.Patch(statusURL, nil, 80, time.Time{}) return nil } From 8a939221cc246e53e8fa1b645833fb6d918a645c Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 28 Mar 2025 15:04:26 +0900 Subject: [PATCH 262/364] Support rocky platform --- pkg/utils/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 32aa46c..3f0b1b9 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -39,7 +39,7 @@ func getPlatformLike() { switch platformInfo.Platform { case "ubuntu", "debian": PlatformLike = "debian" - case "centos", "rhel", "redhat", "amazon", "fedora": + case "centos", "rhel", "redhat", "amazon", "fedora", "rocky": PlatformLike = "rhel" default: log.Fatal().Msgf("Platform %s not supported", platformInfo.Platform) From 3d9fe5fbe32c56957049aef25bc7eb76e647f20a Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 28 Mar 2025 19:03:12 +0900 Subject: [PATCH 263/364] Add more OS variants to platform detection --- pkg/utils/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 
3f0b1b9..4ce56fa 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -39,7 +39,7 @@ func getPlatformLike() { switch platformInfo.Platform { case "ubuntu", "debian": PlatformLike = "debian" - case "centos", "rhel", "redhat", "amazon", "fedora", "rocky": + case "centos", "rhel", "redhat", "amazon", "amzn", "fedora", "rocky", "oracle", "ol": PlatformLike = "rhel" default: log.Fatal().Msgf("Platform %s not supported", platformInfo.Platform) From 806688ef6701e1be36c38d048b8e2ab0f1acb689 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 31 Mar 2025 15:32:25 +0900 Subject: [PATCH 264/364] Fix test cases Resolve SQLite lock issues by generating separate DB files for each test case. --- pkg/collector/check/batch/daily/cpu/daily_cpu_test.go | 7 ++++--- pkg/collector/check/batch/daily/disk/io/daily_io_test.go | 7 ++++--- .../check/batch/daily/disk/usage/daily_usage_test.go | 7 ++++--- .../check/batch/daily/memory/daily_memory_test.go | 7 ++++--- pkg/collector/check/batch/daily/net/daily_net_test.go | 7 ++++--- pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go | 7 ++++--- pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go | 7 ++++--- .../check/batch/hourly/disk/usage/hourly_usage_test.go | 7 ++++--- .../check/batch/hourly/memory/hourly_memory_test.go | 7 ++++--- pkg/collector/check/batch/hourly/net/hourly_net_test.go | 7 ++++--- pkg/collector/check/realtime/cpu/cpu_test.go | 7 ++++--- pkg/collector/check/realtime/disk/io/io_test.go | 7 ++++--- pkg/collector/check/realtime/disk/usage/usage_test.go | 7 ++++--- pkg/collector/check/realtime/memory/memory_test.go | 7 ++++--- pkg/collector/check/realtime/net/net_test.go | 7 ++++--- pkg/db/db.go | 4 ++-- 16 files changed, 62 insertions(+), 47 deletions(-) diff --git a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go index bf0d34d..136e795 100644 --- a/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go +++ 
b/pkg/collector/check/batch/daily/cpu/daily_cpu_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "daily_cpu.db" + type DailyCPUUsageCheckSuite struct { suite.Suite client *ent.Client @@ -22,7 +24,7 @@ type DailyCPUUsageCheckSuite struct { } func (suite *DailyCPUUsageCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_CPU_USAGE, @@ -36,7 +38,7 @@ func (suite *DailyCPUUsageCheckSuite) SetupSuite() { } func (suite *DailyCPUUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -64,6 +66,5 @@ func (suite *DailyCPUUsageCheckSuite) TestDeleteHourlyCPUUsage() { } func TestDailyCPUUsageCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyCPUUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go index 7d4fdf4..57e1e26 100644 --- a/pkg/collector/check/batch/daily/disk/io/daily_io_test.go +++ b/pkg/collector/check/batch/daily/disk/io/daily_io_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "daily_io.db" + type DailyDiskIOCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type DailyDiskIOCheckSuite struct { } func (suite *DailyDiskIOCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_DISK_IO, @@ -37,7 +39,7 @@ func (suite *DailyDiskIOCheckSuite) SetupSuite() { } func (suite *DailyDiskIOCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -71,6 +73,5 @@ func (suite *DailyDiskIOCheckSuite) 
TestDeleteHourlyDiskIO() { } func TestDailyDiskIOCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyDiskIOCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go index d8d68f8..5b61e7f 100644 --- a/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go +++ b/pkg/collector/check/batch/daily/disk/usage/daily_usage_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "daily_disk_usage.db" + type DailyDiskUsageCheckSuite struct { suite.Suite client *ent.Client @@ -22,7 +24,7 @@ type DailyDiskUsageCheckSuite struct { } func (suite *DailyDiskUsageCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_DISK_USAGE, @@ -36,7 +38,7 @@ func (suite *DailyDiskUsageCheckSuite) SetupSuite() { } func (suite *DailyDiskUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -66,6 +68,5 @@ func (suite *DailyDiskUsageCheckSuite) TestDeleteHourlyDiskUsage() { } func TestDailyDiskUsageCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyDiskUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/memory/daily_memory_test.go b/pkg/collector/check/batch/daily/memory/daily_memory_test.go index afe1b34..b6f8ccb 100644 --- a/pkg/collector/check/batch/daily/memory/daily_memory_test.go +++ b/pkg/collector/check/batch/daily/memory/daily_memory_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "daily_memory.db" + type DailyMemoryUsageCheckSuite struct { suite.Suite client *ent.Client @@ -22,7 +24,7 @@ type DailyMemoryUsageCheckSuite struct { } func (suite *DailyMemoryUsageCheckSuite) SetupSuite() { - suite.client = 
db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_MEM_USAGE, @@ -36,7 +38,7 @@ func (suite *DailyMemoryUsageCheckSuite) SetupSuite() { } func (suite *DailyMemoryUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -64,6 +66,5 @@ func (suite *DailyMemoryUsageCheckSuite) TestDeleteHourlyMemoryUsage() { } func TestDailyMemoryUsageCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyMemoryUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/daily/net/daily_net_test.go b/pkg/collector/check/batch/daily/net/daily_net_test.go index b29f792..d9b7a7e 100644 --- a/pkg/collector/check/batch/daily/net/daily_net_test.go +++ b/pkg/collector/check/batch/daily/net/daily_net_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "daily_net.db" + type DailyNetCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type DailyNetCheckSuite struct { } func (suite *DailyNetCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DAILY_NET, @@ -37,7 +39,7 @@ func (suite *DailyNetCheckSuite) SetupSuite() { } func (suite *DailyNetCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -79,6 +81,5 @@ func (suite *DailyNetCheckSuite) TestDeleteHourlyTraffic() { } func TestDailyNetCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DailyNetCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go index 841de28..0248e1a 100644 --- a/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go +++ 
b/pkg/collector/check/batch/hourly/cpu/hourly_cpu_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "hourly_cpu.db" + type HourlyCPUUsageCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type HourlyCPUUsageCheckSuite struct { } func (suite *HourlyCPUUsageCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_CPU_USAGE, @@ -37,7 +39,7 @@ func (suite *HourlyCPUUsageCheckSuite) SetupSuite() { } func (suite *HourlyCPUUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -74,6 +76,5 @@ func (suite *HourlyCPUUsageCheckSuite) TestDeleteCPU() { } func TestHourlyCPUUsageCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyCPUUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go index bf1bd46..5cc2c13 100644 --- a/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go +++ b/pkg/collector/check/batch/hourly/disk/io/hourly_io_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "hourly_io.db" + type HourlyDiskIOCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type HourlyDiskIOCheckSuite struct { } func (suite *HourlyDiskIOCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_DISK_IO, @@ -37,7 +39,7 @@ func (suite *HourlyDiskIOCheckSuite) SetupSuite() { } func (suite *HourlyDiskIOCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -91,6 +93,5 @@ func (suite 
*HourlyDiskIOCheckSuite) TestDeleteDiskIO() { } func TestHourlyDiskIOCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyDiskIOCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go index dc116c7..79a7007 100644 --- a/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go +++ b/pkg/collector/check/batch/hourly/disk/usage/hourly_usage_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "hourly_disk_usage.db" + type HourlyDiskUsageCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type HourlyDiskUsageCheckSuite struct { } func (suite *HourlyDiskUsageCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_DISK_USAGE, @@ -37,7 +39,7 @@ func (suite *HourlyDiskUsageCheckSuite) SetupSuite() { } func (suite *HourlyDiskUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -91,6 +93,5 @@ func (suite *HourlyDiskUsageCheckSuite) TestDeleteDiskUsage() { } func TestHourlyDiskUsageCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyDiskUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go index e896941..f7edbaf 100644 --- a/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go +++ b/pkg/collector/check/batch/hourly/memory/hourly_memory_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "hourly_memory.db" + type HourlyMemoryUsageCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type HourlyMemoryUsageCheckSuite struct { } func (suite 
*HourlyMemoryUsageCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_MEM_USAGE, @@ -37,7 +39,7 @@ func (suite *HourlyMemoryUsageCheckSuite) SetupSuite() { } func (suite *HourlyMemoryUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -74,6 +76,5 @@ func (suite *HourlyMemoryUsageCheckSuite) TestDeleteMemory() { } func TestHourlyMemoryCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyMemoryUsageCheckSuite)) } diff --git a/pkg/collector/check/batch/hourly/net/hourly_net_test.go b/pkg/collector/check/batch/hourly/net/hourly_net_test.go index 27dafbd..84ce0c2 100644 --- a/pkg/collector/check/batch/hourly/net/hourly_net_test.go +++ b/pkg/collector/check/batch/hourly/net/hourly_net_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "hourly_net.db" + type HourlyNetCheckSuite struct { suite.Suite client *ent.Client @@ -23,7 +25,7 @@ type HourlyNetCheckSuite struct { } func (suite *HourlyNetCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.HOURLY_NET, @@ -37,7 +39,7 @@ func (suite *HourlyNetCheckSuite) SetupSuite() { } func (suite *HourlyNetCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -103,6 +105,5 @@ func (suite *HourlyNetCheckSuite) TestDeleteTraffic() { } func TestHourlyNetCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(HourlyNetCheckSuite)) } diff --git a/pkg/collector/check/realtime/cpu/cpu_test.go b/pkg/collector/check/realtime/cpu/cpu_test.go index 75a17a0..8509bb4 100644 --- 
a/pkg/collector/check/realtime/cpu/cpu_test.go +++ b/pkg/collector/check/realtime/cpu/cpu_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "cpu.db" + type CPUCheckSuite struct { suite.Suite client *ent.Client @@ -22,7 +24,7 @@ type CPUCheckSuite struct { } func (suite *CPUCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.CPU, @@ -36,7 +38,7 @@ func (suite *CPUCheckSuite) SetupSuite() { } func (suite *CPUCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -60,6 +62,5 @@ func (suite *CPUCheckSuite) TestSaveCPUUsage() { } func TestCPUCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(CPUCheckSuite)) } diff --git a/pkg/collector/check/realtime/disk/io/io_test.go b/pkg/collector/check/realtime/disk/io/io_test.go index b454b97..2372345 100644 --- a/pkg/collector/check/realtime/disk/io/io_test.go +++ b/pkg/collector/check/realtime/disk/io/io_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "io.db" + type DiskIOCheckSuite struct { suite.Suite client *ent.Client @@ -24,7 +26,7 @@ type DiskIOCheckSuite struct { } func (suite *DiskIOCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) collect_args := &base.CheckArgs{ Type: base.DISK_IO_COLLECTOR, @@ -46,7 +48,7 @@ func (suite *DiskIOCheckSuite) SetupSuite() { } func (suite *DiskIOCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -86,6 +88,5 @@ func (suite *DiskIOCheckSuite) TestGetDiskIO() { } func TestDiskIOCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DiskIOCheckSuite)) } 
diff --git a/pkg/collector/check/realtime/disk/usage/usage_test.go b/pkg/collector/check/realtime/disk/usage/usage_test.go index ff9ea08..c91ddce 100644 --- a/pkg/collector/check/realtime/disk/usage/usage_test.go +++ b/pkg/collector/check/realtime/disk/usage/usage_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "disk_usage.db" + type DiskUsageCheckSuite struct { suite.Suite client *ent.Client @@ -22,7 +24,7 @@ type DiskUsageCheckSuite struct { } func (suite *DiskUsageCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.DISK_USAGE, @@ -36,7 +38,7 @@ func (suite *DiskUsageCheckSuite) SetupSuite() { } func (suite *DiskUsageCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -69,6 +71,5 @@ func (suite *DiskUsageCheckSuite) TestSaveDiskUsage() { } func TestDiskUsageCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(DiskUsageCheckSuite)) } diff --git a/pkg/collector/check/realtime/memory/memory_test.go b/pkg/collector/check/realtime/memory/memory_test.go index 474a072..fe42065 100644 --- a/pkg/collector/check/realtime/memory/memory_test.go +++ b/pkg/collector/check/realtime/memory/memory_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "memory.db" + type MemoryCheckSuite struct { suite.Suite client *ent.Client @@ -22,7 +24,7 @@ type MemoryCheckSuite struct { } func (suite *MemoryCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) args := &base.CheckArgs{ Type: base.MEM, @@ -36,7 +38,7 @@ func (suite *MemoryCheckSuite) SetupSuite() { } func (suite *MemoryCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) 
suite.Require().NoError(err, "failed to delete test db file") } @@ -60,6 +62,5 @@ func (suite *MemoryCheckSuite) TestSaveMemoryUsage() { } func TestMemoryCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(MemoryCheckSuite)) } diff --git a/pkg/collector/check/realtime/net/net_test.go b/pkg/collector/check/realtime/net/net_test.go index 9086050..344bc10 100644 --- a/pkg/collector/check/realtime/net/net_test.go +++ b/pkg/collector/check/realtime/net/net_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/suite" ) +var dbFileName = "net.db" + type NetCheckSuite struct { suite.Suite client *ent.Client @@ -24,7 +26,7 @@ type NetCheckSuite struct { } func (suite *NetCheckSuite) SetupSuite() { - suite.client = db.InitTestDB() + suite.client = db.InitTestDB(dbFileName) buffer := base.NewCheckBuffer(10) collect_args := &base.CheckArgs{ Type: base.NET_COLLECTOR, @@ -46,7 +48,7 @@ func (suite *NetCheckSuite) SetupSuite() { } func (suite *NetCheckSuite) TearDownSuite() { - err := os.Remove("alpamon.db") + err := os.Remove(dbFileName) suite.Require().NoError(err, "failed to delete test db file") } @@ -90,6 +92,5 @@ func (suite *NetCheckSuite) TestGetTraffic() { } func TestNetCheckSuite(t *testing.T) { - t.Setenv("GOMAXPROCS", "1") suite.Run(t, new(NetCheckSuite)) } diff --git a/pkg/db/db.go b/pkg/db/db.go index 432bcc0..6d06015 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -53,8 +53,8 @@ func InitDB() *ent.Client { return client } -func InitTestDB() *ent.Client { - fileName, _ := filepath.Abs(dbFileName) +func InitTestDB(path string) *ent.Client { + fileName, _ := filepath.Abs(path) dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0750) if err != nil { log.Error().Err(err).Msgf("failed to open test db file: %v", err) From 58f2bf07211cdac24ebb0ace4463df597eb77270 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 31 Mar 2025 18:17:47 +0900 Subject: [PATCH 265/364] Add chmod & chown commands Add chown and chmod to 
FtpCommand for providing the same functionality as Linux's chown and chmod in webftp. --- pkg/runner/ftp.go | 42 ++++++++++++++++++++++++++++++++++++++--- pkg/runner/ftp_types.go | 39 ++++++++++++++++++++++++++++++-------- 2 files changed, 70 insertions(+), 11 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 22b24f7..05034d2 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -4,13 +4,14 @@ import ( "context" "encoding/json" "fmt" - "github.com/alpacanetworks/alpamon/pkg/logger" - "github.com/alpacanetworks/alpamon/pkg/utils" - "github.com/gorilla/websocket" "net/http" "os" "path/filepath" "strings" + + "github.com/alpacanetworks/alpamon/pkg/logger" + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/gorilla/websocket" ) type FtpClient struct { @@ -149,6 +150,10 @@ func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (Command return fc.mv(data.Src, data.Dst) case Cp: return fc.cp(data.Src, data.Dst) + case Chmod: + return fc.chmod(data.Path, data.Mode) + case Chown: + return fc.chown(data.Path, data.UID, data.GID) default: return CommandResult{}, fmt.Errorf("unknown FTP command: %s", command) } @@ -412,3 +417,34 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { Message: fmt.Sprintf("Copy %s to %s", src, dst), }, nil } + +func (fc *FtpClient) chmod(path string, mode int) (CommandResult, error) { + path = fc.parsePath(path) + fileMode := os.FileMode(mode) + + err := os.Chmod(path, fileMode) + if err != nil { + return CommandResult{ + Message: err.Error(), + }, err + } + + return CommandResult{ + Message: fmt.Sprintf("Changed permissions of %s to %o", path, fileMode), + }, nil +} + +func (fc *FtpClient) chown(path string, uid, gid int) (CommandResult, error) { + path = fc.parsePath(path) + + err := os.Chown(path, uid, gid) + if err != nil { + return CommandResult{ + Message: err.Error(), + }, err + } + + return CommandResult{ + Message: fmt.Sprintf("Changed owner of %s to UID: %d, GID: %d", 
path, uid, gid), + }, nil +} diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 27e1812..cdf01e3 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -10,14 +10,16 @@ import ( type FtpCommand string const ( - List FtpCommand = "list" - Mkd FtpCommand = "mkd" - Cwd FtpCommand = "cwd" - Pwd FtpCommand = "pwd" - Dele FtpCommand = "dele" - Rmd FtpCommand = "rmd" - Mv FtpCommand = "mv" - Cp FtpCommand = "cp" + List FtpCommand = "list" + Mkd FtpCommand = "mkd" + Cwd FtpCommand = "cwd" + Pwd FtpCommand = "pwd" + Dele FtpCommand = "dele" + Rmd FtpCommand = "rmd" + Mv FtpCommand = "mv" + Cp FtpCommand = "cp" + Chmod FtpCommand = "chmod" + Chown FtpCommand = "chown" ) const ( @@ -44,6 +46,9 @@ type FtpData struct { ShowHidden bool `json:"show_hidden,omitempty"` Src string `json:"src,omitempty"` Dst string `json:"dst,omitempty"` + Mode int `json:"mode,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` } type FtpContent struct { @@ -150,6 +155,24 @@ var returnCodes = map[FtpCommand]returnCode{ ErrFileExists: 552, }, }, + Chmod: { + Success: 250, + Error: map[string]int{ + ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, + ErrInvalidArgument: 452, + ErrNoSuchFileOrDirectory: 550, + }, + }, + Chown: { + Success: 250, + Error: map[string]int{ + ErrPermissionDenied: 450, + ErrOperationNotPermitted: 450, + ErrInvalidArgument: 452, + ErrNoSuchFileOrDirectory: 550, + }, + }, } func GetFtpErrorCode(command FtpCommand, result CommandResult) (CommandResult, int) { From f42b278d0ce6ca4fc6c349653c6b75c8dbb75672 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 31 Mar 2025 22:09:29 +0900 Subject: [PATCH 266/364] Refactor WebSocket client for graceful shutdown --- cmd/alpamon/command/root.go | 67 ++++++++++++++++++++++++------------- pkg/runner/client.go | 35 ++++++++++--------- pkg/runner/command.go | 4 +-- 3 files changed, 62 insertions(+), 44 deletions(-) diff --git a/cmd/alpamon/command/root.go 
b/cmd/alpamon/command/root.go index 9e11831..7b82ce6 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -4,9 +4,6 @@ import ( "fmt" "github.com/alpacanetworks/alpamon/cmd/alpamon/command/ftp" "github.com/alpacanetworks/alpamon/cmd/alpamon/command/setup" - "os" - "syscall" - "github.com/alpacanetworks/alpamon/pkg/collector" "github.com/alpacanetworks/alpamon/pkg/config" "github.com/alpacanetworks/alpamon/pkg/db" @@ -18,6 +15,9 @@ import ( "github.com/alpacanetworks/alpamon/pkg/version" "github.com/rs/zerolog/log" "github.com/spf13/cobra" + "os" + "os/signal" + "syscall" ) const ( @@ -39,6 +39,9 @@ func init() { } func runAgent() { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGPIPE) + // platform utils.InitPlatform() @@ -48,7 +51,6 @@ func runAgent() { _, _ = fmt.Fprintln(os.Stderr, "Failed to create PID file", err.Error()) os.Exit(1) } - defer func() { _ = os.Remove(pidFilePath) }() fmt.Printf("alpamon version %s starting.\n", version.Version) @@ -65,7 +67,6 @@ func runAgent() { // Logger logFile := logger.InitLogger() - defer func() { _ = logFile.Close() }() log.Info().Msg("alpamon initialized and running.") // Commit @@ -77,28 +78,46 @@ func runAgent() { // Collector metricCollector := collector.InitCollector(session, client) metricCollector.Start() - defer metricCollector.Stop() // Websocket Client wsClient := runner.NewWebsocketClient(session) - wsClient.RunForever() - - if wsClient.RestartRequested { - if err = os.Remove(pidFilePath); err != nil { - log.Error().Err(err).Msg("Failed to remove PID file") - return - } - - executable, err := os.Executable() - if err != nil { - log.Error().Err(err).Msg("Failed to get executable path") - return - } - - err = syscall.Exec(executable, os.Args, os.Environ()) - if err != nil { - log.Error().Err(err).Msg("Failed to restart the program") - } + go wsClient.RunForever() + + select { + case <-sigChan: + log.Info().Msg("Received termination 
signal. Shutting down...") + break + case <-wsClient.ShutDownChan: + log.Info().Msg("Shutdown command received. Shutting down...") + break + case <-wsClient.RestartChan: + log.Info().Msg("Restart requested internally.") + metricCollector.Stop() + wsClient.Close() + log.Debug().Msg("Bye.") + _ = logFile.Close() + _ = os.Remove(pidFilePath) + restartAgent() + return } + + // TODO : improve + metricCollector.Stop() + wsClient.Close() log.Debug().Msg("Bye.") + _ = logFile.Close() + _ = os.Remove(pidFilePath) +} + +func restartAgent() { + executable, err := os.Executable() + if err != nil { + log.Error().Err(err).Msg("Failed to get executable path") + return + } + + err = syscall.Exec(executable, os.Args, os.Environ()) + if err != nil { + log.Error().Err(err).Msg("Failed to restart the program") + } } diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 68d4ca7..dfd38af 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -26,11 +26,12 @@ const ( ) type WebsocketClient struct { - Conn *websocket.Conn - requestHeader http.Header - apiSession *scheduler.Session - RestartRequested bool - QuitChan chan struct{} + Conn *websocket.Conn + requestHeader http.Header + apiSession *scheduler.Session + QuitChan chan struct{} + RestartChan chan struct{} + ShutDownChan chan struct{} } func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { @@ -41,16 +42,16 @@ func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { } return &WebsocketClient{ - requestHeader: headers, - apiSession: session, - RestartRequested: false, - QuitChan: make(chan struct{}), + requestHeader: headers, + apiSession: session, + QuitChan: make(chan struct{}), + RestartChan: make(chan struct{}), + ShutDownChan: make(chan struct{}), } } func (wc *WebsocketClient) RunForever() { wc.Connect() - defer wc.Close() for { select { @@ -67,7 +68,7 @@ func (wc *WebsocketClient) RunForever() { } // Sends "ping" query for Alpacon to verify WebSocket session status without error 
handling. _ = wc.SendPingQuery() - wc.commandRequestHandler(message) + wc.CommandRequestHandler(message) } } } @@ -147,17 +148,15 @@ func (wc *WebsocketClient) Close() { } } -func (wc *WebsocketClient) Quit() { - wc.Close() - close(wc.QuitChan) +func (wc *WebsocketClient) ShutDown() { + close(wc.ShutDownChan) } func (wc *WebsocketClient) Restart() { - wc.RestartRequested = true - wc.Quit() + close(wc.RestartChan) } -func (wc *WebsocketClient) commandRequestHandler(message []byte) { +func (wc *WebsocketClient) CommandRequestHandler(message []byte) { var content Content var data CommandData @@ -190,7 +189,7 @@ func (wc *WebsocketClient) commandRequestHandler(message []byte) { go commandRunner.Run() case "quit": log.Debug().Msgf("Quit requested for reason: %s", content.Reason) - wc.Quit() + wc.ShutDown() case "reconnect": log.Debug().Msgf("Reconnect requested for reason: %s", content.Reason) wc.Close() diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 66300dd..36104bf 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -192,9 +192,9 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 0, "Alpamon will restart in 1 second." case "quit": time.AfterFunc(1*time.Second, func() { - cr.wsClient.Quit() + cr.wsClient.ShutDown() }) - return 0, "Alpamon will quit in 1 second." + return 0, "Alpamon will shutdown in 1 second." 
case "reboot": log.Info().Msg("Reboot request received.") time.AfterFunc(1*time.Second, func() { From cbae3ddbf411b7ed653899ec819e0558c08eaaf0 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 1 Apr 2025 10:05:52 +0900 Subject: [PATCH 267/364] Improve CheckSession retry logic with timeout-aware graceful exit --- cmd/alpamon/command/root.go | 44 +++++++++++++++++++++----------- pkg/scheduler/session.go | 50 ++++++++++++++++++------------------- 2 files changed, 54 insertions(+), 40 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 7b82ce6..8b32b60 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -1,6 +1,7 @@ package command import ( + "context" "fmt" "github.com/alpacanetworks/alpamon/cmd/alpamon/command/ftp" "github.com/alpacanetworks/alpamon/cmd/alpamon/command/setup" @@ -39,8 +40,18 @@ func init() { } func runAgent() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGPIPE) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + select { + case <-sigChan: + cancel() + } + }() // platform utils.InitPlatform() @@ -60,7 +71,7 @@ func runAgent() { // Session session := scheduler.InitSession() - commissioned := session.CheckSession() + commissioned := session.CheckSession(ctx) // Reporter scheduler.StartReporters(session) @@ -84,7 +95,7 @@ func runAgent() { go wsClient.RunForever() select { - case <-sigChan: + case <-ctx.Done(): log.Info().Msg("Received termination signal. 
Shutting down...") break case <-wsClient.ShutDownChan: @@ -92,21 +103,12 @@ func runAgent() { break case <-wsClient.RestartChan: log.Info().Msg("Restart requested internally.") - metricCollector.Stop() - wsClient.Close() - log.Debug().Msg("Bye.") - _ = logFile.Close() - _ = os.Remove(pidFilePath) + gracefulShutdown(metricCollector, wsClient, logFile, pidFilePath) restartAgent() return } - // TODO : improve - metricCollector.Stop() - wsClient.Close() - log.Debug().Msg("Bye.") - _ = logFile.Close() - _ = os.Remove(pidFilePath) + gracefulShutdown(metricCollector, wsClient, logFile, pidFilePath) } func restartAgent() { @@ -121,3 +123,17 @@ func restartAgent() { log.Error().Err(err).Msg("Failed to restart the program") } } + +func gracefulShutdown(collector *collector.Collector, wsClient *runner.WebsocketClient, logFile *os.File, pidPath string) { + if collector != nil { + collector.Stop() + } + if wsClient != nil { + wsClient.Close() + } + log.Debug().Msg("Bye.") + if logFile != nil { + _ = logFile.Close() + } + _ = os.Remove(pidPath) +} diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 5ab4995..af0661e 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -20,7 +20,7 @@ import ( const ( checkSessionURL = "/api/servers/servers/-/" - MaxRetryTimeout = 3 * 24 * time.Hour + MaxRetryTimeout = 3 * 24 * time.Second ) func InitSession() *Session { @@ -52,40 +52,38 @@ func InitSession() *Session { return session } -func (session *Session) CheckSession() bool { - timeout := config.MinConnectInterval - ctx, cancel := context.WithTimeout(context.Background(), MaxRetryTimeout) +func (session *Session) CheckSession(ctx context.Context) bool { + timeout := 0 * time.Second + ctxWithTimeout, cancel := context.WithTimeout(ctx, MaxRetryTimeout) defer cancel() for { select { - case <-ctx.Done(): - log.Error().Msg("Maximum retry duration reached. 
Shutting down.") + case <-ctxWithTimeout.Done(): + log.Error().Msg("Session check cancelled or timed out.") os.Exit(1) - default: - resp, _, err := session.Get(checkSessionURL, 5) - if err != nil { + case <-time.After(timeout): + resp, statusCode, err := session.Get(checkSessionURL, 5) + if err != nil || statusCode != http.StatusOK { log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds", config.GlobalSettings.ServerURL, int(timeout.Seconds())) - time.Sleep(timeout) - timeout *= 2 - if timeout > config.MaxConnectInterval { - timeout = config.MaxConnectInterval + } else { + var response map[string]interface{} + err = json.Unmarshal(resp, &response) + if err != nil { + log.Debug().Err(err).Msgf("Failed to unmarshal JSON, will try again in %ds", int(timeout.Seconds())) + } else { + if commissioned, ok := response["commissioned"].(bool); ok { + return commissioned + } } - continue } - - var response map[string]interface{} - err = json.Unmarshal(resp, &response) - if err != nil { - log.Debug().Err(err).Msg("Failed to unmarshal JSON") - continue + // time.Sleep(timeout) + if timeout == 0 { + timeout = config.MinConnectInterval } - - if commissioned, ok := response["commissioned"].(bool); ok { - return commissioned - } else { - log.Error().Msg("Unable to find 'commissioned' field in the response") - continue + timeout *= 2 + if timeout > config.MaxConnectInterval { + timeout = config.MaxConnectInterval } } } From 4a147e3f3838cb2f6945ee37cf350be5daffd201 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 1 Apr 2025 13:18:46 +0900 Subject: [PATCH 268/364] Initialize logger earlier to capture startup logs and minor fix --- cmd/alpamon/command/root.go | 7 ++++--- pkg/utils/utils.go | 12 ++++++------ 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 8b32b60..48421bb 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -53,6 +53,9 @@ func runAgent() { } }() 
+ // Logger + logFile := logger.InitLogger() + // platform utils.InitPlatform() @@ -63,7 +66,7 @@ func runAgent() { os.Exit(1) } - fmt.Printf("alpamon version %s starting.\n", version.Version) + log.Info().Msgf("Starting alpamon... (version: %s)", version.Version) // Config & Settings settings := config.LoadConfig(config.Files(name), wsPath) @@ -76,8 +79,6 @@ func runAgent() { // Reporter scheduler.StartReporters(session) - // Logger - logFile := logger.InitLogger() log.Info().Msg("alpamon initialized and running.") // Commit diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 4ce56fa..102191f 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -30,10 +30,12 @@ func getPlatformLike() { system := runtime.GOOS switch system { + case "darwin": + PlatformLike = system case "linux": platformInfo, err := host.Info() if err != nil { - log.Error().Err(err).Msg("Failed to get platform information") + log.Error().Err(err).Msg("Failed to retrieve platform information.") os.Exit(1) } switch platformInfo.Platform { @@ -42,19 +44,17 @@ func getPlatformLike() { case "centos", "rhel", "redhat", "amazon", "amzn", "fedora", "rocky", "oracle", "ol": PlatformLike = "rhel" default: - log.Fatal().Msgf("Platform %s not supported", platformInfo.Platform) + log.Fatal().Msgf("Platform %s not supported.", platformInfo.Platform) } - case "windows", "darwin": - PlatformLike = system default: - log.Fatal().Msgf("Platform %s not supported", system) + log.Fatal().Msgf("unsupported os: %s", runtime.GOOS) } } func JoinPath(base string, paths ...string) string { fullURL, err := url.JoinPath(base, paths...) if err != nil { - log.Error().Err(err).Msg("Failed to join path") + log.Error().Err(err).Msg("Failed to join path.") return "" } From 526791d00db9410e9d4814e906127f47f7c3541c Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 1 Apr 2025 13:52:55 +0900 Subject: [PATCH 269/364] Fix blocking issue in FailureQueueWorker Fix FailureQueueWorker to prevent blocking on empty failure queue. 
Add non-blocking check for FailureQueu to prevent infinite waiting. Ensure that failureQueueWorker continues execution even when the queue is empty Prevent potential deadlocks when shutting down t he worker --- pkg/collector/collector.go | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 45b4486..c14c4d3 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -181,16 +181,23 @@ func (c *Collector) failureQueueWorker(ctx context.Context) { case <-ctx.Done(): return case <-retryTicker.C: - metric, ok := <-c.buffer.FailureQueue - if !ok { - return - } + c.retryFailedMetrics(ctx) + } + } +} - err := c.retryWithBackoff(ctx, metric) - if err != nil { - log.Error().Err(err).Msgf("Failed to check metric: %s", metric.Type) - } +func (c *Collector) retryFailedMetrics(ctx context.Context) { + select { + case metric, ok := <-c.buffer.FailureQueue: + if !ok { + return + } + err := c.retryWithBackoff(ctx, metric) + if err != nil { + log.Error().Err(err).Msgf("Failed to check metric: %s", metric.Type) } + default: + return } } From d9429e1e6611263e563daad569a168eabcfd1e40 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 1 Apr 2025 14:40:16 +0900 Subject: [PATCH 270/364] Use context to safely handle shutdown and prevent reconnect during termination --- cmd/alpamon/command/root.go | 6 ++++-- pkg/runner/client.go | 17 ++++++++++------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 48421bb..7282945 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -93,7 +93,7 @@ func runAgent() { // Websocket Client wsClient := runner.NewWebsocketClient(session) - go wsClient.RunForever() + go wsClient.RunForever(ctx) select { case <-ctx.Done(): @@ -101,9 +101,11 @@ func runAgent() { break case <-wsClient.ShutDownChan: log.Info().Msg("Shutdown command received. 
Shutting down...") + cancel() break case <-wsClient.RestartChan: - log.Info().Msg("Restart requested internally.") + log.Info().Msg("Restart command received. Restarting... ") + cancel() gracefulShutdown(metricCollector, wsClient, logFile, pidFilePath) restartAgent() return diff --git a/pkg/runner/client.go b/pkg/runner/client.go index dfd38af..195c5fb 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -29,7 +29,6 @@ type WebsocketClient struct { Conn *websocket.Conn requestHeader http.Header apiSession *scheduler.Session - QuitChan chan struct{} RestartChan chan struct{} ShutDownChan chan struct{} } @@ -44,27 +43,28 @@ func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { return &WebsocketClient{ requestHeader: headers, apiSession: session, - QuitChan: make(chan struct{}), RestartChan: make(chan struct{}), ShutDownChan: make(chan struct{}), } } -func (wc *WebsocketClient) RunForever() { +func (wc *WebsocketClient) RunForever(ctx context.Context) { wc.Connect() for { select { - case <-wc.QuitChan: + case <-ctx.Done(): return default: err := wc.Conn.SetReadDeadline(time.Now().Add(ConnectionReadTimeout)) if err != nil { - wc.CloseAndReconnect() + wc.CloseAndReconnect(ctx) + continue } _, message, err := wc.ReadMessage() if err != nil { - wc.CloseAndReconnect() + wc.CloseAndReconnect(ctx) + continue } // Sends "ping" query for Alpacon to verify WebSocket session status without error handling. 
_ = wc.SendPingQuery() @@ -130,7 +130,10 @@ func (wc *WebsocketClient) Connect() { } } -func (wc *WebsocketClient) CloseAndReconnect() { +func (wc *WebsocketClient) CloseAndReconnect(ctx context.Context) { + if ctx.Err() != nil { + return + } wc.Close() wc.Connect() } From 599374b5e7993b414572ca2ca61dc9b6841f97c7 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 1 Apr 2025 14:49:53 +0900 Subject: [PATCH 271/364] Minor fix --- pkg/scheduler/session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index af0661e..4d0e29d 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -20,7 +20,7 @@ import ( const ( checkSessionURL = "/api/servers/servers/-/" - MaxRetryTimeout = 3 * 24 * time.Second + MaxRetryTimeout = 3 * 24 * time.Hour ) func InitSession() *Session { From 876d5b8709f9de6eeceb55ba030df7085c116186 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 1 Apr 2025 14:51:46 +0900 Subject: [PATCH 272/364] Minor fix --- cmd/alpamon/command/root.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 7282945..58765d3 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -47,10 +47,8 @@ func runAgent() { signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) go func() { - select { - case <-sigChan: - cancel() - } + <-sigChan + cancel() }() // Logger From 2024409d68965da3fe1296480108f92ab319e892 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 1 Apr 2025 14:51:52 +0900 Subject: [PATCH 273/364] Fix mode's type to string in chmod() Fix mode's type to string because os.FileMode() requires octal. Used strconv.parseUint() within chmod() to convert mode to octal. 
--- pkg/runner/ftp.go | 12 +++++++++--- pkg/runner/ftp_types.go | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 05034d2..a19aaeb 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -7,6 +7,7 @@ import ( "net/http" "os" "path/filepath" + "strconv" "strings" "github.com/alpacanetworks/alpamon/pkg/logger" @@ -418,11 +419,16 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { }, nil } -func (fc *FtpClient) chmod(path string, mode int) (CommandResult, error) { +func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { path = fc.parsePath(path) - fileMode := os.FileMode(mode) + fileMode, err := strconv.ParseUint(mode, 8, 32) + if err != nil { + return CommandResult{ + Message: err.Error(), + }, err + } - err := os.Chmod(path, fileMode) + err = os.Chmod(path, os.FileMode(fileMode)) if err != nil { return CommandResult{ Message: err.Error(), diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index cdf01e3..6b18843 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -46,7 +46,7 @@ type FtpData struct { ShowHidden bool `json:"show_hidden,omitempty"` Src string `json:"src,omitempty"` Dst string `json:"dst,omitempty"` - Mode int `json:"mode,omitempty"` + Mode string `json:"mode,omitempty"` UID int `json:"uid,omitempty"` GID int `json:"gid,omitempty"` } From 225e1d09c310134d43e3dcad05939047986d9a1b Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 1 Apr 2025 14:53:57 +0900 Subject: [PATCH 274/364] Minor fix --- pkg/scheduler/session.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 4d0e29d..1fe6d2e 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -77,8 +77,7 @@ func (session *Session) CheckSession(ctx context.Context) bool { } } } - // time.Sleep(timeout) - if timeout == 0 { + if timeout == 0 { // first time timeout = 
config.MinConnectInterval } timeout *= 2 From 67a6f217e058ee24a17ca45adf6cb151be675bea Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 1 Apr 2025 16:38:08 +0900 Subject: [PATCH 275/364] Fix uid, gid's type to pointer in chown() Fix chown() to allow modification of either uid or gid. --- pkg/runner/ftp.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index a19aaeb..828a993 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -154,7 +154,7 @@ func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (Command case Chmod: return fc.chmod(data.Path, data.Mode) case Chown: - return fc.chown(data.Path, data.UID, data.GID) + return fc.chown(data.Path, &data.UID, &data.GID) default: return CommandResult{}, fmt.Errorf("unknown FTP command: %s", command) } @@ -440,10 +440,20 @@ func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { }, nil } -func (fc *FtpClient) chown(path string, uid, gid int) (CommandResult, error) { +func (fc *FtpClient) chown(path string, uid, gid *int) (CommandResult, error) { path = fc.parsePath(path) - err := os.Chown(path, uid, gid) + finalUID := -1 + if uid != nil { + finalUID = *uid + } + + finalGID := -1 + if gid != nil { + finalGID = *gid + } + + err := os.Chown(path, finalUID, finalGID) if err != nil { return CommandResult{ Message: err.Error(), From 97e0a446e106e3d261e71479645a624f0e267565 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 1 Apr 2025 17:19:09 +0900 Subject: [PATCH 276/364] Revert to previous version Revert to previous version because UID, GID being part of FtpData structure, cannot be nil pointers. 
--- pkg/runner/ftp.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 828a993..a19aaeb 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -154,7 +154,7 @@ func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (Command case Chmod: return fc.chmod(data.Path, data.Mode) case Chown: - return fc.chown(data.Path, &data.UID, &data.GID) + return fc.chown(data.Path, data.UID, data.GID) default: return CommandResult{}, fmt.Errorf("unknown FTP command: %s", command) } @@ -440,20 +440,10 @@ func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { }, nil } -func (fc *FtpClient) chown(path string, uid, gid *int) (CommandResult, error) { +func (fc *FtpClient) chown(path string, uid, gid int) (CommandResult, error) { path = fc.parsePath(path) - finalUID := -1 - if uid != nil { - finalUID = *uid - } - - finalGID := -1 - if gid != nil { - finalGID = *gid - } - - err := os.Chown(path, finalUID, finalGID) + err := os.Chown(path, uid, gid) if err != nil { return CommandResult{ Message: err.Error(), From db540d1c5b3cdeb1e2f00a6fa8cdf0e7c34982d0 Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 2 Apr 2025 08:58:09 +0900 Subject: [PATCH 277/364] Fix log message format by adding period --- cmd/alpamon/command/root.go | 4 +- cmd/alpamon/command/setup/setup.go | 2 +- pkg/collector/collector.go | 10 ++--- pkg/config/config.go | 14 +++---- pkg/db/db.go | 12 +++--- pkg/db/migrate.go | 10 ++--- pkg/runner/client.go | 10 ++--- pkg/runner/command.go | 4 -- pkg/runner/commit.go | 60 +++++++++++++++--------------- pkg/runner/ftp.go | 26 ++++++------- pkg/runner/pty.go | 16 ++++---- pkg/scheduler/reporter.go | 2 +- pkg/scheduler/session.go | 6 +-- pkg/utils/utils.go | 2 +- 14 files changed, 87 insertions(+), 91 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 58765d3..eb006c4 100644 --- a/cmd/alpamon/command/root.go +++ 
b/cmd/alpamon/command/root.go @@ -115,13 +115,13 @@ func runAgent() { func restartAgent() { executable, err := os.Executable() if err != nil { - log.Error().Err(err).Msg("Failed to get executable path") + log.Error().Err(err).Msg("Failed to get executable path.") return } err = syscall.Exec(executable, os.Args, os.Environ()) if err != nil { - log.Error().Err(err).Msg("Failed to restart the program") + log.Error().Err(err).Msg("Failed to restart the program.") } } diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 882bfdb..0771764 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -36,7 +36,7 @@ func SetConfigPaths(serviceName string) { var SetupCmd = &cobra.Command{ Use: "setup", - Short: fmt.Sprintf("Setup and configure the %s", name), + Short: fmt.Sprintf("Setup and configure the %s.", name), RunE: func(cmd *cobra.Command, args []string) error { fmt.Printf("Starting %s setup...\n", name) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index c14c4d3..6d6781e 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -51,7 +51,7 @@ type collectorArgs struct { func InitCollector(session *session.Session, client *ent.Client) *Collector { conf, err := fetchConfig(session) if err != nil { - log.Error().Err(err).Msg("Failed to fetch collector config") + log.Error().Err(err).Msg("Failed to fetch collector config.") os.Exit(1) } @@ -68,7 +68,7 @@ func InitCollector(session *session.Session, client *ent.Client) *Collector { collector, err := NewCollector(args) if err != nil { - log.Error().Err(err).Msg("Failed to create collector") + log.Error().Err(err).Msg("Failed to create collector.") os.Exit(1) } @@ -81,7 +81,7 @@ func fetchConfig(session *session.Session) ([]collectConf, error) { return nil, err } if statusCode != http.StatusOK { - return nil, fmt.Errorf("failed to get collection config: %d status code", statusCode) + return nil, fmt.Errorf("failed 
to get collection config: %d status code.", statusCode) } var conf []collectConf @@ -194,7 +194,7 @@ func (c *Collector) retryFailedMetrics(ctx context.Context) { } err := c.retryWithBackoff(ctx, metric) if err != nil { - log.Error().Err(err).Msgf("Failed to check metric: %s", metric.Type) + log.Error().Err(err).Msgf("Failed to check metric: %s.", metric.Type) } default: return @@ -223,7 +223,7 @@ func (c *Collector) retryWithBackoff(ctx context.Context, metric base.MetricData func (c *Collector) handleErrors() { for err := range c.errorChan { - log.Error().Err(err).Msgf("Collector error: %v", err) + log.Error().Err(err).Msgf("Collector error: %v.", err) } } diff --git a/pkg/config/config.go b/pkg/config/config.go index bb70c70..4e9635e 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -37,7 +37,7 @@ func LoadConfig(configFiles []string, wsPath string) Settings { if os.IsNotExist(statErr) { continue } else { - log.Error().Err(statErr).Msgf("Error accessing config file %s", configFile) + log.Error().Err(statErr).Msgf("Error accessing config file %s.", configFile) continue } } @@ -47,24 +47,24 @@ func LoadConfig(configFiles []string, wsPath string) Settings { continue } - log.Debug().Msgf("Using config file %s", configFile) + log.Debug().Msgf("Using config file %s.", configFile) validConfigFile = configFile break } if validConfigFile == "" { - log.Fatal().Msg("No valid config file found") + log.Fatal().Msg("No valid config file found.") } iniData, err = ini.Load(validConfigFile) if err != nil { - log.Fatal().Err(err).Msgf("failed to load config file %s", validConfigFile) + log.Fatal().Err(err).Msgf("failed to load config file %s.", validConfigFile) } var config Config err = iniData.MapTo(&config) if err != nil { - log.Fatal().Err(err).Msgf("failed to parse config file %s", validConfigFile) + log.Fatal().Err(err).Msgf("failed to parse config file %s.", validConfigFile) } if config.Logging.Debug { @@ -101,7 +101,7 @@ func validateConfig(config Config, 
wsPath string) (bool, Settings) { settings.WSPath = strings.Replace(val, "http", "ws", 1) + settings.WSPath settings.UseSSL = strings.HasPrefix(val, "https://") } else { - log.Error().Msg("Server url is invalid") + log.Error().Msg("Server url is invalid.") valid = false } @@ -109,7 +109,7 @@ func validateConfig(config Config, wsPath string) (bool, Settings) { settings.ID = config.Server.ID settings.Key = config.Server.Key } else { - log.Error().Msg("Server ID, KEY is empty") + log.Error().Msg("Server ID, KEY is empty.") valid = false } diff --git a/pkg/db/db.go b/pkg/db/db.go index 6d06015..c617cd4 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -27,7 +27,7 @@ func InitDB() *ent.Client { dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0750) if err != nil { - log.Error().Err(err).Msgf("failed to open db file: %v", err) + log.Error().Err(err).Msgf("failed to open db file: %v.", err) _, _ = fmt.Fprintf(os.Stderr, "Failed to open db file: %v\n", err) os.Exit(1) } @@ -39,14 +39,14 @@ func InitDB() *ent.Client { err = RunMigration(dbFile.Name(), ctx) if err != nil { - log.Error().Err(err).Msgf("failed to migrate db: %v\n", err) + log.Error().Err(err).Msgf("failed to migrate db: %v.", err) os.Exit(1) } dbManager := NewDBClientManager(dbFile.Name()) client, err := dbManager.GetClient() if err != nil { - log.Error().Err(err).Msgf("failed to get db client: %v\n", err) + log.Error().Err(err).Msgf("failed to get db client: %v.", err) os.Exit(1) } @@ -57,7 +57,7 @@ func InitTestDB(path string) *ent.Client { fileName, _ := filepath.Abs(path) dbFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0750) if err != nil { - log.Error().Err(err).Msgf("failed to open test db file: %v", err) + log.Error().Err(err).Msgf("failed to open test db file: %v.", err) _, _ = fmt.Fprintf(os.Stderr, "Failed to open test db file: %v\n", err) os.Exit(1) } @@ -70,7 +70,7 @@ func InitTestDB(path string) *ent.Client { sql.Register("sqlite3", 
&sqlite.Driver{}) err = RunMigration(dbFile.Name(), ctx) if err != nil { - log.Error().Err(err).Msgf("failed to migrate test db: %v\n", err) + log.Error().Err(err).Msgf("failed to migrate test db: %v.", err) os.Exit(1) } }) @@ -78,7 +78,7 @@ func InitTestDB(path string) *ent.Client { dbManager := NewDBClientManager(dbFile.Name()) client, err := dbManager.GetClient() if err != nil { - log.Error().Err(err).Msgf("failed to get db client: %v\n", err) + log.Error().Err(err).Msgf("failed to get db client: %v.", err) os.Exit(1) } diff --git a/pkg/db/migrate.go b/pkg/db/migrate.go index 43cca81..9b7b8bb 100644 --- a/pkg/db/migrate.go +++ b/pkg/db/migrate.go @@ -15,7 +15,7 @@ var migrations embed.FS func RunMigration(path string, ctx context.Context) error { if err := ctx.Err(); err != nil { - log.Error().Err(err).Msgf("context cancelled before migration: %v", err) + log.Error().Err(err).Msgf("context cancelled before migration: %v.", err) return err } @@ -25,7 +25,7 @@ func RunMigration(path string, ctx context.Context) error { default: migrationFS, err := getMigrationDir() if err != nil { - log.Error().Err(err).Msg("failed to get migration filesystem") + log.Error().Err(err).Msg("failed to get migration filesystem.") return err } @@ -35,14 +35,14 @@ func RunMigration(path string, ctx context.Context) error { ), ) if err != nil { - log.Error().Err(err).Msgf("failed to open migration dir: %v", err) + log.Error().Err(err).Msgf("failed to open migration dir: %v.", err) return err } defer func() { _ = workDir.Close() }() client, err := atlasexec.NewClient(workDir.Path(), "atlas") if err != nil { - log.Error().Err(err).Msgf("failed to get atlas client: %v", err) + log.Error().Err(err).Msgf("failed to get atlas client: %v.", err) return err } @@ -53,7 +53,7 @@ func RunMigration(path string, ctx context.Context) error { }) if err != nil { - log.Error().Err(err).Msgf("failed to migrate db: %v", err) + log.Error().Err(err).Msgf("failed to migrate db: %v.", err) return err } diff 
--git a/pkg/runner/client.go b/pkg/runner/client.go index 195c5fb..e419d61 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -169,14 +169,14 @@ func (wc *WebsocketClient) CommandRequestHandler(message []byte) { err := json.Unmarshal(message, &content) if err != nil { - log.Error().Err(err).Msgf("Inappropriate message: %s", string(message)) + log.Error().Err(err).Msgf("Inappropriate message: %s.", string(message)) return } if content.Command.Data != "" { err = json.Unmarshal([]byte(content.Command.Data), &data) if err != nil { - log.Error().Err(err).Msgf("Inappropriate message: %s", string(message)) + log.Error().Err(err).Msgf("Inappropriate message: %s.", string(message)) return } } @@ -191,13 +191,13 @@ func (wc *WebsocketClient) CommandRequestHandler(message []byte) { commandRunner := NewCommandRunner(wc, content.Command, data) go commandRunner.Run() case "quit": - log.Debug().Msgf("Quit requested for reason: %s", content.Reason) + log.Debug().Msgf("Quit requested for reason: %s.", content.Reason) wc.ShutDown() case "reconnect": - log.Debug().Msgf("Reconnect requested for reason: %s", content.Reason) + log.Debug().Msgf("Reconnect requested for reason: %s.", content.Reason) wc.Close() default: - log.Warn().Msgf("Not implemented query: %s", content.Query) + log.Warn().Msgf("Not implemented query: %s.", content.Query) } } diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 36104bf..19e6023 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -59,10 +59,6 @@ func (cr *CommandRunner) Run() { exitCode, result = cr.handleInternalCmd() case "system": exitCode, result = cr.handleShellCmd(cr.command.Line, cr.command.User, cr.command.Group, cr.command.Env) - case "osquery": // TODO DEPRECATED: This case will be removed in a future release. - exitCode = 1 - result = "alpamon does not use osquery. Please update alpacon-server." - log.Warn().Msg(result) default: exitCode = 1 result = "Invalid command shell argument." 
diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 13f2c79..84c81f3 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -103,7 +103,7 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { case "server": loadAvg, err := getLoadAverage() if err != nil { - log.Debug().Err(err).Msg("Failed to retrieve load average") + log.Debug().Err(err).Msg("Failed to retrieve load average.") } currentData = &ServerData{ Version: version.Version, @@ -113,52 +113,52 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { continue case "info": if currentData, err = getSystemData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve system info") + log.Debug().Err(err).Msg("Failed to retrieve system info.") } remoteData = &SystemData{} case "os": if currentData, err = getOsData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve os info") + log.Debug().Err(err).Msg("Failed to retrieve os info.") } remoteData = &OSData{} case "time": if currentData, err = getTimeData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve time info") + log.Debug().Err(err).Msg("Failed to retrieve time info.") } remoteData = &TimeData{} case "groups": if currentData, err = getGroupData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve group info") + log.Debug().Err(err).Msg("Failed to retrieve group info.") } remoteData = &[]GroupData{} case "users": if currentData, err = getUserData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve user info") + log.Debug().Err(err).Msg("Failed to retrieve user info.") } remoteData = &[]UserData{} case "interfaces": if currentData, err = getNetworkInterfaces(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve network interfaces") + log.Debug().Err(err).Msg("Failed to retrieve network interfaces.") } remoteData = &[]Interface{} case "addresses": if currentData, err = getNetworkAddresses(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve network 
addresses") + log.Debug().Err(err).Msg("Failed to retrieve network addresses.") } remoteData = &[]Address{} case "packages": if currentData, err = getSystemPackages(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve system packages") + log.Debug().Err(err).Msg("Failed to retrieve system packages.") } remoteData = &[]SystemPackageData{} case "disks": if currentData, err = getDisks(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve disks") + log.Debug().Err(err).Msg("Failed to retrieve disks.") } remoteData = &[]Disk{} case "partitions": if currentData, err = getPartitions(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve partitions") + log.Debug().Err(err).Msg("Failed to retrieve partitions.") } remoteData = &[]Partition{} default: @@ -170,13 +170,13 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { if statusCode == http.StatusOK { err = json.Unmarshal(resp, &remoteData) if err != nil { - log.Error().Err(err).Msg("Failed to unmarshal remote data") + log.Error().Err(err).Msg("Failed to unmarshal remote data.") continue } } else if statusCode == http.StatusNotFound { remoteData = nil } else { - log.Error().Err(err).Msgf("HTTP %d: Failed to get data for %s", statusCode, key) + log.Error().Err(err).Msgf("HTTP %d: Failed to get data for %s.", statusCode, key) continue } @@ -186,7 +186,7 @@ func syncSystemInfo(session *scheduler.Session, keys []string) { compareData(entry, currentData.(ComparableData), remoteData.(ComparableData)) } } - log.Info().Msg("Completed system information synchronization") + log.Info().Msg("Completed system information synchronization.") } func compareData(entry commitDef, currentData, remoteData ComparableData) { @@ -239,37 +239,37 @@ func collectData() *commitData { data.Version = version.Version if data.Load, err = getLoadAverage(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve load average") + log.Debug().Err(err).Msg("Failed to retrieve load average.") } if data.Info, err = 
getSystemData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve system info") + log.Debug().Err(err).Msg("Failed to retrieve system info.") } if data.OS, err = getOsData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve os info") + log.Debug().Err(err).Msg("Failed to retrieve os info.") } if data.Time, err = getTimeData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve time data") + log.Debug().Err(err).Msg("Failed to retrieve time data.") } if data.Users, err = getUserData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve user data") + log.Debug().Err(err).Msg("Failed to retrieve user data.") } if data.Groups, err = getGroupData(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve group data") + log.Debug().Err(err).Msg("Failed to retrieve group data.") } if data.Interfaces, err = getNetworkInterfaces(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve network interfaces") + log.Debug().Err(err).Msg("Failed to retrieve network interfaces.") } if data.Addresses, err = getNetworkAddresses(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve network addresses") + log.Debug().Err(err).Msg("Failed to retrieve network addresses.") } if data.Packages, err = getSystemPackages(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve system packages") + log.Debug().Err(err).Msg("Failed to retrieve system packages.") } if data.Disks, err = getDisks(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve disks") + log.Debug().Err(err).Msg("Failed to retrieve disks.") } if data.Partitions, err = getPartitions(); err != nil { - log.Debug().Err(err).Msg("Failed to retrieve disk partitions") + log.Debug().Err(err).Msg("Failed to retrieve disk partitions.") } return data @@ -377,7 +377,7 @@ func getUserData() ([]UserData, error) { file, err := os.Open(passwdFilePath) if err != nil { - log.Debug().Err(err).Msg("Failed to open passwd file") + log.Debug().Err(err).Msg("Failed to open 
passwd file.") return users, err } @@ -422,7 +422,7 @@ func getGroupData() ([]GroupData, error) { file, err := os.Open(groupFilePath) if err != nil { - log.Debug().Err(err).Msg("Failed to open group file") + log.Debug().Err(err).Msg("Failed to open group file.") return groups, err } defer func() { _ = file.Close() }() @@ -569,7 +569,7 @@ func getSystemPackages() ([]SystemPackageData, error) { func getDpkgPackage() ([]SystemPackageData, error) { fd, err := os.Open(dpkgDbPath) if err != nil { - log.Debug().Err(err).Msgf("Failed to open %s file", dpkgDbPath) + log.Debug().Err(err).Msgf("Failed to open %s file.", dpkgDbPath) return []SystemPackageData{}, err } defer func() { _ = fd.Close() }() @@ -621,7 +621,7 @@ func getDpkgPackage() ([]SystemPackageData, error) { func getRpmPackage(path string) ([]SystemPackageData, error) { db, err := rpmdb.Open(path) if err != nil { - log.Debug().Err(err).Msgf("Failed to open %s file: %v", path, err) + log.Debug().Err(err).Msgf("Failed to open %s file: %v.", path, err) return []SystemPackageData{}, err } @@ -629,7 +629,7 @@ func getRpmPackage(path string) ([]SystemPackageData, error) { pkgList, err := db.ListPackages() if err != nil { - log.Debug().Err(err).Msgf("Failed to list packages: %v", err) + log.Debug().Err(err).Msgf("Failed to list packages: %v.", err) return []SystemPackageData{}, err } diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 22b24f7..4d543ce 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -43,7 +43,7 @@ func (fc *FtpClient) RunFtpBackground() { var err error fc.conn, _, err = websocket.DefaultDialer.Dial(fc.url, fc.requestHeader) if err != nil { - fc.log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s", fc.url) + fc.log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s.", fc.url) return } defer fc.close() @@ -68,7 +68,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { return } if !websocket.IsCloseError(err, 
websocket.CloseNormalClosure, websocket.CloseGoingAway) { - fc.log.Debug().Err(err).Msg("Failed to read from ftp websocket") + fc.log.Debug().Err(err).Msg("Failed to read from ftp websocket.") } cancel() return @@ -77,7 +77,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { var content FtpContent err = json.Unmarshal(message, &content) if err != nil { - fc.log.Debug().Err(err).Msg("Failed to unmarshal websocket message") + fc.log.Debug().Err(err).Msg("Failed to unmarshal websocket message.") cancel() return } @@ -101,7 +101,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { if ctx.Err() != nil { return } - fc.log.Debug().Err(err).Msg("Failed to marshal response") + fc.log.Debug().Err(err).Msg("Failed to marshal response.") cancel() return } @@ -112,7 +112,7 @@ func (fc *FtpClient) read(ctx context.Context, cancel context.CancelFunc) { return } if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - fc.log.Debug().Err(err).Msg("Failed to send websocket message") + fc.log.Debug().Err(err).Msg("Failed to send websocket message.") } cancel() return @@ -275,7 +275,7 @@ func (fc *FtpClient) mkd(path string) (CommandResult, error) { } return CommandResult{ - Message: fmt.Sprintf("Make %s successfully", path), + Message: fmt.Sprintf("Make %s successfully.", path), }, nil } @@ -298,13 +298,13 @@ func (fc *FtpClient) cwd(path string) (CommandResult, error) { fc.workingDirectory = path return CommandResult{ - Message: fmt.Sprintf("Change working directory to %s", path), + Message: fmt.Sprintf("Change working directory to %s.", path), }, nil } func (fc *FtpClient) pwd() (CommandResult, error) { return CommandResult{ - Message: fmt.Sprintf("Current working directory: %s", fc.workingDirectory), + Message: fmt.Sprintf("Current working directory: %s.", fc.workingDirectory), Path: fc.workingDirectory, }, nil } @@ -320,7 +320,7 @@ func (fc *FtpClient) dele(path string) (CommandResult, 
error) { } return CommandResult{ - Message: fmt.Sprintf("Delete %s successfully", path), + Message: fmt.Sprintf("Delete %s successfully.", path), }, nil } @@ -347,7 +347,7 @@ func (fc *FtpClient) rmd(path string, recursive bool) (CommandResult, error) { } return CommandResult{ - Message: fmt.Sprintf("Delete %s successfully", path), + Message: fmt.Sprintf("Delete %s successfully.", path), }, nil } @@ -364,7 +364,7 @@ func (fc *FtpClient) mv(src, dst string) (CommandResult, error) { return CommandResult{ Dst: dst, - Message: fmt.Sprintf("Move %s to %s", src, dst), + Message: fmt.Sprintf("Move %s to %s.", src, dst), }, nil } @@ -395,7 +395,7 @@ func (fc *FtpClient) cpDir(src, dst string) (CommandResult, error) { return CommandResult{ Dst: dst, - Message: fmt.Sprintf("Copy %s to %s", src, dst), + Message: fmt.Sprintf("Copy %s to %s.", src, dst), }, nil } @@ -409,6 +409,6 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { return CommandResult{ Dst: dst, - Message: fmt.Sprintf("Copy %s to %s", src, dst), + Message: fmt.Sprintf("Copy %s to %s.", src, dst), }, nil } diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 5317183..121e0b6 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -61,7 +61,7 @@ func (pc *PtyClient) RunPtyBackground() { var err error pc.conn, _, err = websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) if err != nil { - log.Error().Err(err).Msgf("Failed to connect to pty websocket at %s", pc.url) + log.Error().Err(err).Msgf("Failed to connect to pty websocket at %s.", pc.url) return } defer pc.close() @@ -70,7 +70,7 @@ func (pc *PtyClient) RunPtyBackground() { uid, gid, groupIds, env, err := pc.getPtyUserAndEnv() if err != nil { - log.Error().Err(err).Msgf("Failed to get pty user and env") + log.Error().Err(err).Msgf("Failed to get pty user and env.") return } @@ -83,7 +83,7 @@ func (pc *PtyClient) RunPtyBackground() { pc.ptmx, err = pty.StartWithSize(pc.cmd, initialSize) if err != nil { - 
log.Error().Err(err).Msg("Failed to start pty") + log.Error().Err(err).Msg("Failed to start pty.") pc.close() return } @@ -112,7 +112,7 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance return } if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - log.Debug().Err(err).Msg("Failed to read from pty websocket") + log.Debug().Err(err).Msg("Failed to read from pty websocket.") } cancel() return @@ -124,7 +124,7 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance return } if !errors.Is(err, os.ErrClosed) { - log.Debug().Err(err).Msg("Failed to write to pty") + log.Debug().Err(err).Msg("Failed to write to pty.") } cancel() return @@ -149,7 +149,7 @@ func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc) if err == io.EOF { log.Debug().Msg("pty session exited.") } else { - log.Debug().Err(err).Msg("Failed to read from PTY") + log.Debug().Err(err).Msg("Failed to read from pty.") } cancel() return @@ -160,7 +160,7 @@ func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc) return } if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - log.Debug().Err(err).Msg("Failed to write to pty") + log.Debug().Err(err).Msg("Failed to write to pty.") } cancel() return @@ -175,7 +175,7 @@ func (pc *PtyClient) resize(rows, cols uint16) error { Cols: cols, }) if err != nil { - log.Warn().Err(err).Msg("Failed to resize terminal") + log.Warn().Err(err).Msg("Failed to resize terminal.") return err } pc.rows = rows diff --git a/pkg/scheduler/reporter.go b/pkg/scheduler/reporter.go index 8f2d6a1..013fe5b 100644 --- a/pkg/scheduler/reporter.go +++ b/pkg/scheduler/reporter.go @@ -75,7 +75,7 @@ func (r *Reporter) query(entry PriorityEntry) { if statusCode == http.StatusBadRequest { log.Error().Err(err).Msgf("%d Bad Request: %s", statusCode, resp) } else { - log.Debug().Msgf("%s %s Error: %d %s", entry.method, 
entry.url, statusCode, resp) + log.Error().Msgf("%s %s: %d %s.", entry.method, entry.url, statusCode, resp) } success = false } diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 1fe6d2e..5ec9a0c 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -35,7 +35,7 @@ func InitSession() *Session { caCertPool := x509.NewCertPool() caCert, err := os.ReadFile(config.GlobalSettings.CaCert) if err != nil { - log.Fatal().Err(err).Msg("Failed to read CA certificate") + log.Fatal().Err(err).Msg("Failed to read CA certificate.") } caCertPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caCertPool @@ -65,12 +65,12 @@ func (session *Session) CheckSession(ctx context.Context) bool { case <-time.After(timeout): resp, statusCode, err := session.Get(checkSessionURL, 5) if err != nil || statusCode != http.StatusOK { - log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds", config.GlobalSettings.ServerURL, int(timeout.Seconds())) + log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds.", config.GlobalSettings.ServerURL, int(timeout.Seconds())) } else { var response map[string]interface{} err = json.Unmarshal(resp, &response) if err != nil { - log.Debug().Err(err).Msgf("Failed to unmarshal JSON, will try again in %ds", int(timeout.Seconds())) + log.Debug().Err(err).Msgf("Failed to unmarshal JSON, will try again in %ds.", int(timeout.Seconds())) } else { if commissioned, ok := response["commissioned"].(bool); ok { return commissioned diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 102191f..78d374c 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -47,7 +47,7 @@ func getPlatformLike() { log.Fatal().Msgf("Platform %s not supported.", platformInfo.Platform) } default: - log.Fatal().Msgf("unsupported os: %s", runtime.GOOS) + log.Fatal().Msgf("Unsupported os: %s.", runtime.GOOS) } } From 5142763e45fb61862e0b7b46b9c0dce87db3aebb Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 2 Apr 2025 
11:52:54 +0900 Subject: [PATCH 278/364] Update list command's response Add PermissionOctal, which represents permissions in octal format, to list command response. Add PermissionString, which represents permissions in string format, to list command response. Add Owner to list command response to display the file/folder ownership user. Add Group to list command response to display the file/folder ownership group. --- pkg/runner/ftp.go | 82 +++++++++++++++++++++++++++++++++++++++-- pkg/runner/ftp_types.go | 22 ++++++----- 2 files changed, 91 insertions(+), 13 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index a19aaeb..ffa752e 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -6,9 +6,11 @@ import ( "fmt" "net/http" "os" + "os/user" "path/filepath" "strconv" "strings" + "syscall" "github.com/alpacanetworks/alpamon/pkg/logger" "github.com/alpacanetworks/alpamon/pkg/utils" @@ -230,12 +232,60 @@ func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden b continue } + permString := utils.FormatPermissions(info.Mode()) + permOctal := fmt.Sprintf("%o", info.Mode().Perm()) + + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + errChild := CommandResult{ + Name: entry.Name(), + Path: fullPath, + Message: "Failed to get system stat information", + } + _, errChild.Code = GetFtpErrorCode(List, errChild) + result.Children = append(result.Children, errChild) + + continue + } + + uid := fmt.Sprintf("%d", stat.Uid) + gid := fmt.Sprintf("%d", stat.Gid) + owner, err := user.LookupId(uid) + if err != nil { + errChild := CommandResult{ + Name: entry.Name(), + Path: fullPath, + Message: err.Error(), + } + _, errChild.Code = GetFtpErrorCode(List, errChild) + result.Children = append(result.Children, errChild) + + continue + } + + group, err := user.LookupGroupId(gid) + if err != nil { + errChild := CommandResult{ + Name: entry.Name(), + Path: fullPath, + Message: err.Error(), + } + _, errChild.Code = GetFtpErrorCode(List, errChild) + 
result.Children = append(result.Children, errChild) + + continue + } + modTime := info.ModTime() child := CommandResult{ - Name: entry.Name(), - Path: fullPath, - Code: returnCodes[List].Success, - ModTime: &modTime, + Name: entry.Name(), + Path: fullPath, + Code: returnCodes[List].Success, + ModTime: &modTime, + PermissionString: permString, + PermissionOctal: permOctal, + Owner: owner.Username, + Group: group.Name, } if entry.IsDir() { @@ -265,6 +315,30 @@ func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden b modTime := dirInfo.ModTime() result.ModTime = &modTime result.Code = returnCodes[List].Success + result.PermissionString = utils.FormatPermissions(dirInfo.Mode()) + result.PermissionOctal = fmt.Sprintf("%o", dirInfo.Mode().Perm()) + + stat, ok := dirInfo.Sys().(*syscall.Stat_t) + if !ok { + result.Message = "Failed to get system stat information" + } else { + uid := fmt.Sprintf("%d", stat.Uid) + gid := fmt.Sprintf("%d", stat.Gid) + owner, err := user.LookupId(uid) + if err != nil { + result.Message = err.Error() + _, result.Code = GetFtpErrorCode(List, result) + } + + group, err := user.LookupGroupId(gid) + if err != nil { + result.Message = err.Error() + _, result.Code = GetFtpErrorCode(List, result) + } + + result.Owner = owner.Username + result.Group = group.Name + } } return result, nil diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 6b18843..ee7819b 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -64,15 +64,19 @@ type FtpResult struct { } type CommandResult struct { - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - Path string `json:"path,omitempty"` - Dst string `json:"dst,omitempty"` - Code int `json:"code,omitempty"` - Size int64 `json:"size,omitempty"` - Children []CommandResult `json:"children,omitempty"` - ModTime *time.Time `json:"mod_time,omitempty"` - Message string `json:"message,omitempty"` + Name string `json:"name,omitempty"` + Type string 
`json:"type,omitempty"` + Path string `json:"path,omitempty"` + Dst string `json:"dst,omitempty"` + Code int `json:"code,omitempty"` + Size int64 `json:"size,omitempty"` + Children []CommandResult `json:"children,omitempty"` + ModTime *time.Time `json:"mod_time,omitempty"` + Message string `json:"message,omitempty"` + PermissionString string `json:"permission_str,omitempty"` + PermissionOctal string `json:"permission_octal,omitempty"` + Owner string `json:"owner,omitempty"` + Group string `json:"group,omitempty"` } type returnCode struct { From dfd862c86054eaa0f7f4779cd791b26896fb2e4f Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 2 Apr 2025 11:54:27 +0900 Subject: [PATCH 279/364] Add FormatPermissions() Implement FormatPermissions() to convert file/folder permissions represented in octal to string format. --- pkg/utils/fs.go | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/pkg/utils/fs.go b/pkg/utils/fs.go index a457fe5..ed98754 100644 --- a/pkg/utils/fs.go +++ b/pkg/utils/fs.go @@ -78,3 +78,43 @@ func CopyDir(src, dst string) error { return nil } + +func FormatPermissions(mode os.FileMode) string { + permissions := []byte{'-', '-', '-', '-', '-', '-', '-', '-', '-', '-'} + + if mode.IsDir() { + permissions[0] = 'd' + } + + rwxBits := []os.FileMode{0400, 0200, 0100, 0040, 0020, 0010, 0004, 0002, 0001} + rwxChars := []byte{'r', 'w', 'x'} + + for i, bit := range rwxBits { + if mode&bit != 0 { + permissions[i+1] = rwxChars[i%3] + } + } + + specialBits := []struct { + mask os.FileMode + position int + execPos int + char byte + }{ + {os.ModeSetuid, 3, 3, 's'}, + {os.ModeSetgid, 6, 6, 's'}, + {os.ModeSticky, 9, 9, 't'}, + } + + for _, sp := range specialBits { + if mode&sp.mask != 0 { + if permissions[sp.execPos] == 'x' { + permissions[sp.position] = sp.char + } else { + permissions[sp.position] = sp.char - ('x' - 'X') + } + } + } + + return string(permissions) +} From 76a6cde1908d25c5f0b9b716b985aa2f5ba0b07d Mon Sep 17 
00:00:00 2001 From: Apdul0329 Date: Wed, 2 Apr 2025 14:50:47 +0900 Subject: [PATCH 280/364] Refactor listRecursive() Due to the expanding range of data presented in the list command, the size of listRecursive() has increased, diminishing its readability. To address this issue, common logic in listRecursive() was separated into functions to improve readability and maintainability. --- pkg/runner/ftp.go | 193 +++++++++++++++++----------------------------- pkg/utils/fs.go | 27 +++++++ 2 files changed, 99 insertions(+), 121 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index ffa752e..2c19a10 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -6,11 +6,9 @@ import ( "fmt" "net/http" "os" - "os/user" "path/filepath" "strconv" "strings" - "syscall" "github.com/alpacanetworks/alpamon/pkg/logger" "github.com/alpacanetworks/alpamon/pkg/utils" @@ -199,149 +197,102 @@ func (fc *FtpClient) listRecursive(path string, depth, current int, showHidden b entries, err := os.ReadDir(path) if err != nil { - errResult := CommandResult{ - Name: filepath.Base(path), - Path: path, - Message: err.Error(), - } - _, errResult.Code = GetFtpErrorCode(List, errResult) - - return errResult, nil + return fc.handleListErrorResult(path, err), nil } for _, entry := range entries { - if !showHidden && strings.HasPrefix(entry.Name(), ".") { - continue + child := fc.getDiretoryStructure(entry, path, depth, current, showHidden) + if child != nil { + result.Children = append(result.Children, *child) } + } - fullPath := filepath.Join(path, entry.Name()) - info, err := os.Lstat(fullPath) + dirInfo, err := os.Stat(path) + if err != nil { + result.Message = err.Error() + _, result.Code = GetFtpErrorCode(List, result) + } else { + modTime := dirInfo.ModTime() + permString, permOctal, owner, group, err := utils.GetFileInfo(dirInfo, path) if err != nil { - errChild := CommandResult{ - Name: entry.Name(), - Path: fullPath, - Message: err.Error(), - } - _, errChild.Code = 
GetFtpErrorCode(List, errChild) - result.Children = append(result.Children, errChild) - - continue - } - - if info.Mode()&os.ModeSymlink != 0 { - continue + result.Message = err.Error() + _, result.Code = GetFtpErrorCode(List, result) } - permString := utils.FormatPermissions(info.Mode()) - permOctal := fmt.Sprintf("%o", info.Mode().Perm()) - - stat, ok := info.Sys().(*syscall.Stat_t) - if !ok { - errChild := CommandResult{ - Name: entry.Name(), - Path: fullPath, - Message: "Failed to get system stat information", - } - _, errChild.Code = GetFtpErrorCode(List, errChild) - result.Children = append(result.Children, errChild) + result.PermissionString = permString + result.PermissionOctal = permOctal + result.Owner = owner + result.Group = group + result.ModTime = &modTime + result.Code = returnCodes[List].Success + } - continue - } + return result, nil +} - uid := fmt.Sprintf("%d", stat.Uid) - gid := fmt.Sprintf("%d", stat.Gid) - owner, err := user.LookupId(uid) - if err != nil { - errChild := CommandResult{ - Name: entry.Name(), - Path: fullPath, - Message: err.Error(), - } - _, errChild.Code = GetFtpErrorCode(List, errChild) - result.Children = append(result.Children, errChild) +func (fc *FtpClient) getDiretoryStructure(entry os.DirEntry, path string, depth, current int, showHidden bool) *CommandResult { + if !showHidden && strings.HasPrefix(entry.Name(), ".") { + return nil + } - continue - } + fullPath := filepath.Join(path, entry.Name()) + info, err := os.Lstat(fullPath) + if err != nil { + result := fc.handleListErrorResult(fullPath, err) - group, err := user.LookupGroupId(gid) - if err != nil { - errChild := CommandResult{ - Name: entry.Name(), - Path: fullPath, - Message: err.Error(), - } - _, errChild.Code = GetFtpErrorCode(List, errChild) - result.Children = append(result.Children, errChild) + return &result + } - continue - } + if info.Mode()&os.ModeSymlink != 0 { + return nil + } - modTime := info.ModTime() - child := CommandResult{ - Name: entry.Name(), 
- Path: fullPath, - Code: returnCodes[List].Success, - ModTime: &modTime, - PermissionString: permString, - PermissionOctal: permOctal, - Owner: owner.Username, - Group: group.Name, - } + permString, permOctal, owner, group, err := utils.GetFileInfo(info, fullPath) + if err != nil { + result := fc.handleListErrorResult(fullPath, err) - if entry.IsDir() { - child.Type = "folder" - if current < depth-1 { - childResult, err := fc.listRecursive(fullPath, depth, current+1, showHidden) - if err != nil { - result.Children = append(result.Children, childResult) - continue - } - child = childResult - } - } else { - child.Type = "file" - child.Code = returnCodes[List].Success - child.Size = info.Size() - } + return &result + } - result.Children = append(result.Children, child) + modTime := info.ModTime() + child := &CommandResult{ + Name: entry.Name(), + Path: fullPath, + Code: returnCodes[List].Success, + ModTime: &modTime, + PermissionString: permString, + PermissionOctal: permOctal, + Owner: owner, + Group: group, } - dirInfo, err := os.Stat(path) - if err != nil { - result.Message = err.Error() - _, result.Code = GetFtpErrorCode(List, result) - } else { - modTime := dirInfo.ModTime() - result.ModTime = &modTime - result.Code = returnCodes[List].Success - result.PermissionString = utils.FormatPermissions(dirInfo.Mode()) - result.PermissionOctal = fmt.Sprintf("%o", dirInfo.Mode().Perm()) - - stat, ok := dirInfo.Sys().(*syscall.Stat_t) - if !ok { - result.Message = "Failed to get system stat information" - } else { - uid := fmt.Sprintf("%d", stat.Uid) - gid := fmt.Sprintf("%d", stat.Gid) - owner, err := user.LookupId(uid) + if entry.IsDir() { + child.Type = "folder" + if current < depth-1 { + childResult, err := fc.listRecursive(fullPath, depth, current+1, showHidden) if err != nil { - result.Message = err.Error() - _, result.Code = GetFtpErrorCode(List, result) + return &childResult } + child = &childResult + } + } else { + child.Type = "file" + child.Code = 
returnCodes[List].Success + child.Size = info.Size() + } - group, err := user.LookupGroupId(gid) - if err != nil { - result.Message = err.Error() - _, result.Code = GetFtpErrorCode(List, result) - } + return child +} - result.Owner = owner.Username - result.Group = group.Name - } +func (fc *FtpClient) handleListErrorResult(path string, err error) CommandResult { + result := CommandResult{ + Name: filepath.Base(path), + Path: path, + Message: err.Error(), } + _, result.Code = GetFtpErrorCode(List, result) - return result, nil + return result } func (fc *FtpClient) mkd(path string) (CommandResult, error) { diff --git a/pkg/utils/fs.go b/pkg/utils/fs.go index ed98754..5f2fc7d 100644 --- a/pkg/utils/fs.go +++ b/pkg/utils/fs.go @@ -4,8 +4,11 @@ import ( "fmt" "io" "os" + "os/user" "path/filepath" + "strconv" "strings" + "syscall" ) func CopyFile(src, dst string) error { @@ -118,3 +121,27 @@ func FormatPermissions(mode os.FileMode) string { return string(permissions) } + +func GetFileInfo(info os.FileInfo, path string) (permString, permOctal, owner, group string, err error) { + permString = FormatPermissions(info.Mode()) + permOctal = fmt.Sprintf("%o", info.Mode().Perm()) + + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return "", "", "", "", fmt.Errorf("failed to get system stat information") + } + + uidStr := strconv.Itoa(int(stat.Uid)) + gidStr := strconv.Itoa(int(stat.Gid)) + + ownerInfo, err := user.LookupId(uidStr) + if err != nil { + return "", "", "", "", err + } + groupInfo, err := user.LookupGroupId(gidStr) + if err != nil { + return "", "", "", "", err + } + + return permString, permOctal, ownerInfo.Username, groupInfo.Name, nil +} From 3f846ed1df4b27bf8c302b12ae0aa98092f488d7 Mon Sep 17 00:00:00 2001 From: royroyee Date: Wed, 2 Apr 2025 15:29:26 +0900 Subject: [PATCH 281/364] Minor fix --- pkg/runner/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 19e6023..4694217 100644 
--- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -51,7 +51,7 @@ func (cr *CommandRunner) Run() { var exitCode int var result string - log.Debug().Msgf("Received command: %s> %s", cr.command.Shell, cr.command.Line) + log.Debug().Msgf("Received command: %s > %s", cr.command.Shell, cr.command.Line) start := time.Now() switch cr.command.Shell { From b98978c31a92f14255e3e62e50daa06a207c3de7 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 4 Apr 2025 13:34:57 +0900 Subject: [PATCH 282/364] Improve logger Write logic and export functions --- pkg/logger/logger.go | 114 ++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 56 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index b33984a..5d26fca 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "path/filepath" "strconv" "strings" "time" @@ -21,6 +22,36 @@ const ( recordURL = "/api/history/logs/" ) +type LogRecord struct { + Date string `json:"date"` + Level int `json:"level"` + Program string `json:"program"` + Path string `json:"path"` + Lineno int `json:"lineno"` + PID int `json:"pid"` + Msg string `json:"msg"` +} + +type ZerologEntry struct { + Level string `json:"level"` + Time string `json:"time"` + Caller string `json:"caller"` + Message string `json:"message"` +} + +type logRecordWriter struct{} + +// logRecordFileHandlers defines log level thresholds for specific files. +// Only files listed here will have their logs sent to the remote server. +// Logs from files not listed will be ignored entirely. +// Logs below the specified level for a listed file will also be ignored. 
+var logRecordFileHandlers = map[string]int{ + "command.go": 30, + "commit.go": 20, + "pty.go": 30, + "shell.go": 30, +} + func InitLogger() *os.File { fileName := fmt.Sprintf("%s/%s", logDir, logFileName) if _, err := os.Stat(logDir); os.IsNotExist(err) { @@ -67,70 +98,36 @@ func newPrettyWriter(out io.Writer) zerolog.ConsoleWriter { } } -type logRecord struct { - Date string `json:"date"` - Level int `json:"level"` - Program string `json:"program"` - Path string `json:"path"` - Lineno int `json:"lineno"` - PID int `json:"pid"` - Msg string `json:"msg"` -} - -type zerologEntry struct { - Level string `json:"level"` - Time string `json:"time"` - Caller string `json:"caller"` - Message string `json:"message"` -} - -type logRecordWriter struct{} - -// logRecordFileHandlers defines log level thresholds for specific files. -// Only files listed here will have their logs sent to the remote server. -// Logs from files not listed will be ignored entirely. -// Logs below the specified level for a listed file will also be ignored. 
-var logRecordFileHandlers = map[string]int{ - "command.go": 30, - "commit.go": 20, - "pty.go": 30, - "shell.go": 30, -} - func (w *logRecordWriter) Write(p []byte) (n int, err error) { - var entry zerologEntry + var entry ZerologEntry err = json.Unmarshal(p, &entry) if err != nil { - return n, err + return 0, err } - caller := entry.Caller - if caller == "" { - return len(p), nil + n = len(p) + if entry.Caller == "" { + return n, nil } - lineno := 0 - if parts := strings.Split(caller, ":"); len(parts) > 1 { - lineno, _ = strconv.Atoi(parts[1]) - } - - callerFileName := getCallerFileName(caller) + callerFileName, lineNo := ParseCaller(entry.Caller) levelThreshold, exists := logRecordFileHandlers[callerFileName] if !exists { - return len(p), nil + return n, nil } - if convertLevelToNumber(entry.Level) < levelThreshold { - return len(p), nil + level := ConvertLevelToNumber(entry.Level) + if level < levelThreshold { + return n, nil } - record := logRecord{ + record := LogRecord{ Date: entry.Time, - Level: convertLevelToNumber(entry.Level), + Level: level, Program: "alpamon", - Path: caller, - Lineno: lineno, + Path: entry.Caller, + Lineno: lineNo, PID: os.Getpid(), Msg: entry.Message, } @@ -139,12 +136,12 @@ func (w *logRecordWriter) Write(p []byte) (n int, err error) { scheduler.Rqueue.Post(recordURL, record, 90, time.Time{}) }() - return len(p), nil + return n, nil } // alpacon-server uses Python's logging package, which has different log levels from zerolog. // This function maps zerolog log levels to Python logging levels. 
-func convertLevelToNumber(level string) int { +func ConvertLevelToNumber(level string) int { switch level { case "fatal": return 50 // CRITICAL, FATAL @@ -161,12 +158,17 @@ func convertLevelToNumber(level string) int { } } -func getCallerFileName(caller string) string { - parts := strings.Split(caller, "/") +func ParseCaller(caller string) (fileName string, lineno int) { + parts := strings.Split(caller, ":") + fileName = "" + lineno = 0 if len(parts) > 0 { - fileWithLine := parts[len(parts)-1] - fileParts := strings.Split(fileWithLine, ":") - return fileParts[0] + fileName = filepath.Base(parts[0]) + } + if len(parts) > 1 { + if n, err := strconv.Atoi(parts[1]); err == nil { + lineno = n + } } - return "" + return fileName, lineno } From f759083a0656e04bda09654b27eae9d20eaa51fa Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 4 Apr 2025 14:24:36 +0900 Subject: [PATCH 283/364] Minor fix --- pkg/logger/logger.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 5d26fca..0f7e7cf 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -50,6 +50,7 @@ var logRecordFileHandlers = map[string]int{ "commit.go": 20, "pty.go": 30, "shell.go": 30, + "server.go": 40, // logger/server.go } func InitLogger() *os.File { @@ -69,16 +70,17 @@ func InitLogger() *os.File { var output io.Writer // In development, log to console; in production, log to file if version.Version == "dev" { - output = zerolog.MultiLevelWriter(newPrettyWriter(os.Stderr), recordWriter) + output = zerolog.MultiLevelWriter(PrettyWriter(os.Stderr), recordWriter) } else { - output = zerolog.MultiLevelWriter(newPrettyWriter(logFile), recordWriter) + output = zerolog.MultiLevelWriter(PrettyWriter(logFile), recordWriter) } log.Logger = zerolog.New(output).With().Timestamp().Caller().Logger() + return logFile } -func newPrettyWriter(out io.Writer) zerolog.ConsoleWriter { +func PrettyWriter(out io.Writer) zerolog.ConsoleWriter { 
return zerolog.ConsoleWriter{ Out: out, TimeFormat: time.RFC3339, @@ -133,6 +135,9 @@ func (w *logRecordWriter) Write(p []byte) (n int, err error) { } go func() { + if scheduler.Rqueue == nil { + return + } scheduler.Rqueue.Post(recordURL, record, 90, time.Time{}) }() From 2ad9691a1da096ffea5422bf89c0b452003183fd Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 4 Apr 2025 14:42:42 +0900 Subject: [PATCH 284/364] Add tcp log server to receive and process logs --- cmd/alpamon/command/root.go | 17 +++++-- pkg/logger/server.go | 98 +++++++++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 pkg/logger/server.go diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index eb006c4..dd139a2 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -53,6 +53,10 @@ func runAgent() { // Logger logFile := logger.InitLogger() + logServer := logger.NewLogServer() + if logServer != nil { + go logServer.StartLogServer() + } // platform utils.InitPlatform() @@ -104,12 +108,12 @@ func runAgent() { case <-wsClient.RestartChan: log.Info().Msg("Restart command received. Restarting... 
") cancel() - gracefulShutdown(metricCollector, wsClient, logFile, pidFilePath) + gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) restartAgent() return } - gracefulShutdown(metricCollector, wsClient, logFile, pidFilePath) + gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) } func restartAgent() { @@ -121,18 +125,23 @@ func restartAgent() { err = syscall.Exec(executable, os.Args, os.Environ()) if err != nil { - log.Error().Err(err).Msg("Failed to restart the program.") + log.Error().Err(err).Msg("Failed to restart the alpamon.") } } -func gracefulShutdown(collector *collector.Collector, wsClient *runner.WebsocketClient, logFile *os.File, pidPath string) { +func gracefulShutdown(collector *collector.Collector, wsClient *runner.WebsocketClient, logFile *os.File, logServer *logger.LogServer, pidPath string) { if collector != nil { collector.Stop() } if wsClient != nil { wsClient.Close() } + if logServer != nil { + logServer.Stop() + } + log.Debug().Msg("Bye.") + if logFile != nil { _ = logFile.Close() } diff --git a/pkg/logger/server.go b/pkg/logger/server.go new file mode 100644 index 0000000..69461c0 --- /dev/null +++ b/pkg/logger/server.go @@ -0,0 +1,98 @@ +package logger + +import ( + "encoding/binary" + "encoding/json" + "errors" + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "github.com/rs/zerolog/log" + "io" + "net" + "time" +) + +const ( + address = "0.0.0.0:9020" +) + +type LogServer struct { + listener net.Listener + shutDownChan chan struct{} +} + +func NewLogServer() *LogServer { + listener, err := net.Listen("tcp", address) + if err != nil { + log.Error().Err(err).Msgf("Log server startup failed: cannot bind to %s.", address) + return nil + } + + return &LogServer{ + listener: listener, + shutDownChan: make(chan struct{}), + } +} + +func (ls *LogServer) StartLogServer() { + log.Debug().Msgf("Started log server on %s", address) + + for { + select { + case <-ls.shutDownChan: + return + default: + 
conn, err := ls.listener.Accept() + if err != nil { + log.Error().Err(err).Msg("Failed to accept socket.") + continue + } + go ls.handleConnection(conn) + } + } +} + +func (ls *LogServer) handleConnection(conn net.Conn) { + for { + lengthBuf := make([]byte, 4) + _, err := io.ReadFull(conn, lengthBuf) + if err != nil { + if errors.Is(err, io.EOF) { + return // connection closed by client, terminating read loop + } + log.Warn().Err(err).Msg("Couldn't read message length from connection.") + return + } + + length := binary.BigEndian.Uint32(lengthBuf) + body := make([]byte, length) + _, err = io.ReadFull(conn, body) + if err != nil { + if errors.Is(err, io.EOF) { + return // connection closed by client, terminating read loop + } + log.Warn().Err(err).Msg("Failed to read log body.") + return + } + + var record LogRecord + err = json.Unmarshal(body, &record) + if err != nil { + log.Debug().Err(err).Msg("Failed to unmarshal log record.") + continue + } + + go ls.handleRecord(record) + } +} + +func (ls *LogServer) handleRecord(record LogRecord) { + if scheduler.Rqueue == nil { + return + } + scheduler.Rqueue.Post(recordURL, record, 90, time.Time{}) +} + +func (ls *LogServer) Stop() { + close(ls.shutDownChan) + _ = ls.listener.Close() +} From c9722aafc017eadb831e58028431713a03988892 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 4 Apr 2025 14:44:43 +0900 Subject: [PATCH 285/364] Minor fix --- cmd/alpamon/command/root.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index dd139a2..2b91e37 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -53,10 +53,6 @@ func runAgent() { // Logger logFile := logger.InitLogger() - logServer := logger.NewLogServer() - if logServer != nil { - go logServer.StartLogServer() - } // platform utils.InitPlatform() @@ -81,6 +77,12 @@ func runAgent() { // Reporter scheduler.StartReporters(session) + // Log server + logServer := 
logger.NewLogServer() + if logServer != nil { + go logServer.StartLogServer() + } + log.Info().Msg("alpamon initialized and running.") // Commit From 3d1651394dc5d4d0037f713054c43ac6dc7beb32 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 4 Apr 2025 14:49:36 +0900 Subject: [PATCH 286/364] Minor fix --- pkg/logger/server.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/logger/server.go b/pkg/logger/server.go index 69461c0..0786bcd 100644 --- a/pkg/logger/server.go +++ b/pkg/logger/server.go @@ -43,6 +43,9 @@ func (ls *LogServer) StartLogServer() { default: conn, err := ls.listener.Accept() if err != nil { + if errors.Is(err, net.ErrClosed) { + return + } log.Error().Err(err).Msg("Failed to accept socket.") continue } From e15ee7a2a62f51d763fd6edbf72e9abcf72624a0 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 4 Apr 2025 15:12:23 +0900 Subject: [PATCH 287/364] Prevent zerolog from logging internal write errors --- pkg/logger/logger.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 0f7e7cf..c446aa3 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -100,11 +100,12 @@ func PrettyWriter(out io.Writer) zerolog.ConsoleWriter { } } +// Note : Always return nil error to avoid zerolog internal error logs func (w *logRecordWriter) Write(p []byte) (n int, err error) { var entry ZerologEntry err = json.Unmarshal(p, &entry) if err != nil { - return 0, err + return 0, nil } n = len(p) From 480cd5c46655dbc53737921f76ccbf8016834a5a Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 7 Apr 2025 14:14:19 +0900 Subject: [PATCH 288/364] Minor fix Fix UID, GID's data type in FtpData. 
--- pkg/runner/ftp.go | 18 ++++++++++++++++-- pkg/runner/ftp_types.go | 4 ++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 2c19a10..9203751 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -465,10 +465,24 @@ func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { }, nil } -func (fc *FtpClient) chown(path string, uid, gid int) (CommandResult, error) { +func (fc *FtpClient) chown(path, uidStr, gidStr string) (CommandResult, error) { path = fc.parsePath(path) - err := os.Chown(path, uid, gid) + uid, err := strconv.Atoi(uidStr) + if err != nil { + return CommandResult{ + Message: err.Error(), + }, err + } + + gid, err := strconv.Atoi(gidStr) + if err != nil { + return CommandResult{ + Message: err.Error(), + }, err + } + + err = os.Chown(path, uid, gid) if err != nil { return CommandResult{ Message: err.Error(), diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index ee7819b..31458a5 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -47,8 +47,8 @@ type FtpData struct { Src string `json:"src,omitempty"` Dst string `json:"dst,omitempty"` Mode string `json:"mode,omitempty"` - UID int `json:"uid,omitempty"` - GID int `json:"gid,omitempty"` + UID string `json:"uid,omitempty"` + GID string `json:"gid,omitempty"` } type FtpContent struct { From 38efaebdacf32a9bad9cd302f9faf219b1b6d8d0 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 7 Apr 2025 14:47:16 +0900 Subject: [PATCH 289/364] Minor fix --- cmd/alpamon/command/root.go | 6 +++--- pkg/runner/client.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 2b91e37..962c48d 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -83,7 +83,7 @@ func runAgent() { go logServer.StartLogServer() } - log.Info().Msg("alpamon initialized and running.") + log.Info().Msgf("%s initialized and running.", 
name) // Commit runner.CommitAsync(session, commissioned) @@ -121,13 +121,13 @@ func runAgent() { func restartAgent() { executable, err := os.Executable() if err != nil { - log.Error().Err(err).Msg("Failed to get executable path.") + log.Error().Err(err).Msgf("Failed to restart the %s.", name) return } err = syscall.Exec(executable, os.Args, os.Environ()) if err != nil { - log.Error().Err(err).Msg("Failed to restart the alpamon.") + log.Error().Err(err).Msgf("Failed to restart the %s.", name) } } diff --git a/pkg/runner/client.go b/pkg/runner/client.go index e419d61..d48be0e 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -169,14 +169,14 @@ func (wc *WebsocketClient) CommandRequestHandler(message []byte) { err := json.Unmarshal(message, &content) if err != nil { - log.Error().Err(err).Msgf("Inappropriate message: %s.", string(message)) + log.Warn().Err(err).Msgf("Inappropriate message: %s.", string(message)) return } if content.Command.Data != "" { err = json.Unmarshal([]byte(content.Command.Data), &data) if err != nil { - log.Error().Err(err).Msgf("Inappropriate message: %s.", string(message)) + log.Warn().Err(err).Msgf("Inappropriate message: %s.", string(message)) return } } From 4870e1b0791abfc09c49fecf200f6a6082cd697f Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 7 Apr 2025 17:02:35 +0900 Subject: [PATCH 290/364] Add arm64 architecture support --- .github/workflows/release.yml | 18 ++++++++++++++++++ .goreleaser.yaml | 1 - 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1b66181..f568ab7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -72,6 +72,15 @@ jobs: packagecloud-distrib: any/any packagecloud-token: ${{ secrets.PACKAGECLOUD_TOKEN }} + - name: Upload ARM64 DEB to PackageCloud + uses: danielmundi/upload-packagecloud@v1 + with: + package-name: alpamon_${{ github.ref_name }}_linux_arm64.deb + packagecloud-username: alpacax + 
packagecloud-repo: alpamon + packagecloud-distrib: any/any + packagecloud-token: ${{ secrets.PACKAGECLOUD_TOKEN }} + - name: Upload AMD64 RPM to PackageCloud uses: danielmundi/upload-packagecloud@v1 with: @@ -80,3 +89,12 @@ jobs: packagecloud-repo: alpamon packagecloud-distrib: rpm_any/rpm_any packagecloud-token: ${{ secrets.PACKAGECLOUD_TOKEN }} + + - name: Upload ARM64 RPM to PackageCloud + uses: danielmundi/upload-packagecloud@v1 + with: + package-name: alpamon_${{ github.ref_name }}_linux_arm64.rpm + packagecloud-username: alpacax + packagecloud-repo: alpamon + packagecloud-distrib: rpm_any/rpm_any + packagecloud-token: ${{ secrets.PACKAGECLOUD_TOKEN }} \ No newline at end of file diff --git a/.goreleaser.yaml b/.goreleaser.yaml index b719ffa..e199cb7 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -20,7 +20,6 @@ builds: - linux goarch: - amd64 - - arm - arm64 checksum: From 6b8e7486c39399cc4ee68e50a3447a23e46a3f1f Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 7 Apr 2025 20:03:52 +0900 Subject: [PATCH 291/364] Add recursive option to chmod, chown Add recursive option to chmod and chown commands. 
--- pkg/runner/ftp.go | 62 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 6 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 9203751..3177a4f 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -152,9 +152,9 @@ func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (Command case Cp: return fc.cp(data.Src, data.Dst) case Chmod: - return fc.chmod(data.Path, data.Mode) + return fc.chmod(data.Path, data.Mode, data.Recursive) case Chown: - return fc.chown(data.Path, data.UID, data.GID) + return fc.chown(data.Path, data.UID, data.GID, data.Recursive) default: return CommandResult{}, fmt.Errorf("unknown FTP command: %s", command) } @@ -444,7 +444,7 @@ func (fc *FtpClient) cpFile(src, dst string) (CommandResult, error) { }, nil } -func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { +func (fc *FtpClient) chmod(path, mode string, recursive bool) (CommandResult, error) { path = fc.parsePath(path) fileMode, err := strconv.ParseUint(mode, 8, 32) if err != nil { @@ -453,7 +453,14 @@ func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { }, err } - err = os.Chmod(path, os.FileMode(fileMode)) + modePerm := os.FileMode(fileMode) + + if recursive { + err = fc.chmodRecursive(path, modePerm) + } else { + err = os.Chmod(path, modePerm) + } + if err != nil { return CommandResult{ Message: err.Error(), @@ -465,7 +472,26 @@ func (fc *FtpClient) chmod(path string, mode string) (CommandResult, error) { }, nil } -func (fc *FtpClient) chown(path, uidStr, gidStr string) (CommandResult, error) { +func (fc *FtpClient) chmodRecursive(path string, fileMode os.FileMode) error { + return filepath.WalkDir(path, func(p string, d os.DirEntry, err error) error { + if err != nil { + return err + } + + info, err := d.Info() + if err != nil { + return err + } + + if info.Mode()&os.ModeSymlink != 0 { + return nil + } + + return os.Chmod(p, fileMode) + }) +} + +func (fc *FtpClient) 
chown(path, uidStr, gidStr string, recursive bool) (CommandResult, error) { path = fc.parsePath(path) uid, err := strconv.Atoi(uidStr) @@ -482,7 +508,12 @@ func (fc *FtpClient) chown(path, uidStr, gidStr string) (CommandResult, error) { }, err } - err = os.Chown(path, uid, gid) + if recursive { + err = fc.chownRecursive(path, uid, gid) + } else { + err = os.Chown(path, uid, gid) + } + if err != nil { return CommandResult{ Message: err.Error(), @@ -493,3 +524,22 @@ func (fc *FtpClient) chown(path, uidStr, gidStr string) (CommandResult, error) { Message: fmt.Sprintf("Changed owner of %s to UID: %d, GID: %d", path, uid, gid), }, nil } + +func (fc *FtpClient) chownRecursive(path string, uid, gid int) error { + return filepath.WalkDir(path, func(p string, d os.DirEntry, err error) error { + if err != nil { + return err + } + + info, err := d.Info() + if err != nil { + return err + } + + if info.Mode()&os.ModeSymlink != 0 { + return nil + } + + return os.Chown(path, uid, gid) + }) +} From 038cd26bd32de8f0636059ca77860e79d2cfba50 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 7 Apr 2025 20:06:51 +0900 Subject: [PATCH 292/364] Minor fix Fix typo. --- pkg/runner/ftp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 3177a4f..f83f15a 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -540,6 +540,6 @@ func (fc *FtpClient) chownRecursive(path string, uid, gid int) error { return nil } - return os.Chown(path, uid, gid) + return os.Chown(p, uid, gid) }) } From 7f1fe63886de0713defb90430c5dbb42cc11a8b3 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 7 Apr 2025 20:17:06 +0900 Subject: [PATCH 293/364] Update chmod, chown result message Update Message of CommandResult to indicate that the change was applied recursively when recursive is true. 
--- pkg/runner/ftp.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index f83f15a..7221de5 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -455,7 +455,9 @@ func (fc *FtpClient) chmod(path, mode string, recursive bool) (CommandResult, er modePerm := os.FileMode(fileMode) + msg := "" if recursive { + msg = " recursively" err = fc.chmodRecursive(path, modePerm) } else { err = os.Chmod(path, modePerm) @@ -468,7 +470,7 @@ func (fc *FtpClient) chmod(path, mode string, recursive bool) (CommandResult, er } return CommandResult{ - Message: fmt.Sprintf("Changed permissions of %s to %o", path, fileMode), + Message: fmt.Sprintf("Changed permissions of %s to %o%s", path, fileMode, msg), }, nil } @@ -508,7 +510,9 @@ func (fc *FtpClient) chown(path, uidStr, gidStr string, recursive bool) (Command }, err } + msg := "" if recursive { + msg = " recursively" err = fc.chownRecursive(path, uid, gid) } else { err = os.Chown(path, uid, gid) @@ -521,7 +525,7 @@ func (fc *FtpClient) chown(path, uidStr, gidStr string, recursive bool) (Command } return CommandResult{ - Message: fmt.Sprintf("Changed owner of %s to UID: %d, GID: %d", path, uid, gid), + Message: fmt.Sprintf("Changed owner of %s to UID: %d, GID: %d%s", path, uid, gid, msg), }, nil } From 3d3bb3f2f4276f4b1e1af75c5d760c8ecc4ad923 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 8 Apr 2025 12:02:11 +0900 Subject: [PATCH 294/364] Delete unnecessary logic Delete unnecessary sync once. 
--- pkg/db/db.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index c617cd4..753cbd6 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "path/filepath" - "sync" "time" "github.com/alpacanetworks/alpamon/pkg/db/ent" @@ -62,18 +61,16 @@ func InitTestDB(path string) *ent.Client { os.Exit(1) } + sql.Register("sqlite3", &sqlite.Driver{}) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - var once sync.Once - once.Do(func() { - sql.Register("sqlite3", &sqlite.Driver{}) - err = RunMigration(dbFile.Name(), ctx) - if err != nil { - log.Error().Err(err).Msgf("failed to migrate test db: %v.", err) - os.Exit(1) - } - }) + err = RunMigration(dbFile.Name(), ctx) + if err != nil { + log.Error().Err(err).Msgf("failed to migrate test db: %v.", err) + os.Exit(1) + } dbManager := NewDBClientManager(dbFile.Name()) client, err := dbManager.GetClient() From fce6205dcd990a01c220fa3ef1faaf4d83608553 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 8 Apr 2025 12:03:32 +0900 Subject: [PATCH 295/364] Fix build-and-test.yaml Fix build-and-test.yaml to resolve SQLite lock issues. --- .github/workflows/build-and-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index ae4a3cf..44fe518 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -39,4 +39,4 @@ jobs: working-directory: ./cmd/alpamon - name: Run Tests - run: go test -v ./... + run: go test -v ./... -p 1 From 585dc50508d6a78b61e354e177221e0717535bef Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 8 Apr 2025 16:41:59 +0900 Subject: [PATCH 296/364] Refactor demote() Refactor demote() to test that process can change ownership using supplementary group. 
--- pkg/runner/shell.go | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index 015841d..6b56615 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -3,8 +3,6 @@ package runner import ( "context" "fmt" - "github.com/alpacanetworks/alpamon/pkg/utils" - "github.com/rs/zerolog/log" "os" "os/exec" "os/user" @@ -12,6 +10,9 @@ import ( "strings" "syscall" "time" + + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/rs/zerolog/log" ) func demote(username, groupname string) (*syscall.SysProcAttr, error) { @@ -47,12 +48,27 @@ func demote(username, groupname string) (*syscall.SysProcAttr, error) { return nil, err } + groupIds, err := usr.GroupIds() + if err != nil { + return nil, err + } + + groups := make([]uint32, 0, len(groupIds)) + for _, gidStr := range groupIds { + gidInt, err := strconv.Atoi(gidStr) + if err != nil { + return nil, err + } + groups = append(groups, uint32(gidInt)) + } + log.Debug().Msgf("Demote permission to match user: %s, group: %s.", username, groupname) return &syscall.SysProcAttr{ Credential: &syscall.Credential{ - Uid: uint32(uid), - Gid: uint32(gid), + Uid: uint32(uid), + Gid: uint32(gid), + Groups: groups, }, }, nil } From bda9a08fbf0581415885a8117183b8cc2b3b798f Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 8 Apr 2025 16:52:34 +0900 Subject: [PATCH 297/364] Add demoteFtp() Add demoteFtp() to lower the privileges of webftp processes because, unlike the existing demote(), it needs to include supplementary groups when creating webftp processes. 
--- pkg/runner/command.go | 2 +- pkg/runner/shell.go | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 4694217..3d64341 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -612,7 +612,7 @@ func (cr *CommandRunner) validateData(data interface{}) error { } func (cr *CommandRunner) openFtp(data openFtpData) error { - sysProcAttr, err := demote(data.Username, data.Groupname) + sysProcAttr, err := demoteFtp(data.Username, data.Groupname) if err != nil { log.Debug().Err(err).Msg("Failed to get demote permission") diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index 6b56615..82aa9bf 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -48,6 +48,49 @@ func demote(username, groupname string) (*syscall.SysProcAttr, error) { return nil, err } + log.Debug().Msgf("Demote permission to match user: %s, group: %s.", username, groupname) + + return &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + }, + }, nil +} + +func demoteFtp(username, groupname string) (*syscall.SysProcAttr, error) { + currentUid := os.Getuid() + + if username == "" || groupname == "" { + log.Debug().Msg("No username or groupname provided, running as the current user.") + return nil, nil + } + + if currentUid != 0 { + log.Warn().Msg("Alpamon is not running as root. 
Falling back to the current user.") + return nil, nil + } + + usr, err := user.Lookup(username) + if err != nil { + return nil, fmt.Errorf("there is no corresponding %s username in this server", username) + } + + group, err := user.LookupGroup(groupname) + if err != nil { + return nil, fmt.Errorf("there is no corresponding %s groupname in this server", groupname) + } + + uid, err := strconv.Atoi(usr.Uid) + if err != nil { + return nil, err + } + + gid, err := strconv.Atoi(group.Gid) + if err != nil { + return nil, err + } + groupIds, err := usr.GroupIds() if err != nil { return nil, err From 3f3aeca7ef73279d70d574f37fe596d7f351c22a Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 10 Apr 2025 14:59:56 +0900 Subject: [PATCH 298/364] Write config directly to target file without using temp file --- cmd/alpamon/command/setup/setup.go | 20 +++++++------------- pkg/logger/server.go | 2 +- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 0771764..15afa81 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -93,26 +93,20 @@ func writeConfig() error { return fmt.Errorf("environment variables ALPACON_URL, PLUGIN_ID, PLUGIN_KEY must be set") } - tmpFile, err := os.CreateTemp("", fmt.Sprintf("%s.conf", name)) - if err != nil { - return fmt.Errorf("failed to create temp file: %v", err) - } - defer func() { _ = tmpFile.Close() }() - - err = tmpl.Execute(tmpFile, configData) + err = os.MkdirAll(filepath.Dir(configTarget), 0755) if err != nil { - _ = os.Remove(tmpFile.Name()) - return fmt.Errorf("failed to execute template: %v", err) + return fmt.Errorf("failed to create config directory: %v", err) } - err = os.MkdirAll(filepath.Dir(configTarget), 0755) + targetFile, err := os.Create(configTarget) if err != nil { - return fmt.Errorf("failed to create config directory: %v", err) + return fmt.Errorf("failed to create target config file: %v", err) } + 
defer func() { _ = targetFile.Close() }() - err = os.Rename(tmpFile.Name(), configTarget) + err = tmpl.Execute(targetFile, configData) if err != nil { - return fmt.Errorf("failed to move temp file to target: %v", err) + return fmt.Errorf("failed to execute template into target file: %v", err) } return nil diff --git a/pkg/logger/server.go b/pkg/logger/server.go index 0786bcd..e7c6d05 100644 --- a/pkg/logger/server.go +++ b/pkg/logger/server.go @@ -34,7 +34,7 @@ func NewLogServer() *LogServer { } func (ls *LogServer) StartLogServer() { - log.Debug().Msgf("Started log server on %s", address) + log.Debug().Msgf("Started log server on %s.", address) for { select { From ab45ddb28a8d90cea3c2e8f5e3ee5772ae17620b Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 10 Apr 2025 15:17:54 +0900 Subject: [PATCH 299/364] Fix release.yml --- .github/workflows/release.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f568ab7..bcc4f1f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,12 +41,24 @@ jobs: name: alpamon_${{ github.ref_name }}_linux_amd64.deb path: dist/alpamon_${{ github.ref_name }}_linux_amd64.deb + - name: Upload ARM64 DEB artifacts + uses: actions/upload-artifact@v4 + with: + name: alpamon_${{ github.ref_name }}_linux_arm64.deb + path: dist/alpamon_${{ github.ref_name }}_linux_arm64.deb + - name: Upload AMD64 RPM artifacts uses: actions/upload-artifact@v4 with: name: alpamon_${{ github.ref_name }}_linux_amd64.rpm path: dist/alpamon_${{ github.ref_name }}_linux_amd64.rpm + - name: Upload ARM64 RPM artifacts + uses: actions/upload-artifact@v4 + with: + name: alpamon_${{ github.ref_name }}_linux_arm64.rpm + path: dist/alpamon_${{ github.ref_name }}_linux_arm64.rpm + packagecloud-deploy: needs: [goreleaser] runs-on: ubuntu-latest @@ -56,11 +68,21 @@ jobs: with: name: alpamon_${{ github.ref_name }}_linux_amd64.deb + - name: Download ARM64 DEB 
Artifacts + uses: actions/download-artifact@v4 + with: + name: alpamon_${{ github.ref_name }}_linux_arm64.deb + - name: Download AMD64 RPM Artifacts uses: actions/download-artifact@v4 with: name: alpamon_${{ github.ref_name }}_linux_amd64.rpm + - name: Download ARM64 RPM Artifacts + uses: actions/download-artifact@v4 + with: + name: alpamon_${{ github.ref_name }}_linux_arm64.rpm + - run: ls - name: Upload AMD64 DEB to PackageCloud From a262af1193b4d3d3eb130f54ce5495c5fdde0908 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 11 Apr 2025 09:52:11 +0900 Subject: [PATCH 300/364] Minor fix --- pkg/runner/shell.go | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index 82aa9bf..d83fafc 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -150,18 +150,13 @@ func runCmdWithOutput(args []string, username, groupname string, env map[string] defer cancel() var cmd *exec.Cmd - if username == "root" { - if containsShellOperator(args) { - cmd = exec.CommandContext(ctx, "bash", "-c", strings.Join(args, " ")) - } else { - cmd = exec.CommandContext(ctx, args[0], args[1:]...) - } + if containsShellOperator(args) { + cmd = exec.CommandContext(ctx, "bash", "-c", strings.Join(args, " ")) } else { - if containsShellOperator(args) { - cmd = exec.CommandContext(ctx, "bash", "-c", strings.Join(args, " ")) - } else { - cmd = exec.CommandContext(ctx, args[0], args[1:]...) - } + cmd = exec.CommandContext(ctx, args[0], args[1:]...) 
+ } + + if username != "root" { sysProcAttr, err := demote(username, groupname) if err != nil { log.Error().Err(err).Msg("Failed to demote user.") From 4547b9cce2dd3154f26c57a2f7bb0efa2b0ed9c3 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 11 Apr 2025 10:27:51 +0900 Subject: [PATCH 301/364] Fix capture both stdout and stderr in runCmdWithOutput --- pkg/runner/shell.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/shell.go b/pkg/runner/shell.go index d83fafc..090a6e6 100644 --- a/pkg/runner/shell.go +++ b/pkg/runner/shell.go @@ -178,10 +178,10 @@ func runCmdWithOutput(args []string, username, groupname string, env map[string] cmd.Dir = usr.HomeDir log.Debug().Msgf("Executing command as user '%s' (group: '%s') -> '%s'", username, groupname, strings.Join(args, " ")) - output, err := cmd.Output() + output, err := cmd.CombinedOutput() if err != nil { if exitError, ok := err.(*exec.ExitError); ok { - return exitError.ExitCode(), err.Error() + return exitError.ExitCode(), string(output) } return -1, err.Error() } From 63d9f479a8edccc8968248b35a1da6a8a5930436 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 18 Apr 2025 17:14:22 +0900 Subject: [PATCH 302/364] Refactor process of initializing collector Refactor alpamon to prevent termination when collector initialization fails. 
--- pkg/collector/collector.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index 6d6781e..3f396f5 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "net/http" - "os" "sync" "time" @@ -52,7 +51,7 @@ func InitCollector(session *session.Session, client *ent.Client) *Collector { conf, err := fetchConfig(session) if err != nil { log.Error().Err(err).Msg("Failed to fetch collector config.") - os.Exit(1) + return nil } checkFactory := &check.DefaultCheckFactory{} @@ -69,7 +68,7 @@ func InitCollector(session *session.Session, client *ent.Client) *Collector { collector, err := NewCollector(args) if err != nil { log.Error().Err(err).Msg("Failed to create collector.") - os.Exit(1) + return nil } return collector @@ -81,7 +80,7 @@ func fetchConfig(session *session.Session) ([]collectConf, error) { return nil, err } if statusCode != http.StatusOK { - return nil, fmt.Errorf("failed to get collection config: %d status code.", statusCode) + return nil, fmt.Errorf("failed to get collection config: %d status code", statusCode) } var conf []collectConf @@ -135,6 +134,8 @@ func (c *Collector) initTasks(args collectorArgs) error { } func (c *Collector) Start() { + log.Debug().Msg("Started collector") + c.ctx, c.cancel = context.WithCancel(context.Background()) go c.scheduler.Start(c.ctx, c.buffer.Capacity) From e8297825cf3b40e00f6b6188834c6d856064b030 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 18 Apr 2025 17:19:32 +0900 Subject: [PATCH 303/364] Add CollectorRestartChan to WebsocketClient Add CollectorRestartChan to WebsocketClient to receive collector restart commands. Implement RestartCollector() to send a signal to CollectorRestartChan upon receiving collector restart command. 
--- pkg/runner/client.go | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index d48be0e..16b64a6 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -4,15 +4,16 @@ import ( "context" "encoding/json" "fmt" + "net/http" + "os" + "time" + "github.com/alpacanetworks/alpamon/pkg/config" "github.com/alpacanetworks/alpamon/pkg/scheduler" "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/cenkalti/backoff" "github.com/gorilla/websocket" "github.com/rs/zerolog/log" - "net/http" - "os" - "time" ) const ( @@ -26,11 +27,12 @@ const ( ) type WebsocketClient struct { - Conn *websocket.Conn - requestHeader http.Header - apiSession *scheduler.Session - RestartChan chan struct{} - ShutDownChan chan struct{} + Conn *websocket.Conn + requestHeader http.Header + apiSession *scheduler.Session + RestartChan chan struct{} + ShutDownChan chan struct{} + CollectorRestartChan chan struct{} } func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { @@ -41,10 +43,11 @@ func NewWebsocketClient(session *scheduler.Session) *WebsocketClient { } return &WebsocketClient{ - requestHeader: headers, - apiSession: session, - RestartChan: make(chan struct{}), - ShutDownChan: make(chan struct{}), + requestHeader: headers, + apiSession: session, + RestartChan: make(chan struct{}), + ShutDownChan: make(chan struct{}), + CollectorRestartChan: make(chan struct{}, 1), } } @@ -159,6 +162,14 @@ func (wc *WebsocketClient) Restart() { close(wc.RestartChan) } +func (wc *WebsocketClient) RestartCollector() { + select { + case wc.CollectorRestartChan <- struct{}{}: + default: + log.Info().Msg("Collector restart already requested, skipping duplicate signal.") + } +} + func (wc *WebsocketClient) CommandRequestHandler(message []byte) { var content Content var data CommandData From bc989d24a8d6bb9f3d45a902ac0f06a325f7c53b Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 18 Apr 2025 
17:22:26 +0900 Subject: [PATCH 304/364] Refactor to allow for collector restarts Add logic to restart the collector when a restart request signal is received on wsClient.CollectorRestartChan. To implement this, add for-select block. --- cmd/alpamon/command/root.go | 50 ++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/cmd/alpamon/command/root.go b/cmd/alpamon/command/root.go index 962c48d..ec2ae44 100644 --- a/cmd/alpamon/command/root.go +++ b/cmd/alpamon/command/root.go @@ -3,6 +3,10 @@ package command import ( "context" "fmt" + "os" + "os/signal" + "syscall" + "github.com/alpacanetworks/alpamon/cmd/alpamon/command/ftp" "github.com/alpacanetworks/alpamon/cmd/alpamon/command/setup" "github.com/alpacanetworks/alpamon/pkg/collector" @@ -16,9 +20,6 @@ import ( "github.com/alpacanetworks/alpamon/pkg/version" "github.com/rs/zerolog/log" "github.com/spf13/cobra" - "os" - "os/signal" - "syscall" ) const ( @@ -93,29 +94,38 @@ func runAgent() { // Collector metricCollector := collector.InitCollector(session, client) - metricCollector.Start() + if metricCollector != nil { + metricCollector.Start() + } // Websocket Client wsClient := runner.NewWebsocketClient(session) go wsClient.RunForever(ctx) - select { - case <-ctx.Done(): - log.Info().Msg("Received termination signal. Shutting down...") - break - case <-wsClient.ShutDownChan: - log.Info().Msg("Shutdown command received. Shutting down...") - cancel() - break - case <-wsClient.RestartChan: - log.Info().Msg("Restart command received. Restarting... ") - cancel() - gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) - restartAgent() - return + for { + select { + case <-ctx.Done(): + log.Info().Msg("Received termination signal. Shutting down...") + gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) + return + case <-wsClient.ShutDownChan: + log.Info().Msg("Shutdown command received. 
Shutting down...") + cancel() + gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) + return + case <-wsClient.RestartChan: + log.Info().Msg("Restart command received. Restarting...") + cancel() + gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) + restartAgent() + return + case <-wsClient.CollectorRestartChan: + log.Info().Msg("Collector restart command received. Restarting Collector...") + metricCollector.Stop() + metricCollector = collector.InitCollector(session, client) + metricCollector.Start() + } } - - gracefulShutdown(metricCollector, wsClient, logFile, logServer, pidFilePath) } func restartAgent() { From 0878081a6ed18657dd276f0dac695bc704435cb1 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 18 Apr 2025 17:23:53 +0900 Subject: [PATCH 305/364] Add restartcoll command case Add restartcoll command case to trigger collector restarts. --- pkg/runner/command.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 3d64341..9268574 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -218,6 +218,11 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { } return cr.handleShellCmd(cmd, "root", "root", nil) + case "restartcoll": + log.Info().Msg("Restart collector.") + cr.wsClient.RestartCollector() + + return 0, "Collector will be restarted." case "help": helpMessage := ` Available commands: From 0224046a633ccd6298d36be11a306900abd28e75 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 18 Apr 2025 17:25:34 +0900 Subject: [PATCH 306/364] Fix Transporter retry logic Modify Transporter to not retry metric transmissions when 400 error occurs. 
--- pkg/collector/transporter/transporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/collector/transporter/transporter.go b/pkg/collector/transporter/transporter.go index 7de2c5f..1b095ad 100644 --- a/pkg/collector/transporter/transporter.go +++ b/pkg/collector/transporter/transporter.go @@ -55,7 +55,7 @@ func (t *Transporter) Send(data base.MetricData) error { return nil } else { if statusCode == http.StatusBadRequest { - return fmt.Errorf("%d Bad Request: %s", statusCode, resp) + return nil } else { return fmt.Errorf("%s %s Error: %d %s", http.MethodPost, url, statusCode, resp) } From ed81710b455c48d1252c21503ee5794817d5413c Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 21 Apr 2025 14:03:28 +0900 Subject: [PATCH 307/364] Improve WebSocketClient.Close to gracefully handle close handshake --- pkg/runner/client.go | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index d48be0e..7e67d00 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -142,12 +142,36 @@ func (wc *WebsocketClient) CloseAndReconnect(ctx context.Context) { // Do not close quitChan, as the purpose here is to disconnect the WebSocket, // not to terminate RunForever. 
func (wc *WebsocketClient) Close() { - if wc.Conn != nil { - err := wc.Conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if wc.Conn == nil { + return + } + + deadline := time.Now().Add(5 * time.Second) + err := wc.Conn.WriteControl( + websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), + deadline, + ) + + if err != nil { + log.Debug().Err(err).Msg("Failed to write close message to websocket.") + return + } + + _ = wc.Conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + for { + _, _, err = wc.Conn.NextReader() + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + break + } if err != nil { - log.Debug().Err(err).Msg("Failed to write close message to websocket.") + break } - _ = wc.Conn.Close() + } + + err = wc.Conn.Close() + if err != nil { + log.Debug().Err(err).Msg("Failed to close websocket connection.") } } From 2f62b27932ab96ee6cdc8e379f9b8b96c8718d6b Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 21 Apr 2025 14:09:06 +0900 Subject: [PATCH 308/364] Improve PtyClient.Close to gracefully handle close handshake --- pkg/runner/pty.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 121e0b6..0459f31 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -15,6 +15,7 @@ import ( "os/exec" "strconv" "strings" + "time" ) type PtyClient struct { @@ -84,7 +85,6 @@ func (pc *PtyClient) RunPtyBackground() { pc.ptmx, err = pty.StartWithSize(pc.cmd, initialSize) if err != nil { log.Error().Err(err).Msg("Failed to start pty.") - pc.close() return } @@ -197,8 +197,20 @@ func (pc *PtyClient) close() { } if pc.conn != nil { - _ = pc.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - _ = pc.conn.Close() + err := pc.conn.WriteControl( + websocket.CloseMessage, + 
websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), + time.Now().Add(5*time.Second), + ) + if err != nil { + log.Debug().Err(err).Msg("Failed to write close message to pty websocket.") + return + } + + err = pc.conn.Close() + if err != nil { + log.Debug().Err(err).Msg("Failed to close pty websocket connection.") + } } if terminals[pc.sessionID] != nil { From 577bf7ce55afd4a4a18fb0feafdeea301d7ca66e Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 21 Apr 2025 14:09:33 +0900 Subject: [PATCH 309/364] Merge restartcoll command to restart command Merge the 'restartcoll' command into the 'restart' command, considering the future addition of other targets besides the collector. --- pkg/runner/command.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 9268574..f577f3d 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -181,11 +181,24 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { } return 1, "Invalid session ID" case "restart": - time.AfterFunc(1*time.Second, func() { - cr.wsClient.Restart() - }) + target := "alpamon" + message := "Alpamon will restart in 1 second." + if len(args) >= 2 { + target = args[1] + } + + switch target { + case "collector": + log.Info().Msg("Restart collector.") + cr.wsClient.RestartCollector() + message = "Collector will be restarted." + default: + time.AfterFunc(1*time.Second, func() { + cr.wsClient.Restart() + }) + } - return 0, "Alpamon will restart in 1 second." 
+ return 0, message case "quit": time.AfterFunc(1*time.Second, func() { cr.wsClient.ShutDown() From 22598600942a8f232f9b0b852fb456dc149ab071 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 22 Apr 2025 11:13:57 +0900 Subject: [PATCH 310/364] Add pty websocket recovery feature --- pkg/runner/client.go | 3 +- pkg/runner/command.go | 15 ++-- pkg/runner/command_types.go | 16 ++-- pkg/runner/pty.go | 147 ++++++++++++++++++++++++++++-------- 4 files changed, 136 insertions(+), 45 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 7e67d00..393e62e 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -152,7 +152,6 @@ func (wc *WebsocketClient) Close() { websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), deadline, ) - if err != nil { log.Debug().Err(err).Msg("Failed to write close message to websocket.") return @@ -212,7 +211,7 @@ func (wc *WebsocketClient) CommandRequestHandler(message []byte) { 10, time.Time{}, ) - commandRunner := NewCommandRunner(wc, content.Command, data) + commandRunner := NewCommandRunner(wc, wc.apiSession, content.Command, data) go commandRunner.Run() case "quit": log.Debug().Msgf("Quit requested for reason: %s.", content.Reason) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 3d64341..8c1bc86 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -32,18 +32,19 @@ const ( fileUploadTimeout = 60 * 10 ) -func NewCommandRunner(wsClient *WebsocketClient, command Command, data CommandData) *CommandRunner { +func NewCommandRunner(wsClient *WebsocketClient, apiSession *scheduler.Session, command Command, data CommandData) *CommandRunner { var name string if command.ID != "" { name = fmt.Sprintf("CommandRunner-%s", strings.Split(command.ID, "-")[0]) } return &CommandRunner{ - name: name, - command: command, - data: data, - wsClient: wsClient, - validator: validator.New(), + name: name, + command: command, + data: data, + wsClient: wsClient, + apiSession: apiSession, + validator: 
validator.New(), } } @@ -148,7 +149,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return 1, fmt.Sprintf("openpty: Not enough information. %s", err.Error()) } - ptyClient := NewPtyClient(cr.data) + ptyClient := NewPtyClient(cr.data, cr.apiSession) go ptyClient.RunPtyBackground() return 0, "Spawned a pty terminal." diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 9c076e7..0a3d363 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -1,6 +1,9 @@ package runner -import "gopkg.in/go-playground/validator.v9" +import ( + "github.com/alpacanetworks/alpamon/pkg/scheduler" + "gopkg.in/go-playground/validator.v9" +) type Content struct { Query string `json:"query"` @@ -54,11 +57,12 @@ type CommandData struct { } type CommandRunner struct { - name string - command Command - wsClient *WebsocketClient - data CommandData - validator *validator.Validate + name string + command Command + wsClient *WebsocketClient + apiSession *scheduler.Session + data CommandData + validator *validator.Validate } // Structs defining the required input data for command validation purposes. 
// diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 0459f31..6bc7871 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -2,9 +2,11 @@ package runner import ( "context" + "encoding/json" "errors" "fmt" "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/alpacanetworks/alpamon/pkg/scheduler" "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/creack/pty" "github.com/gorilla/websocket" @@ -15,11 +17,13 @@ import ( "os/exec" "strconv" "strings" + "sync/atomic" "time" ) type PtyClient struct { conn *websocket.Conn + apiSession *scheduler.Session requestHeader http.Header cmd *exec.Cmd ptmx *os.File @@ -30,21 +34,25 @@ type PtyClient struct { groupname string homeDirectory string sessionID string + isRecovering atomic.Bool // default : false } +const reissuePtyWebsocketURL = "/api/websh/pty-channels/" + var terminals map[string]*PtyClient func init() { terminals = make(map[string]*PtyClient) } -func NewPtyClient(data CommandData) *PtyClient { +func NewPtyClient(data CommandData, apiSession *scheduler.Session) *PtyClient { headers := http.Header{ "Authorization": {fmt.Sprintf(`id="%s", key="%s"`, config.GlobalSettings.ID, config.GlobalSettings.Key)}, "Origin": {config.GlobalSettings.ServerURL}, } return &PtyClient{ + apiSession: apiSession, requestHeader: headers, url: strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + data.URL, rows: data.Rows, @@ -56,50 +64,69 @@ func NewPtyClient(data CommandData) *PtyClient { } } -func (pc *PtyClient) RunPtyBackground() { - log.Debug().Msg("Opening websocket for pty session.") - +func (pc *PtyClient) initializePtySession() error { var err error pc.conn, _, err = websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) if err != nil { - log.Error().Err(err).Msgf("Failed to connect to pty websocket at %s.", pc.url) - return + return fmt.Errorf("failed to connect pty websocket: %w", err) } - defer pc.close() pc.cmd = exec.Command("/bin/bash", "-i") - uid, gid, groupIds, env, err := 
pc.getPtyUserAndEnv() if err != nil { - log.Error().Err(err).Msgf("Failed to get pty user and env.") - return + return fmt.Errorf("failed to get user/env: %w", err) } - pc.setPtyCmdSysProcAttrAndEnv(uid, gid, groupIds, env) - initialSize := &pty.Winsize{ - Rows: pc.rows, - Cols: pc.cols, + initialSize := &pty.Winsize{Rows: pc.rows, Cols: pc.cols} + pc.ptmx, err = pty.StartWithSize(pc.cmd, initialSize) + if err != nil { + return fmt.Errorf("failed to start pty: %w", err) } - pc.ptmx, err = pty.StartWithSize(pc.cmd, initialSize) + terminals[pc.sessionID] = pc + return nil +} + +func (pc *PtyClient) RunPtyBackground() { + log.Debug().Msg("Opening websocket for pty session.") + + err := pc.initializePtySession() if err != nil { - log.Error().Err(err).Msg("Failed to start pty.") return } + defer pc.close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - go pc.readFromWebsocket(ctx, cancel) - go pc.readFromPTY(ctx, cancel) + recoveryChan := make(chan struct{}, 1) + recoveredWsChan := make(chan struct{}, 1) + recoveredPtyChan := make(chan struct{}, 1) - terminals[pc.sessionID] = pc + go pc.readFromWebsocket(ctx, cancel, recoveryChan, recoveredWsChan) + go pc.readFromPTY(ctx, cancel, recoveryChan, recoveredPtyChan) - <-ctx.Done() + for { + select { + case <-ctx.Done(): + return + case <-recoveryChan: + log.Debug().Msg("Attempting to reconnect pty websocket...") + err = pc.recovery() + pc.isRecovering.Store(false) + if err != nil { + cancel() + return + } + log.Debug().Msg("Pty websocket reconnected successfully.") + recoveredWsChan <- struct{}{} + recoveredPtyChan <- struct{}{} + } + } } -func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.CancelFunc) { +func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.CancelFunc, recoveryChan, recoveredChan chan struct{}) { for { select { case <-ctx.Done(): @@ -111,11 +138,22 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance if 
ctx.Err() != nil { return } - if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - log.Debug().Err(err).Msg("Failed to read from pty websocket.") + + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + log.Debug().Msg("Pty websocket connection closed by peer.") + cancel() + return + } + + if pc.isRecovering.CompareAndSwap(false, true) { + recoveryChan <- struct{}{} + } + select { + case <-recoveredChan: + continue + case <-ctx.Done(): + return } - cancel() - return } _, err = pc.ptmx.Write(message) if err != nil { @@ -133,7 +171,7 @@ func (pc *PtyClient) readFromWebsocket(ctx context.Context, cancel context.Cance } } -func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc) { +func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc, recoveryChan, recoveredChan chan struct{}) { buf := make([]byte, 2048) for { @@ -159,11 +197,21 @@ func (pc *PtyClient) readFromPTY(ctx context.Context, cancel context.CancelFunc) if ctx.Err() != nil { return } - if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { - log.Debug().Err(err).Msg("Failed to write to pty.") + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + log.Debug().Msg("Pty websocket connection closed by peer.") + cancel() + return + } + + if pc.isRecovering.CompareAndSwap(false, true) { + recoveryChan <- struct{}{} + } + select { + case <-recoveredChan: + continue + case <-ctx.Done(): + return } - cancel() - return } } } @@ -220,6 +268,45 @@ func (pc *PtyClient) close() { log.Debug().Msg("Websocket connection for pty has been closed.") } +// recovery reconnects the WebSocket while keeping the PTY session alive. +// Note: recovery don't close the existing conn explicitly to avoid breaking the session. +// The goal is to replace a broken connection, not perform a graceful shutdown. 
+func (pc *PtyClient) recovery() error { + data := map[string]interface{}{ + "session": pc.sessionID, + } + body, statusCode, err := pc.apiSession.Post(reissuePtyWebsocketURL, data, 5) + if err != nil { + log.Error().Err(err).Msg("Failed to request pty websocket reissue.") + return err + } + + if statusCode != http.StatusCreated { + err = fmt.Errorf("unexpected status code: %d", statusCode) + log.Error().Err(err).Msg("Failed to request pty websocket reissue.") + return err + } + + var resp struct { + WebsocketURL string `json:"websocket_url"` + } + + err = json.Unmarshal(body, &resp) + if err != nil { + log.Error().Err(err).Msg("Failed to parse response when reissuing pty websocket url.") + return err + } + + pc.url = strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + resp.WebsocketURL + pc.conn, _, err = websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) + if err != nil { + log.Error().Err(err).Msg("Failed to reconnect to pty websocket after recovery.") + return err + } + + return nil +} + // getPtyUserAndEnv retrieves user information and sets environment variables. func (pc *PtyClient) getPtyUserAndEnv() (uid, gid int, groupIds []string, env map[string]string, err error) { env = getDefaultEnv() From 0427392f03c8762fff0575348bf13f65c3b73874 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 22 Apr 2025 14:28:08 +0900 Subject: [PATCH 311/364] Prevent nil panic by assigning websocket conn only on successful recovery --- pkg/runner/pty.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 6bc7871..50017fe 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -298,11 +298,13 @@ func (pc *PtyClient) recovery() error { } pc.url = strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + resp.WebsocketURL - pc.conn, _, err = websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) + // Assign to pc.conn only if reconnect succeeds to avoid nil panic in concurrent reads/writes. 
+ tempConn, _, err := websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) if err != nil { log.Error().Err(err).Msg("Failed to reconnect to pty websocket after recovery.") return err } + pc.conn = tempConn return nil } From 11651dbf4e9d5f0453a0040b4155caaa77458212 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 24 Apr 2025 10:46:32 +0900 Subject: [PATCH 312/364] Minor fix --- pkg/runner/pty.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 50017fe..ec7b643 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -244,6 +244,10 @@ func (pc *PtyClient) close() { _ = pc.cmd.Wait() } + if terminals[pc.sessionID] != nil { + delete(terminals, pc.sessionID) + } + if pc.conn != nil { err := pc.conn.WriteControl( websocket.CloseMessage, @@ -261,10 +265,6 @@ func (pc *PtyClient) close() { } } - if terminals[pc.sessionID] != nil { - delete(terminals, pc.sessionID) - } - log.Debug().Msg("Websocket connection for pty has been closed.") } From e213f656ca0fd73d42874deb945950e669ef4ec5 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 24 Apr 2025 10:50:11 +0900 Subject: [PATCH 313/364] fix: register terminal before PTY session initialization to ensure proper cleanup --- pkg/runner/pty.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index ec7b643..c0b9727 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -90,12 +90,12 @@ func (pc *PtyClient) initializePtySession() error { func (pc *PtyClient) RunPtyBackground() { log.Debug().Msg("Opening websocket for pty session.") + defer pc.close() err := pc.initializePtySession() if err != nil { return } - defer pc.close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() From 624f9883bdfac88fbcf52243786e21fb18799e34 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 24 Apr 2025 11:10:44 +0900 Subject: [PATCH 314/364] feat: add global ctx support for graceful PTY cleanup --- 
pkg/runner/client.go | 6 +++--- pkg/runner/command.go | 9 +++++---- pkg/runner/pty.go | 5 ++++- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 393e62e..6f305e2 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -68,7 +68,7 @@ func (wc *WebsocketClient) RunForever(ctx context.Context) { } // Sends "ping" query for Alpacon to verify WebSocket session status without error handling. _ = wc.SendPingQuery() - wc.CommandRequestHandler(message) + wc.CommandRequestHandler(ctx, message) } } } @@ -182,7 +182,7 @@ func (wc *WebsocketClient) Restart() { close(wc.RestartChan) } -func (wc *WebsocketClient) CommandRequestHandler(message []byte) { +func (wc *WebsocketClient) CommandRequestHandler(globalCtx context.Context, message []byte) { var content Content var data CommandData @@ -212,7 +212,7 @@ func (wc *WebsocketClient) CommandRequestHandler(message []byte) { time.Time{}, ) commandRunner := NewCommandRunner(wc, wc.apiSession, content.Command, data) - go commandRunner.Run() + go commandRunner.Run(globalCtx) case "quit": log.Debug().Msgf("Quit requested for reason: %s.", content.Reason) wc.ShutDown() diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 8c1bc86..dd577a6 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -3,6 +3,7 @@ package runner import ( "archive/zip" "bytes" + "context" "encoding/base64" "errors" "fmt" @@ -48,7 +49,7 @@ func NewCommandRunner(wsClient *WebsocketClient, apiSession *scheduler.Session, } } -func (cr *CommandRunner) Run() { +func (cr *CommandRunner) Run(globalCtx context.Context) { var exitCode int var result string @@ -57,7 +58,7 @@ func (cr *CommandRunner) Run() { start := time.Now() switch cr.command.Shell { case "internal": - exitCode, result = cr.handleInternalCmd() + exitCode, result = cr.handleInternalCmd(globalCtx) case "system": exitCode, result = cr.handleShellCmd(cr.command.Line, cr.command.User, cr.command.Group, cr.command.Env) 
default: @@ -77,7 +78,7 @@ func (cr *CommandRunner) Run() { } } -func (cr *CommandRunner) handleInternalCmd() (int, string) { +func (cr *CommandRunner) handleInternalCmd(globalCtx context.Context) (int, string) { args := strings.Fields(cr.command.Line) if len(args) == 0 { return 1, "No command provided" @@ -150,7 +151,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { } ptyClient := NewPtyClient(cr.data, cr.apiSession) - go ptyClient.RunPtyBackground() + go ptyClient.RunPtyBackground(globalCtx) return 0, "Spawned a pty terminal." case "openftp": diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index c0b9727..284c73d 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -88,7 +88,7 @@ func (pc *PtyClient) initializePtySession() error { return nil } -func (pc *PtyClient) RunPtyBackground() { +func (pc *PtyClient) RunPtyBackground(globalCtx context.Context) { log.Debug().Msg("Opening websocket for pty session.") defer pc.close() @@ -109,6 +109,8 @@ func (pc *PtyClient) RunPtyBackground() { for { select { + case <-globalCtx.Done(): + return case <-ctx.Done(): return case <-recoveryChan: @@ -235,6 +237,7 @@ func (pc *PtyClient) resize(rows, cols uint16) error { // close terminates the PTY session and cleans up resources. // It ensures that the PTY, command, and WebSocket connection are properly closed. 
func (pc *PtyClient) close() { + fmt.Println("pty 에서 close 가 호출됨.") if pc.ptmx != nil { _ = pc.ptmx.Close() } From 81b0f8b0c8f91e709ed37a7d0ac04454f1526059 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 25 Apr 2025 11:27:47 +0900 Subject: [PATCH 315/364] chore: remove unused global ctx for PTY cleanup --- pkg/runner/command.go | 9 ++++----- pkg/runner/pty.go | 4 +--- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index dd577a6..8c1bc86 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -3,7 +3,6 @@ package runner import ( "archive/zip" "bytes" - "context" "encoding/base64" "errors" "fmt" @@ -49,7 +48,7 @@ func NewCommandRunner(wsClient *WebsocketClient, apiSession *scheduler.Session, } } -func (cr *CommandRunner) Run(globalCtx context.Context) { +func (cr *CommandRunner) Run() { var exitCode int var result string @@ -58,7 +57,7 @@ func (cr *CommandRunner) Run(globalCtx context.Context) { start := time.Now() switch cr.command.Shell { case "internal": - exitCode, result = cr.handleInternalCmd(globalCtx) + exitCode, result = cr.handleInternalCmd() case "system": exitCode, result = cr.handleShellCmd(cr.command.Line, cr.command.User, cr.command.Group, cr.command.Env) default: @@ -78,7 +77,7 @@ func (cr *CommandRunner) Run(globalCtx context.Context) { } } -func (cr *CommandRunner) handleInternalCmd(globalCtx context.Context) (int, string) { +func (cr *CommandRunner) handleInternalCmd() (int, string) { args := strings.Fields(cr.command.Line) if len(args) == 0 { return 1, "No command provided" @@ -151,7 +150,7 @@ func (cr *CommandRunner) handleInternalCmd(globalCtx context.Context) (int, stri } ptyClient := NewPtyClient(cr.data, cr.apiSession) - go ptyClient.RunPtyBackground(globalCtx) + go ptyClient.RunPtyBackground() return 0, "Spawned a pty terminal." 
case "openftp": diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 284c73d..82255c9 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -88,7 +88,7 @@ func (pc *PtyClient) initializePtySession() error { return nil } -func (pc *PtyClient) RunPtyBackground(globalCtx context.Context) { +func (pc *PtyClient) RunPtyBackground() { log.Debug().Msg("Opening websocket for pty session.") defer pc.close() @@ -109,8 +109,6 @@ func (pc *PtyClient) RunPtyBackground(globalCtx context.Context) { for { select { - case <-globalCtx.Done(): - return case <-ctx.Done(): return case <-recoveryChan: From 48db724e4451a7d5844a5cbc869f94bad153fa94 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 25 Apr 2025 11:28:55 +0900 Subject: [PATCH 316/364] chore: minor fix for formatting --- pkg/runner/client.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 6f305e2..4f9fc37 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -146,11 +146,10 @@ func (wc *WebsocketClient) Close() { return } - deadline := time.Now().Add(5 * time.Second) err := wc.Conn.WriteControl( websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), - deadline, + time.Now().Add(5*time.Second), ) if err != nil { log.Debug().Err(err).Msg("Failed to write close message to websocket.") @@ -212,7 +211,7 @@ func (wc *WebsocketClient) CommandRequestHandler(globalCtx context.Context, mess time.Time{}, ) commandRunner := NewCommandRunner(wc, wc.apiSession, content.Command, data) - go commandRunner.Run(globalCtx) + go commandRunner.Run() case "quit": log.Debug().Msgf("Quit requested for reason: %s.", content.Reason) wc.ShutDown() From b657a77c54c9d03c1e35f14c7a446c5acc4db88a Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 25 Apr 2025 11:49:05 +0900 Subject: [PATCH 317/364] chore: remove leftover debug message --- pkg/runner/pty.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/runner/pty.go 
b/pkg/runner/pty.go index 82255c9..c0b9727 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -235,7 +235,6 @@ func (pc *PtyClient) resize(rows, cols uint16) error { // close terminates the PTY session and cleans up resources. // It ensures that the PTY, command, and WebSocket connection are properly closed. func (pc *PtyClient) close() { - fmt.Println("pty 에서 close 가 호출됨.") if pc.ptmx != nil { _ = pc.ptmx.Close() } From 0f65230446d4a1d646650e2c8a523ca5edaf7c1d Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 25 Apr 2025 11:56:48 +0900 Subject: [PATCH 318/364] chore: remove unused global ctx for PTY cleanup --- pkg/runner/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 4f9fc37..a246d23 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -68,7 +68,7 @@ func (wc *WebsocketClient) RunForever(ctx context.Context) { } // Sends "ping" query for Alpacon to verify WebSocket session status without error handling. _ = wc.SendPingQuery() - wc.CommandRequestHandler(ctx, message) + wc.CommandRequestHandler(message) } } } @@ -181,7 +181,7 @@ func (wc *WebsocketClient) Restart() { close(wc.RestartChan) } -func (wc *WebsocketClient) CommandRequestHandler(globalCtx context.Context, message []byte) { +func (wc *WebsocketClient) CommandRequestHandler(message []byte) { var content Content var data CommandData From 53e29d2d04832d630b82efce3d340a72ae0d6e86 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 25 Apr 2025 12:00:26 +0900 Subject: [PATCH 319/364] chore: revise log messages for PTY recovery --- pkg/runner/pty.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index c0b9727..1c832fe 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -269,7 +269,7 @@ func (pc *PtyClient) close() { } // recovery reconnects the WebSocket while keeping the PTY session alive. 
-// Note: recovery don't close the existing conn explicitly to avoid breaking the session. +// Note: recovery doesn't close the existing conn explicitly to avoid breaking the session. // The goal is to replace a broken connection, not perform a graceful shutdown. func (pc *PtyClient) recovery() error { data := map[string]interface{}{ @@ -293,7 +293,7 @@ func (pc *PtyClient) recovery() error { err = json.Unmarshal(body, &resp) if err != nil { - log.Error().Err(err).Msg("Failed to parse response when reissuing pty websocket url.") + log.Error().Err(err).Msg("Failed to parse pty websocket reissue response.") return err } @@ -301,7 +301,7 @@ func (pc *PtyClient) recovery() error { // Assign to pc.conn only if reconnect succeeds to avoid nil panic in concurrent reads/writes. tempConn, _, err := websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) if err != nil { - log.Error().Err(err).Msg("Failed to reconnect to pty websocket after recovery.") + log.Error().Err(err).Msg("Failed to reconnect to pty websocket during recovery.") return err } pc.conn = tempConn From 3ea336114538d5921f704d6888d41f2f0126085a Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 28 Apr 2025 11:20:44 +0900 Subject: [PATCH 320/364] fix: improve preremove and postremove scripts for better rpm and deb compatibility --- scripts/postremove.sh | 3 +++ scripts/preremove.sh | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/postremove.sh b/scripts/postremove.sh index 019ca7a..0b31761 100644 --- a/scripts/postremove.sh +++ b/scripts/postremove.sh @@ -1,5 +1,8 @@ #!/bin/sh +# Only effective on Debian-based systems where "purge" is supported. +# No effect on RHEL-based systems. 
+ FILES_TO_REMOVE=" /etc/alpamon/alpamon.conf /usr/lib/tmpfiles.d/alpamon.conf diff --git a/scripts/preremove.sh b/scripts/preremove.sh index a3b8f42..2769c30 100644 --- a/scripts/preremove.sh +++ b/scripts/preremove.sh @@ -1,6 +1,7 @@ #!/bin/sh -if [ "$1" = 'remove' ]; then +# For RPM (0 = remove) and DEB ("remove") +if [ "$1" = "remove" ] || [ "$1" -eq 0 ] 2>/dev/null; then echo 'Stopping and disabling Alpamon service...' if command -v systemctl >/dev/null; then From 46da987095825ee469163e08e1a27e017b0654f9 Mon Sep 17 00:00:00 2001 From: royroyee Date: Mon, 28 Apr 2025 11:50:45 +0900 Subject: [PATCH 321/364] fix: handle upgrade detection properly on RHEL and Debian --- scripts/postinstall.sh | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/scripts/postinstall.sh b/scripts/postinstall.sh index 900debd..b0b5e72 100644 --- a/scripts/postinstall.sh +++ b/scripts/postinstall.sh @@ -76,13 +76,25 @@ cleanup_tmpl_files() { fi } +# debain +# Initial installation: $1 == configure +# Upgrade: $1 == configure, $2 == old version +# rhel +# Initial installation: $1 == 1 +# Upgrade: $1 == 2, and configured to restart on upgrade is_upgrade() { - if [ -n "$2" ]; then - return 0 # Upgrade - else - return 1 # First install - fi + # RHEL + if [ "$1" -eq 2 ] 2>/dev/null; then + return 0 # Upgrade + fi + + # Debian + if [ "$1" = "configure" ] && [ -n "$2" ]; then + return 0 # Upgrade + fi + + return 1 # Initial installation } # Exit on error From c35b7b6bef2f334cc5c8e545a7ca1ce637432f98 Mon Sep 17 00:00:00 2001 From: junho98 Date: Fri, 9 May 2025 11:03:46 +0900 Subject: [PATCH 322/364] Feature: Add chmod command for Unix-like platforms Combined the chmod command execution for Debian, RHEL, and Darwin platforms as they all utilize the standard `/bin/chmod` utility. 
This change simplifies the logic while retaining the if-else structure to easily accommodate future platform-specific implementations (e.g., for Windows or other operating systems) if needed. --- pkg/runner/command.go | 54 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index b62ea64..9b5a452 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -125,6 +125,13 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return cr.delGroup() case "ping": return 0, time.Now().Format(time.RFC3339) + case "chmod": + if len(args) < 3 { + return 1, "chmod: Insufficient arguments. Usage: chmod " + } + mode := args[1] + path := args[2] + return cr.chmod(mode, path) //case "debug": // TODO : getReporterStats() case "download": @@ -240,6 +247,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { case "help": helpMessage := ` Available commands: + chmod : change file/directory permissions package install : install a system package package uninstall : remove a system package upgrade: upgrade alpamon @@ -665,6 +673,52 @@ func (cr *CommandRunner) openFtp(data openFtpData) error { return nil } +// chmodCmdData holds the validated arguments for the chmod command. +type chmodCmdData struct { + Mode string `validate:"required"` + Path string `validate:"required"` +} + +// chmod changes the permissions of a file or directory. +// It takes mode (e.g., "755", "u+x") and path as arguments. 
+func (cr *CommandRunner) chmod(mode string, path string) (exitCode int, result string) { + data := chmodCmdData{ + Mode: mode, + Path: path, + } + + err := cr.validateData(data) + if err != nil { + var validationErrors validator.ValidationErrors + if errors.As(err, &validationErrors) { + var fieldErrors []string + for _, fe := range validationErrors { + fieldErrors = append(fieldErrors, fmt.Sprintf("field '%s' failed on the '%s' tag (value: '%v')", fe.Field(), fe.Tag(), fe.Value())) + } + return 1, fmt.Sprintf("chmod: Invalid arguments. %s", strings.Join(fieldErrors, "; ")) + } + return 1, fmt.Sprintf("chmod: Invalid arguments. %s", err.Error()) + } + + var cmdArgs []string + + if utils.PlatformLike == "debian" || utils.PlatformLike == "rhel" || utils.PlatformLike == "darwin" { + cmdArgs = []string{"/bin/chmod", data.Mode, data.Path} + } else { + // For other OS like Windows, we would add specific implementations here + // Even though the command might be the same for some platforms, + // we use if-else statements to allow for future OS-specific implementations + return 1, fmt.Sprintf("chmod: Platform '%s' is not currently supported for the chmod operation.", utils.PlatformLike) + } + + exitCode, cmdResult := runCmdWithOutput(cmdArgs, "root", "", nil, 60) + if exitCode != 0 { + return exitCode, fmt.Sprintf("chmod: Failed to change permissions for '%s' to '%s' on platform '%s'. 
Exit code: %d, Output: %s", data.Path, data.Mode, utils.PlatformLike, exitCode, cmdResult) + } + + return 0, fmt.Sprintf("Successfully changed permissions for '%s' to '%s' on platform '%s'.", data.Path, data.Mode, utils.PlatformLike) +} + func getFileData(data CommandData) ([]byte, error) { var content []byte switch data.Type { From 4d8c4114a5c8bd127957c456e1fea91e519ec871 Mon Sep 17 00:00:00 2001 From: junho98 Date: Fri, 9 May 2025 11:06:10 +0900 Subject: [PATCH 323/364] Fix minor --- pkg/runner/command.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 9b5a452..7eb9fa7 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -247,7 +247,6 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { case "help": helpMessage := ` Available commands: - chmod : change file/directory permissions package install : install a system package package uninstall : remove a system package upgrade: upgrade alpamon From 7cfcccfbc444ee809567e015bfb8b8b91cbb155e Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 9 May 2025 12:56:19 +0900 Subject: [PATCH 324/364] style: minor fix --- pkg/runner/command.go | 20 ++------------------ pkg/runner/command_types.go | 6 ++++++ 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 7eb9fa7..f71f2f9 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -672,12 +672,6 @@ func (cr *CommandRunner) openFtp(data openFtpData) error { return nil } -// chmodCmdData holds the validated arguments for the chmod command. -type chmodCmdData struct { - Mode string `validate:"required"` - Path string `validate:"required"` -} - // chmod changes the permissions of a file or directory. // It takes mode (e.g., "755", "u+x") and path as arguments. 
func (cr *CommandRunner) chmod(mode string, path string) (exitCode int, result string) { @@ -696,20 +690,10 @@ func (cr *CommandRunner) chmod(mode string, path string) (exitCode int, result s } return 1, fmt.Sprintf("chmod: Invalid arguments. %s", strings.Join(fieldErrors, "; ")) } - return 1, fmt.Sprintf("chmod: Invalid arguments. %s", err.Error()) - } - - var cmdArgs []string - - if utils.PlatformLike == "debian" || utils.PlatformLike == "rhel" || utils.PlatformLike == "darwin" { - cmdArgs = []string{"/bin/chmod", data.Mode, data.Path} - } else { - // For other OS like Windows, we would add specific implementations here - // Even though the command might be the same for some platforms, - // we use if-else statements to allow for future OS-specific implementations - return 1, fmt.Sprintf("chmod: Platform '%s' is not currently supported for the chmod operation.", utils.PlatformLike) + return 1, fmt.Sprintf("chmod: Invalid arguments. %v", err) } + cmdArgs := []string{"/bin/chmod", data.Mode, data.Path} exitCode, cmdResult := runCmdWithOutput(cmdArgs, "root", "", nil, 60) if exitCode != 0 { return exitCode, fmt.Sprintf("chmod: Failed to change permissions for '%s' to '%s' on platform '%s'. Exit code: %d, Output: %s", data.Path, data.Mode, utils.PlatformLike, exitCode, cmdResult) diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 0a3d363..98ec9e6 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -143,3 +143,9 @@ var nonZipExt = map[string]bool{ ".nupkg": true, ".kmz": true, } + +// chmodCmdData holds the validated arguments for the chmod command. 
+type chmodCmdData struct { + Mode string `validate:"required"` + Path string `validate:"required"` +} From 51ec8d0e9cd365a66b0be8944f17d65e0c9fa976 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 9 May 2025 13:01:34 +0900 Subject: [PATCH 325/364] chore: add raspbian to debian-like platforms --- pkg/utils/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 78d374c..051a1f5 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -39,7 +39,7 @@ func getPlatformLike() { os.Exit(1) } switch platformInfo.Platform { - case "ubuntu", "debian": + case "ubuntu", "debian", "raspbian": PlatformLike = "debian" case "centos", "rhel", "redhat", "amazon", "amzn", "fedora", "rocky", "oracle", "ol": PlatformLike = "rhel" From 3d85fe7c053cbe08295ebf7671c5f25727a1bddb Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 9 May 2025 17:11:46 +0900 Subject: [PATCH 326/364] Add sqlite3 dependency Add sqlite3 dependency for raspberrypi --- .goreleaser.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index e199cb7..6694dd7 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -44,6 +44,7 @@ nfpms: - rpm dependencies: - zip + - sqlite3 bindir: /usr/local/bin/ contents: From 98edf72110ad906b00daa25a3de5f7cdbe0ab061 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 9 May 2025 17:57:06 +0900 Subject: [PATCH 327/364] Fix install_atlas.sh Fix install_atlas.sh to build with GOARCH. 
--- scripts/install_atlas.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/install_atlas.sh b/scripts/install_atlas.sh index 614f91a..feb7f1e 100644 --- a/scripts/install_atlas.sh +++ b/scripts/install_atlas.sh @@ -2,11 +2,11 @@ mkdir -p bin -ARCH=$(uname -m) +ARCH=${GOARCH} -if [ "$ARCH" = "x86_64" ]; then +if [ "$ARCH" = "amd64" ]; then curl -L -o bin/atlas https://release.ariga.io/atlas/atlas-community-linux-amd64-latest -elif [ "$ARCH" = "aarch64" ]; then +elif [ "$ARCH" = "arm64" ]; then curl -L -o bin/atlas https://release.ariga.io/atlas/atlas-community-linux-arm64-latest fi From 085e35e8e1d159b29f00584b5ae47e742c6a0416 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 9 May 2025 18:03:20 +0900 Subject: [PATCH 328/364] Move install_atlas.sh execution point Move install_atlas.sh execution point to builds.hooks.pre --- .goreleaser.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 6694dd7..62f0d2b 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -7,7 +7,6 @@ before: - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema - go mod tidy - chmod +x ./scripts/install_atlas.sh - - ./scripts/install_atlas.sh builds: - main: ./cmd/alpamon @@ -21,6 +20,9 @@ builds: goarch: - amd64 - arm64 + hooks: + pre: + - ./scripts/install_atlas.sh checksum: name_template: "{{ .ProjectName }}-{{ .Version }}-checksums.sha256" From 4bcd40ebb0de8b1c607cea0bd6cef7f6e362bf18 Mon Sep 17 00:00:00 2001 From: junho98 Date: Mon, 12 May 2025 11:33:05 +0900 Subject: [PATCH 329/364] Add HomeDirectoryPermission to addUser and CommandData structs - Added HomeDirectoryPermission field to addUserData and CommandData structs - Updated addUser logic to set home directory permissions after user creation - Ensured consistent struct field indentation for readability --- pkg/runner/command.go | 62 +++++++++++------------------------ 
pkg/runner/command_types.go | 64 +++++++++++++++++-------------------- 2 files changed, 48 insertions(+), 78 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index f71f2f9..827048c 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -125,13 +125,6 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return cr.delGroup() case "ping": return 0, time.Now().Format(time.RFC3339) - case "chmod": - if len(args) < 3 { - return 1, "chmod: Insufficient arguments. Usage: chmod " - } - mode := args[1] - path := args[2] - return cr.chmod(mode, path) //case "debug": // TODO : getReporterStats() case "download": @@ -323,13 +316,14 @@ func (cr *CommandRunner) sync(keys []string) { func (cr *CommandRunner) addUser() (exitCode int, result string) { data := addUserData{ - Username: cr.data.Username, - UID: cr.data.UID, - GID: cr.data.GID, - Comment: cr.data.Comment, - HomeDirectory: cr.data.HomeDirectory, - Shell: cr.data.Shell, - Groupname: cr.data.Groupname, + Username: cr.data.Username, + UID: cr.data.UID, + GID: cr.data.GID, + Comment: cr.data.Comment, + HomeDirectory: cr.data.HomeDirectory, + HomeDirectoryPermission: cr.data.HomeDirectoryPermission, + Shell: cr.data.Shell, + Groupname: cr.data.Groupname, } err := cr.validateData(data) @@ -399,6 +393,16 @@ func (cr *CommandRunner) addUser() (exitCode int, result string) { return 1, "Not implemented 'adduser' command for this platform." } + exitCode, result = runCmdWithOutput( + []string{ + "chmod", cr.data.HomeDirectoryPermission, cr.data.HomeDirectory, + }, + "root", "", nil, 60, + ) + if exitCode != 0 { + return exitCode, result + } + cr.sync([]string{"groups", "users"}) return 0, "Successfully added new user." } @@ -672,36 +676,6 @@ func (cr *CommandRunner) openFtp(data openFtpData) error { return nil } -// chmod changes the permissions of a file or directory. -// It takes mode (e.g., "755", "u+x") and path as arguments. 
-func (cr *CommandRunner) chmod(mode string, path string) (exitCode int, result string) { - data := chmodCmdData{ - Mode: mode, - Path: path, - } - - err := cr.validateData(data) - if err != nil { - var validationErrors validator.ValidationErrors - if errors.As(err, &validationErrors) { - var fieldErrors []string - for _, fe := range validationErrors { - fieldErrors = append(fieldErrors, fmt.Sprintf("field '%s' failed on the '%s' tag (value: '%v')", fe.Field(), fe.Tag(), fe.Value())) - } - return 1, fmt.Sprintf("chmod: Invalid arguments. %s", strings.Join(fieldErrors, "; ")) - } - return 1, fmt.Sprintf("chmod: Invalid arguments. %v", err) - } - - cmdArgs := []string{"/bin/chmod", data.Mode, data.Path} - exitCode, cmdResult := runCmdWithOutput(cmdArgs, "root", "", nil, 60) - if exitCode != 0 { - return exitCode, fmt.Sprintf("chmod: Failed to change permissions for '%s' to '%s' on platform '%s'. Exit code: %d, Output: %s", data.Path, data.Mode, utils.PlatformLike, exitCode, cmdResult) - } - - return 0, fmt.Sprintf("Successfully changed permissions for '%s' to '%s' on platform '%s'.", data.Path, data.Mode, utils.PlatformLike) -} - func getFileData(data CommandData) ([]byte, error) { var content []byte switch data.Type { diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 98ec9e6..d2f4203 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -33,27 +33,28 @@ type File struct { } type CommandData struct { - SessionID string `json:"session_id"` - URL string `json:"url"` - Rows uint16 `json:"rows"` - Cols uint16 `json:"cols"` - Username string `json:"username"` - Groupname string `json:"groupname"` - HomeDirectory string `json:"home_directory"` - UID uint64 `json:"uid"` - GID uint64 `json:"gid"` - Comment string `json:"comment"` - Shell string `json:"shell"` - Groups []uint64 `json:"groups"` - Type string `json:"type"` - Content string `json:"content"` - Path string `json:"path"` - Paths []string `json:"paths"` - Files 
[]File `json:"files,omitempty"` - AllowOverwrite bool `json:"allow_overwrite,omitempty"` - AllowUnzip bool `json:"allow_unzip,omitempty"` - UseBlob bool `json:"use_blob,omitempty"` - Keys []string `json:"keys"` + SessionID string `json:"session_id"` + URL string `json:"url"` + Rows uint16 `json:"rows"` + Cols uint16 `json:"cols"` + Username string `json:"username"` + Groupname string `json:"groupname"` + HomeDirectory string `json:"home_directory"` + HomeDirectoryPermission string `json:"home_directory_permission"` + UID uint64 `json:"uid"` + GID uint64 `json:"gid"` + Comment string `json:"comment"` + Shell string `json:"shell"` + Groups []uint64 `json:"groups"` + Type string `json:"type"` + Content string `json:"content"` + Path string `json:"path"` + Paths []string `json:"paths"` + Files []File `json:"files,omitempty"` + AllowOverwrite bool `json:"allow_overwrite,omitempty"` + AllowUnzip bool `json:"allow_unzip,omitempty"` + UseBlob bool `json:"use_blob,omitempty"` + Keys []string `json:"keys"` } type CommandRunner struct { @@ -68,13 +69,14 @@ type CommandRunner struct { // Structs defining the required input data for command validation purposes. // type addUserData struct { - Username string `validate:"required"` - UID uint64 `validate:"required"` - GID uint64 `validate:"required"` - Comment string `validate:"required"` - HomeDirectory string `validate:"required"` - Shell string `validate:"required"` - Groupname string `validate:"required"` + Username string `validate:"required"` + UID uint64 `validate:"required"` + GID uint64 `validate:"required"` + Comment string `validate:"required"` + HomeDirectory string `validate:"required"` + HomeDirectoryPermission string `validate:"required"` + Shell string `validate:"required"` + Groupname string `validate:"required"` } type addGroupData struct { @@ -143,9 +145,3 @@ var nonZipExt = map[string]bool{ ".nupkg": true, ".kmz": true, } - -// chmodCmdData holds the validated arguments for the chmod command. 
-type chmodCmdData struct { - Mode string `validate:"required"` - Path string `validate:"required"` -} From cecf33eabb6ef743f09a8cf4c00fd43d52c21816 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 12 May 2025 13:36:03 +0900 Subject: [PATCH 330/364] Fix .goreleaser.yaml & install_atlas.sh There was an issue with the existing .goreleaser.yaml and install_atlas.sh where Linux packages were built based on the CPU architecture of the host building the Linux package, rather than being packaged according to the target GOARCH. To resolve this, fix .goreleaser.yaml and install_atlas.sh so that Atlas CLI corresponding to the built GOARCH is installed. --- .goreleaser.yaml | 92 +++++++++++++++++++++++++++++++++++++--- scripts/install_atlas.sh | 8 ++-- 2 files changed, 91 insertions(+), 9 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 62f0d2b..ad50521 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -2,6 +2,29 @@ version: 2 project_name: alpamon +before: + hooks: + - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema + - go mod tidy + - chmod +x ./scripts/install_atlas.sh + +builds: + - main: ./cmd/alpamon + binary: alpamon + ldflags: + - -s -w -X github.com/alpacanetworks/alpamon/pkg/version.Version={{.Version}} + env: + - CGO_ENABLED=0 + goos: + - linux + goarch: + - amd64 + - arm64 +"~/Downloads/.goreleaser.yaml" 81L, 2015B +version: 2 + +project_name: alpamon + before: hooks: - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema @@ -21,8 +44,8 @@ builds: - amd64 - arm64 hooks: - pre: - - ./scripts/install_atlas.sh + post: + - ./scripts/install_atlas.sh {{ .Arch }} checksum: name_template: "{{ .ProjectName }}-{{ .Version }}-checksums.sha256" @@ -31,7 +54,66 @@ archives: - id: alpamon name_template: "{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}" files: - - src: bin/atlas + - src: bin/atlas-{{ .Arch }} + dst: 
usr/local/bin/atlas + +nfpms: + - package_name: alpamon + maintainer: Younghwan Kim + description: Alpamon + homepage: https://github.com/alpacanetworks/alpamon + license: MIT + vendor: AlpacaX + formats: + - deb + - rpm + dependencies: + - zip + - sqlite3 + bindir: /usr/local/bin/ + + contents: + - src: "configs/tmpfile.conf" + dst: "/usr/lib/tmpfiles.d/{{ .ProjectName }}.conf" + + - src: "configs/{{ .ProjectName }}.conf" + dst: "/etc/alpamon/{{ .ProjectName }}.config.tmpl" + + - src: "configs/{{ .ProjectName }}.service" +version: 2 + +project_name: alpamon + +before: + hooks: + - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema + - go mod tidy + - chmod +x ./scripts/install_atlas.sh + +builds: + - main: ./cmd/alpamon + binary: alpamon + ldflags: + - -s -w -X github.com/alpacanetworks/alpamon/pkg/version.Version={{.Version}} + env: + - CGO_ENABLED=0 + goos: + - linux + goarch: + - amd64 + - arm64 + hooks: + post: + - ./scripts/install_atlas.sh {{ .Arch }} + +checksum: + name_template: "{{ .ProjectName }}-{{ .Version }}-checksums.sha256" + +archives: + - id: alpamon + name_template: "{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}" + files: + - src: bin/atlas-{{ .Arch }} dst: usr/local/bin/atlas nfpms: @@ -64,8 +146,8 @@ nfpms: - src: "configs/{{ .ProjectName }}-restart.timer" dst: "/lib/systemd/system/{{ .ProjectName }}-restart.timer" - - - src: "bin/atlas" + + - src: "bin/atlas-{{ .Arch }}" dst: "/usr/local/bin/atlas" scripts: diff --git a/scripts/install_atlas.sh b/scripts/install_atlas.sh index feb7f1e..cc7923f 100644 --- a/scripts/install_atlas.sh +++ b/scripts/install_atlas.sh @@ -2,12 +2,12 @@ mkdir -p bin -ARCH=${GOARCH} +ARCH=$1 if [ "$ARCH" = "amd64" ]; then - curl -L -o bin/atlas https://release.ariga.io/atlas/atlas-community-linux-amd64-latest + curl -L -o "bin/atlas-$ARCH" https://release.ariga.io/atlas/atlas-linux-amd64-latest elif [ "$ARCH" = "arm64" ]; then - curl -L -o 
bin/atlas https://release.ariga.io/atlas/atlas-community-linux-arm64-latest + curl -L -o "bin/atlas-$ARCH" https://release.ariga.io/atlas/atlas-linux-arm64-latest fi -chmod +x bin/atlas +chmod +x "bin/atlas-$ARCH" \ No newline at end of file From 8bbef3586c5f43163e59906fa740513cc7e27934 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 12 May 2025 13:42:26 +0900 Subject: [PATCH 331/364] Minor fix Fix typo. --- .goreleaser.yaml | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index ad50521..2efb00d 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -2,29 +2,6 @@ version: 2 project_name: alpamon -before: - hooks: - - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema - - go mod tidy - - chmod +x ./scripts/install_atlas.sh - -builds: - - main: ./cmd/alpamon - binary: alpamon - ldflags: - - -s -w -X github.com/alpacanetworks/alpamon/pkg/version.Version={{.Version}} - env: - - CGO_ENABLED=0 - goos: - - linux - goarch: - - amd64 - - arm64 -"~/Downloads/.goreleaser.yaml" 81L, 2015B -version: 2 - -project_name: alpamon - before: hooks: - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema From 7d4f18c6a52ac4f28977cb0652dc838d89c2db36 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 12 May 2025 13:45:28 +0900 Subject: [PATCH 332/364] Minor fix Fix typo. 
--- .goreleaser.yaml | 61 +----------------------------------------------- 1 file changed, 1 insertion(+), 60 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 2efb00d..13b2c8c 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -2,65 +2,6 @@ version: 2 project_name: alpamon -before: - hooks: - - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema - - go mod tidy - - chmod +x ./scripts/install_atlas.sh - -builds: - - main: ./cmd/alpamon - binary: alpamon - ldflags: - - -s -w -X github.com/alpacanetworks/alpamon/pkg/version.Version={{.Version}} - env: - - CGO_ENABLED=0 - goos: - - linux - goarch: - - amd64 - - arm64 - hooks: - post: - - ./scripts/install_atlas.sh {{ .Arch }} - -checksum: - name_template: "{{ .ProjectName }}-{{ .Version }}-checksums.sha256" - -archives: - - id: alpamon - name_template: "{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}" - files: - - src: bin/atlas-{{ .Arch }} - dst: usr/local/bin/atlas - -nfpms: - - package_name: alpamon - maintainer: Younghwan Kim - description: Alpamon - homepage: https://github.com/alpacanetworks/alpamon - license: MIT - vendor: AlpacaX - formats: - - deb - - rpm - dependencies: - - zip - - sqlite3 - bindir: /usr/local/bin/ - - contents: - - src: "configs/tmpfile.conf" - dst: "/usr/lib/tmpfiles.d/{{ .ProjectName }}.conf" - - - src: "configs/{{ .ProjectName }}.conf" - dst: "/etc/alpamon/{{ .ProjectName }}.config.tmpl" - - - src: "configs/{{ .ProjectName }}.service" -version: 2 - -project_name: alpamon - before: hooks: - go run -mod=mod entgo.io/ent/cmd/ent@v0.14.0 generate --feature sql/modifier --target ./pkg/db/ent ./pkg/db/schema @@ -123,7 +64,7 @@ nfpms: - src: "configs/{{ .ProjectName }}-restart.timer" dst: "/lib/systemd/system/{{ .ProjectName }}-restart.timer" - + - src: "bin/atlas-{{ .Arch }}" dst: "/usr/local/bin/atlas" From 65c9940afc300f0e6287a45ef2ffba1b2c29b893 Mon Sep 17 00:00:00 2001 From: junho98 Date: Tue, 13 
May 2025 13:36:32 +0900 Subject: [PATCH 333/364] feat: make HomeDirectoryPermission optional for addUser (backward compatible) - Changed HomeDirectoryPermission field in CommandData and addUserData structs to be optional (omitempty for validation) - In addUser, if HomeDirectoryPermission is not provided, default to "700" for backward compatibility - Ensures existing clients/calls without this field continue to work as before --- pkg/runner/command.go | 7 ++++++- pkg/runner/command_types.go | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 827048c..4a72c09 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -393,9 +393,14 @@ func (cr *CommandRunner) addUser() (exitCode int, result string) { return 1, "Not implemented 'adduser' command for this platform." } + // Set default permission for home directory if not provided + if data.HomeDirectoryPermission == "" { + data.HomeDirectoryPermission = "700" + } + exitCode, result = runCmdWithOutput( []string{ - "chmod", cr.data.HomeDirectoryPermission, cr.data.HomeDirectory, + "chmod", data.HomeDirectoryPermission, data.HomeDirectory, }, "root", "", nil, 60, ) diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index d2f4203..9fdfc3d 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -74,7 +74,7 @@ type addUserData struct { GID uint64 `validate:"required"` Comment string `validate:"required"` HomeDirectory string `validate:"required"` - HomeDirectoryPermission string `validate:"required"` + HomeDirectoryPermission string `validate:"omitempty"` // Use omitempty for backward compatibility Shell string `validate:"required"` Groupname string `validate:"required"` } From 9cf25097fbf13561f0baa34866916b8c39d064c4 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 13 May 2025 17:34:17 +0900 Subject: [PATCH 334/364] fix(tls): set InsecureSkipVerify based on SSLVerify config option --- pkg/config/config.go 
| 2 +- pkg/runner/client.go | 8 +++++++- pkg/runner/ftp.go | 9 ++++++++- pkg/runner/pty.go | 8 +++++++- pkg/scheduler/session.go | 2 +- 5 files changed, 24 insertions(+), 5 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 4e9635e..0be1825 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -113,8 +113,8 @@ func validateConfig(config Config, wsPath string) (bool, Settings) { valid = false } + settings.SSLVerify = config.SSL.Verify if settings.UseSSL { - settings.SSLVerify = config.SSL.Verify caCert := config.SSL.CaCert if !settings.SSLVerify { log.Warn().Msg( diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 690faca..48a2a5f 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -2,6 +2,7 @@ package runner import ( "context" + "crypto/tls" "encoding/json" "fmt" "net/http" @@ -113,7 +114,12 @@ func (wc *WebsocketClient) Connect() { log.Error().Msg("Maximum retry duration reached. Shutting down.") return ctx.Err() default: - conn, _, err := websocket.DefaultDialer.Dial(config.GlobalSettings.WSPath, wc.requestHeader) + dialer := websocket.Dialer{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: !config.GlobalSettings.SSLVerify, + }, + } + conn, _, err := dialer.Dial(config.GlobalSettings.WSPath, wc.requestHeader) if err != nil { nextInterval := wsBackoff.NextBackOff() log.Debug().Err(err).Msgf("Failed to connect to %s, will try again in %ds.", config.GlobalSettings.WSPath, int(nextInterval.Seconds())) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 45b1ca3..3e1c705 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -2,8 +2,10 @@ package runner import ( "context" + "crypto/tls" "encoding/json" "fmt" + "github.com/alpacanetworks/alpamon/pkg/config" "net/http" "os" "path/filepath" @@ -43,7 +45,12 @@ func (fc *FtpClient) RunFtpBackground() { fc.log.Debug().Msg("Opening websocket for ftp session.") var err error - fc.conn, _, err = websocket.DefaultDialer.Dial(fc.url, fc.requestHeader) + 
dialer := websocket.Dialer{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: !config.GlobalSettings.SSLVerify, + }, + } + fc.conn, _, err = dialer.Dial(fc.url, fc.requestHeader) if err != nil { fc.log.Debug().Err(err).Msgf("Failed to connect to pty websocket at %s.", fc.url) return diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 1c832fe..e25efa9 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -2,6 +2,7 @@ package runner import ( "context" + "crypto/tls" "encoding/json" "errors" "fmt" @@ -298,8 +299,13 @@ func (pc *PtyClient) recovery() error { } pc.url = strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + resp.WebsocketURL + dialer := websocket.Dialer{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: !config.GlobalSettings.SSLVerify, + }, + } // Assign to pc.conn only if reconnect succeeds to avoid nil panic in concurrent reads/writes. - tempConn, _, err := websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) + tempConn, _, err := dialer.Dial(pc.url, pc.requestHeader) if err != nil { log.Error().Err(err).Msg("Failed to reconnect to pty websocket during recovery.") return err diff --git a/pkg/scheduler/session.go b/pkg/scheduler/session.go index 5ec9a0c..bc8c539 100644 --- a/pkg/scheduler/session.go +++ b/pkg/scheduler/session.go @@ -41,7 +41,7 @@ func InitSession() *Session { tlsConfig.RootCAs = caCertPool } - tlsConfig.InsecureSkipVerify = config.GlobalSettings.SSLVerify + tlsConfig.InsecureSkipVerify = !config.GlobalSettings.SSLVerify client.Transport = &http.Transport{ TLSClientConfig: tlsConfig, } From bb01aea2edc7601c4ee045ccf6416bf69d3e2c54 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 15 May 2025 11:21:07 +0900 Subject: [PATCH 335/364] fix(tls): set InsecureSkipVerify based on SSLVerify config option --- pkg/runner/pty.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index e25efa9..5d1801c 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go 
@@ -67,7 +67,12 @@ func NewPtyClient(data CommandData, apiSession *scheduler.Session) *PtyClient { func (pc *PtyClient) initializePtySession() error { var err error - pc.conn, _, err = websocket.DefaultDialer.Dial(pc.url, pc.requestHeader) + dialer := websocket.Dialer{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: !config.GlobalSettings.SSLVerify, + }, + } + pc.conn, _, err = dialer.Dial(pc.url, pc.requestHeader) if err != nil { return fmt.Errorf("failed to connect pty websocket: %w", err) } From f5d2efff34ad710685924b2ae0f77e34fa76e548 Mon Sep 17 00:00:00 2001 From: royroyee Date: Thu, 15 May 2025 11:32:06 +0900 Subject: [PATCH 336/364] fix(upgrade): add -y flag to upgrade commands for non-interactive execution --- pkg/runner/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 4a72c09..3754615 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -101,7 +101,7 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { if utils.PlatformLike == "debian" { cmd = "apt-get update -y && " + - "apt-get install --only-upgrade alpamon" + "apt-get install --only-upgrade alpamon -y" } else if utils.PlatformLike == "rhel" { cmd = "yum update -y alpamon" } else { From 13ecb0b751aa698bf13ee4032934056c575677ee Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 15 May 2025 19:17:54 +0900 Subject: [PATCH 337/364] Add tlsConfig to http.Client Add tls.Config to http.Client for file transfer based on the SSLVerify option defined in alpamon.conf. 
--- pkg/runner/command.go | 19 +++++++++++++++++++ pkg/utils/http_client.go | 22 ++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 3754615..9c6cd50 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -3,6 +3,8 @@ package runner import ( "archive/zip" "bytes" + "crypto/tls" + "crypto/x509" "encoding/base64" "errors" "fmt" @@ -706,6 +708,23 @@ func getFileData(data CommandData) ([]byte, error) { } client := http.Client{} + + tlsConfig := &tls.Config{} + if config.GlobalSettings.CaCert != "" { + caCertPool := x509.NewCertPool() + caCert, err := os.ReadFile(config.GlobalSettings.CaCert) + if err != nil { + log.Fatal().Err(err).Msg("Failed to read CA certificate.") + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caCertPool + } + + tlsConfig.InsecureSkipVerify = !config.GlobalSettings.SSLVerify + client.Transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + resp, err := client.Do(req) if err != nil { return nil, fmt.Errorf("failed to download content from URL: %w", err) diff --git a/pkg/utils/http_client.go b/pkg/utils/http_client.go index 18f7a3c..f6848a7 100644 --- a/pkg/utils/http_client.go +++ b/pkg/utils/http_client.go @@ -2,9 +2,15 @@ package utils import ( "bytes" + "crypto/tls" + "crypto/x509" "io" "net/http" + "os" "time" + + "github.com/alpacanetworks/alpamon/pkg/config" + "github.com/rs/zerolog/log" ) func Put(url string, body bytes.Buffer, timeout time.Duration) ([]byte, int, error) { @@ -15,6 +21,22 @@ func Put(url string, body bytes.Buffer, timeout time.Duration) ([]byte, int, err client := &http.Client{Timeout: timeout} + tlsConfig := &tls.Config{} + if config.GlobalSettings.CaCert != "" { + caCertPool := x509.NewCertPool() + caCert, err := os.ReadFile(config.GlobalSettings.CaCert) + if err != nil { + log.Fatal().Err(err).Msg("Failed to read CA certificate.") + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caCertPool + } + + 
tlsConfig.InsecureSkipVerify = !config.GlobalSettings.SSLVerify + client.Transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + resp, err := client.Do(req) if err != nil { return nil, 0, err From 829cff92d54f9098809f916b637ff2f8668f4906 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 16 May 2025 14:51:47 +0900 Subject: [PATCH 338/364] chore: clean up code style --- pkg/runner/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 9c6cd50..b57034a 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -692,7 +692,7 @@ func getFileData(data CommandData) ([]byte, error) { return nil, fmt.Errorf("failed to parse URL '%s': %w", data.Content, err) } - req, err := http.NewRequest("GET", parsedRequestURL.String(), nil) + req, err := http.NewRequest(http.MethodGet, parsedRequestURL.String(), nil) if err != nil { return nil, fmt.Errorf("failed to create request: %w", err) } From d4a00517351342b74a3d0edd5a23a66f1ae2a049 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 16 May 2025 15:19:34 +0900 Subject: [PATCH 339/364] Change log level from Fatal to Error Adjust the log level to Error because Fatal terminates the agent. 
--- pkg/runner/command.go | 2 +- pkg/utils/http_client.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index b57034a..08e450a 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -714,7 +714,7 @@ func getFileData(data CommandData) ([]byte, error) { caCertPool := x509.NewCertPool() caCert, err := os.ReadFile(config.GlobalSettings.CaCert) if err != nil { - log.Fatal().Err(err).Msg("Failed to read CA certificate.") + log.Error().Err(err).Msg("Failed to read CA certificate.") } caCertPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caCertPool diff --git a/pkg/utils/http_client.go b/pkg/utils/http_client.go index f6848a7..13fa273 100644 --- a/pkg/utils/http_client.go +++ b/pkg/utils/http_client.go @@ -26,7 +26,7 @@ func Put(url string, body bytes.Buffer, timeout time.Duration) ([]byte, int, err caCertPool := x509.NewCertPool() caCert, err := os.ReadFile(config.GlobalSettings.CaCert) if err != nil { - log.Fatal().Err(err).Msg("Failed to read CA certificate.") + log.Error().Err(err).Msg("Failed to read CA certificate.") } caCertPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caCertPool From c8bb89b0cec677037d68955cd7ac974ed88a50bc Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 20 May 2025 16:48:22 +0900 Subject: [PATCH 340/364] Implement disk-related utility functions Add IsVirtualDisk() to determine if the disk corresponding to name parameter is a virtual disk. Add GetDiskBaseName() to obtain the base name of a disk. 
--- pkg/utils/metrics.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 3c97e88..ced97a8 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -38,7 +38,16 @@ var ( "/dev": true, } virtualMountPointPattern = "^/(sys|proc|run|dev/)" - loopFileSystemPrefix = "/dev/loop" + virtaulDisk = map[string]bool{ + "loop": true, + "ram": true, + "fd": true, + "sr": true, + "zram": true, + } + loopFileSystemPrefix = "/dev/loop" + linuxDiskNamePattern = regexp.MustCompile(`^([a-z]+[0-9]*)(p[0-9]+)?$`) + macDiskNamePattern = regexp.MustCompile(`^(disk[0-9]+)(s[0-9]+)?$`) ) func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { @@ -107,6 +116,14 @@ func IsVirtualFileSystem(device string, fstype string, mountPoint string) bool { return false } +func IsVirtualDisk(name string) bool { + if virtaulDisk[name] { + return true + } + + return false +} + func ParseDiskName(device string) string { device = strings.TrimPrefix(device, "/dev/") @@ -123,3 +140,15 @@ func ParseDiskName(device string) string { return device } + +func GetDiskBaseName(name string) string { + if matches := linuxDiskNamePattern.FindStringSubmatch(name); len(matches) == 2 { + return matches[1] + } + + if matches := macDiskNamePattern.FindStringSubmatch(name); len(matches) == 2 { + return matches[1] + } + + return name +} From 0f8b6a127b93c214627f3e7794839cd0ebf2f694 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 20 May 2025 16:55:03 +0900 Subject: [PATCH 341/364] Add filtering logic for virtual disks. Add logic to filter out virtual disks when collecting disk I/O and disk info. 
--- .../check/realtime/disk/io/io_collect.go | 13 ++++++++++++- pkg/runner/commit.go | 16 ++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/pkg/collector/check/realtime/disk/io/io_collect.go b/pkg/collector/check/realtime/disk/io/io_collect.go index 0171c05..c8fc2ad 100644 --- a/pkg/collector/check/realtime/disk/io/io_collect.go +++ b/pkg/collector/check/realtime/disk/io/io_collect.go @@ -44,9 +44,20 @@ func (c *CollectCheck) collectAndSaveDiskIO(ctx context.Context) error { func (c *CollectCheck) parseDiskIO(ioCounters map[string]disk.IOCountersStat) []base.CheckResult { var data []base.CheckResult + seen := make(map[string]bool) for name, ioCounter := range ioCounters { var readBps, writeBps float64 + if utils.IsVirtualDisk(name) { + continue + } + + baseName := utils.GetDiskBaseName(name) + if seen[baseName] { + continue + } + seen[baseName] = true + if lastCounter, exist := c.lastMetric[name]; exist { readBps, writeBps = utils.CalculateDiskIOBps(ioCounter, lastCounter, c.GetInterval()) } else { @@ -57,7 +68,7 @@ func (c *CollectCheck) parseDiskIO(ioCounters map[string]disk.IOCountersStat) [] c.lastMetric[name] = ioCounter data = append(data, base.CheckResult{ Timestamp: time.Now(), - Device: name, + Device: baseName, ReadBps: &readBps, WriteBps: &writeBps, }) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 84c81f3..2955729 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -650,12 +650,24 @@ func getRpmPackage(path string) ([]SystemPackageData, error) { func getDisks() ([]Disk, error) { ioCounters, err := disk.IOCounters() + seen := make(map[string]bool) + if err != nil { return []Disk{}, err } disks := []Disk{} for name, ioCounter := range ioCounters { + if utils.IsVirtualDisk(name) { + continue + } + + baseName := utils.GetDiskBaseName(name) + if seen[baseName] { + continue + } + seen[baseName] = true + disks = append(disks, Disk{ Name: name, SerialNumber: ioCounter.SerialNumber, @@ -674,6 +686,10 @@ 
func getPartitions() ([]Partition, error) { } for _, partition := range partitions { + if utils.IsVirtualFileSystem(partition.Device, partition.Fstype, partition.Mountpoint) { + continue + } + if value, exists := seen[partition.Device]; exists { value.MountPoints = append(value.MountPoints, partition.Mountpoint) seen[partition.Device] = value From 9eb27f6185cabec034a25f12b7b2f589a3a668d3 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 20 May 2025 17:37:04 +0900 Subject: [PATCH 342/364] Minor fix Fix condition. --- pkg/utils/metrics.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index ced97a8..e95c2a6 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -142,11 +142,11 @@ func ParseDiskName(device string) string { } func GetDiskBaseName(name string) string { - if matches := linuxDiskNamePattern.FindStringSubmatch(name); len(matches) == 2 { + if matches := linuxDiskNamePattern.FindStringSubmatch(name); len(matches) >= 2 { return matches[1] } - if matches := macDiskNamePattern.FindStringSubmatch(name); len(matches) == 2 { + if matches := macDiskNamePattern.FindStringSubmatch(name); len(matches) >= 2 { return matches[1] } From 1ea4209cec49664acc9c5e33bd787dfab31da332 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 20 May 2025 18:01:11 +0900 Subject: [PATCH 343/364] Fix regexp for getting disk name Fix regexp for getting disk name --- pkg/utils/metrics.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index e95c2a6..d3caf2a 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -46,8 +46,9 @@ var ( "zram": true, } loopFileSystemPrefix = "/dev/loop" - linuxDiskNamePattern = regexp.MustCompile(`^([a-z]+[0-9]*)(p[0-9]+)?$`) - macDiskNamePattern = regexp.MustCompile(`^(disk[0-9]+)(s[0-9]+)?$`) + diskPattern = regexp.MustCompile( + `^(nvme\d+n\d+|xvd[a-z]+|vd[a-z]+|sd[a-z]+|disk\d+)(?:p\d+|s\d+|\d+)?$`, + 
) ) func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { @@ -142,11 +143,7 @@ func ParseDiskName(device string) string { } func GetDiskBaseName(name string) string { - if matches := linuxDiskNamePattern.FindStringSubmatch(name); len(matches) >= 2 { - return matches[1] - } - - if matches := macDiskNamePattern.FindStringSubmatch(name); len(matches) >= 2 { + if matches := diskPattern.FindStringSubmatch(name); len(matches) >= 2 { return matches[1] } From 441e7b624b7618e88b4088bd3943e05bdcdc2abd Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 20 May 2025 19:28:22 +0900 Subject: [PATCH 344/364] Add more regexp for disk Add more regexp for disk --- pkg/utils/metrics.go | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index d3caf2a..1e4ea4a 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -46,9 +46,11 @@ var ( "zram": true, } loopFileSystemPrefix = "/dev/loop" - diskPattern = regexp.MustCompile( - `^(nvme\d+n\d+|xvd[a-z]+|vd[a-z]+|sd[a-z]+|disk\d+)(?:p\d+|s\d+|\d+)?$`, - ) + nvmeDiskPattern = regexp.MustCompile(`^(nvme\d+n\d+)(p\d+)?$`) + scsiDiskPattern = regexp.MustCompile(`^([a-z]+)(\d+)?$`) + mmcDiskPattern = regexp.MustCompile(`^(mmcblk\d+)(p\d+)?$`) + lvmDiskPattern = regexp.MustCompile(`^(dm-\d+)$`) + macDiskPattern = regexp.MustCompile(`^(disk\d+)(s\d+)?$`) ) func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { @@ -143,8 +145,27 @@ func ParseDiskName(device string) string { } func GetDiskBaseName(name string) string { - if matches := diskPattern.FindStringSubmatch(name); len(matches) >= 2 { - return matches[1] + switch { + case strings.HasPrefix(name, "nvme"): + if m := nvmeDiskPattern.FindStringSubmatch(name); len(m) >= 2 { + return m[1] + } + case strings.HasPrefix(name, 
"mmcb"): + if m := mmcDiskPattern.FindStringSubmatch(name); len(m) >= 2 { + return m[1] + } + case strings.HasPrefix(name, "disk"): + if m := macDiskPattern.FindStringSubmatch(name); len(m) >= 2 { + return m[1] + } + case strings.HasPrefix(name, "dm-"): + if m := lvmDiskPattern.FindStringSubmatch(name); len(m) >= 2 { + return m[1] + } + default: + if m := scsiDiskPattern.FindStringSubmatch(name); len(m) >= 2 { + return m[1] + } } return name From 19bd77ef40c280ae736e2e7da492375edd40aff3 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 20 May 2025 19:55:56 +0900 Subject: [PATCH 345/364] Minor fix Fix regexp in ParseDiskName() --- pkg/utils/metrics.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 1e4ea4a..03b5cab 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -120,17 +120,13 @@ func IsVirtualFileSystem(device string, fstype string, mountPoint string) bool { } func IsVirtualDisk(name string) bool { - if virtaulDisk[name] { - return true - } - - return false + return virtaulDisk[name] } func ParseDiskName(device string) string { device = strings.TrimPrefix(device, "/dev/") - re := regexp.MustCompile(`^[a-zA-Z]+\d*`) + re := regexp.MustCompile(`^[a-zA-Z]+`) if match := re.FindString(device); match != "" { return match } From 0b868b6c54998fa9a0e3f8b9eb46fcabd418bc30 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 21 May 2025 17:54:27 +0900 Subject: [PATCH 346/364] Implement interface-related utility functions Add FilterVirtualInterface to filter if the interface is a virtual interface. 
--- pkg/utils/metrics.go | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index 03b5cab..a7e7e43 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -45,12 +45,17 @@ var ( "sr": true, "zram": true, } + virtualInterfaceFlags = map[string]bool{ + "flagloopback": true, + "flagpointtopoint": true, + } loopFileSystemPrefix = "/dev/loop" nvmeDiskPattern = regexp.MustCompile(`^(nvme\d+n\d+)(p\d+)?$`) scsiDiskPattern = regexp.MustCompile(`^([a-z]+)(\d+)?$`) mmcDiskPattern = regexp.MustCompile(`^(mmcblk\d+)(p\d+)?$`) lvmDiskPattern = regexp.MustCompile(`^(dm-\d+)$`) macDiskPattern = regexp.MustCompile(`^(disk\d+)(s\d+)?$`) + VirtualIfacePattern = regexp.MustCompile(`^(lo|docker|veth|br-|virbr|vmnet|tap|tun|wl|wg|zt|tailscale|enp0s|cni)`) ) func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { @@ -166,3 +171,32 @@ func GetDiskBaseName(name string) string { return name } + +func FilterVirtualInterface(ifaces net.InterfaceStatList) map[string]net.InterfaceStat { + interfaces := make(map[string]net.InterfaceStat) + for _, iface := range ifaces { + if iface.HardwareAddr == "" { + continue + } + + if VirtualIfacePattern.MatchString(iface.Name) { + continue + } + + isVirtualFlag := false + for _, flag := range iface.Flags { + if virtualInterfaceFlags[strings.ToLower(flag)] { + isVirtualFlag = true + break + } + } + + if isVirtualFlag { + continue + } + + interfaces[iface.Name] = iface + } + + return interfaces +} From b7e3772e3bf6622f4f029721fce821131327d8c2 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Wed, 21 May 2025 17:55:40 +0900 Subject: [PATCH 347/364] Add filtering logic for virtual interfaces Add logic to filter out virtual interfaces when collecting network traffic and interface info. 
--- pkg/collector/check/realtime/net/net_collect.go | 9 +-------- pkg/runner/commit.go | 5 +++++ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pkg/collector/check/realtime/net/net_collect.go b/pkg/collector/check/realtime/net/net_collect.go index c9507d6..a39814b 100644 --- a/pkg/collector/check/realtime/net/net_collect.go +++ b/pkg/collector/check/realtime/net/net_collect.go @@ -92,14 +92,7 @@ func (c *CollectCheck) collectInterfaces() (map[string]net.InterfaceStat, error) return nil, err } - interfaces := map[string]net.InterfaceStat{} - for _, iface := range ifaces { - mac := iface.HardwareAddr - if mac == "" { - continue - } - interfaces[iface.Name] = iface - } + interfaces := utils.FilterVirtualInterface(ifaces) return interfaces, nil } diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 2955729..26a2b57 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -466,6 +466,11 @@ func getNetworkInterfaces() ([]Interface, error) { if mac == "" { continue } + + if utils.VirtualIfacePattern.MatchString(iface.Name) { + continue + } + interfaces = append(interfaces, Interface{ Name: iface.Name, Flags: getFlags(iface), From e884e89df6f33c9e72ceea2a17774207475fa14f Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 22 May 2025 19:57:27 +0900 Subject: [PATCH 348/364] Fix getDpkgPackage() Fix a scanner error(bufio.Scanner: token too long) that occurred when parsing the dpkg status file and the block size was large. Add logger for blocks that failed parsing. Add exception handling for packages that are malformed or failed to parse in the dpkg status file. 
--- pkg/runner/commit.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 84c81f3..03bcef7 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -37,7 +37,8 @@ const ( passwdFilePath = "/etc/passwd" groupFilePath = "/etc/group" - dpkgDbPath = "/var/lib/dpkg/status" + dpkgDbPath = "/var/lib/dpkg/status" + dpkgBufferSize = 1024 * 1024 IFF_UP = 1 << 0 // Interface is up IFF_LOOPBACK = 1 << 3 // Loopback interface @@ -578,6 +579,9 @@ func getDpkgPackage() ([]SystemPackageData, error) { scanner := bufio.NewScanner(fd) scanner.Split(utils.ScanBlock) + buf := make([]byte, 0, dpkgBufferSize) + scanner.Buffer(buf, dpkgBufferSize) + pkgNamePrefix := []byte("Package:") for scanner.Scan() { chunk := scanner.Bytes() @@ -598,7 +602,8 @@ func getDpkgPackage() ([]SystemPackageData, error) { reader := textproto.NewReader(bufio.NewReader(bytes.NewReader(chunk))) header, err := reader.ReadMIMEHeader() if err != nil && !errors.Is(err, io.EOF) { - return nil, err + log.Error().Err(err).Msgf("Failed to parse package %s", pkgName) + continue } pkg := SystemPackageData{ @@ -608,10 +613,16 @@ func getDpkgPackage() ([]SystemPackageData, error) { Arch: header.Get("Architecture"), } + if pkg.Name == "" || pkg.Version == "" { + log.Error().Msgf("Skip malformed package entry: %s", chunk) + continue + } + packages = append(packages, pkg) } if err = scanner.Err(); err != nil { + log.Error().Err(err).Msg("Error occurred while scanning dpkg status file.") return nil, err } From 0ba13d9f59225b2149b0a47a52a6b1142a727aaa Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 23 May 2025 11:14:42 +0900 Subject: [PATCH 349/364] Minor fix Change log level. 
--- pkg/runner/commit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 03bcef7..c66d94c 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -614,7 +614,7 @@ func getDpkgPackage() ([]SystemPackageData, error) { } if pkg.Name == "" || pkg.Version == "" { - log.Error().Msgf("Skip malformed package entry: %s", chunk) + log.Debug().Msgf("Skip malformed package entry: %s", chunk) continue } From 3d423c4b90b2115c89391ed4a98dd008e7f18dfd Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 26 May 2025 17:28:32 +0900 Subject: [PATCH 350/364] Fix regexp for virtual interfaces Delete wl prefix that is a prefix for Physical Wireless Interfaces from VirtualIfacePattern. --- pkg/utils/metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/metrics.go b/pkg/utils/metrics.go index a7e7e43..3947616 100644 --- a/pkg/utils/metrics.go +++ b/pkg/utils/metrics.go @@ -55,7 +55,7 @@ var ( mmcDiskPattern = regexp.MustCompile(`^(mmcblk\d+)(p\d+)?$`) lvmDiskPattern = regexp.MustCompile(`^(dm-\d+)$`) macDiskPattern = regexp.MustCompile(`^(disk\d+)(s\d+)?$`) - VirtualIfacePattern = regexp.MustCompile(`^(lo|docker|veth|br-|virbr|vmnet|tap|tun|wl|wg|zt|tailscale|enp0s|cni)`) + VirtualIfacePattern = regexp.MustCompile(`^(lo|docker|veth|br-|virbr|vmnet|tap|tun|wg|zt|tailscale|enp0s|cni)`) ) func CalculateNetworkBps(current net.IOCountersStat, last net.IOCountersStat, interval time.Duration) (inputBps float64, outputBps float64) { From 7375fea055e508b9d5e4340551ad229b58e26f5a Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Mon, 26 May 2025 17:29:42 +0900 Subject: [PATCH 351/364] Add filter to getNetworkAddresses() Add exception handling to getNetworkAddresses() for filtering virtual interfaces. 
--- pkg/runner/commit.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index 7d22469..c1b5e4e 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -497,6 +497,11 @@ func getNetworkAddresses() ([]Address, error) { if mac == "" { continue } + + if utils.VirtualIfacePattern.MatchString(iface.Name) { + continue + } + addrs, err := iface.Addrs() if err != nil { return nil, err From 49f9d59b304927ab060266b5a5bec1927d5a4796 Mon Sep 17 00:00:00 2001 From: royroyee Date: Tue, 27 May 2025 16:41:59 +0900 Subject: [PATCH 352/364] feat: add exponential backoff retry logic to pty websocket recovery --- pkg/runner/client.go | 6 ++-- pkg/runner/pty.go | 83 +++++++++++++++++++++++++++----------------- 2 files changed, 55 insertions(+), 34 deletions(-) diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 48a2a5f..68b804d 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -105,14 +105,14 @@ func (wc *WebsocketClient) Connect() { wsBackoff := backoff.NewExponentialBackOff() wsBackoff.InitialInterval = minConnectInterval wsBackoff.MaxInterval = maxConnectInterval - wsBackoff.MaxElapsedTime = 0 // No time limit for retries (infinite retry) - wsBackoff.RandomizationFactor = 0 // Retry forever + wsBackoff.MaxElapsedTime = 0 // No time limit for retries (infinite retry) + wsBackoff.RandomizationFactor = 0 operation := func() error { select { case <-ctx.Done(): log.Error().Msg("Maximum retry duration reached. 
Shutting down.") - return ctx.Err() + return backoff.Permanent(ctx.Err()) default: dialer := websocket.Dialer{ TLSClientConfig: &tls.Config{ diff --git a/pkg/runner/pty.go b/pkg/runner/pty.go index 5d1801c..60e7914 100644 --- a/pkg/runner/pty.go +++ b/pkg/runner/pty.go @@ -9,6 +9,7 @@ import ( "github.com/alpacanetworks/alpamon/pkg/config" "github.com/alpacanetworks/alpamon/pkg/scheduler" "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/cenkalti/backoff" "github.com/creack/pty" "github.com/gorilla/websocket" "github.com/rs/zerolog/log" @@ -38,7 +39,10 @@ type PtyClient struct { isRecovering atomic.Bool // default : false } -const reissuePtyWebsocketURL = "/api/websh/pty-channels/" +const ( + maxRecoveryTimeout = 1 * time.Minute + reissuePtyWebsocketURL = "/api/websh/pty-channels/" +) var terminals map[string]*PtyClient @@ -278,44 +282,61 @@ func (pc *PtyClient) close() { // Note: recovery doesn't close the existing conn explicitly to avoid breaking the session. // The goal is to replace a broken connection, not perform a graceful shutdown. 
func (pc *PtyClient) recovery() error { - data := map[string]interface{}{ - "session": pc.sessionID, - } - body, statusCode, err := pc.apiSession.Post(reissuePtyWebsocketURL, data, 5) - if err != nil { - log.Error().Err(err).Msg("Failed to request pty websocket reissue.") - return err - } + ctx, cancel := context.WithTimeout(context.Background(), maxRecoveryTimeout) + defer cancel() - if statusCode != http.StatusCreated { - err = fmt.Errorf("unexpected status code: %d", statusCode) - log.Error().Err(err).Msg("Failed to request pty websocket reissue.") - return err - } + retryBackoff := backoff.NewExponentialBackOff() + retryBackoff.InitialInterval = 1 * time.Second + retryBackoff.MaxInterval = 30 * time.Second + retryBackoff.MaxElapsedTime = 0 // until ctx timeout + retryBackoff.RandomizationFactor = 0 - var resp struct { - WebsocketURL string `json:"websocket_url"` - } + operation := func() error { + select { + case <-ctx.Done(): + log.Error().Msg("PTY recovery aborted: timeout reached.") + return backoff.Permanent(ctx.Err()) + default: + data := map[string]interface{}{ + "session": pc.sessionID, + } + body, statusCode, err := pc.apiSession.Post(reissuePtyWebsocketURL, data, 5) + if err != nil || statusCode != http.StatusCreated { + nextInterval := retryBackoff.NextBackOff() + log.Warn().Err(err).Msgf("Failed to reissue pty websocket (status: %d), will try again in %ds.", statusCode, int(nextInterval.Seconds())) + return fmt.Errorf("reissue failed: %w", err) + } - err = json.Unmarshal(body, &resp) - if err != nil { - log.Error().Err(err).Msg("Failed to parse pty websocket reissue response.") - return err - } + var resp struct { + WebsocketURL string `json:"websocket_url"` + } + if err = json.Unmarshal(body, &resp); err != nil { + log.Warn().Err(err).Msg("Failed to parse reissue response.") + return fmt.Errorf("unmarshal error: %w", err) + } + pc.url = strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + resp.WebsocketURL - pc.url = 
strings.Replace(config.GlobalSettings.ServerURL, "http", "ws", 1) + resp.WebsocketURL - dialer := websocket.Dialer{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: !config.GlobalSettings.SSLVerify, - }, + dialer := websocket.Dialer{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: !config.GlobalSettings.SSLVerify, + }, + } + conn, _, err := dialer.Dial(pc.url, pc.requestHeader) + if err != nil { + log.Warn().Err(err).Msg("PTY websocket reconnection failed.") + return err + } + + pc.conn = conn + log.Info().Msg("PTY WebSocket reconnected successfully.") + return nil + } } - // Assign to pc.conn only if reconnect succeeds to avoid nil panic in concurrent reads/writes. - tempConn, _, err := dialer.Dial(pc.url, pc.requestHeader) + + err := backoff.Retry(operation, backoff.WithContext(retryBackoff, ctx)) if err != nil { - log.Error().Err(err).Msg("Failed to reconnect to pty websocket during recovery.") return err } - pc.conn = tempConn return nil } From 0890358cfec71d22992f189dc2c5d539c7e870f9 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 27 May 2025 17:49:35 +0900 Subject: [PATCH 353/364] Fix .goreleaser.yaml Fix nfpms dependencies for rocky linux. --- .goreleaser.yaml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 13b2c8c..a983680 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -44,9 +44,6 @@ nfpms: formats: - deb - rpm - dependencies: - - zip - - sqlite3 bindir: /usr/local/bin/ contents: @@ -73,6 +70,16 @@ nfpms: preremove: "scripts/preremove.sh" postremove: "scripts/postremove.sh" + overrides: + deb: + dependencies: + - zip + - sqlite3 + rpm: + dependencies: + - zip + - sqlite + changelog: sort: asc filters: From 2f386eafa259f0711108a6874e46a79df303b740 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 29 May 2025 17:14:42 +0900 Subject: [PATCH 354/364] Update mv webftp command Update mv webftp command to support rename feature. 
--- pkg/runner/ftp.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 3e1c705..8030bac 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -5,13 +5,13 @@ import ( "crypto/tls" "encoding/json" "fmt" - "github.com/alpacanetworks/alpamon/pkg/config" "net/http" "os" "path/filepath" "strconv" "strings" + "github.com/alpacanetworks/alpamon/pkg/config" "github.com/alpacanetworks/alpamon/pkg/logger" "github.com/alpacanetworks/alpamon/pkg/utils" "github.com/gorilla/websocket" @@ -391,7 +391,7 @@ func (fc *FtpClient) rmd(path string, recursive bool) (CommandResult, error) { func (fc *FtpClient) mv(src, dst string) (CommandResult, error) { src = fc.parsePath(src) - dst = filepath.Join(fc.parsePath(dst), filepath.Base(src)) + dst = fc.parsePath(dst) err := os.Rename(src, dst) if err != nil { From da6ea3c023200e8ed367793d99531124c9378c61 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 29 May 2025 19:34:42 +0900 Subject: [PATCH 355/364] Update chown webftp command Update chown webftp command to be worked by username and groupname. 
--- pkg/runner/ftp.go | 8 ++++---- pkg/runner/ftp_types.go | 4 ++-- pkg/utils/utils.go | 27 +++++++++++++++++++++++---- 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/pkg/runner/ftp.go b/pkg/runner/ftp.go index 8030bac..bedee88 100644 --- a/pkg/runner/ftp.go +++ b/pkg/runner/ftp.go @@ -161,7 +161,7 @@ func (fc *FtpClient) handleFtpCommand(command FtpCommand, data FtpData) (Command case Chmod: return fc.chmod(data.Path, data.Mode, data.Recursive) case Chown: - return fc.chown(data.Path, data.UID, data.GID, data.Recursive) + return fc.chown(data.Path, data.Username, data.Groupname, data.Recursive) default: return CommandResult{}, fmt.Errorf("unknown FTP command: %s", command) } @@ -500,17 +500,17 @@ func (fc *FtpClient) chmodRecursive(path string, fileMode os.FileMode) error { }) } -func (fc *FtpClient) chown(path, uidStr, gidStr string, recursive bool) (CommandResult, error) { +func (fc *FtpClient) chown(path, username, groupname string, recursive bool) (CommandResult, error) { path = fc.parsePath(path) - uid, err := strconv.Atoi(uidStr) + uid, err := utils.LookUpUID(username) if err != nil { return CommandResult{ Message: err.Error(), }, err } - gid, err := strconv.Atoi(gidStr) + gid, err := utils.LookUpGID(groupname) if err != nil { return CommandResult{ Message: err.Error(), diff --git a/pkg/runner/ftp_types.go b/pkg/runner/ftp_types.go index 31458a5..1919029 100644 --- a/pkg/runner/ftp_types.go +++ b/pkg/runner/ftp_types.go @@ -47,8 +47,8 @@ type FtpData struct { Src string `json:"src,omitempty"` Dst string `json:"dst,omitempty"` Mode string `json:"mode,omitempty"` - UID string `json:"uid,omitempty"` - GID string `json:"gid,omitempty"` + Username string `json:"username,omitempty"` + Groupname string `json:"groupname,omitempty"` } type FtpContent struct { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 051a1f5..b141338 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -4,10 +4,6 @@ import ( "bytes" "context" "fmt" - 
"github.com/alpacanetworks/alpamon/pkg/version" - "github.com/google/go-github/github" - "github.com/rs/zerolog/log" - "github.com/shirou/gopsutil/v4/host" "net/url" "os" "os/user" @@ -15,6 +11,11 @@ import ( "runtime" "strconv" "strings" + + "github.com/alpacanetworks/alpamon/pkg/version" + "github.com/google/go-github/github" + "github.com/rs/zerolog/log" + "github.com/shirou/gopsutil/v4/host" ) var ( @@ -158,3 +159,21 @@ func GetLatestVersion() string { func GetUserAgent(name string) string { return fmt.Sprintf("%s/%s", name, version.Version) } + +func LookUpUID(username string) (int, error) { + usr, err := user.Lookup(username) + if err != nil { + return 0, err + } + + return strconv.Atoi(usr.Uid) +} + +func LookUpGID(groupname string) (int, error) { + group, err := user.LookupGroup(groupname) + if err != nil { + return 0, err + } + + return strconv.Atoi(group.Gid) +} From d7698aafbc3c34f2b083f95bb0fc6b1699f9dbf7 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Thu, 29 May 2025 19:50:17 +0900 Subject: [PATCH 356/364] Add exception handling for edge case Add exception handling to return -1 if the username or groupname is an empty string. 
--- pkg/utils/utils.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index b141338..dc3ed70 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -161,6 +161,10 @@ func GetUserAgent(name string) string { } func LookUpUID(username string) (int, error) { + if username == "" { + return -1, nil + } + usr, err := user.Lookup(username) if err != nil { return 0, err @@ -170,6 +174,10 @@ func LookUpUID(username string) (int, error) { } func LookUpGID(groupname string) (int, error) { + if groupname == "" { + return -1, nil + } + group, err := user.LookupGroup(groupname) if err != nil { return 0, err From 4c5e728d3f155df1800b78246d7d5a385bc7a3e8 Mon Sep 17 00:00:00 2001 From: royroyee Date: Fri, 30 May 2025 10:08:02 +0900 Subject: [PATCH 357/364] fix(logger): improve pretty logger compatibility with tools like less --- pkg/logger/logger.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index c446aa3..907bade 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -83,19 +83,20 @@ func InitLogger() *os.File { func PrettyWriter(out io.Writer) zerolog.ConsoleWriter { return zerolog.ConsoleWriter{ Out: out, + NoColor: true, TimeFormat: time.RFC3339, TimeLocation: time.Local, FormatLevel: func(i interface{}) string { - return "[" + strings.ToUpper(i.(string)) + "]" + return "[" + strings.ToUpper(fmt.Sprint(i)) + "]" }, FormatMessage: func(i interface{}) string { - return " " + i.(string) + return " " + fmt.Sprint(i) }, FormatFieldName: func(i interface{}) string { - return "(" + i.(string) + ")" + return "(" + fmt.Sprint(i) + ")" }, FormatFieldValue: func(i interface{}) string { - return i.(string) + return fmt.Sprint(i) }, } } From ef9e42716a9acc19995a7cb986c33eb53ff79aa0 Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Fri, 30 May 2025 19:27:43 +0900 Subject: [PATCH 358/364] Add a timer to ensure atomicity When alpamon restarts, atomicity isn't 
guaranteed between the data sent for sync and the data sent by the collector, causing errors on the server side. To resolve this, add a timer to sync after a 5-second delay upon restart. --- pkg/runner/commit.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/runner/commit.go b/pkg/runner/commit.go index c1b5e4e..aad3d75 100644 --- a/pkg/runner/commit.go +++ b/pkg/runner/commit.go @@ -59,7 +59,10 @@ var syncMutex sync.Mutex func CommitAsync(session *scheduler.Session, commissioned bool) { if commissioned { - go syncSystemInfo(session, nil) + go func() { + time.Sleep(5 * time.Second) + syncSystemInfo(session, nil) + }() } else { go commitSystemInfo() } From 5acbd545fc44f2311d5dcc0c831820d0a7961001 Mon Sep 17 00:00:00 2001 From: SeowonNoh Date: Wed, 4 Jun 2025 13:21:36 +0900 Subject: [PATCH 359/364] feat: add moduser command to handle user information updates --- pkg/runner/command.go | 45 +++++++++++++++++++++++++++++++++++++ pkg/runner/command_types.go | 5 +++++ 2 files changed, 50 insertions(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 08e450a..163bb12 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -125,6 +125,8 @@ func (cr *CommandRunner) handleInternalCmd() (int, string) { return cr.delUser() case "delgroup": return cr.delGroup() + case "moduser": + return cr.modUser() case "ping": return 0, time.Now().Format(time.RFC3339) //case "debug": @@ -537,6 +539,49 @@ func (cr *CommandRunner) delGroup() (exitCode int, result string) { return 0, "Successfully deleted the group." } +func (cr *CommandRunner) modUser() (exitCode int, result string) { + data := modUserData{ + Username: cr.data.Username, + Comment: cr.data.Comment, + } + + err := cr.validateData(data) + if err != nil { + return 1, fmt.Sprintf("adduser: Not enough information. 
%s", err) + } + + if utils.PlatformLike == "debian" { + exitCode, result = runCmdWithOutput( + []string{ + "usr/sbin/usermod", + "--comment", data.Comment, + data.Username, + }, + "root", "", nil, 60, + ) + if exitCode != 0 { + return exitCode, result + } + } else if utils.PlatformLike == "rhel" { + exitCode, result = runCmdWithOutput( + []string{ + "/usr/sbin/usermod", + "--comment", data.Comment, + data.Username, + }, + "root", "", nil, 60, + ) + if exitCode != 0 { + return exitCode, result + } + } else { + return 1, "Not implemented 'moduser' command for this platform." + } + + cr.sync([]string{"groups", "users"}) + return 0, "Successfully modified user information." +} + func (cr *CommandRunner) runFileUpload(fileName string) (exitCode int, result string) { log.Debug().Msgf("Uploading file to %s. (username: %s, groupname: %s)", fileName, cr.data.Username, cr.data.Groupname) diff --git a/pkg/runner/command_types.go b/pkg/runner/command_types.go index 9fdfc3d..f9a3a0d 100644 --- a/pkg/runner/command_types.go +++ b/pkg/runner/command_types.go @@ -92,6 +92,11 @@ type deleteGroupData struct { Groupname string `validate:"required"` } +type modUserData struct { + Username string `validate:"required"` + Comment string `validate:"required"` +} + type openPtyData struct { SessionID string `validate:"required"` URL string `validate:"required"` From 0efdc8486122db2fecfa64045375f906c32f3046 Mon Sep 17 00:00:00 2001 From: SeowonNoh Date: Wed, 4 Jun 2025 15:18:09 +0900 Subject: [PATCH 360/364] feat: support -G option to modify membership relations --- pkg/runner/command.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 163bb12..f6d3c12 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -555,6 +555,7 @@ func (cr *CommandRunner) modUser() (exitCode int, result string) { []string{ "usr/sbin/usermod", "--comment", data.Comment, + "-G", utils.JoinUint64s(cr.data.Groups), data.Username, }, "root", "", nil, 60, 
@@ -567,6 +568,7 @@ func (cr *CommandRunner) modUser() (exitCode int, result string) { []string{ "/usr/sbin/usermod", "--comment", data.Comment, + "-G", utils.JoinUint64s(cr.data.Groups), data.Username, }, "root", "", nil, 60, From 998a7c4a3e5332ed47679182c4847d85b0169284 Mon Sep 17 00:00:00 2001 From: SeowonNoh Date: Thu, 5 Jun 2025 17:22:13 +0900 Subject: [PATCH 361/364] fix: minor code adjustments --- pkg/runner/command.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index f6d3c12..38d7a80 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -547,13 +547,13 @@ func (cr *CommandRunner) modUser() (exitCode int, result string) { err := cr.validateData(data) if err != nil { - return 1, fmt.Sprintf("adduser: Not enough information. %s", err) + return 1, fmt.Sprintf("moduser: Not enough information. %s", err) } if utils.PlatformLike == "debian" { exitCode, result = runCmdWithOutput( []string{ - "usr/sbin/usermod", + "/usr/sbin/usermod", "--comment", data.Comment, "-G", utils.JoinUint64s(cr.data.Groups), data.Username, From e147810f0fa5420de941f6804e577cf561b3f4d2 Mon Sep 17 00:00:00 2001 From: SeowonNoh Date: Thu, 5 Jun 2025 18:18:21 +0900 Subject: [PATCH 362/364] Simplify usermod command execution by removing redundant platform checks --- pkg/runner/command.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index 38d7a80..a1b1af2 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -563,19 +563,6 @@ func (cr *CommandRunner) modUser() (exitCode int, result string) { if exitCode != 0 { return exitCode, result } - } else if utils.PlatformLike == "rhel" { - exitCode, result = runCmdWithOutput( - []string{ - "/usr/sbin/usermod", - "--comment", data.Comment, - "-G", utils.JoinUint64s(cr.data.Groups), - data.Username, - }, - "root", "", nil, 60, - ) - if exitCode != 0 { - return exitCode, result - } } else { return 1, 
"Not implemented 'moduser' command for this platform." } From 962a74b23af380fff1f6b3113d30155bcb96ea52 Mon Sep 17 00:00:00 2001 From: SeowonNoh Date: Thu, 5 Jun 2025 18:19:24 +0900 Subject: [PATCH 363/364] Add rhel support for usermod command alongside debian with shared implementation --- pkg/runner/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/runner/command.go b/pkg/runner/command.go index a1b1af2..4f2c7ab 100644 --- a/pkg/runner/command.go +++ b/pkg/runner/command.go @@ -550,7 +550,7 @@ func (cr *CommandRunner) modUser() (exitCode int, result string) { return 1, fmt.Sprintf("moduser: Not enough information. %s", err) } - if utils.PlatformLike == "debian" { + if utils.PlatformLike == "debian" || utils.PlatformLike == "rhel" { exitCode, result = runCmdWithOutput( []string{ "/usr/sbin/usermod", From ec3714c5ae68bb015ae6e579a84b6d3d7ef77bfe Mon Sep 17 00:00:00 2001 From: Apdul0329 Date: Tue, 10 Jun 2025 11:34:30 +0900 Subject: [PATCH 364/364] Refactor setup command logic Refactor the logic to use the existing configuration file when the setup command is executed non-interactively and a configuration file already exists. 
--- cmd/alpamon/command/setup/setup.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/cmd/alpamon/command/setup/setup.go b/cmd/alpamon/command/setup/setup.go index 15afa81..958a347 100644 --- a/cmd/alpamon/command/setup/setup.go +++ b/cmd/alpamon/command/setup/setup.go @@ -2,15 +2,16 @@ package setup import ( "fmt" - cli "github.com/alpacanetworks/alpacon-cli/utils" - "github.com/alpacanetworks/alpamon/pkg/utils" - "github.com/spf13/cobra" - "golang.org/x/term" "os" "os/exec" "path/filepath" "syscall" "text/template" + + cli "github.com/alpacanetworks/alpacon-cli/utils" + "github.com/alpacanetworks/alpamon/pkg/utils" + "github.com/spf13/cobra" + "golang.org/x/term" ) var ( @@ -43,8 +44,14 @@ var SetupCmd = &cobra.Command{ var isOverwrite bool configExists := fileExists(configTarget) - if configExists && term.IsTerminal(syscall.Stdin) { + if configExists { fmt.Println("A configuration file already exists at:", configTarget) + fmt.Println("When setting up non-interactively, the existing configuration file will be used.") + + if !term.IsTerminal(syscall.Stdin) { + return nil + } + isOverwrite = cli.PromptForBool("Do you want to overwrite it with a new configuration?: ") if !isOverwrite { fmt.Println("Keeping the existing configuration file. Skipping configuration update.")