
style: better logging

bigeagle 9 years ago
parent
commit
f8fd1ae460

+ 7 - 0
cmd/tunasync/tunasync.go

@@ -4,6 +4,7 @@ import (
 	"os"
 
 	"github.com/codegangsta/cli"
+	"github.com/gin-gonic/gin"
 	"gopkg.in/op/go-logging.v1"
 
 	tunasync "github.com/tuna/tunasync/internal"
@@ -21,6 +22,9 @@ func startManager(c *cli.Context) {
 		logger.Error("Error loading config: %s", err.Error())
 		os.Exit(1)
 	}
+	if !cfg.Debug {
+		gin.SetMode(gin.ReleaseMode)
+	}
 
 	m := manager.GetTUNASyncManager(cfg)
 	if m == nil {
@@ -34,6 +38,9 @@ func startManager(c *cli.Context) {
 
 func startWorker(c *cli.Context) {
 	tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), c.Bool("with-systemd"))
+	if !c.Bool("debug") {
+		gin.SetMode(gin.ReleaseMode)
+	}
 
 	cfg, err := worker.LoadConfig(c.String("config"))
 	if err != nil {

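The two new blocks above flip Gin into release mode whenever debug is off, via the config (manager) or the command line (worker). A minimal standalone sketch of the effect, not tunasync's actual wiring (the debug variable and the /ping route are made up for illustration):

package main

import "github.com/gin-gonic/gin"

func main() {
	debug := false // stand-in for cfg.Debug / c.Bool("debug")
	if !debug {
		// Call before creating the engine so Gin's "[GIN-debug]"
		// warnings and route-registration banner are suppressed.
		gin.SetMode(gin.ReleaseMode)
	}

	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) {
		c.String(200, "pong")
	})
	r.Run(":8080")
}
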
+ 5 - 1
internal/logger.go

@@ -12,7 +12,11 @@ func InitLogger(verbose, debug, withSystemd bool) {
 	if withSystemd {
 		fmtString = "[%{level:.6s}] %{message}"
 	} else {
-		fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}][%{shortfile}]%{color:reset} %{message}"
+		if debug {
+			fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}][%{shortfile}]%{color:reset} %{message}"
+		} else {
+			fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}]%{color:reset} %{message}"
+		}
 	}
 	format := logging.MustStringFormatter(fmtString)
 	logging.SetFormatter(format)

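With this change the %{shortfile} (caller file:line) prefix only appears when debug is on, and systemd output stays minimal since journald adds its own timestamps. A standalone sketch of the whole selection using the same gopkg.in/op/go-logging.v1 format verbs; the backend setup in the last two lines is an assumption, since the hunk only shows the format strings:

package main

import (
	"os"

	"gopkg.in/op/go-logging.v1"
)

var logger = logging.MustGetLogger("example")

func initLogger(debug, withSystemd bool) {
	var fmtString string
	switch {
	case withSystemd:
		// journald records timestamps itself: keep only level + message
		fmtString = "[%{level:.6s}] %{message}"
	case debug:
		// %{shortfile} adds the caller's file:line, handy when debugging
		fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}][%{shortfile}]%{color:reset} %{message}"
	default:
		fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}]%{color:reset} %{message}"
	}
	logging.SetFormatter(logging.MustStringFormatter(fmtString))
	logging.SetBackend(logging.NewLogBackend(os.Stderr, "", 0))
}

func main() {
	initLogger(false, false)
	logger.Notice("hello") // prefix has time and level, but no file:line
}
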
+ 15 - 9
internal/status.go

@@ -18,24 +18,30 @@ const (
 	Disabled
 )
 
-func (s SyncStatus) MarshalJSON() ([]byte, error) {
-	var strStatus string
+func (s SyncStatus) String() string {
 	switch s {
 	case None:
-		strStatus = "none"
+		return "none"
 	case Failed:
-		strStatus = "failed"
+		return "failed"
 	case Success:
-		strStatus = "success"
+		return "success"
 	case Syncing:
-		strStatus = "syncing"
+		return "syncing"
 	case PreSyncing:
-		strStatus = "pre-syncing"
+		return "pre-syncing"
 	case Paused:
-		strStatus = "paused"
+		return "paused"
 	case Disabled:
-		strStatus = "disabled"
+		return "disabled"
 	default:
+		return ""
+	}
+}
+
+func (s SyncStatus) MarshalJSON() ([]byte, error) {
+	strStatus := s.String()
+	if strStatus == "" {
 		return []byte{}, errors.New("Invalid status value")
 	}
 

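Factoring the name mapping into String() makes SyncStatus satisfy fmt.Stringer, so a plain %s (as in the default case of the new status logging in manager/server.go further down) prints "syncing" rather than a bare integer, while MarshalJSON becomes a thin wrapper over it. A simplified, self-contained sketch of the pattern; the real type has more values, and the json.Marshal(str) tail is assumed since the hunk cuts off before the return:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Simplified stand-in for tunasync's internal.SyncStatus.
type SyncStatus uint8

const (
	None SyncStatus = iota
	Failed
	Success
	Syncing
)

func (s SyncStatus) String() string {
	switch s {
	case None:
		return "none"
	case Failed:
		return "failed"
	case Success:
		return "success"
	case Syncing:
		return "syncing"
	default:
		return ""
	}
}

func (s SyncStatus) MarshalJSON() ([]byte, error) {
	str := s.String()
	if str == "" {
		return []byte{}, errors.New("Invalid status value")
	}
	return json.Marshal(str)
}

func main() {
	s := Syncing
	fmt.Printf("status: %s\n", s) // fmt uses String(): "status: syncing"
	b, _ := json.Marshal(s)       // JSON uses MarshalJSON(): "syncing"
	fmt.Println(string(b))
}
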
+ 1 - 1
manager/config.go

@@ -41,7 +41,7 @@ func LoadConfig(cfgFile string, c *cli.Context) (*Config, error) {
 
 	if cfgFile != "" {
 		if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
-			logger.Error(err.Error())
+			logger.Errorf(err.Error())
 			return nil, err
 		}
 	}

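Most of the remaining hunks are the same mechanical change: calls that pass a printf-style format move from the plain level methods (Error, Warning, Info, Debug) to their *f variants. In gopkg.in/op/go-logging.v1 the plain methods are Sprint-style, so a %s in the first argument is printed literally and the error text is merely appended; only the *f methods interpolate. A standalone illustration, not tunasync code:

package main

import (
	"errors"

	"gopkg.in/op/go-logging.v1"
)

var logger = logging.MustGetLogger("example")

func main() {
	err := errors.New("no such file")

	// Sprint-style: prints the "%s" verbatim, then the error text.
	logger.Error("Error loading config: %s", err.Error())

	// Sprintf-style: interpolates, which is what this commit switches to.
	logger.Errorf("Error loading config: %s", err.Error())
}
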
+ 1 - 1
manager/middleware.go

@@ -11,7 +11,7 @@ func contextErrorLogger(c *gin.Context) {
 	errs := c.Errors.ByType(gin.ErrorTypeAny)
 	if len(errs) > 0 {
 		for _, err := range errs {
-			logger.Error(`"in request "%s %s: %s"`,
+			logger.Errorf(`"in request "%s %s: %s"`,
 				c.Request.Method, c.Request.URL.Path,
 				err.Error())
 		}

+ 28 - 4
manager/server.go

@@ -37,14 +37,19 @@ func GetTUNASyncManager(cfg *Config) *Manager {
 	}
 	s := &Manager{
 		cfg:     cfg,
-		engine:  gin.Default(),
 		adapter: nil,
 	}
 
+	s.engine = gin.New()
+	s.engine.Use(gin.Recovery())
+	if cfg.Debug {
+		s.engine.Use(gin.Logger())
+	}
+
 	if cfg.Files.CACert != "" {
 		httpClient, err := CreateHTTPClient(cfg.Files.CACert)
 		if err != nil {
-			logger.Error("Error initializing HTTP client: %s", err.Error())
+			logger.Errorf("Error initializing HTTP client: %s", err.Error())
 			return nil
 		}
 		s.httpClient = httpClient
@@ -53,7 +58,7 @@ func GetTUNASyncManager(cfg *Config) *Manager {
 	if cfg.Files.DBFile != "" {
 		adapter, err := makeDBAdapter(cfg.Files.DBType, cfg.Files.DBFile)
 		if err != nil {
-			logger.Error("Error initializing DB adapter: %s", err.Error())
+			logger.Errorf("Error initializing DB adapter: %s", err.Error())
 			return nil
 		}
 		s.setDBAdapter(adapter)
@@ -170,6 +175,8 @@ func (s *Manager) registerWorker(c *gin.Context) {
 		s.returnErrJSON(c, http.StatusInternalServerError, err)
 		return
 	}
+
+	logger.Noticef("Worker <%s> registered", _worker.ID)
 	// create workerCmd channel for this worker
 	c.JSON(http.StatusOK, newWorker)
 }
@@ -210,6 +217,22 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
 		status.LastUpdate = curStatus.LastUpdate
 	}
 
+	// for logging
+	switch status.Status {
+	case Success:
+		logger.Noticef("Job [%s] @<%s> success", status.Name, status.Worker)
+	case Failed:
+		logger.Warningf("Job [%s] @<%s> failed", status.Name, status.Worker)
+	case Syncing:
+		logger.Infof("Job [%s] @<%s> starts syncing", status.Name, status.Worker)
+	case Disabled:
+		logger.Noticef("Job [%s] @<%s> disabled", status.Name, status.Worker)
+	case Paused:
+		logger.Noticef("Job [%s] @<%s> paused", status.Name, status.Worker)
+	default:
+		logger.Infof("Job [%s] @<%s> status: %s", status.Name, status.Worker, status.Status)
+	}
+
 	newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
 	if err != nil {
 		err := fmt.Errorf("failed to update job %s of worker %s: %s",
@@ -228,7 +251,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
 	workerID := clientCmd.WorkerID
 	if workerID == "" {
 		// TODO: decide which worker should do this mirror when WorkerID is null string
-		logger.Error("handleClientCmd case workerID == \" \" not implemented yet")
+		logger.Errorf("handleClientCmd case workerID == \" \" not implemented yet")
 		c.AbortWithStatus(http.StatusInternalServerError)
 		return
 	}
@@ -263,6 +286,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
 		s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
 	}
 
+	logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)
 	// post command to worker
 	_, err = PostJSON(workerURL, workerCmd, s.httpClient)
 	if err != nil {

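gin.Default() is gin.New() plus the Logger and Recovery middleware; building the engine by hand keeps panic recovery unconditionally but attaches Gin's per-request access log only when the manager runs in debug mode, so it does not interleave with the manager's own go-logging output in production. A compact sketch of that construction (the /ping route and port are made up):

package main

import "github.com/gin-gonic/gin"

func newEngine(debug bool) *gin.Engine {
	e := gin.New()
	e.Use(gin.Recovery()) // always recover from handler panics
	if debug {
		e.Use(gin.Logger()) // per-request access log, debug only
	}
	return e
}

func main() {
	e := newEngine(false)
	e.GET("/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{"msg": "pong"})
	})
	e.Run(":14242")
}
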
+ 1 - 1
tests/manager.conf

@@ -1,4 +1,4 @@
-debug = true
+debug = false
 
 [server]
 addr = "127.0.0.1"

+ 2 - 2
worker/cgroup.go

@@ -43,7 +43,7 @@ func (c *cgroupHook) preExec() error {
 func (c *cgroupHook) postExec() error {
 	err := c.killAll()
 	if err != nil {
-		logger.Error("Error killing tasks: %s", err.Error())
+		logger.Errorf("Error killing tasks: %s", err.Error())
 	}
 
 	c.created = false
@@ -75,7 +75,7 @@ func (c *cgroupHook) killAll() error {
 		taskList = append(taskList, pid)
 	}
 	for _, pid := range taskList {
-		logger.Debug("Killing process: %d", pid)
+		logger.Debugf("Killing process: %d", pid)
 		unix.Kill(pid, syscall.SIGKILL)
 	}
 

+ 1 - 1
worker/config.go

@@ -91,7 +91,7 @@ func LoadConfig(cfgFile string) (*Config, error) {
 
 	cfg := new(Config)
 	if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
-		logger.Error(err.Error())
+		logger.Errorf(err.Error())
 		return nil, err
 	}
 	return cfg, nil

+ 6 - 6
worker/job.go

@@ -86,7 +86,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 	runHooks := func(Hooks []jobHook, action func(h jobHook) error, hookname string) error {
 		for _, hook := range Hooks {
 			if err := action(hook); err != nil {
-				logger.Error(
+				logger.Errorf(
 					"failed at %s hooks for %s: %s",
 					hookname, m.Name(), err.Error(),
 				)
@@ -105,7 +105,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 		defer close(jobDone)
 
 		managerChan <- jobMessage{tunasync.PreSyncing, m.Name(), "", false}
-		logger.Info("start syncing: %s", m.Name())
+		logger.Noticef("start syncing: %s", m.Name())
 
 		Hooks := provider.Hooks()
 		rHooks := []jobHook{}
@@ -123,7 +123,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 			stopASAP := false // stop job as soon as possible
 
 			if retry > 0 {
-				logger.Info("retry syncing: %s, retry: %d", m.Name(), retry)
+				logger.Noticef("retry syncing: %s, retry: %d", m.Name(), retry)
 			}
 			err := runHooks(Hooks, func(h jobHook) error { return h.preExec() }, "pre-exec")
 			if err != nil {
@@ -150,7 +150,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 				stopASAP = true
 				err := provider.Terminate()
 				if err != nil {
-					logger.Error("failed to terminate provider %s: %s", m.Name(), err.Error())
+					logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
 					return err
 				}
 				syncErr = errors.New("killed by manager")
@@ -164,7 +164,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 
 			if syncErr == nil {
 				// syncing success
-				logger.Info("succeeded syncing %s", m.Name())
+				logger.Noticef("succeeded syncing %s", m.Name())
 				managerChan <- jobMessage{tunasync.Success, m.Name(), "", true}
 				// post-success hooks
 				err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
@@ -176,7 +176,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 			}
 
 			// syncing failed
-			logger.Warning("failed syncing %s: %s", m.Name(), syncErr.Error())
+			logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
 			managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), retry == maxRetry-1}
 
 			// post-fail hooks

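The sync lifecycle messages (start, retry, success) move from Info to Noticef. In go-logging, NOTICE is more severe than INFO (DEBUG < INFO < NOTICE < WARNING < ERROR < CRITICAL), so these events survive a backend filtered to NOTICE while routine Info/Debug chatter does not. A small sketch of such filtering; the leveled-backend wiring here is illustrative, not tunasync's actual setup:

package main

import (
	"os"

	"gopkg.in/op/go-logging.v1"
)

var logger = logging.MustGetLogger("example")

func main() {
	backend := logging.AddModuleLevel(logging.NewLogBackend(os.Stderr, "", 0))
	backend.SetLevel(logging.NOTICE, "") // drop DEBUG and INFO records
	logging.SetBackend(backend)

	logger.Infof("retry syncing: %s, retry: %d", "archlinux", 1) // filtered out
	logger.Noticef("start syncing: %s", "archlinux")             // still printed
}
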
+ 1 - 1
worker/loglimit_hook.go

@@ -30,7 +30,7 @@ func (f fileSlice) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
 func (f fileSlice) Less(i, j int) bool { return f[i].ModTime().Before(f[j].ModTime()) }
 
 func (l *logLimiter) preExec() error {
-	logger.Debug("executing log limitter for %s", l.provider.Name())
+	logger.Debugf("executing log limitter for %s", l.provider.Name())
 
 	p := l.provider
 	if p.LogFile() == "/dev/null" {

+ 2 - 2
worker/provider.go

@@ -142,7 +142,7 @@ func (p *baseProvider) prepareLogFile() error {
 	if p.logFile == nil {
 		logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE, 0644)
 		if err != nil {
-			logger.Error("Error opening logfile %s: %s", p.LogFile(), err.Error())
+			logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
 			return err
 		}
 		p.logFile = logFile
@@ -178,7 +178,7 @@ func (p *baseProvider) Wait() error {
 }
 
 func (p *baseProvider) Terminate() error {
-	logger.Debug("terminating provider: %s", p.Name())
+	logger.Debugf("terminating provider: %s", p.Name())
 	if !p.IsRunning() {
 		return nil
 	}

+ 3 - 3
worker/runner.go

@@ -45,11 +45,11 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
 		}
 	}
 
-	logger.Debug("Executing command %s at %s", cmdAndArgs[0], workingDir)
+	logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
 	if _, err := os.Stat(workingDir); os.IsNotExist(err) {
-		logger.Debug("Making dir %s", workingDir)
+		logger.Debugf("Making dir %s", workingDir)
 		if err = os.MkdirAll(workingDir, 0755); err != nil {
-			logger.Error("Error making dir %s", workingDir)
+			logger.Errorf("Error making dir %s", workingDir)
 		}
 	}
 

+ 8 - 9
worker/worker.go

@@ -49,7 +49,7 @@ func GetTUNASyncWorker(cfg *Config) *Worker {
 	if cfg.Manager.CACert != "" {
 		httpClient, err := CreateHTTPClient(cfg.Manager.CACert)
 		if err != nil {
-			logger.Error("Error initializing HTTP client: %s", err.Error())
+			logger.Errorf("Error initializing HTTP client: %s", err.Error())
 			return nil
 		}
 		w.httpClient = httpClient
@@ -190,7 +190,7 @@ func (w *Worker) makeHTTPServer() {
 			c.JSON(http.StatusNotFound, gin.H{"msg": fmt.Sprintf("Mirror ``%s'' not found", cmd.MirrorID)})
 			return
 		}
-		logger.Info("Received command: %v", cmd)
+		logger.Noticef("Received command: %v", cmd)
 		// if job disabled, start them first
 		switch cmd.Cmd {
 		case CmdStart, CmdRestart:
@@ -225,7 +225,6 @@ func (w *Worker) makeHTTPServer() {
 
 		c.JSON(http.StatusOK, gin.H{"msg": "OK"})
 	})
-
 	w.httpEngine = s
 }
 
@@ -281,7 +280,7 @@ func (w *Worker) runSchedule() {
 				job.SetState(stateReady)
 				go job.Run(w.managerChan, w.semaphore)
 				stime := m.LastUpdate.Add(job.provider.Interval())
-				logger.Debug("Scheduling job %s @%s", job.Name(), stime.Format("2006-01-02 15:04:05"))
+				logger.Debugf("Scheduling job %s @%s", job.Name(), stime.Format("2006-01-02 15:04:05"))
 				w.schedule.AddJob(stime, job)
 			}
 		}
@@ -302,7 +301,7 @@ func (w *Worker) runSchedule() {
 			// got status update from job
 			job := w.jobs[jobMsg.name]
 			if job.State() != stateReady {
-				logger.Info("Job %s state is not ready, skip adding new schedule", jobMsg.name)
+				logger.Infof("Job %s state is not ready, skip adding new schedule", jobMsg.name)
 				continue
 			}
 
@@ -316,7 +315,7 @@ func (w *Worker) runSchedule() {
 			// can trigger scheduling
 			if jobMsg.schedule {
 				schedTime := time.Now().Add(job.provider.Interval())
-				logger.Info(
+				logger.Noticef(
 					"Next scheduled time for %s: %s",
 					job.Name(),
 					schedTime.Format("2006-01-02 15:04:05"),
@@ -362,7 +361,7 @@ func (w *Worker) registorWorker() {
 	}
 
 	if _, err := PostJSON(url, msg, w.httpClient); err != nil {
-		logger.Error("Failed to register worker")
+		logger.Errorf("Failed to register worker")
 	}
 }
 
@@ -385,7 +384,7 @@ func (w *Worker) updateStatus(jobMsg jobMessage) {
 	}
 
 	if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
-		logger.Error("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
+		logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
 	}
 }
 
@@ -399,7 +398,7 @@ func (w *Worker) fetchJobStatus() []MirrorStatus {
 	)
 
 	if _, err := GetJSON(url, &mirrorList, w.httpClient); err != nil {
-		logger.Error("Failed to fetch job status: %s", err.Error())
+		logger.Errorf("Failed to fetch job status: %s", err.Error())
 	}
 
 	return mirrorList