job.go 7.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328
  1. package worker
  2. import (
  3. "errors"
  4. "fmt"
  5. "sync"
  6. "sync/atomic"
  7. "time"
  8. tunasync "github.com/tuna/tunasync/internal"
  9. )
// This file contains the workflow of a mirror job.

// ctrlAction is a control command delivered to a mirror job's
// goroutine through its ctrlChan.
type ctrlAction uint8

const (
	jobStart      ctrlAction = iota
	jobStop       // stop syncing keep the job
	jobDisable    // disable the job (stops goroutine)
	jobRestart    // restart syncing
	jobPing       // ensure the goroutine is alive
	jobHalt       // worker halts
	jobForceStart // ignore concurrent limit
)
// jobMessage is the status report a mirror job pushes to the manager.
// Note: Run builds these with positional literals, so field order matters.
type jobMessage struct {
	status   tunasync.SyncStatus // new sync status of the job
	name     string              // mirror job name
	msg      string              // extra information (e.g. error text), may be empty
	schedule bool                // set when the job finished in ready state; presumably asks the manager to schedule the next sync — confirm against manager code
}
// Job lifecycle states, stored atomically in mirrorJob.state.
const (
	// empty state
	stateNone uint32 = iota
	// ready to run, able to schedule
	stateReady
	// paused by jobStop
	statePaused
	// disabled by jobDisable
	stateDisabled
	// worker is halting
	stateHalting
)
// jobsDone is used to ensure all jobs are finished before
// the worker exits (incremented/decremented in mirrorJob.Run).
var jobsDone sync.WaitGroup
// mirrorJob couples a mirrorProvider with the control machinery
// that drives its syncing workflow (see Run).
type mirrorJob struct {
	provider mirrorProvider
	ctrlChan chan ctrlAction // control commands for the job goroutine
	disabled chan empty      // closed when the job goroutine exits
	state    uint32          // one of the state* constants; access atomically
	size     string          // data size reported by the provider after a successful sync
}
  49. func newMirrorJob(provider mirrorProvider) *mirrorJob {
  50. return &mirrorJob{
  51. provider: provider,
  52. ctrlChan: make(chan ctrlAction, 1),
  53. state: stateNone,
  54. }
  55. }
  56. func (m *mirrorJob) Name() string {
  57. return m.provider.Name()
  58. }
  59. func (m *mirrorJob) State() uint32 {
  60. return atomic.LoadUint32(&(m.state))
  61. }
  62. func (m *mirrorJob) SetState(state uint32) {
  63. atomic.StoreUint32(&(m.state), state)
  64. }
  65. func (m *mirrorJob) SetProvider(provider mirrorProvider) error {
  66. s := m.State()
  67. if (s != stateNone) && (s != stateDisabled) {
  68. return fmt.Errorf("Provider cannot be switched when job state is %d", s)
  69. }
  70. m.provider = provider
  71. return nil
  72. }
  73. // runMirrorJob is the goroutine where syncing job runs in
  74. // arguments:
  75. // provider: mirror provider object
  76. // ctrlChan: receives messages from the manager
  77. // managerChan: push messages to the manager, this channel should have a larger buffer
  78. // sempaphore: make sure the concurrent running syncing job won't explode
  79. // TODO: message struct for managerChan
  80. func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) error {
  81. jobsDone.Add(1)
  82. m.disabled = make(chan empty)
  83. defer func() {
  84. close(m.disabled)
  85. jobsDone.Done()
  86. }()
  87. provider := m.provider
  88. // to make code shorter
  89. runHooks := func(Hooks []jobHook, action func(h jobHook) error, hookname string) error {
  90. for _, hook := range Hooks {
  91. if err := action(hook); err != nil {
  92. logger.Errorf(
  93. "failed at %s hooks for %s: %s",
  94. hookname, m.Name(), err.Error(),
  95. )
  96. managerChan <- jobMessage{
  97. tunasync.Failed, m.Name(),
  98. fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
  99. true,
  100. }
  101. return err
  102. }
  103. }
  104. return nil
  105. }
  106. runJobWrapper := func(kill <-chan empty, jobDone chan<- empty) error {
  107. defer close(jobDone)
  108. managerChan <- jobMessage{tunasync.PreSyncing, m.Name(), "", false}
  109. logger.Noticef("start syncing: %s", m.Name())
  110. Hooks := provider.Hooks()
  111. rHooks := []jobHook{}
  112. for i := len(Hooks); i > 0; i-- {
  113. rHooks = append(rHooks, Hooks[i-1])
  114. }
  115. logger.Debug("hooks: pre-job")
  116. err := runHooks(Hooks, func(h jobHook) error { return h.preJob() }, "pre-job")
  117. if err != nil {
  118. return err
  119. }
  120. for retry := 0; retry < provider.Retry(); retry++ {
  121. stopASAP := false // stop job as soon as possible
  122. if retry > 0 {
  123. logger.Noticef("retry syncing: %s, retry: %d", m.Name(), retry)
  124. }
  125. err := runHooks(Hooks, func(h jobHook) error { return h.preExec() }, "pre-exec")
  126. if err != nil {
  127. return err
  128. }
  129. // start syncing
  130. managerChan <- jobMessage{tunasync.Syncing, m.Name(), "", false}
  131. var syncErr error
  132. syncDone := make(chan error, 1)
  133. started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
  134. go func() {
  135. err := provider.Run(started)
  136. syncDone <- err
  137. }()
  138. select { // Wait until provider started or error happened
  139. case err := <-syncDone:
  140. logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
  141. syncDone <- err // it will be read again later
  142. case <-started:
  143. logger.Debug("provider started")
  144. }
  145. // Now terminating the provider is feasible
  146. var termErr error
  147. timeout := provider.Timeout()
  148. if timeout <= 0 {
  149. timeout = 100000 * time.Hour // never time out
  150. }
  151. select {
  152. case syncErr = <-syncDone:
  153. logger.Debug("syncing done")
  154. case <-time.After(timeout):
  155. logger.Notice("provider timeout")
  156. termErr = provider.Terminate()
  157. syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
  158. case <-kill:
  159. logger.Debug("received kill")
  160. stopASAP = true
  161. termErr = provider.Terminate()
  162. syncErr = errors.New("killed by manager")
  163. }
  164. if termErr != nil {
  165. logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
  166. return termErr
  167. }
  168. // post-exec hooks
  169. herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
  170. if herr != nil {
  171. return herr
  172. }
  173. if syncErr == nil {
  174. // syncing success
  175. logger.Noticef("succeeded syncing %s", m.Name())
  176. // post-success hooks
  177. logger.Debug("post-success hooks")
  178. err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
  179. if err != nil {
  180. return err
  181. }
  182. } else {
  183. // syncing failed
  184. logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
  185. // post-fail hooks
  186. logger.Debug("post-fail hooks")
  187. err := runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
  188. if err != nil {
  189. return err
  190. }
  191. }
  192. if syncErr == nil {
  193. // syncing success
  194. m.size = provider.DataSize()
  195. managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
  196. return nil
  197. }
  198. // syncing failed
  199. managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == provider.Retry()-1) && (m.State() == stateReady)}
  200. // gracefully exit
  201. if stopASAP {
  202. logger.Debug("No retry, exit directly")
  203. return nil
  204. }
  205. // continue to next retry
  206. } // for retry
  207. return nil
  208. }
  209. runJob := func(kill <-chan empty, jobDone chan<- empty, bypassSemaphore <-chan empty) {
  210. select {
  211. case semaphore <- empty{}:
  212. defer func() { <-semaphore }()
  213. runJobWrapper(kill, jobDone)
  214. case <-bypassSemaphore:
  215. logger.Noticef("Concurrent limit ignored by %s", m.Name())
  216. runJobWrapper(kill, jobDone)
  217. case <-kill:
  218. jobDone <- empty{}
  219. return
  220. }
  221. }
  222. bypassSemaphore := make(chan empty, 1)
  223. for {
  224. if m.State() == stateReady {
  225. kill := make(chan empty)
  226. jobDone := make(chan empty)
  227. go runJob(kill, jobDone, bypassSemaphore)
  228. _wait_for_job:
  229. select {
  230. case <-jobDone:
  231. logger.Debug("job done")
  232. case ctrl := <-m.ctrlChan:
  233. switch ctrl {
  234. case jobStop:
  235. m.SetState(statePaused)
  236. close(kill)
  237. <-jobDone
  238. case jobDisable:
  239. m.SetState(stateDisabled)
  240. close(kill)
  241. <-jobDone
  242. return nil
  243. case jobRestart:
  244. m.SetState(stateReady)
  245. close(kill)
  246. <-jobDone
  247. time.Sleep(time.Second) // Restart may fail if the process was not exited yet
  248. continue
  249. case jobForceStart:
  250. select { //non-blocking
  251. default:
  252. case bypassSemaphore <- empty{}:
  253. }
  254. fallthrough
  255. case jobStart:
  256. m.SetState(stateReady)
  257. goto _wait_for_job
  258. case jobHalt:
  259. m.SetState(stateHalting)
  260. close(kill)
  261. <-jobDone
  262. return nil
  263. default:
  264. // TODO: implement this
  265. close(kill)
  266. return nil
  267. }
  268. }
  269. }
  270. ctrl := <-m.ctrlChan
  271. switch ctrl {
  272. case jobStop:
  273. m.SetState(statePaused)
  274. case jobDisable:
  275. m.SetState(stateDisabled)
  276. return nil
  277. case jobForceStart:
  278. select { //non-blocking
  279. default:
  280. case bypassSemaphore <- empty{}:
  281. }
  282. fallthrough
  283. case jobRestart:
  284. fallthrough
  285. case jobStart:
  286. m.SetState(stateReady)
  287. default:
  288. // TODO
  289. return nil
  290. }
  291. }
  292. }