job.go

package worker

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	tunasync "github.com/tuna/tunasync/internal"
)

// this file contains the workflow of a mirror job

type ctrlAction uint8

const (
	jobStart      ctrlAction = iota
	jobStop       // stop syncing, keep the job
	jobDisable    // disable the job (stops goroutine)
	jobRestart    // restart syncing
	jobPing       // ensure the goroutine is alive
	jobHalt       // worker halts
	jobForceStart // ignore concurrent limit
)
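
// jobMessage is the status report a job sends to the manager: the new
// sync status, the job name, an optional message, and whether the next
// run should be scheduled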
type jobMessage struct {
	status   tunasync.SyncStatus
	name     string
	msg      string
	schedule bool
}

const (
	// empty state
	stateNone uint32 = iota
	// ready to run, able to schedule
	stateReady
	// paused by jobStop
	statePaused
	// disabled by jobDisable
	stateDisabled
	// worker is halting
	stateHalting
)

// used to ensure all jobs are finished before
// the worker exits
var jobsDone sync.WaitGroup

type mirrorJob struct {
	provider mirrorProvider
	ctrlChan chan ctrlAction
	disabled chan empty
	state    uint32
	size     string
}
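
// newMirrorJob wraps a mirrorProvider into a mirrorJob; ctrlChan is
// buffered with capacity 1 so one control message can be queued while
// the job goroutine is busy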
func newMirrorJob(provider mirrorProvider) *mirrorJob {
	return &mirrorJob{
		provider: provider,
		ctrlChan: make(chan ctrlAction, 1),
		state:    stateNone,
	}
}

func (m *mirrorJob) Name() string {
	return m.provider.Name()
}

func (m *mirrorJob) State() uint32 {
	return atomic.LoadUint32(&(m.state))
}

func (m *mirrorJob) SetState(state uint32) {
	atomic.StoreUint32(&(m.state), state)
}
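
// SetProvider replaces the underlying provider; this is only allowed
// while the job is not scheduled to run (stateNone or stateDisabled)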
func (m *mirrorJob) SetProvider(provider mirrorProvider) error {
	s := m.State()
	if (s != stateNone) && (s != stateDisabled) {
		return fmt.Errorf("Provider cannot be switched when job state is %d", s)
	}
	m.provider = provider
	return nil
}

// Run is the goroutine in which the syncing job runs.
// arguments:
//	managerChan: pushes messages to the manager; this channel should have a large buffer
//	semaphore: limits the number of concurrently running syncing jobs
// the mirror provider and the control channel are taken from the mirrorJob itself
// TODO: message struct for managerChan
func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) error {
	jobsDone.Add(1)
	m.disabled = make(chan empty)
	defer func() {
		close(m.disabled)
		jobsDone.Done()
	}()

	provider := m.provider

	// runHooks runs one hook stage for every hook (to keep the code
	// below shorter); the first failure is reported to the manager
	// and returned
	runHooks := func(Hooks []jobHook, action func(h jobHook) error, hookname string) error {
		for _, hook := range Hooks {
			if err := action(hook); err != nil {
				logger.Errorf(
					"failed at %s hooks for %s: %s",
					hookname, m.Name(), err.Error(),
				)
				managerChan <- jobMessage{
					tunasync.Failed, m.Name(),
					fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
					true,
				}
				return err
			}
		}
		return nil
	}
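
	// runJobWrapper performs one full sync cycle: pre-job hooks, then up
	// to provider.Retry() attempts wrapped by pre-exec/post-exec hooks,
	// reporting status to the manager; closing jobDone tells the control
	// loop that the cycle has ended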
	runJobWrapper := func(kill <-chan empty, jobDone chan<- empty) error {
		defer close(jobDone)

		managerChan <- jobMessage{tunasync.PreSyncing, m.Name(), "", false}
		logger.Noticef("start syncing: %s", m.Name())

		Hooks := provider.Hooks()
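		// collect the hooks in reverse order as well: post-* hooks run
		// in the opposite order of the pre-* hooks (LIFO, like defer)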
		rHooks := []jobHook{}
		for i := len(Hooks); i > 0; i-- {
			rHooks = append(rHooks, Hooks[i-1])
		}

		logger.Debug("hooks: pre-job")
		err := runHooks(Hooks, func(h jobHook) error { return h.preJob() }, "pre-job")
		if err != nil {
			return err
		}

		for retry := 0; retry < provider.Retry(); retry++ {
			stopASAP := false // stop job as soon as possible

			if retry > 0 {
				logger.Noticef("retry syncing: %s, retry: %d", m.Name(), retry)
			}
			err := runHooks(Hooks, func(h jobHook) error { return h.preExec() }, "pre-exec")
			if err != nil {
				return err
			}

			// start syncing
			managerChan <- jobMessage{tunasync.Syncing, m.Name(), "", false}

			var syncErr error
			syncDone := make(chan error, 1)
			started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)

			go func() {
				err := provider.Run(started)
				syncDone <- err
			}()
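
			// the sync runs in its own goroutine, so this wrapper can
			// keep reacting to the kill channel while it is in progress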
			select { // Wait until provider started or error happened
			case err := <-syncDone:
				logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
				syncDone <- err // it will be read again later
			case <-started:
				logger.Debug("provider started")
			}

			// Now terminating the provider is feasible
			select {
			case syncErr = <-syncDone:
				logger.Debug("syncing done")
			case <-kill:
				logger.Debug("received kill")
				stopASAP = true
				err := provider.Terminate()
				if err != nil {
					logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
					return err
				}
				syncErr = errors.New("killed by manager")
			}

			// post-exec hooks
			herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
			if herr != nil {
				return herr
			}

			if syncErr == nil {
				// syncing success
				logger.Noticef("succeeded syncing %s", m.Name())
				// post-success hooks
				logger.Debug("post-success hooks")
				err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
				if err != nil {
					return err
				}
			} else {
				// syncing failed
				logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
				// post-fail hooks
				logger.Debug("post-fail hooks")
				err := runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
				if err != nil {
					return err
				}
			}

			if syncErr == nil {
				// syncing success
				m.size = provider.DataSize()
				managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
				return nil
			}

			// syncing failed
			managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == provider.Retry()-1) && (m.State() == stateReady)}

			// gracefully exit
			if stopASAP {
				logger.Debug("No retry, exit directly")
				return nil
			}
			// continue to next retry
		} // for retry
		return nil
	}
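
	// runJob acquires the concurrency semaphore (or a token from
	// bypassSemaphore, filled by jobForceStart) before running a sync
	// cycle; a kill received while still waiting aborts the attempt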
	runJob := func(kill <-chan empty, jobDone chan<- empty, bypassSemaphore <-chan empty) {
		select {
		case semaphore <- empty{}:
			defer func() { <-semaphore }()
			runJobWrapper(kill, jobDone)
		case <-bypassSemaphore:
			logger.Noticef("Concurrent limit ignored by %s", m.Name())
			runJobWrapper(kill, jobDone)
		case <-kill:
			jobDone <- empty{}
			return
		}
	}
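
	// bypassSemaphore is buffered so that a jobForceStart request is
	// remembered until runJob picks it up and skips the semaphore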
	bypassSemaphore := make(chan empty, 1)
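
	// main control loop: when the job is ready, start a sync cycle and
	// handle control messages that arrive while it runs; once a cycle
	// finishes (or the job is not ready), block below until the next
	// control message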
	for {
		if m.State() == stateReady {
			kill := make(chan empty)
			jobDone := make(chan empty)
			go runJob(kill, jobDone, bypassSemaphore)

		_wait_for_job:
			select {
			case <-jobDone:
				logger.Debug("job done")
			case ctrl := <-m.ctrlChan:
				switch ctrl {
				case jobStop:
					m.SetState(statePaused)
					close(kill)
					<-jobDone
				case jobDisable:
					m.SetState(stateDisabled)
					close(kill)
					<-jobDone
					return nil
				case jobRestart:
					m.SetState(stateReady)
					close(kill)
					<-jobDone
					time.Sleep(time.Second) // restart may fail if the process has not exited yet
					continue
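				// a start request while a sync is already running just
				// re-arms the ready state and resumes waiting for the
				// current run; a queued bypass token takes effect on the
				// next cycle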
				case jobForceStart:
					select { // non-blocking
					default:
					case bypassSemaphore <- empty{}:
					}
					fallthrough
				case jobStart:
					m.SetState(stateReady)
					goto _wait_for_job
				case jobHalt:
					m.SetState(stateHalting)
					close(kill)
					<-jobDone
					return nil
				default:
					// TODO: implement this
					close(kill)
					return nil
				}
			}
		}
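
		// whether the last run has just finished or the job is paused,
		// block here until the next control message arrives; a new sync
		// cycle only starts after jobStart/jobRestart/jobForceStart sets
		// the state back to ready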
		ctrl := <-m.ctrlChan
		switch ctrl {
		case jobStop:
			m.SetState(statePaused)
		case jobDisable:
			m.SetState(stateDisabled)
			return nil
		case jobForceStart:
			select { // non-blocking
			default:
			case bypassSemaphore <- empty{}:
			}
			fallthrough
		case jobRestart:
			fallthrough
		case jobStart:
			m.SetState(stateReady)
		default:
			// TODO
			return nil
		}
	}
}