new feature: run "tunasynctl start" with "-f" to override concurrent job limit

z4yx 2018-05-30 18:55:06 +08:00
parent 6cbe91b4f1
commit c750aa1871
7 changed files with 88 additions and 50 deletions
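With this change, passing -f (long form: --force) to the start subcommand sets options["force"] = true on the command sent to the manager, and the worker then starts the job even when the concurrent-job limit is saturated. A typical invocation would look like tunasynctl start example-mirror --worker example-worker -f, where the mirror and worker names are placeholders and the worker flag name follows the c.String("worker") lookup in the first hunk below.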


@@ -285,11 +285,16 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
 				"argument WORKER", 1)
 		}
+		options := map[string]bool{}
+		if c.Bool("force") {
+			options["force"] = true
+		}
 		cmd := tunasync.ClientCmd{
 			Cmd:      cmd,
 			MirrorID: mirrorID,
 			WorkerID: c.String("worker"),
 			Args:     argsList,
+			Options:  options,
 		}
 		resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client)
 		if err != nil {
@@ -410,6 +415,11 @@ func main() {
 		},
 	}
+	forceStartFlag := cli.BoolFlag{
+		Name:  "force, f",
+		Usage: "Override the concurrent limit",
+	}
 	app.Commands = []cli.Command{
 		{
 			Name: "list",

@@ -450,7 +460,7 @@ func main() {
 		{
 			Name:   "start",
 			Usage:  "Start a job",
-			Flags:  append(commonFlags, cmdFlags...),
+			Flags:  append(append(commonFlags, cmdFlags...), forceStartFlag),
 			Action: initializeWrapper(cmdJob(tunasync.CmdStart)),
 		},
 		{


@@ -68,9 +68,10 @@ func (c CmdVerb) String() string {
 // A WorkerCmd is the command message send from the
 // manager to a worker
 type WorkerCmd struct {
-	Cmd      CmdVerb  `json:"cmd"`
-	MirrorID string   `json:"mirror_id"`
-	Args     []string `json:"args"`
+	Cmd      CmdVerb         `json:"cmd"`
+	MirrorID string          `json:"mirror_id"`
+	Args     []string        `json:"args"`
+	Options  map[string]bool `json:"options"`
 }

 func (c WorkerCmd) String() string {

@@ -83,8 +84,9 @@ func (c WorkerCmd) String() string {
 // A ClientCmd is the command message send from client
 // to the manager
 type ClientCmd struct {
-	Cmd      CmdVerb  `json:"cmd"`
-	MirrorID string   `json:"mirror_id"`
-	WorkerID string   `json:"worker_id"`
-	Args     []string `json:"args"`
+	Cmd      CmdVerb         `json:"cmd"`
+	MirrorID string          `json:"mirror_id"`
+	WorkerID string          `json:"worker_id"`
+	Args     []string        `json:"args"`
+	Options  map[string]bool `json:"options"`
 }
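For reference, here is a minimal sketch of the wire format implied by the struct tags above, using a trimmed copy of ClientCmd. It assumes CmdVerb has no custom JSON marshaller (so it encodes as a plain number) and that CmdStart is the zero verb; the IDs are placeholders.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the ClientCmd definition above.
type CmdVerb uint8

const CmdStart CmdVerb = 0 // assumption: CmdStart is the zero verb

type ClientCmd struct {
	Cmd      CmdVerb         `json:"cmd"`
	MirrorID string          `json:"mirror_id"`
	WorkerID string          `json:"worker_id"`
	Args     []string        `json:"args"`
	Options  map[string]bool `json:"options"`
}

func main() {
	cmd := ClientCmd{
		Cmd:      CmdStart,
		MirrorID: "example-mirror", // placeholder IDs
		WorkerID: "example-worker",
		Options:  map[string]bool{"force": true},
	}
	out, err := json.Marshal(cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"cmd":0,"mirror_id":"example-mirror","worker_id":"example-worker","args":null,"options":{"force":true}}
}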


@@ -337,6 +337,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
 		Cmd:      clientCmd.Cmd,
 		MirrorID: clientCmd.MirrorID,
 		Args:     clientCmd.Args,
+		Options:  clientCmd.Options,
 	}
 	// update job status, even if the job did not disable successfully,


@@ -218,6 +218,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
 			defer func() { <-semaphore }()
 			runJobWrapper(kill, jobDone)
 		case <-bypassSemaphore:
+			logger.Noticef("Concurrent limit ignored by %s", m.Name())
 			runJobWrapper(kill, jobDone)
 		case <-kill:
 			jobDone <- empty{}
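The bypass path above is easier to see in isolation. Below is a self-contained sketch of the pattern (names and sizes are illustrative, not the project's actual wiring): a buffered channel serves as the job semaphore, and a token on a second channel lets a forced job skip the wait.

package main

import (
	"fmt"
	"sync"
	"time"
)

type empty struct{}

func run(id int, semaphore, bypass chan empty, wg *sync.WaitGroup) {
	defer wg.Done()
	select {
	case semaphore <- empty{}: // normal start: wait for a free slot
		defer func() { <-semaphore }() // release the slot when done
		time.Sleep(50 * time.Millisecond) // stand-in for the sync work
		fmt.Printf("job %d ran within the limit\n", id)
	case <-bypass: // forced start: consume a bypass token instead
		time.Sleep(50 * time.Millisecond)
		fmt.Printf("job %d ignored the concurrent limit\n", id)
	}
}

func main() {
	semaphore := make(chan empty, 2) // at most two normal jobs at once
	bypass := make(chan empty, 1)
	bypass <- empty{} // a forced start would queue a token like this

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go run(i, semaphore, bypass, &wg)
	}
	wg.Wait()
}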


@@ -286,7 +286,7 @@ func TestConcurrentMirrorJobs(t *testing.T) {
 		c := cmdConfig{
 			name:        fmt.Sprintf("job-%d", i),
 			upstreamURL: "http://mirrors.tuna.moe/",
-			command:     "sleep 3",
+			command:     "sleep 2",
 			workingDir:  tmpDir,
 			logDir:      tmpDir,
 			logFile:     "/dev/null",
@@ -302,17 +302,12 @@ func TestConcurrentMirrorJobs(t *testing.T) {
 	managerChan := make(chan jobMessage, 10)
 	semaphore := make(chan empty, CONCURRENT-2)

-	Convey("When we run them all", func(ctx C) {
-		for _, job := range jobs {
-			go job.Run(managerChan, semaphore)
-			job.ctrlChan <- jobStart
-		}
-
-		counterEnded := 0
-		counterRunning := 0
-		maxRunning := 0
-		counterFailed := 0
-		for counterEnded < CONCURRENT {
+	countingJobs := func(managerChan chan jobMessage, totalJobs, concurrentCheck int) (peakConcurrent, counterFailed int) {
+		counterEnded := 0
+		counterRunning := 0
+		peakConcurrent = 0
+		counterFailed = 0
+		for counterEnded < totalJobs {
 			msg := <-managerChan
 			switch msg.status {
 			case PreSyncing:
@@ -328,13 +323,29 @@ func TestConcurrentMirrorJobs(t *testing.T) {
 				So(0, ShouldEqual, 1)
 			}
 			// Test if semaphore works
-			So(counterRunning, ShouldBeLessThanOrEqualTo, CONCURRENT-2)
-			if counterRunning > maxRunning {
-				maxRunning = counterRunning
+			So(counterRunning, ShouldBeLessThanOrEqualTo, concurrentCheck)
+			if counterRunning > peakConcurrent {
+				peakConcurrent = counterRunning
 			}
 		}
+		// select {
+		// case msg := <-managerChan:
+		// 	logger.Errorf("extra message received: %v", msg)
+		// 	So(0, ShouldEqual, 1)
+		// case <-time.After(2 * time.Second):
+		// }
+		return
+	}

-		So(maxRunning, ShouldEqual, CONCURRENT-2)
+	Convey("When we run them all", func(ctx C) {
+		for _, job := range jobs {
+			go job.Run(managerChan, semaphore)
+			job.ctrlChan <- jobStart
+		}
+
+		peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
+
+		So(peakConcurrent, ShouldEqual, CONCURRENT-2)
 		So(counterFailed, ShouldEqual, 0)

 		for _, job := range jobs {
@@ -352,33 +363,42 @@ func TestConcurrentMirrorJobs(t *testing.T) {
 		// Cancel the one waiting for semaphore
 		jobs[len(jobs)-1].ctrlChan <- jobStop

-		counterEnded := 0
-		counterRunning := 0
-		maxRunning := 0
-		counterFailed := 0
-		for counterEnded < CONCURRENT-1 {
-			msg := <-managerChan
-			switch msg.status {
-			case PreSyncing:
-				counterRunning++
-			case Syncing:
-			case Failed:
-				counterFailed++
-				fallthrough
-			case Success:
-				counterEnded++
-				counterRunning--
-			default:
-				So(0, ShouldEqual, 1)
-			}
-			// Test if semaphore works
-			So(counterRunning, ShouldBeLessThanOrEqualTo, CONCURRENT-2)
-			if counterRunning > maxRunning {
-				maxRunning = counterRunning
-			}
-		}
-
-		So(maxRunning, ShouldEqual, CONCURRENT-2)
+		peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT-1, CONCURRENT-2)
+
+		So(peakConcurrent, ShouldEqual, CONCURRENT-2)
+		So(counterFailed, ShouldEqual, 0)
+
+		for _, job := range jobs {
+			job.ctrlChan <- jobDisable
+			<-job.disabled
+		}
+	})
+
+	Convey("If we override the concurrent limit", func(ctx C) {
+		for _, job := range jobs {
+			go job.Run(managerChan, semaphore)
+			job.ctrlChan <- jobStart
+			time.Sleep(200 * time.Millisecond)
+		}
+
+		jobs[len(jobs)-1].ctrlChan <- jobForceStart
+		jobs[len(jobs)-2].ctrlChan <- jobForceStart
+
+		peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT)
+
+		So(peakConcurrent, ShouldEqual, CONCURRENT)
+		So(counterFailed, ShouldEqual, 0)
+
+		time.Sleep(1 * time.Second)
+
+		// fmt.Println("Restart them")
+		for _, job := range jobs {
+			job.ctrlChan <- jobStart
+		}
+
+		peakConcurrent, counterFailed = countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
+
+		So(peakConcurrent, ShouldEqual, CONCURRENT-2)
 		So(counterFailed, ShouldEqual, 0)

 		for _, job := range jobs {


@@ -268,9 +268,9 @@ sleep 5
 		So(err, ShouldBeNil)
 		c := cmdConfig{
-			name:        "run-pwd",
+			name:        "run-ls",
 			upstreamURL: "http://mirrors.tuna.moe/",
-			command:     "pwd",
+			command:     "ls",
 			workingDir:  tmpDir,
 			logDir:      tmpDir,
 			logFile:     "/dev/null",


@@ -219,7 +219,11 @@ func (w *Worker) makeHTTPServer() {
 		}
 		switch cmd.Cmd {
 		case CmdStart:
-			job.ctrlChan <- jobStart
+			if cmd.Options["force"] {
+				job.ctrlChan <- jobForceStart
+			} else {
+				job.ctrlChan <- jobStart
+			}
 		case CmdRestart:
 			job.ctrlChan <- jobRestart
 		case CmdStop:
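Taken together, the hunks line up end to end: tunasynctl sets options["force"] when -f is given, the manager copies Options from the ClientCmd into the WorkerCmd it forwards, and the worker's HTTP handler turns a forced CmdStart into jobForceStart. Presumably that control message queues a token on bypassSemaphore, since the select in mirrorJob.Run shown earlier then takes the bypass branch instead of waiting on the semaphore; the ctrlChan handling itself is not part of the hunks shown here.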