mirror of
https://github.com/tuna/tunasync.git
New start, the master branch now uses the codebase written in golang
This commit is contained in:
commit 583eaf7f6a
.gitignore (vendored, 1 line changed)
@@ -56,3 +56,4 @@ target/
*.swp
*~
/examples/tunasync.json
/*.cov
.testandcover.bash (new executable file, 31 lines)

@@ -0,0 +1,31 @@
#!/bin/bash

function die() {
    echo $*
    exit 1
}

export GOPATH=`pwd`:$GOPATH

# Initialize profile.cov
echo "mode: count" > profile.cov

# Initialize error tracking
ERROR=""

# Test each package and append coverage profile info to profile.cov
for pkg in `cat .testpackages.txt`
do
    #$HOME/gopath/bin/
    go test -v -covermode=count -coverprofile=profile_tmp.cov $pkg || ERROR="Error testing $pkg"

    [ -f profile_tmp.cov ] && {
        tail -n +2 profile_tmp.cov >> profile.cov || die "Unable to append coverage for $pkg"
    }
done

if [ ! -z "$ERROR" ]
then
    die "Encountered error, last error was: $ERROR"
fi
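The script is driven by `.testpackages.txt` (added below) and leaves an aggregated `profile.cov` behind. A minimal sketch of running it locally, assuming Go is installed and the listed packages are on the `GOPATH`:

```
./.testandcover.bash
go tool cover -func=profile.cov   # per-function coverage summary
go tool cover -html=profile.cov   # HTML coverage report
```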
.testpackages.txt (new file, 3 lines)

@@ -0,0 +1,3 @@
github.com/tuna/tunasync/internal
github.com/tuna/tunasync/manager
github.com/tuna/tunasync/worker
.travis.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
language: go
go:
  - 1.6

before_install:
  - sudo apt-get install cgroup-bin
  - go get golang.org/x/tools/cmd/cover
  - go get -v github.com/mattn/goveralls

os:
  - linux

before_script:
  - sudo cgcreate -t travis -a travis -g cpu:tunasync

script:
  - ./.testandcover.bash

after_success:
  - goveralls -coverprofile=profile.cov -service=travis-ci
README.md (117 lines changed)

@@ -1,14 +1,113 @@
tunasync
========

[](https://travis-ci.org/tuna/tunasync)
[](https://coveralls.io/github/tuna/tunasync?branch=dev)

## Design

```
# Architecture

- Manager: Central instance for status and job management
- Worker: Runs mirror jobs

+------------+  +---+                    +---+
| Client API |  |   |     Job Status     |   |    +----------+     +----------+
+------------+  |   +------------------->|   |--->|  mirror  +---->|  mirror  |
+------------+  |   |                    | w |    |  config  |     | provider |
| Worker API |  | H |                    | o |    +----------+     +----+-----+
+------------+  | T |    Job Control     | r |                          |
+------------+  | T +------------------->| k |    +------------+        |
| Job/Status |  | P |   Start/Stop/...   | e |    | mirror job |<-------+
| Management |  | S |                    | r |    +------^-----+
+------------+  |   |   Update Status    |   | +---------+---------+
+------------+  |   <--------------------+   | |     Scheduler     |
|   BoltDB   |  |   |                    |   | +-------------------+
+------------+  +---+                    +---+


# Job Run Process

  PreSyncing         Syncing                               Success
+-----------+     +-----------+    +-------------+     +--------------+
| pre-job   +--+->|  job run  +--->|  post-exec  +-+-->| post-success |
+-----------+  ^  +-----------+    +-------------+ |   +--------------+
               |                                    |
               |      +-----------------+           | Failed
               +------+    post-fail    |<---------+
                      +-----------------+
```
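All communication goes through the manager's HTTP(S) API (the routes live in `manager/server.go` below). As an illustrative sketch only, assuming a manager running on the default `localhost:14242` and the root CA generated in the certificate section below, the status endpoints can be queried directly:

```
curl --cacert rootCA.crt https://localhost:14242/ping
curl --cacert rootCA.crt https://localhost:14242/workers
curl --cacert rootCA.crt https://localhost:14242/jobs
```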
## TODO

- [ ] use context manager to handle job contexts
- [x] Hooks need "before_exec", "after_exec"
- [x] implement `tunasynctl tail` and `tunasynctl log` or equivalent feature
- [x] status file
    - [ ] mirror size
    - [x] upstream
- [x] btrfs backend (create snapshot before syncing)
- [x] add mirror job online
- [x] use toml as configuration
- [x] split to `tunasync-manager` and `tunasync-worker` instances
    - [x] use HTTP as communication protocol
    - [x] implement manager as status server first, and use python worker
    - [x] implement go worker
- Web frontend for `tunasync-manager`
    - [ ] start/stop/restart job
    - [ ] enable/disable mirror
    - [ ] view log
- [ ] config file structure
    - [ ] support multi-file configuration (`/etc/tunasync.d/mirror-enabled/*.conf`)
## Generate Self-Signed Certificate

First, create the root CA:

```
openssl genrsa -out rootCA.key 2048
openssl req -x509 -new -nodes -key rootCA.key -days 365 -out rootCA.crt
```

Create the host key:

```
openssl genrsa -out host.key 2048
```

Now create a CSR. Before that, write a `req.cnf`:

```
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req

[req_distinguished_name]
countryName = Country Name (2 letter code)
countryName_default = CN
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = BJ
localityName = Locality Name (eg, city)
localityName_default = Beijing
organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_default = TUNA
commonName = Common Name (server FQDN or domain name)
commonName_default = <server_FQDN>
commonName_max = 64

[v3_req]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = <server_FQDN_1>
DNS.2 = <server_FQDN_2>
```

Substitute `<server_FQDN>` with your server's FQDN, then run:

```
openssl req -new -key host.key -out host.csr -config req.cnf
```

Finally, generate and sign the host certificate with the root CA:

```
openssl x509 -req -in host.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out host.crt -days 365 -extensions v3_req -extfile req.cnf
```
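One way to sanity-check the generated files is to verify the host certificate against the root CA and inspect its subjectAltName entries:

```
openssl verify -CAfile rootCA.crt host.crt
openssl x509 -in host.crt -noout -text
```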
cmd/tunasync/tunasync.go (new file, 159 lines)

@@ -0,0 +1,159 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
"github.com/gin-gonic/gin"
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
|
||||
tunasync "github.com/tuna/tunasync/internal"
|
||||
"github.com/tuna/tunasync/manager"
|
||||
"github.com/tuna/tunasync/worker"
|
||||
)
|
||||
|
||||
var logger = logging.MustGetLogger("tunasync-cmd")
|
||||
|
||||
func startManager(c *cli.Context) {
|
||||
tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), c.Bool("with-systemd"))
|
||||
|
||||
cfg, err := manager.LoadConfig(c.String("config"), c)
|
||||
if err != nil {
|
||||
logger.Errorf("Error loading config: %s", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
if !cfg.Debug {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
}
|
||||
|
||||
m := manager.GetTUNASyncManager(cfg)
|
||||
if m == nil {
|
||||
logger.Errorf("Error intializing TUNA sync worker.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.Info("Run tunasync manager server.")
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func startWorker(c *cli.Context) {
|
||||
tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), c.Bool("with-systemd"))
|
||||
if !c.Bool("debug") {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
}
|
||||
|
||||
cfg, err := worker.LoadConfig(c.String("config"))
|
||||
if err != nil {
|
||||
logger.Errorf("Error loading config: %s", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
w := worker.GetTUNASyncWorker(cfg)
|
||||
if w == nil {
|
||||
logger.Errorf("Error intializing TUNA sync worker.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.Info("Run tunasync worker.")
|
||||
w.Run()
|
||||
}
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Version = "0.1"
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "manager",
|
||||
Aliases: []string{"m"},
|
||||
Usage: "start the tunasync manager",
|
||||
Action: startManager,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "config, c",
|
||||
Usage: "Load manager configurations from `FILE`",
|
||||
},
|
||||
|
||||
cli.StringFlag{
|
||||
Name: "addr",
|
||||
Usage: "The manager will listen on `ADDR`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "port",
|
||||
Usage: "The manager will bind to `PORT`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert",
|
||||
Usage: "Use SSL certificate from `FILE`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "Use SSL key from `FILE`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "status-file",
|
||||
Usage: "Write status file to `FILE`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "db-file",
|
||||
Usage: "Use `FILE` as the database file",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "db-type",
|
||||
Usage: "Use database type `TYPE`",
|
||||
},
|
||||
|
||||
cli.BoolFlag{
|
||||
Name: "verbose, v",
|
||||
Usage: "Enable verbose logging",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Run manager in debug mode",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "with-systemd",
|
||||
Usage: "Enable systemd-compatible logging",
|
||||
},
|
||||
|
||||
cli.StringFlag{
|
||||
Name: "pidfile",
|
||||
Value: "/run/tunasync/tunasync.manager.pid",
|
||||
Usage: "The pid file of the manager process",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "worker",
|
||||
Aliases: []string{"w"},
|
||||
Usage: "start the tunasync worker",
|
||||
Action: startWorker,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "config, c",
|
||||
Usage: "Load worker configurations from `FILE`",
|
||||
},
|
||||
|
||||
cli.BoolFlag{
|
||||
Name: "verbose, v",
|
||||
Usage: "Enable verbose logging",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Run manager in debug mode",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "with-systemd",
|
||||
Usage: "Enable systemd-compatible logging",
|
||||
},
|
||||
|
||||
cli.StringFlag{
|
||||
Name: "pidfile",
|
||||
Value: "/run/tunasync/tunasync.worker.pid",
|
||||
Usage: "The pid file of the worker process",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.Run(os.Args)
|
||||
}
|
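The single binary therefore exposes both roles as subcommands. A minimal launch sketch; the config file paths are placeholders, not shipped defaults:

```
tunasync manager -c /etc/tunasync/manager.conf --verbose
tunasync worker -c /etc/tunasync/worker.conf --debug
```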
cmd/tunasynctl/tunasynctl.go (new file, 292 lines)

@@ -0,0 +1,292 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/codegangsta/cli"
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
|
||||
tunasync "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
listJobsPath = "/jobs"
|
||||
listWorkersPath = "/workers"
|
||||
cmdPath = "/cmd"
|
||||
|
||||
systemCfgFile = "/etc/tunasync/ctl.conf"
|
||||
userCfgFile = "$HOME/.config/tunasync/ctl.conf"
|
||||
)
|
||||
|
||||
var logger = logging.MustGetLogger("tunasynctl-cmd")
|
||||
|
||||
var baseURL string
|
||||
var client *http.Client
|
||||
|
||||
func initializeWrapper(handler func(*cli.Context)) func(*cli.Context) {
|
||||
return func(c *cli.Context) {
|
||||
err := initialize(c)
|
||||
if err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
handler(c)
|
||||
}
|
||||
}
|
||||
|
||||
type config struct {
|
||||
ManagerAddr string `toml:"manager_addr"`
|
||||
ManagerPort int `toml:"manager_port"`
|
||||
CACert string `toml:"ca_cert"`
|
||||
}
|
||||
|
||||
func loadConfig(cfgFile string, c *cli.Context) (*config, error) {
|
||||
cfg := new(config)
|
||||
cfg.ManagerAddr = "localhost"
|
||||
cfg.ManagerPort = 14242
|
||||
|
||||
if cfgFile != "" {
|
||||
if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
|
||||
logger.Errorf(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if c.String("manager") != "" {
|
||||
cfg.ManagerAddr = c.String("manager")
|
||||
}
|
||||
if c.Int("port") > 0 {
|
||||
cfg.ManagerPort = c.Int("port")
|
||||
}
|
||||
|
||||
if c.String("ca-cert") != "" {
|
||||
cfg.CACert = c.String("ca-cert")
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func initialize(c *cli.Context) error {
|
||||
// init logger
|
||||
tunasync.InitLogger(c.Bool("verbose"), c.Bool("verbose"), false)
|
||||
var cfgFile string
|
||||
|
||||
// choose config file and load config
|
||||
if c.String("config") != "" {
|
||||
cfgFile = c.String("config")
|
||||
} else if _, err := os.Stat(os.ExpandEnv(userCfgFile)); err == nil {
|
||||
cfgFile = os.ExpandEnv(userCfgFile)
|
||||
} else if _, err := os.Stat(systemCfgFile); err == nil {
|
||||
cfgFile = systemCfgFile
|
||||
}
|
||||
cfg, err := loadConfig(cfgFile, c)
|
||||
|
||||
if err != nil {
|
||||
logger.Errorf("Load configuration for tunasynctl error: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// parse base url of the manager server
|
||||
baseURL = fmt.Sprintf("https://%s:%d",
|
||||
cfg.ManagerAddr, cfg.ManagerPort)
|
||||
|
||||
logger.Infof("Use manager address: %s", baseURL)
|
||||
|
||||
// create HTTP client
|
||||
client, err = tunasync.CreateHTTPClient(cfg.CACert)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
|
||||
logger.Error(err.Error())
|
||||
return err
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func listWorkers(c *cli.Context) {
|
||||
var workers []tunasync.WorkerStatus
|
||||
_, err := tunasync.GetJSON(baseURL+listWorkersPath, &workers, client)
|
||||
if err != nil {
|
||||
logger.Errorf("Filed to correctly get informations from manager server: %s", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(workers, "", " ")
|
||||
if err != nil {
|
||||
logger.Errorf("Error printing out informations: %s", err.Error())
|
||||
}
|
||||
fmt.Print(string(b))
|
||||
}
|
||||
|
||||
func listJobs(c *cli.Context) {
|
||||
// FIXME: there should be an API on manager server side that return MirrorStatus list to tunasynctl
|
||||
var jobs []tunasync.MirrorStatus
|
||||
if c.Bool("all") {
|
||||
_, err := tunasync.GetJSON(baseURL+listJobsPath, &jobs, client)
|
||||
if err != nil {
|
||||
logger.Errorf("Filed to correctly get information of all jobs from manager server: %s", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
} else {
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
logger.Error("Usage Error: jobs command need at least one arguments or \"--all\" flag.")
|
||||
os.Exit(1)
|
||||
}
|
||||
ans := make(chan []tunasync.MirrorStatus, len(args))
|
||||
for _, workerID := range args {
|
||||
go func(workerID string) {
|
||||
var workerJobs []tunasync.MirrorStatus
|
||||
_, err := tunasync.GetJSON(fmt.Sprintf("%s/workers/%s/jobs", baseURL, workerID), &workerJobs, client)
|
||||
if err != nil {
|
||||
logger.Errorf("Filed to correctly get jobs for worker %s: %s", workerID, err.Error())
|
||||
}
|
||||
ans <- workerJobs
|
||||
}(workerID)
|
||||
}
|
||||
for range args {
|
||||
jobs = append(jobs, <-ans...)
|
||||
}
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(jobs, "", " ")
|
||||
if err != nil {
|
||||
logger.Errorf("Error printing out informations: %s", err.Error())
|
||||
}
|
||||
fmt.Printf(string(b))
|
||||
}
|
||||
|
||||
func cmdJob(cmd tunasync.CmdVerb) func(*cli.Context) {
|
||||
return func(c *cli.Context) {
|
||||
var mirrorID string
|
||||
var argsList []string
|
||||
if len(c.Args()) == 1 {
|
||||
mirrorID = c.Args()[0]
|
||||
} else if len(c.Args()) == 2 {
|
||||
mirrorID = c.Args()[0]
|
||||
for _, arg := range strings.Split(c.Args()[1], ",") {
|
||||
argsList = append(argsList, strings.TrimSpace(arg))
|
||||
}
|
||||
} else {
|
||||
logger.Error("Usage Error: cmd command receive just 1 required positional argument MIRROR and 1 optional ")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cmd := tunasync.ClientCmd{
|
||||
Cmd: cmd,
|
||||
MirrorID: mirrorID,
|
||||
WorkerID: c.String("worker"),
|
||||
Args: argsList,
|
||||
}
|
||||
resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to correctly send command: %s", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to parse response: %s", err.Error())
|
||||
}
|
||||
|
||||
logger.Errorf("Failed to correctly send command: HTTP status code is not 200: %s", body)
|
||||
} else {
|
||||
logger.Info("Succesfully send command")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Version = "0.1"
|
||||
|
||||
commonFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "config, c",
|
||||
Usage: "Read configuration from `FILE` rather than" +
|
||||
" ~/.config/tunasync/ctl.conf and /etc/tunasync/ctl.conf",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "manager, m",
|
||||
Usage: "The manager server address",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "port, p",
|
||||
Usage: "The manager server port",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "ca-cert",
|
||||
Usage: "Trust root CA cert file `CERT`",
|
||||
},
|
||||
|
||||
cli.BoolFlag{
|
||||
Name: "verbose, v",
|
||||
Usage: "Enable verbosely logging",
|
||||
},
|
||||
}
|
||||
cmdFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "worker, w",
|
||||
Usage: "Send the command to `WORKER`",
|
||||
},
|
||||
}
|
||||
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List jobs of workers",
|
||||
Flags: append(commonFlags,
|
||||
[]cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "List all jobs of all workers",
|
||||
},
|
||||
}...),
|
||||
Action: initializeWrapper(listJobs),
|
||||
},
|
||||
{
|
||||
Name: "workers",
|
||||
Usage: "List workers",
|
||||
Flags: commonFlags,
|
||||
Action: initializeWrapper(listWorkers),
|
||||
},
|
||||
{
|
||||
Name: "start",
|
||||
Usage: "Start a job",
|
||||
Flags: append(commonFlags, cmdFlags...),
|
||||
Action: initializeWrapper(cmdJob(tunasync.CmdStart)),
|
||||
},
|
||||
{
|
||||
Name: "stop",
|
||||
Usage: "Stop a job",
|
||||
Flags: append(commonFlags, cmdFlags...),
|
||||
Action: initializeWrapper(cmdJob(tunasync.CmdStop)),
|
||||
},
|
||||
{
|
||||
Name: "disable",
|
||||
Usage: "Disable a job",
|
||||
Flags: append(commonFlags, cmdFlags...),
|
||||
Action: initializeWrapper(cmdJob(tunasync.CmdDisable)),
|
||||
},
|
||||
{
|
||||
Name: "restart",
|
||||
Usage: "Restart a job",
|
||||
Flags: append(commonFlags, cmdFlags...),
|
||||
Action: initializeWrapper(cmdJob(tunasync.CmdRestart)),
|
||||
},
|
||||
{
|
||||
Name: "ping",
|
||||
Flags: append(commonFlags, cmdFlags...),
|
||||
Action: initializeWrapper(cmdJob(tunasync.CmdPing)),
|
||||
},
|
||||
}
|
||||
app.Run(os.Args)
|
||||
}
|
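`tunasynctl` looks for `~/.config/tunasync/ctl.conf`, then `/etc/tunasync/ctl.conf`, unless `-c FILE` is given, and falls back to `localhost:14242`. A sketch of such a config and a few resulting invocations; the CA path, worker ID, and mirror name are illustrative only:

```
# ctl.conf
manager_addr = "localhost"
manager_port = 14242
ca_cert = "/etc/tunasync/rootCA.crt"
```

```
tunasynctl workers            # list registered workers
tunasynctl list --all         # list jobs of all workers
tunasynctl stop -w worker1 archlinux
```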
@@ -1,75 +0,0 @@
|
||||
[global]
|
||||
log_dir = "/var/log/tunasync"
|
||||
# mirror_root = /srv/mirror_disk
|
||||
mirror_root = "/mnt/sdb1/mirror"
|
||||
use_btrfs = false
|
||||
local_dir = "{mirror_root}/_working/{mirror_name}/"
|
||||
status_file = "/tmp/tunasync.json"
|
||||
# maximum number of running jobs
|
||||
concurrent = 2
|
||||
# interval in minutes
|
||||
interval = 1
|
||||
max_retry = 2
|
||||
ctrl_addr = "/tmp/tunasync.sock"
|
||||
|
||||
[btrfs]
|
||||
service_dir = "{mirror_root}/_current/{mirror_name}"
|
||||
working_dir = "{mirror_root}/_working/{mirror_name}"
|
||||
gc_root = "{mirror_root}/_garbage/"
|
||||
gc_dir = "{mirror_root}/_garbage/_gc_{mirror_name}_{{timestamp}}"
|
||||
|
||||
# [[mirrors]]
|
||||
# name = "archlinux"
|
||||
# provider = "rsync"
|
||||
# upstream = "rsync://mirror.us.leaseweb.net/archlinux/"
|
||||
# log_file = "/tmp/archlinux-{date}.log"
|
||||
# use_ipv6 = true
|
||||
|
||||
[[mirrors]]
|
||||
name = "arch1"
|
||||
provider = "shell"
|
||||
command = "sleep 10"
|
||||
local_dir = "/mnt/sdb1/mirror/archlinux/current/"
|
||||
# log_file = "/dev/null"
|
||||
exec_post_sync = "/bin/bash -c 'date --utc \"+%s\" > ${TUNASYNC_WORKING_DIR}/.timestamp'"
|
||||
|
||||
[[mirrors]]
|
||||
name = "arch2"
|
||||
provider = "shell"
|
||||
command = "sleep 20"
|
||||
local_dir = "/mnt/sdb1/mirror/archlinux/current/"
|
||||
# log_file = "/dev/null"
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "arch3"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "/tmp/rsync_test/src/"
|
||||
local_dir = "/tmp/rsync_test/dst/"
|
||||
log_file = "/tmp/rsync_test/log"
|
||||
# log_file = "/dev/null"
|
||||
no_delay = true
|
||||
|
||||
[[mirrors]]
|
||||
name = "arch4"
|
||||
provider = "shell"
|
||||
command = "./shell_provider.sh"
|
||||
upstream = "https://pypi.python.org/"
|
||||
# log_file = "/tmp/arch4-{date}.log"
|
||||
use_btrfs = false
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
REPO = "/usr/local/bin/repo"
|
||||
|
||||
[[mirrors]]
|
||||
name = "arch5"
|
||||
provider = "shell"
|
||||
command = "./shell_provider.sh"
|
||||
upstream = "https://pypi.python.org/"
|
||||
# log_file = "/tmp/arch4-{date}.log"
|
||||
use_btrfs = false
|
||||
[mirrors.env]
|
||||
REPO = "/usr/local/bin/repo2"
|
||||
|
||||
# vim: ft=toml ts=2 sts=2 sw=2
|
internal/logger.go (new file, 32 lines)

@@ -0,0 +1,32 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
// InitLogger initializes the logging format and level
|
||||
func InitLogger(verbose, debug, withSystemd bool) {
|
||||
var fmtString string
|
||||
if withSystemd {
|
||||
fmtString = "[%{level:.6s}] %{message}"
|
||||
} else {
|
||||
if debug {
|
||||
fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}][%{shortfile}]%{color:reset} %{message}"
|
||||
} else {
|
||||
fmtString = "%{color}[%{time:06-01-02 15:04:05}][%{level:.6s}]%{color:reset} %{message}"
|
||||
}
|
||||
}
|
||||
format := logging.MustStringFormatter(fmtString)
|
||||
logging.SetFormatter(format)
|
||||
logging.SetBackend(logging.NewLogBackend(os.Stdout, "", 0))
|
||||
|
||||
if debug {
|
||||
logging.SetLevel(logging.DEBUG, "tunasync")
|
||||
} else if verbose {
|
||||
logging.SetLevel(logging.INFO, "tunasync")
|
||||
} else {
|
||||
logging.SetLevel(logging.NOTICE, "tunasync")
|
||||
}
|
||||
}
|
internal/msg.go (new file, 78 lines)

@@ -0,0 +1,78 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A MirrorStatus is the status of a mirror job,
// reported by a worker when it has finished syncing
|
||||
type MirrorStatus struct {
|
||||
Name string `json:"name"`
|
||||
Worker string `json:"worker"`
|
||||
IsMaster bool `json:"is_master"`
|
||||
Status SyncStatus `json:"status"`
|
||||
LastUpdate time.Time `json:"last_update"`
|
||||
Upstream string `json:"upstream"`
|
||||
Size string `json:"size"`
|
||||
ErrorMsg string `json:"error_msg"`
|
||||
}
|
||||
|
||||
// A WorkerStatus is the information struct that describe
|
||||
// a worker, and sent from the manager to clients.
|
||||
type WorkerStatus struct {
|
||||
ID string `json:"id"`
|
||||
URL string `json:"url"` // worker url
|
||||
Token string `json:"token"` // session token
|
||||
LastOnline time.Time `json:"last_online"` // last seen
|
||||
}
|
||||
|
||||
type CmdVerb uint8
|
||||
|
||||
const (
|
||||
CmdStart CmdVerb = iota
|
||||
CmdStop // stop syncing, keep the job
|
||||
CmdDisable // disable the job (stops goroutine)
|
||||
CmdRestart // restart syncing
|
||||
CmdPing // ensure the goroutine is alive
|
||||
)
|
||||
|
||||
func (c CmdVerb) String() string {
|
||||
switch c {
|
||||
case CmdStart:
|
||||
return "start"
|
||||
case CmdStop:
|
||||
return "stop"
|
||||
case CmdDisable:
|
||||
return "disable"
|
||||
case CmdRestart:
|
||||
return "restart"
|
||||
case CmdPing:
|
||||
return "ping"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// A WorkerCmd is the command message sent from the
|
||||
// manager to a worker
|
||||
type WorkerCmd struct {
|
||||
Cmd CmdVerb `json:"cmd"`
|
||||
MirrorID string `json:"mirror_id"`
|
||||
Args []string `json:"args"`
|
||||
}
|
||||
|
||||
func (c WorkerCmd) String() string {
|
||||
if len(c.Args) > 0 {
|
||||
return fmt.Sprintf("%v (%s, %v)", c.Cmd, c.MirrorID, c.Args)
|
||||
}
|
||||
return fmt.Sprintf("%v (%s)", c.Cmd, c.MirrorID)
|
||||
}
|
||||
|
||||
// A ClientCmd is the command message sent from a client
|
||||
// to the manager
|
||||
type ClientCmd struct {
|
||||
Cmd CmdVerb `json:"cmd"`
|
||||
MirrorID string `json:"mirror_id"`
|
||||
WorkerID string `json:"worker_id"`
|
||||
Args []string `json:"args"`
|
||||
}
|
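Given the json tags above, a status report serializes along these lines; the values are made up for illustration and `status` uses the string form defined in `status.go` below:

```
{
    "name": "archlinux",
    "worker": "test_worker1",
    "is_master": true,
    "status": "success",
    "last_update": "2016-04-01T12:00:00Z",
    "upstream": "rsync://mirror.example.com/archlinux/",
    "size": "3GB",
    "error_msg": ""
}
```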
internal/status.go (new file, 72 lines)

@@ -0,0 +1,72 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type SyncStatus uint8
|
||||
|
||||
const (
|
||||
None SyncStatus = iota
|
||||
Failed
|
||||
Success
|
||||
Syncing
|
||||
PreSyncing
|
||||
Paused
|
||||
Disabled
|
||||
)
|
||||
|
||||
func (s SyncStatus) String() string {
|
||||
switch s {
|
||||
case None:
|
||||
return "none"
|
||||
case Failed:
|
||||
return "failed"
|
||||
case Success:
|
||||
return "success"
|
||||
case Syncing:
|
||||
return "syncing"
|
||||
case PreSyncing:
|
||||
return "pre-syncing"
|
||||
case Paused:
|
||||
return "paused"
|
||||
case Disabled:
|
||||
return "disabled"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func (s SyncStatus) MarshalJSON() ([]byte, error) {
|
||||
strStatus := s.String()
|
||||
if strStatus == "" {
|
||||
return []byte{}, errors.New("Invalid status value")
|
||||
}
|
||||
|
||||
return json.Marshal(strStatus)
|
||||
}
|
||||
|
||||
func (s *SyncStatus) UnmarshalJSON(v []byte) error {
|
||||
sv := string(v)
|
||||
switch sv {
|
||||
case `"none"`:
|
||||
*s = None
|
||||
case `"failed"`:
|
||||
*s = Failed
|
||||
case `"success"`:
|
||||
*s = Success
|
||||
case `"syncing"`:
|
||||
*s = Syncing
|
||||
case `"pre-syncing"`:
|
||||
*s = PreSyncing
|
||||
case `"paused"`:
|
||||
*s = Paused
|
||||
case `"disabled"`:
|
||||
*s = Disabled
|
||||
default:
|
||||
return fmt.Errorf("Invalid status value: %s", string(v))
|
||||
}
|
||||
return nil
|
||||
}
|
internal/status_test.go (new file, 23 lines)

@@ -0,0 +1,23 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestSyncStatus(t *testing.T) {
|
||||
Convey("SyncStatus json ser-de should work", t, func() {
|
||||
|
||||
b, err := json.Marshal(PreSyncing)
|
||||
So(err, ShouldBeNil)
|
||||
So(b, ShouldResemble, []byte(`"pre-syncing"`)) // deep equal should be used
|
||||
|
||||
var s SyncStatus
|
||||
|
||||
err = json.Unmarshal([]byte(`"failed"`), &s)
|
||||
So(err, ShouldBeNil)
|
||||
So(s, ShouldEqual, Failed)
|
||||
})
|
||||
}
|
internal/util.go (new file, 86 lines)

@@ -0,0 +1,86 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GetTLSConfig generates a tls.Config from the given CA file
|
||||
func GetTLSConfig(CAFile string) (*tls.Config, error) {
|
||||
caCert, err := ioutil.ReadFile(CAFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caCertPool := x509.NewCertPool()
|
||||
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
|
||||
return nil, errors.New("Failed to add CA to pool")
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
}
|
||||
tlsConfig.BuildNameToCertificate()
|
||||
return tlsConfig, nil
|
||||
}
|
||||
|
||||
// CreateHTTPClient returns an http.Client
|
||||
func CreateHTTPClient(CAFile string) (*http.Client, error) {
|
||||
var tlsConfig *tls.Config
|
||||
var err error
|
||||
|
||||
if CAFile != "" {
|
||||
tlsConfig, err = GetTLSConfig(CAFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tr := &http.Transport{
|
||||
MaxIdleConnsPerHost: 20,
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
|
||||
return &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: 5 * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PostJSON posts json object to url
|
||||
func PostJSON(url string, obj interface{}, client *http.Client) (*http.Response, error) {
|
||||
if client == nil {
|
||||
client, _ = CreateHTTPClient("")
|
||||
}
|
||||
b := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(b).Encode(obj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.Post(url, "application/json; charset=utf-8", b)
|
||||
}
|
||||
|
||||
// GetJSON gets a json response from url
|
||||
func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response, error) {
|
||||
if client == nil {
|
||||
client, _ = CreateHTTPClient("")
|
||||
}
|
||||
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return resp, errors.New("HTTP status code is not 200")
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
return resp, json.Unmarshal(body, obj)
|
||||
}
|
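A short usage sketch of these helpers; the address is a placeholder and assumes a manager running without TLS:

```
package main

import (
	"fmt"

	tunasync "github.com/tuna/tunasync/internal"
)

func main() {
	// passing nil lets GetJSON build a default HTTP client (no custom CA)
	var workers []tunasync.WorkerStatus
	_, err := tunasync.GetJSON("http://localhost:14242/workers", &workers, nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	for _, w := range workers {
		fmt.Println(w.ID, w.LastOnline)
	}
}
```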
manager/common.go (new file, 7 lines)

@@ -0,0 +1,7 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
var logger = logging.MustGetLogger("tunasync")
|
manager/config.go (new file, 74 lines)

@@ -0,0 +1,74 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
// A Config is the top-level toml-serializable config struct
|
||||
type Config struct {
|
||||
Debug bool `toml:"debug"`
|
||||
Server ServerConfig `toml:"server"`
|
||||
Files FileConfig `toml:"files"`
|
||||
}
|
||||
|
||||
// A ServerConfig represents the configuration for HTTP server
|
||||
type ServerConfig struct {
|
||||
Addr string `toml:"addr"`
|
||||
Port int `toml:"port"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
}
|
||||
|
||||
// A FileConfig contains paths to special files
|
||||
type FileConfig struct {
|
||||
StatusFile string `toml:"status_file"`
|
||||
DBFile string `toml:"db_file"`
|
||||
DBType string `toml:"db_type"`
|
||||
// used to connect to worker
|
||||
CACert string `toml:"ca_cert"`
|
||||
}
|
||||
|
||||
func LoadConfig(cfgFile string, c *cli.Context) (*Config, error) {
|
||||
|
||||
cfg := new(Config)
|
||||
cfg.Server.Addr = "127.0.0.1"
|
||||
cfg.Server.Port = 14242
|
||||
cfg.Debug = false
|
||||
cfg.Files.StatusFile = "/var/lib/tunasync/tunasync.json"
|
||||
cfg.Files.DBFile = "/var/lib/tunasync/tunasync.db"
|
||||
cfg.Files.DBType = "bolt"
|
||||
|
||||
if cfgFile != "" {
|
||||
if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
|
||||
logger.Errorf(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if c == nil {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
if c.String("addr") != "" {
|
||||
cfg.Server.Addr = c.String("addr")
|
||||
}
|
||||
if c.Int("port") > 0 {
|
||||
cfg.Server.Port = c.Int("port")
|
||||
}
|
||||
if c.String("cert") != "" && c.String("key") != "" {
|
||||
cfg.Server.SSLCert = c.String("cert")
|
||||
cfg.Server.SSLKey = c.String("key")
|
||||
}
|
||||
if c.String("status-file") != "" {
|
||||
cfg.Files.StatusFile = c.String("status-file")
|
||||
}
|
||||
if c.String("db-file") != "" {
|
||||
cfg.Files.DBFile = c.String("db-file")
|
||||
}
|
||||
if c.String("db-type") != "" {
|
||||
cfg.Files.DBFile = c.String("db-type")
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
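Matching the toml tags and defaults above, a manager config file looks roughly like this; the certificate paths are placeholders, and the commented defaults come from `LoadConfig`:

```
debug = false

[server]
addr = "127.0.0.1"      # default
port = 14242            # default
ssl_cert = "/etc/tunasync/host.crt"
ssl_key = "/etc/tunasync/host.key"

[files]
status_file = "/var/lib/tunasync/tunasync.json"   # default
db_file = "/var/lib/tunasync/tunasync.db"         # default
db_type = "bolt"                                  # default; the only type makeDBAdapter accepts
ca_cert = "/etc/tunasync/rootCA.crt"
```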
manager/config_test.go (new file, 141 lines)

@@ -0,0 +1,141 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/codegangsta/cli"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
var cfgBlob = `
|
||||
debug = true
|
||||
[server]
|
||||
addr = "0.0.0.0"
|
||||
port = 5000
|
||||
|
||||
[files]
|
||||
status_file = "/tmp/tunasync.json"
|
||||
db_file = "/var/lib/tunasync/tunasync.db"
|
||||
`
|
||||
|
||||
Convey("toml decoding should work", t, func() {
|
||||
|
||||
var conf Config
|
||||
_, err := toml.Decode(cfgBlob, &conf)
|
||||
ShouldEqual(err, nil)
|
||||
ShouldEqual(conf.Server.Addr, "0.0.0.0")
|
||||
ShouldEqual(conf.Server.Port, 5000)
|
||||
ShouldEqual(conf.Files.StatusFile, "/tmp/tunasync.json")
|
||||
ShouldEqual(conf.Files.DBFile, "/var/lib/tunasync/tunasync.db")
|
||||
})
|
||||
|
||||
Convey("load Config should work", t, func() {
|
||||
Convey("create config file & cli context", func() {
|
||||
tmpfile, err := ioutil.TempFile("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "config, c",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "addr",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "port",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "status-file",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "db-file",
|
||||
},
|
||||
}
|
||||
Convey("when giving no config options", func() {
|
||||
app.Action = func(c *cli.Context) {
|
||||
cfgFile := c.String("config")
|
||||
cfg, err := LoadConfig(cfgFile, c)
|
||||
So(err, ShouldEqual, nil)
|
||||
So(cfg.Server.Addr, ShouldEqual, "127.0.0.1")
|
||||
}
|
||||
args := strings.Split("cmd", " ")
|
||||
app.Run(args)
|
||||
})
|
||||
Convey("when giving config options", func() {
|
||||
app.Action = func(c *cli.Context) {
|
||||
cfgFile := c.String("config")
|
||||
So(cfgFile, ShouldEqual, tmpfile.Name())
|
||||
conf, err := LoadConfig(cfgFile, c)
|
||||
So(err, ShouldEqual, nil)
|
||||
So(conf.Server.Addr, ShouldEqual, "0.0.0.0")
|
||||
So(conf.Server.Port, ShouldEqual, 5000)
|
||||
So(conf.Files.StatusFile, ShouldEqual, "/tmp/tunasync.json")
|
||||
So(conf.Files.DBFile, ShouldEqual, "/var/lib/tunasync/tunasync.db")
|
||||
|
||||
}
|
||||
cmd := fmt.Sprintf("cmd -c %s", tmpfile.Name())
|
||||
args := strings.Split(cmd, " ")
|
||||
app.Run(args)
|
||||
})
|
||||
Convey("when giving cli options", func() {
|
||||
app.Action = func(c *cli.Context) {
|
||||
cfgFile := c.String("config")
|
||||
So(cfgFile, ShouldEqual, "")
|
||||
conf, err := LoadConfig(cfgFile, c)
|
||||
So(err, ShouldEqual, nil)
|
||||
So(conf.Server.Addr, ShouldEqual, "0.0.0.0")
|
||||
So(conf.Server.Port, ShouldEqual, 5001)
|
||||
So(conf.Server.SSLCert, ShouldEqual, "/ssl.cert")
|
||||
So(conf.Server.SSLKey, ShouldEqual, "/ssl.key")
|
||||
So(conf.Files.StatusFile, ShouldEqual, "/tunasync.json")
|
||||
So(conf.Files.DBFile, ShouldEqual, "/tunasync.db")
|
||||
|
||||
}
|
||||
args := strings.Split(
|
||||
"cmd --addr=0.0.0.0 --port=5001 --cert=/ssl.cert --key /ssl.key --status-file=/tunasync.json --db-file=/tunasync.db",
|
||||
" ",
|
||||
)
|
||||
app.Run(args)
|
||||
})
|
||||
Convey("when giving both config and cli options", func() {
|
||||
app.Action = func(c *cli.Context) {
|
||||
cfgFile := c.String("config")
|
||||
So(cfgFile, ShouldEqual, tmpfile.Name())
|
||||
conf, err := LoadConfig(cfgFile, c)
|
||||
So(err, ShouldEqual, nil)
|
||||
So(conf.Server.Addr, ShouldEqual, "0.0.0.0")
|
||||
So(conf.Server.Port, ShouldEqual, 5000)
|
||||
So(conf.Server.SSLCert, ShouldEqual, "/ssl.cert")
|
||||
So(conf.Server.SSLKey, ShouldEqual, "/ssl.key")
|
||||
So(conf.Files.StatusFile, ShouldEqual, "/tunasync.json")
|
||||
So(conf.Files.DBFile, ShouldEqual, "/tunasync.db")
|
||||
|
||||
}
|
||||
cmd := fmt.Sprintf(
|
||||
"cmd -c %s --cert=/ssl.cert --key /ssl.key --status-file=/tunasync.json --db-file=/tunasync.db",
|
||||
tmpfile.Name(),
|
||||
)
|
||||
args := strings.Split(cmd, " ")
|
||||
app.Run(args)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
manager/db.go (new file, 178 lines)

@@ -0,0 +1,178 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
type dbAdapter interface {
|
||||
Init() error
|
||||
ListWorkers() ([]WorkerStatus, error)
|
||||
GetWorker(workerID string) (WorkerStatus, error)
|
||||
CreateWorker(w WorkerStatus) (WorkerStatus, error)
|
||||
UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
|
||||
GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)
|
||||
ListMirrorStatus(workerID string) ([]MirrorStatus, error)
|
||||
ListAllMirrorStatus() ([]MirrorStatus, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
|
||||
if dbType == "bolt" {
|
||||
innerDB, err := bolt.Open(dbFile, 0600, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db := boltAdapter{
|
||||
db: innerDB,
|
||||
dbFile: dbFile,
|
||||
}
|
||||
err = db.Init()
|
||||
return &db, err
|
||||
}
|
||||
// unsupported db-type
|
||||
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
|
||||
}
|
||||
|
||||
const (
|
||||
_workerBucketKey = "workers"
|
||||
_statusBucketKey = "mirror_status"
|
||||
)
|
||||
|
||||
type boltAdapter struct {
|
||||
db *bolt.DB
|
||||
dbFile string
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Init() (err error) {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
_, err = tx.CreateBucketIfNotExists([]byte(_workerBucketKey))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
|
||||
}
|
||||
_, err = tx.CreateBucketIfNotExists([]byte(_statusBucketKey))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _statusBucketKey, err.Error())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltAdapter) ListWorkers() (ws []WorkerStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_workerBucketKey))
|
||||
c := bucket.Cursor()
|
||||
var w WorkerStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
jsonErr := json.Unmarshal(v, &w)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
continue
|
||||
}
|
||||
ws = append(ws, w)
|
||||
}
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_workerBucketKey))
|
||||
v := bucket.Get([]byte(workerID))
|
||||
if v == nil {
|
||||
return fmt.Errorf("invalid workerID %s", workerID)
|
||||
}
|
||||
err := json.Unmarshal(v, &w)
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_workerBucketKey))
|
||||
v, err := json.Marshal(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = bucket.Put([]byte(w.ID), v)
|
||||
return err
|
||||
})
|
||||
return w, err
|
||||
}
|
||||
|
||||
func (b *boltAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
|
||||
id := mirrorID + "/" + workerID
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
v, err := json.Marshal(status)
|
||||
err = bucket.Put([]byte(id), v)
|
||||
return err
|
||||
})
|
||||
return status, err
|
||||
}
|
||||
|
||||
func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
|
||||
id := mirrorID + "/" + workerID
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
v := bucket.Get([]byte(id))
|
||||
if v == nil {
|
||||
return fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
|
||||
}
|
||||
err := json.Unmarshal(v, &m)
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
c := bucket.Cursor()
|
||||
var m MirrorStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if wID := strings.Split(string(k), "/")[1]; wID == workerID {
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
continue
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
c := bucket.Cursor()
|
||||
var m MirrorStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
continue
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Close() error {
|
||||
if b.db != nil {
|
||||
return b.db.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
manager/db_test.go (new file, 117 lines)

@@ -0,0 +1,117 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
func TestBoltAdapter(t *testing.T) {
|
||||
Convey("boltAdapter should work", t, func() {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
dbType, dbFile := "bolt", filepath.Join(tmpDir, "bolt.db")
|
||||
boltDB, err := makeDBAdapter(dbType, dbFile)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
defer func() {
|
||||
// close boltDB
|
||||
err := boltDB.Close()
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
testWorkerIDs := []string{"test_worker1", "test_worker2"}
|
||||
Convey("create worker", func() {
|
||||
for _, id := range testWorkerIDs {
|
||||
w := WorkerStatus{
|
||||
ID: id,
|
||||
Token: "token_" + id,
|
||||
LastOnline: time.Now(),
|
||||
}
|
||||
w, err = boltDB.CreateWorker(w)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
Convey("get exists worker", func() {
|
||||
_, err := boltDB.GetWorker(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("list exist worker", func() {
|
||||
ws, err := boltDB.ListWorkers()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ws), ShouldEqual, 2)
|
||||
})
|
||||
|
||||
Convey("get inexist worker", func() {
|
||||
_, err := boltDB.GetWorker("invalid workerID")
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("update mirror status", func() {
|
||||
status1 := MirrorStatus{
|
||||
Name: "arch-sync1",
|
||||
Worker: testWorkerIDs[0],
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "3GB",
|
||||
}
|
||||
status2 := MirrorStatus{
|
||||
Name: "arch-sync2",
|
||||
Worker: testWorkerIDs[1],
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
}
|
||||
|
||||
_, err := boltDB.UpdateMirrorStatus(status1.Worker, status1.Name, status1)
|
||||
_, err = boltDB.UpdateMirrorStatus(status2.Worker, status2.Name, status2)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("get mirror status", func() {
|
||||
m, err := boltDB.GetMirrorStatus(testWorkerIDs[0], status1.Name)
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal(status1)
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(m)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
|
||||
Convey("list mirror status", func() {
|
||||
ms, err := boltDB.ListMirrorStatus(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal([]MirrorStatus{status1})
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(ms)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
|
||||
Convey("list all mirror status", func() {
|
||||
ms, err := boltDB.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal([]MirrorStatus{status1, status2})
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(ms)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
})
|
||||
}
|
manager/middleware.go (new file, 35 lines)

@@ -0,0 +1,35 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func contextErrorLogger(c *gin.Context) {
|
||||
errs := c.Errors.ByType(gin.ErrorTypeAny)
|
||||
if len(errs) > 0 {
|
||||
for _, err := range errs {
|
||||
logger.Errorf(`"in request "%s %s: %s"`,
|
||||
c.Request.Method, c.Request.URL.Path,
|
||||
err.Error())
|
||||
}
|
||||
}
|
||||
// pass on to the next middleware in chain
|
||||
c.Next()
|
||||
}
|
||||
|
||||
func (s *Manager) workerIDValidator(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
_, err := s.adapter.GetWorker(workerID)
|
||||
if err != nil {
|
||||
// no worker named `workerID` exists
|
||||
err := fmt.Errorf("invalid workerID %s", workerID)
|
||||
s.returnErrJSON(c, http.StatusBadRequest, err)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
// pass on to the next middleware in chain
|
||||
c.Next()
|
||||
}
|
manager/server.go (new file, 300 lines)

@@ -0,0 +1,300 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
_errorKey = "error"
|
||||
_infoKey = "message"
|
||||
)
|
||||
|
||||
var manager *Manager
|
||||
|
||||
// A Manager represents a manager server
|
||||
type Manager struct {
|
||||
cfg *Config
|
||||
engine *gin.Engine
|
||||
adapter dbAdapter
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// GetTUNASyncManager returns the manager from config
|
||||
func GetTUNASyncManager(cfg *Config) *Manager {
|
||||
if manager != nil {
|
||||
return manager
|
||||
}
|
||||
|
||||
// create gin engine
|
||||
if !cfg.Debug {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
}
|
||||
s := &Manager{
|
||||
cfg: cfg,
|
||||
adapter: nil,
|
||||
}
|
||||
|
||||
s.engine = gin.New()
|
||||
s.engine.Use(gin.Recovery())
|
||||
if cfg.Debug {
|
||||
s.engine.Use(gin.Logger())
|
||||
}
|
||||
|
||||
if cfg.Files.CACert != "" {
|
||||
httpClient, err := CreateHTTPClient(cfg.Files.CACert)
|
||||
if err != nil {
|
||||
logger.Errorf("Error initializing HTTP client: %s", err.Error())
|
||||
return nil
|
||||
}
|
||||
s.httpClient = httpClient
|
||||
}
|
||||
|
||||
if cfg.Files.DBFile != "" {
|
||||
adapter, err := makeDBAdapter(cfg.Files.DBType, cfg.Files.DBFile)
|
||||
if err != nil {
|
||||
logger.Errorf("Error initializing DB adapter: %s", err.Error())
|
||||
return nil
|
||||
}
|
||||
s.setDBAdapter(adapter)
|
||||
}
|
||||
|
||||
// common log middleware
|
||||
s.engine.Use(contextErrorLogger)
|
||||
|
||||
s.engine.GET("/ping", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{_infoKey: "pong"})
|
||||
})
|
||||
// list jobs, status page
|
||||
s.engine.GET("/jobs", s.listAllJobs)
|
||||
|
||||
// list workers
|
||||
s.engine.GET("/workers", s.listWorkers)
|
||||
// worker online
|
||||
s.engine.POST("/workers", s.registerWorker)
|
||||
|
||||
// workerID should be valid in this route group
|
||||
workerValidateGroup := s.engine.Group("/workers", s.workerIDValidator)
|
||||
// get job list
|
||||
workerValidateGroup.GET(":id/jobs", s.listJobsOfWorker)
|
||||
// post job status
|
||||
workerValidateGroup.POST(":id/jobs/:job", s.updateJobOfWorker)
|
||||
|
||||
// for tunasynctl to post commands
|
||||
s.engine.POST("/cmd", s.handleClientCmd)
|
||||
|
||||
manager = s
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Manager) setDBAdapter(adapter dbAdapter) {
|
||||
s.adapter = adapter
|
||||
}
|
||||
|
||||
// Run runs the manager server forever
|
||||
func (s *Manager) Run() {
|
||||
addr := fmt.Sprintf("%s:%d", s.cfg.Server.Addr, s.cfg.Server.Port)
|
||||
|
||||
httpServer := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.engine,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
if s.cfg.Server.SSLCert == "" && s.cfg.Server.SSLKey == "" {
|
||||
if err := httpServer.ListenAndServe(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
if err := httpServer.ListenAndServeTLS(s.cfg.Server.SSLCert, s.cfg.Server.SSLKey); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// listAllJobs responds with all jobs of all workers
|
||||
func (s *Manager) listAllJobs(c *gin.Context) {
|
||||
mirrorStatusList, err := s.adapter.ListAllMirrorStatus()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to list all mirror status: %s",
|
||||
err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
webMirStatusList := []webMirrorStatus{}
|
||||
for _, m := range mirrorStatusList {
|
||||
webMirStatusList = append(
|
||||
webMirStatusList,
|
||||
convertMirrorStatus(m),
|
||||
)
|
||||
}
|
||||
c.JSON(http.StatusOK, webMirStatusList)
|
||||
}
|
||||
|
||||
// listWorkers responds with information about all workers
|
||||
func (s *Manager) listWorkers(c *gin.Context) {
|
||||
var workerInfos []WorkerStatus
|
||||
workers, err := s.adapter.ListWorkers()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to list workers: %s",
|
||||
err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
for _, w := range workers {
|
||||
workerInfos = append(workerInfos,
|
||||
WorkerStatus{
|
||||
ID: w.ID,
|
||||
LastOnline: w.LastOnline,
|
||||
})
|
||||
}
|
||||
c.JSON(http.StatusOK, workerInfos)
|
||||
}
|
||||
|
||||
// registerWorker registers a newly-online worker
|
||||
func (s *Manager) registerWorker(c *gin.Context) {
|
||||
var _worker WorkerStatus
|
||||
c.BindJSON(&_worker)
|
||||
_worker.LastOnline = time.Now()
|
||||
newWorker, err := s.adapter.CreateWorker(_worker)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to register worker: %s",
|
||||
err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Noticef("Worker <%s> registered", _worker.ID)
|
||||
// create workerCmd channel for this worker
|
||||
c.JSON(http.StatusOK, newWorker)
|
||||
}
|
||||
|
||||
// listJobsOfWorker responds with all the jobs of the specified worker
|
||||
func (s *Manager) listJobsOfWorker(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to list jobs of worker %s: %s",
|
||||
workerID, err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, mirrorStatusList)
|
||||
}
|
||||
|
||||
func (s *Manager) returnErrJSON(c *gin.Context, code int, err error) {
|
||||
c.JSON(code, gin.H{
|
||||
_errorKey: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Manager) updateJobOfWorker(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
var status MirrorStatus
|
||||
c.BindJSON(&status)
|
||||
mirrorName := status.Name
|
||||
|
||||
curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
|
||||
|
||||
// Only successful syncing needs last_update
|
||||
if status.Status == Success {
|
||||
status.LastUpdate = time.Now()
|
||||
} else {
|
||||
status.LastUpdate = curStatus.LastUpdate
|
||||
}
|
||||
|
||||
// for logging
|
||||
switch status.Status {
|
||||
case Success:
|
||||
logger.Noticef("Job [%s] @<%s> success", status.Name, status.Worker)
|
||||
case Failed:
|
||||
logger.Warningf("Job [%s] @<%s> failed", status.Name, status.Worker)
|
||||
case Syncing:
|
||||
logger.Infof("Job [%s] @<%s> starts syncing", status.Name, status.Worker)
|
||||
case Disabled:
|
||||
logger.Noticef("Job [%s] @<%s> disabled", status.Name, status.Worker)
|
||||
case Paused:
|
||||
logger.Noticef("Job [%s] @<%s> paused", status.Name, status.Worker)
|
||||
default:
|
||||
logger.Infof("Job [%s] @<%s> status: %s", status.Name, status.Worker, status.Status)
|
||||
}
|
||||
|
||||
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to update job %s of worker %s: %s",
|
||||
mirrorName, workerID, err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, newStatus)
|
||||
}
|
||||
|
||||
func (s *Manager) handleClientCmd(c *gin.Context) {
|
||||
var clientCmd ClientCmd
|
||||
c.BindJSON(&clientCmd)
|
||||
workerID := clientCmd.WorkerID
|
||||
if workerID == "" {
|
||||
// TODO: decide which worker should do this mirror when WorkerID is null string
|
||||
logger.Errorf("handleClientCmd case workerID == \" \" not implemented yet")
|
||||
c.AbortWithStatus(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w, err := s.adapter.GetWorker(workerID)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("worker %s is not registered yet", workerID)
|
||||
s.returnErrJSON(c, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
workerURL := w.URL
|
||||
// parse client cmd into worker cmd
|
||||
workerCmd := WorkerCmd{
|
||||
Cmd: clientCmd.Cmd,
|
||||
MirrorID: clientCmd.MirrorID,
|
||||
Args: clientCmd.Args,
|
||||
}
|
||||
|
||||
// update job status, even if the job did not disable successfully,
|
||||
// this status should be set as disabled
|
||||
curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID)
|
||||
changed := false
|
||||
switch clientCmd.Cmd {
|
||||
case CmdDisable:
|
||||
curStat.Status = Disabled
|
||||
changed = true
|
||||
case CmdStop:
|
||||
curStat.Status = Paused
|
||||
changed = true
|
||||
}
|
||||
if changed {
|
||||
s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
|
||||
}
|
||||
|
||||
logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)
|
||||
// post command to worker
|
||||
_, err = PostJSON(workerURL, workerCmd, s.httpClient)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("post command to worker %s(%s) fail: %s", workerID, workerURL, err.Error())
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: check response for success
|
||||
c.JSON(http.StatusOK, gin.H{_infoKey: "successfully send command to worker " + workerID})
|
||||
}
|
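Put together, a worker's side of this API is just two POSTs. An illustrative sketch with made-up worker and job names, assuming a manager listening without TLS on `127.0.0.1:14242`:

```
# register (or re-register) a worker
curl -X POST -H 'Content-Type: application/json' \
     -d '{"id": "worker1", "url": "http://worker1.example.com:6000/cmd", "token": "some-token"}' \
     http://127.0.0.1:14242/workers

# report the status of one job owned by that worker
curl -X POST -H 'Content-Type: application/json' \
     -d '{"name": "archlinux", "worker": "worker1", "status": "syncing"}' \
     http://127.0.0.1:14242/workers/worker1/jobs/archlinux
```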
manager/server_test.go (new file, 310 lines)

@@ -0,0 +1,310 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
_magicBadWorkerID = "magic_bad_worker_id"
|
||||
)
|
||||
|
||||
func TestHTTPServer(t *testing.T) {
|
||||
Convey("HTTP server should work", t, func(ctx C) {
|
||||
InitLogger(true, true, false)
|
||||
s := GetTUNASyncManager(&Config{Debug: false})
|
||||
So(s, ShouldNotBeNil)
|
||||
s.setDBAdapter(&mockDBAdapter{
|
||||
workerStore: map[string]WorkerStatus{
|
||||
_magicBadWorkerID: WorkerStatus{
|
||||
ID: _magicBadWorkerID,
|
||||
}},
|
||||
statusStore: make(map[string]MirrorStatus),
|
||||
})
|
||||
port := rand.Intn(10000) + 20000
|
||||
baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
|
||||
go func() {
|
||||
s.engine.Run(fmt.Sprintf("127.0.0.1:%d", port))
|
||||
}()
|
||||
time.Sleep(50 * time.Millisecond) // wait for the HTTP server goroutine to start
|
||||
resp, err := http.Get(baseURL + "/ping")
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
So(err, ShouldBeNil)
|
||||
var p map[string]string
|
||||
err = json.Unmarshal(body, &p)
|
||||
So(err, ShouldBeNil)
|
||||
So(p[_infoKey], ShouldEqual, "pong")
|
||||
|
||||
Convey("when database fail", func(ctx C) {
|
||||
resp, err := http.Get(fmt.Sprintf("%s/workers/%s/jobs", baseURL, _magicBadWorkerID))
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusInternalServerError)
|
||||
defer resp.Body.Close()
|
||||
var msg map[string]string
|
||||
err = json.NewDecoder(resp.Body).Decode(&msg)
|
||||
So(err, ShouldBeNil)
|
||||
So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail"))
|
||||
})
|
||||
|
||||
Convey("when register a worker", func(ctx C) {
|
||||
w := WorkerStatus{
|
||||
ID: "test_worker1",
|
||||
}
|
||||
resp, err := PostJSON(baseURL+"/workers", w, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("list all workers", func(ctx C) {
|
||||
So(err, ShouldBeNil)
|
||||
resp, err := http.Get(baseURL + "/workers")
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
var actualResponseObj []WorkerStatus
|
||||
err = json.NewDecoder(resp.Body).Decode(&actualResponseObj)
|
||||
So(err, ShouldBeNil)
|
||||
So(len(actualResponseObj), ShouldEqual, 2)
|
||||
})
|
||||
|
||||
Convey("update mirror status of a existed worker", func(ctx C) {
|
||||
status := MirrorStatus{
|
||||
Name: "arch-sync1",
|
||||
Worker: "test_worker1",
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "3GB",
|
||||
}
|
||||
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
|
||||
defer resp.Body.Close()
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("list mirror status of an existed worker", func(ctx C) {
|
||||
var ms []MirrorStatus
|
||||
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
|
||||
m := ms[0]
|
||||
So(m.Name, ShouldEqual, status.Name)
|
||||
So(m.Worker, ShouldEqual, status.Worker)
|
||||
So(m.Status, ShouldEqual, status.Status)
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, status.Size)
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
|
||||
|
||||
})
|
||||
|
||||
Convey("list all job status of all workers", func(ctx C) {
|
||||
var ms []webMirrorStatus
|
||||
resp, err := GetJSON(baseURL+"/jobs", &ms, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
m := ms[0]
|
||||
So(m.Name, ShouldEqual, status.Name)
|
||||
So(m.Status, ShouldEqual, status.Status)
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, status.Size)
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
Convey("update mirror status of an inexisted worker", func(ctx C) {
|
||||
invalidWorker := "test_worker2"
|
||||
status := MirrorStatus{
|
||||
Name: "arch-sync2",
|
||||
Worker: invalidWorker,
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
}
|
||||
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s",
|
||||
baseURL, status.Worker, status.Name), status, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
|
||||
defer resp.Body.Close()
|
||||
var msg map[string]string
|
||||
err = json.NewDecoder(resp.Body).Decode(&msg)
|
||||
So(err, ShouldBeNil)
|
||||
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
|
||||
})
|
||||
Convey("handle client command", func(ctx C) {
|
||||
cmdChan := make(chan WorkerCmd, 1)
|
||||
workerServer := makeMockWorkerServer(cmdChan)
|
||||
workerPort := rand.Intn(10000) + 30000
|
||||
bindAddress := fmt.Sprintf("127.0.0.1:%d", workerPort)
|
||||
workerBaseURL := fmt.Sprintf("http://%s", bindAddress)
|
||||
w := WorkerStatus{
|
||||
ID: "test_worker_cmd",
|
||||
URL: workerBaseURL + "/cmd",
|
||||
}
|
||||
resp, err := PostJSON(baseURL+"/workers", w, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
go func() {
|
||||
// run the mock worker server
|
||||
workerServer.Run(bindAddress)
|
||||
}()
|
||||
time.Sleep(50 * time.Millisecond) // wait for the mock worker server to start
|
||||
// verify the worker mock server is running
|
||||
workerResp, err := http.Get(workerBaseURL + "/ping")
|
||||
defer workerResp.Body.Close()
|
||||
So(err, ShouldBeNil)
|
||||
So(workerResp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("when client send wrong cmd", func(ctx C) {
|
||||
clientCmd := ClientCmd{
|
||||
Cmd: CmdStart,
|
||||
MirrorID: "ubuntu-sync",
|
||||
WorkerID: "not_exist_worker",
|
||||
}
|
||||
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
|
||||
defer resp.Body.Close()
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
|
||||
})
|
||||
|
||||
Convey("when client send correct cmd", func(ctx C) {
|
||||
clientCmd := ClientCmd{
|
||||
Cmd: CmdStart,
|
||||
MirrorID: "ubuntu-sync",
|
||||
WorkerID: w.ID,
|
||||
}
|
||||
|
||||
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
time.Sleep(50 * time.Millisecond) // allow the command to reach the mock worker
|
||||
select {
|
||||
case cmd := <-cmdChan:
|
||||
ctx.So(cmd.Cmd, ShouldEqual, clientCmd.Cmd)
|
||||
ctx.So(cmd.MirrorID, ShouldEqual, clientCmd.MirrorID)
|
||||
default:
|
||||
ctx.So(0, ShouldEqual, 1)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type mockDBAdapter struct {
|
||||
workerStore map[string]WorkerStatus
|
||||
statusStore map[string]MirrorStatus
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) Init() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) ListWorkers() ([]WorkerStatus, error) {
|
||||
workers := make([]WorkerStatus, len(b.workerStore))
|
||||
idx := 0
|
||||
for _, w := range b.workerStore {
|
||||
workers[idx] = w
|
||||
idx++
|
||||
}
|
||||
return workers, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
|
||||
w, ok := b.workerStore[workerID]
|
||||
if !ok {
|
||||
return WorkerStatus{}, fmt.Errorf("invalid workerId")
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
|
||||
// _, ok := b.workerStore[w.ID]
|
||||
// if ok {
|
||||
// return workerStatus{}, fmt.Errorf("duplicate worker name")
|
||||
// }
|
||||
b.workerStore[w.ID] = w
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error) {
|
||||
id := mirrorID + "/" + workerID
|
||||
status, ok := b.statusStore[id]
|
||||
if !ok {
|
||||
return MirrorStatus{}, fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
|
||||
// if _, ok := b.workerStore[workerID]; !ok {
|
||||
// // unregistered worker
|
||||
// return MirrorStatus{}, fmt.Errorf("invalid workerID %s", workerID)
|
||||
// }
|
||||
|
||||
id := mirrorID + "/" + workerID
|
||||
b.statusStore[id] = status
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) ListMirrorStatus(workerID string) ([]MirrorStatus, error) {
|
||||
var mirrorStatusList []MirrorStatus
|
||||
// simulating a database fail
|
||||
if workerID == _magicBadWorkerID {
|
||||
return []MirrorStatus{}, fmt.Errorf("database fail")
|
||||
}
|
||||
for k, v := range b.statusStore {
|
||||
if wID := strings.Split(k, "/")[1]; wID == workerID {
|
||||
mirrorStatusList = append(mirrorStatusList, v)
|
||||
}
|
||||
}
|
||||
return mirrorStatusList, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) ListAllMirrorStatus() ([]MirrorStatus, error) {
|
||||
var mirrorStatusList []MirrorStatus
|
||||
for _, v := range b.statusStore {
|
||||
mirrorStatusList = append(mirrorStatusList, v)
|
||||
}
|
||||
return mirrorStatusList, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeMockWorkerServer(cmdChan chan WorkerCmd) *gin.Engine {
|
||||
r := gin.Default()
|
||||
r.GET("/ping", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{_infoKey: "pong"})
|
||||
})
|
||||
r.POST("/cmd", func(c *gin.Context) {
|
||||
var cmd WorkerCmd
|
||||
c.BindJSON(&cmd)
|
||||
cmdChan <- cmd
|
||||
})
|
||||
|
||||
return r
|
||||
}
|
62
manager/status.go
Normal file
@ -0,0 +1,62 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
type textTime struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
func (t textTime) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
|
||||
}
|
||||
func (t *textTime) UnmarshalJSON(b []byte) error {
|
||||
s := string(b)
|
||||
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
|
||||
*t = textTime{t2}
|
||||
return err
|
||||
}
|
||||
|
||||
type stampTime struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
func (t stampTime) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(t.Unix())
|
||||
}
|
||||
func (t *stampTime) UnmarshalJSON(b []byte) error {
|
||||
ts, err := strconv.Atoi(string(b))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = stampTime{time.Unix(int64(ts), 0)}
|
||||
return err
|
||||
}
|
||||
|
||||
// webMirrorStatus is the mirror status to be shown in the web page
|
||||
type webMirrorStatus struct {
|
||||
Name string `json:"name"`
|
||||
IsMaster bool `json:"is_master"`
|
||||
Status SyncStatus `json:"status"`
|
||||
LastUpdate textTime `json:"last_update"`
|
||||
LastUpdateTs stampTime `json:"last_update_ts"`
|
||||
Upstream string `json:"upstream"`
|
||||
Size string `json:"size"` // approximate size
|
||||
}
|
||||
|
||||
func convertMirrorStatus(m MirrorStatus) webMirrorStatus {
|
||||
return webMirrorStatus{
|
||||
Name: m.Name,
|
||||
IsMaster: m.IsMaster,
|
||||
Status: m.Status,
|
||||
LastUpdate: textTime{m.LastUpdate},
|
||||
LastUpdateTs: stampTime{m.LastUpdate},
|
||||
Upstream: m.Upstream,
|
||||
Size: m.Size,
|
||||
}
|
||||
}
|
44
manager/status_test.go
Normal file
@ -0,0 +1,44 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
tunasync "github.com/tuna/tunasync/internal"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestStatus(t *testing.T) {
|
||||
Convey("status json ser-de should work", t, func() {
|
||||
tz := "Asia/Tokyo"
|
||||
loc, err := time.LoadLocation(tz)
|
||||
So(err, ShouldBeNil)
|
||||
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
|
||||
m := webMirrorStatus{
|
||||
Name: "tunalinux",
|
||||
Status: tunasync.Success,
|
||||
LastUpdate: textTime{t},
|
||||
LastUpdateTs: stampTime{t},
|
||||
Size: "5GB",
|
||||
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
|
||||
}
|
||||
|
||||
b, err := json.Marshal(m)
|
||||
So(err, ShouldBeNil)
|
||||
//fmt.Println(string(b))
|
||||
var m2 webMirrorStatus
|
||||
err = json.Unmarshal(b, &m2)
|
||||
So(err, ShouldBeNil)
|
||||
// fmt.Printf("%#v", m2)
|
||||
So(m2.Name, ShouldEqual, m.Name)
|
||||
So(m2.Status, ShouldEqual, m.Status)
|
||||
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
|
||||
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
|
||||
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
|
||||
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
|
||||
So(m2.Size, ShouldEqual, m.Size)
|
||||
So(m2.Upstream, ShouldEqual, m.Upstream)
|
||||
})
|
||||
}
|
@ -1,3 +0,0 @@
|
||||
setproctitle==1.1.9
|
||||
sh==1.11
|
||||
toml==0.9.1
|
@ -1,11 +0,0 @@
|
||||
[Unit]
|
||||
Description=Delete garbage subvolumes generated by tunasync
|
||||
Requires = network.target
|
||||
After = network.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/home/tuna/.virtualenvs/tunasync/bin/python -u /home/tuna/tunasync/tunasync_snapshot_gc.py -c /etc/tunasync.ini
|
||||
|
||||
[Install]
|
||||
WantedBy = multi-user.target
|
@ -1,8 +0,0 @@
|
||||
[Unit]
|
||||
Description=TUNAsync GC every 10 minutes
|
||||
|
||||
[Timer]
|
||||
OnUnitActiveSec=10min
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
@ -1,13 +0,0 @@
|
||||
[Unit]
|
||||
Description = TUNA mirrors sync daemon
|
||||
Requires = network.target
|
||||
After = network.target
|
||||
|
||||
[Service]
|
||||
ExecStart = /home/tuna/.virtualenvs/tunasync/bin/python -u /home/tuna/tunasync/tunasync.py -c /etc/tunasync.ini
|
||||
KillSignal = SIGTERM
|
||||
ExecReload = /bin/kill -SIGUSR1 $MAINPID
|
||||
Environment = "HOME=/home/tuna"
|
||||
|
||||
[Install]
|
||||
WantedBy = multi-user.target
|
3
tests/bin/myrsync.sh
Executable file
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
echo $@
|
||||
sleep 5
|
@ -2,6 +2,7 @@
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $REPO
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $@
|
||||
sleep 5
|
||||
exit 1
|
18
tests/httpClient.go
Normal file
@ -0,0 +1,18 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg, err := internal.GetTLSConfig("rootCA.crt")
|
||||
fmt.Println(err)
|
||||
var msg map[string]string
|
||||
resp, err := internal.GetJSON("https://localhost:5002/", &msg, cfg)
|
||||
fmt.Println(err)
|
||||
fmt.Println(resp)
|
||||
}
|
17
tests/httpServer.go
Normal file
@ -0,0 +1,17 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
s := gin.Default()
|
||||
s.GET("/", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{"msg": "passed"})
|
||||
})
|
||||
s.RunTLS(":5002", "manager.crt", "manager.key")
|
||||
}
|
15
tests/manager.conf
Normal file
@ -0,0 +1,15 @@
|
||||
debug = false
|
||||
|
||||
[server]
|
||||
addr = "127.0.0.1"
|
||||
port = 12345
|
||||
ssl_cert = "manager.crt"
|
||||
ssl_key = "manager.key"
|
||||
|
||||
[files]
|
||||
db_type = "bolt"
|
||||
db_file = "/tmp/tunasync/manager.db"
|
||||
ca_cert = "rootCA.crt"
|
||||
|
||||
|
||||
# vim: ft=toml
|
22
tests/manager.crt
Normal file
@ -0,0 +1,22 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDmjCCAoKgAwIBAgIJANsBsjPEVQ3CMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYD
|
||||
VQQGEwJDTjELMAkGA1UECAwCQkoxETAPBgNVBAcMCFRzaW5naHVhMQ0wCwYDVQQK
|
||||
DARUVU5BMRAwDgYDVQQLDAdNaXJyb3JzMRIwEAYDVQQDDAlsb2NhbGhvc3QxIDAe
|
||||
BgkqhkiG9w0BCQEWEXJvb3RAbWlycm9ycy50dW5hMB4XDTE2MDQyODExMzAwNloX
|
||||
DTI2MDQyNjExMzAwNlowTzELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkJKMRAwDgYD
|
||||
VQQHDAdCZWlqaW5nMQ0wCwYDVQQLDARUVU5BMRIwEAYDVQQDDAlsb2NhbGhvc3Qw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDsQ2iLGiyJgMe1Y4kvmkZ8
|
||||
2fGOCZWp1rYZ5DWCqKZ4WtlmnxHYT4ZkopCCNo0FoQZ4TmDPWJctfRcHaTbidtFp
|
||||
u416rg9zcg9jlwtO0OKNTzS0RkiF2zUyX4bGFx85xu9z18JYwnWej4fvpfGsPUev
|
||||
T/roLkuUyaHJc+LeOIT0e9+mwSUC6KckGC86B5PK1gyFFjnuNeuk9TL6jnzAcczZ
|
||||
sCF8gzDAtxEN++fQFxY/ZMnyAGzmyo9qVqJwLB7ANU6PfcIpcaD0GRDqOFRyDwCM
|
||||
WmLHIZAltmDOKpd1Qj0N4nsPbsExQHBP01B2iB18CR8zG2DrCi77ZafNvQjL7KZX
|
||||
AgMBAAGjQzBBMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMCcGA1UdEQQgMB6CCWxv
|
||||
Y2FsaG9zdIIRbWFuYWdlci5sb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAKrN
|
||||
zOxDqtZzx8Lj+0/EahuINCrJWWA29jnbz7u4nJ+38zLW4WFJLF6DWSaFOLjQjwUk
|
||||
X8RD/Ja5UW1eK0Ur+Q9pkNxpqZstOBHs/SuudMwfYu48uMs938+sS58DMV3Yeyjx
|
||||
Jk8RaWgWrsrTXBpxmGbjWSV+HCoM56lzOSVp1g5H0ksbYakxR6lmkFagptcC2HEL
|
||||
QMtgnQc+DPXUMGkAGaWOx7Wrwby2elDPafP1eZEBR+tBdkD4C2/bDAdK2soEN48K
|
||||
EdWYFiWiefGb+Vf60mrud+dRF069nOKYOg6xTDg3jy4PIJp44Luxn7vOZRV/zmfT
|
||||
0BZ5A+Zy/iAtg7hw5sE=
|
||||
-----END CERTIFICATE-----
|
18
tests/manager.csr
Normal file
@ -0,0 +1,18 @@
|
||||
-----BEGIN CERTIFICATE REQUEST-----
|
||||
MIIC5jCCAc4CAQAwTzELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkJKMRAwDgYDVQQH
|
||||
DAdCZWlqaW5nMQ0wCwYDVQQLDARUVU5BMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEi
|
||||
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDsQ2iLGiyJgMe1Y4kvmkZ82fGO
|
||||
CZWp1rYZ5DWCqKZ4WtlmnxHYT4ZkopCCNo0FoQZ4TmDPWJctfRcHaTbidtFpu416
|
||||
rg9zcg9jlwtO0OKNTzS0RkiF2zUyX4bGFx85xu9z18JYwnWej4fvpfGsPUevT/ro
|
||||
LkuUyaHJc+LeOIT0e9+mwSUC6KckGC86B5PK1gyFFjnuNeuk9TL6jnzAcczZsCF8
|
||||
gzDAtxEN++fQFxY/ZMnyAGzmyo9qVqJwLB7ANU6PfcIpcaD0GRDqOFRyDwCMWmLH
|
||||
IZAltmDOKpd1Qj0N4nsPbsExQHBP01B2iB18CR8zG2DrCi77ZafNvQjL7KZXAgMB
|
||||
AAGgUjBQBgkqhkiG9w0BCQ4xQzBBMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMCcG
|
||||
A1UdEQQgMB6CCWxvY2FsaG9zdIIRbWFuYWdlci5sb2NhbGhvc3QwDQYJKoZIhvcN
|
||||
AQELBQADggEBAOsVix8POTWeY1uGRSatGX8D9UKZxIGsquOMOWyucSUqEnkGmTri
|
||||
ketJKcKXuRP3bHsHM+XGbVm0qisfCqg5p1MX0P2yw87+zqAVXSHEuuYLeD75qnu+
|
||||
yraydJh6NDp9cwHQxAvFK2Dav8OXHEaug00ZZ3U/Mt2q/b6b2d3ihtGU+wU2Yl4b
|
||||
xBMIcqsVHapKJOQd+MJBaP2GojCwLE1yuI5Wg6iffgsydoAt+51CPUDs9/KRypqm
|
||||
zlEPmljToZBl/y/TvUBA1egAnnkXMWzhvK75GFRSPizPRUsqSfu7qysYKcTUseqd
|
||||
RBP67pHi9Hhmi4rRvytXtFF3ju/MtJ/+wxk=
|
||||
-----END CERTIFICATE REQUEST-----
|
27
tests/manager.key
Normal file
@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEA7ENoixosiYDHtWOJL5pGfNnxjgmVqda2GeQ1gqimeFrZZp8R
|
||||
2E+GZKKQgjaNBaEGeE5gz1iXLX0XB2k24nbRabuNeq4Pc3IPY5cLTtDijU80tEZI
|
||||
hds1Ml+GxhcfOcbvc9fCWMJ1no+H76XxrD1Hr0/66C5LlMmhyXPi3jiE9HvfpsEl
|
||||
AuinJBgvOgeTytYMhRY57jXrpPUy+o58wHHM2bAhfIMwwLcRDfvn0BcWP2TJ8gBs
|
||||
5sqPalaicCwewDVOj33CKXGg9BkQ6jhUcg8AjFpixyGQJbZgziqXdUI9DeJ7D27B
|
||||
MUBwT9NQdogdfAkfMxtg6wou+2Wnzb0Iy+ymVwIDAQABAoIBAQC1Vy/gxKA2kg+3
|
||||
G8TqMqGzppyPBrBHAbQXv1+K/+N2MVT4PVO3EaL3jwcXysMG9QdAQ/hubXLryT1p
|
||||
xMoJnGUzoG8BIKRfWcaSDBbz0cRx7b9oNyHnC8+S8FtDo++lqxmTcqGK+wbIQyZ1
|
||||
PIt4RjjFSMAugYolk3WIaFhTdFIoS4ozk/VZNyYzWg2XEjMugL9Pe/zU0vlzQPRj
|
||||
4vUhmX4lvuJ1/T3XR53vMU1cMiwxSGbLeG4F4zshzIh9LfbHFKNweO/YIfmFJVaS
|
||||
C7aYl9Jss5SDviUuowHcgqk6oivWr3cxiVma/zc5SMeWzgmGcDX6izQx1Y8PPsUy
|
||||
vsuLHGZRAoGBAP2DDKVc3FSslIiqV/8iHKh4sRPEJ6j03il62LwzRBmmZb3t6eD1
|
||||
oxAxJA+3dEcjxzOEdPng6Vtvbd5BqFy5kRTkqjWA03HjsFGgItbhzfw3CtsSH1R1
|
||||
IlxvA71+k65yP0QY9xwYWUBXNQtp0cLT1hlDwv+W5UCC1lxtDpyHlsBNAoGBAO6V
|
||||
BZDawpohmzLtc5O4FXyt5B/hR79VNs5bfOj856xNnf6FREVgxCgoZvYlUh80lzSN
|
||||
SQl68llCQJCWlndcdafnu5PRo2WiuJbIMcNdwZY6wT+gT/twXwE6nk7RDg9KaARc
|
||||
OCKjLJLATOslF38K9n1I0Y/ZdCBFNcBxfHHlaTMzAoGBANQ+5NaJsXo+5ziojXXw
|
||||
xFeUfITVBHNjV6EY1d5zeX+UHbhvORF79mK3Eb8K1BI/dSa/rgQK9rTzzON4yxGe
|
||||
10XL0GltCxpeC5+7V4/ai0+vcapKOOrICtWiqFn9YH1771X/JNxj0k2Y9bMxjEn2
|
||||
e1i5r8e3OQbSw8+sCsCokGE9AoGBAMx4rT97LQL5wFBCTyaPwuKLCZME+P+S4Ziz
|
||||
sfbgIRF7p+elgWBQUWz1S2CzlZEm+lvQpoLYevFipYEFfkkn1bIkGY/TQE1vyvF2
|
||||
+6crKCk/i7WjCEk/Aj1EZr63zmvuYf0yp+2PmTjgVEvHCz8XPy8ahHfbbvnlNu8K
|
||||
lBPtAF8fAoGAXuW/i9hu4sgIflWHN+QPN1je4QVMB/Ej8IGMqT9Dde0aCf95OqFp
|
||||
yct1Oz8R2VLsKI1pxIqIBrnCogHKVkYAYlnRxcykWwy2uhQrDK6CPVmgXg3Yv+7S
|
||||
kbXHpBlfVFInugn3T+Hvn1uYJ5Ih7OIfcCwZ+6B2Zal7O4RhELuk4rM=
|
||||
-----END RSA PRIVATE KEY-----
|
19
tests/managerMain.go
Normal file
@ -0,0 +1,19 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tuna/tunasync/manager"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg, err := manager.LoadConfig("manager.conf", nil)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
m := manager.GetTUNASyncManager(cfg)
|
||||
m.Run()
|
||||
}
|
27
tests/req.cnf
Normal file
@ -0,0 +1,27 @@
|
||||
[req]
|
||||
distinguished_name = req_distinguished_name
|
||||
req_extensions = v3_req
|
||||
|
||||
[req_distinguished_name]
|
||||
countryName = Country Name (2 letter code)
|
||||
countryName_default = CN
|
||||
stateOrProvinceName = State or Province Name (full name)
|
||||
stateOrProvinceName_default = BJ
|
||||
localityName = Locality Name (eg, city)
|
||||
localityName_default = Beijing
|
||||
organizationalUnitName = Organizational Unit Name (eg, section)
|
||||
organizationalUnitName_default = TUNA
|
||||
commonName = Common Name (server FQDN or domain name)
|
||||
commonName_default = localhost
|
||||
commonName_max = 64
|
||||
|
||||
[v3_req]
|
||||
# Extensions to add to a certificate request
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
subjectAltName = @alt_names
|
||||
|
||||
[alt_names]
|
||||
DNS.1 = localhost
|
||||
# DNS.2 = manager.localhost
|
||||
DNS.2 = worker.localhost
|
23
tests/rootCA.crt
Normal file
@ -0,0 +1,23 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID3TCCAsWgAwIBAgIJAJ1h7cAbsEpbMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYD
|
||||
VQQGEwJDTjELMAkGA1UECAwCQkoxETAPBgNVBAcMCFRzaW5naHVhMQ0wCwYDVQQK
|
||||
DARUVU5BMRAwDgYDVQQLDAdNaXJyb3JzMRIwEAYDVQQDDAlsb2NhbGhvc3QxIDAe
|
||||
BgkqhkiG9w0BCQEWEXJvb3RAbWlycm9ycy50dW5hMB4XDTE2MDQyODExMjcxNloX
|
||||
DTI2MDQyNjExMjcxNlowgYQxCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJCSjERMA8G
|
||||
A1UEBwwIVHNpbmdodWExDTALBgNVBAoMBFRVTkExEDAOBgNVBAsMB01pcnJvcnMx
|
||||
EjAQBgNVBAMMCWxvY2FsaG9zdDEgMB4GCSqGSIb3DQEJARYRcm9vdEBtaXJyb3Jz
|
||||
LnR1bmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGPtjiqI89E/mz
|
||||
3JuWvqbwihQczDug9GiyP5axNT+WkJka0qL+U09V05cn6qXX/JK0BHxqSPYEZy3R
|
||||
hkLIrtR0LPSk8RCxU9mv11FRigl5NevWbbzJkM2aBS1KIpD07Kk+UJkp/dsIWeNq
|
||||
Mo/4edkLqob+gIG5IQM/B1mPuAVUrqAVGRAlA1qXv2ahWcdZrbybMrQ9nBPbTwcg
|
||||
qbK6ytJ2K8GpuWdr+72SJXxIN0rmBfyHQuHwpRMP6XzTCEYd0TCr6YQ+tWnrpk8c
|
||||
djFKVjIwg22jHUcmVYXNxRw66JPK2aZrL3RkRmlJoIhd5np+SbRkWmbS5zNTgKc8
|
||||
TKUskCCVAgMBAAGjUDBOMB0GA1UdDgQWBBS6lED67P/J7snFaxZcdr0gSE/oZDAf
|
||||
BgNVHSMEGDAWgBS6lED67P/J7snFaxZcdr0gSE/oZDAMBgNVHRMEBTADAQH/MA0G
|
||||
CSqGSIb3DQEBCwUAA4IBAQCh9mwuLSnDBoIxF5XsFnv4lrNvlGvyRffDa9/wh7Pb
|
||||
s9rBKfKPO+8Yy7H57Os4Dl/2QoQTjMsvFJTY1TKE3zTDxPAaM5xmgxv3DHFFSG8r
|
||||
G9zEKyDAVzsdu1kSXvJLIdaycSXCWUjRIiYI153N5TUGtq6lctPeOv/w0P6S8KXP
|
||||
VgBpiJWiexUOYXVin2zrkbSRkNVntDEbDr5cQ0RznpyqAfKt990VzUjORarh0zyb
|
||||
+FG9pX/gjO8atGhIuA7hqxUwy4Ov70SxeiiK+POgp/Km9y36G7KM+KZKsj+8JQIq
|
||||
6/it/KzzDE/awOSw2Ti0ZqCMUCIrsDOA9nmc+t0bERON
|
||||
-----END CERTIFICATE-----
|
27
tests/rootCA.key
Normal file
@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAxj7Y4qiPPRP5s9yblr6m8IoUHMw7oPRosj+WsTU/lpCZGtKi
|
||||
/lNPVdOXJ+ql1/yStAR8akj2BGct0YZCyK7UdCz0pPEQsVPZr9dRUYoJeTXr1m28
|
||||
yZDNmgUtSiKQ9OypPlCZKf3bCFnjajKP+HnZC6qG/oCBuSEDPwdZj7gFVK6gFRkQ
|
||||
JQNal79moVnHWa28mzK0PZwT208HIKmyusrSdivBqblna/u9kiV8SDdK5gX8h0Lh
|
||||
8KUTD+l80whGHdEwq+mEPrVp66ZPHHYxSlYyMINtox1HJlWFzcUcOuiTytmmay90
|
||||
ZEZpSaCIXeZ6fkm0ZFpm0uczU4CnPEylLJAglQIDAQABAoIBAEkIvj5CewK1aTip
|
||||
/Wf7tOTI+b/iPdV+NVP1uT5vr414l+8ZypKHfqLP4NAD0jVQB3vqayt81aWpnWej
|
||||
XtcwEXT7WuWpKc0qZvgxCvRPNk5BXzEQHIzlm9kyLw0wztZsma0rZEHkE91vwChP
|
||||
mFqiCSQTHsiD70aUsu11d7lKwiv/ww0pty8OmItgL7eefq6UeIidymYSJN6j7OHJ
|
||||
+Wp6PSKeYJ2/hSVx/F6upGMBJxjaNs9Q53IgH7YwrPThjyVnpyavbJEcawdpdhNo
|
||||
Y7XqnLYKQiHi86L2Rr7C7g7cv+89GcApweNhDaJUlGzOLnN+3++7n91+S0yjI4CW
|
||||
/WCY4gECgYEA73z3yzkZ4gk+36g49ZyR+es1VYDCXrRGIpEZTneDK87h9wPCYi9b
|
||||
5/tvCRfWzJemkhORfE+t8VnC1Ar/VFQJ7gJQXZth/dDecdPQ87pE0fu95BBwQrjG
|
||||
bRgL+IIloWYh+WhIPVFyLP29lJ6s/gqR0ySKX80NjkHIxnzlNxFgqR0CgYEA0+nv
|
||||
WK1rgsyrq4jW9Iw3VnuATpSCu0BjiGGEOk/2/LLfN8YG7870o7R0QSAIKz3nI3AM
|
||||
bTsYiHOlA6d6ZZWfxMz8MPsb0YOTeDTQFg10wxq90Qk02O9nopS1cOSWAK+70lzz
|
||||
EZyNezNDlI8vsmHu+rYa2MgeFvUQbt+yGNywM9kCgYBHr294vEncGApi5jbOhiRH
|
||||
27jmNBl6QZnwxN//VdTEqzOGPfDUdPqcsO1mmmUobohkl0joHe2iHc0srXIKKvGh
|
||||
9b1al6U4VWoQRmf4XJw3ApSvjKAdyLNUemsy4roi2rB2uFlPSW7UusshjnGXxVAr
|
||||
FHf6/yT8nQJdL4to9WGqnQKBgEEzRNT/5ohD+L26SIjNa2lMblm/D8oVMYqQlmJq
|
||||
oA936X37i77U6ihEKVCwTlMfpLIek3Q4LoAtNKQ/L0V6F8IxX5aibBi2ZvUhKrTe
|
||||
RwKQg76BGqV0Y2p+XqTxb8WeTCeZOaA9jrpNN4nJ1F8KCsFQrknsqHVfyUKTyPQl
|
||||
UoFhAoGBAMXcOnMKhBwhUYZ7pkkntT6vKMBMLz4K2j0mjiYKgoriPn6H4/T2mP13
|
||||
qU8VInHwoMN/RIGTCDK2+UUnZfK+aXPhYMUEtFxWQxaWpZ2UopFYCcgYC3yLaBGu
|
||||
8eWr2G48pJrv/dBxP1nVsgEedfYfjZvyGOrbcRakfiCZOcNHaPb1
|
||||
-----END RSA PRIVATE KEY-----
|
1
tests/rootCA.srl
Normal file
@ -0,0 +1 @@
|
||||
DB01B233C4550DC3
|
54
tests/worker.conf
Normal file
@ -0,0 +1,54 @@
|
||||
[global]
|
||||
name = "test_worker"
|
||||
log_dir = "/tmp/tunasync/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/tmp/tunasync"
|
||||
concurrent = 10
|
||||
interval = 1
|
||||
|
||||
[manager]
|
||||
api_base = "https://localhost:12345"
|
||||
token = "some_token"
|
||||
ca_cert = "rootCA.crt"
|
||||
|
||||
[cgroup]
|
||||
enable = true
|
||||
base_path = "/sys/fs/cgroup"
|
||||
group = "tunasync"
|
||||
|
||||
[server]
|
||||
hostname = "localhost"
|
||||
listen_addr = "127.0.0.1"
|
||||
listen_port = 6000
|
||||
ssl_cert = "worker.crt"
|
||||
ssl_key = "worker.key"
|
||||
|
||||
[[mirrors]]
|
||||
name = "AOSP"
|
||||
provider = "command"
|
||||
command = "/tmp/tunasync/bin/myrsync2.sh"
|
||||
upstream = "https://aosp.google.com/"
|
||||
interval = 2
|
||||
mirror_dir = "/tmp/tunasync/git/AOSP"
|
||||
role = "slave"
|
||||
[mirrors.env]
|
||||
REPO = "/usr/local/bin/aosp-repo"
|
||||
|
||||
[[mirrors]]
|
||||
name = "debian"
|
||||
command = "/tmp/tunasync/bin/myrsync.sh"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://ftp.debian.org/debian/"
|
||||
use_ipv6 = true
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "fedora"
|
||||
command = "/tmp/tunasync/bin/myrsync.sh"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://ftp.fedoraproject.org/fedora/"
|
||||
use_ipv6 = true
|
||||
exclude_file = "/etc/tunasync.d/fedora-exclude.txt"
|
||||
|
||||
|
||||
# vim: ft=toml
|
22
tests/worker.crt
Normal file
@ -0,0 +1,22 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDmTCCAoGgAwIBAgIJANsBsjPEVQ3DMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYD
|
||||
VQQGEwJDTjELMAkGA1UECAwCQkoxETAPBgNVBAcMCFRzaW5naHVhMQ0wCwYDVQQK
|
||||
DARUVU5BMRAwDgYDVQQLDAdNaXJyb3JzMRIwEAYDVQQDDAlsb2NhbGhvc3QxIDAe
|
||||
BgkqhkiG9w0BCQEWEXJvb3RAbWlycm9ycy50dW5hMB4XDTE2MDQyODEyMjEwMFoX
|
||||
DTE3MDQyODEyMjEwMFowTzELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkJKMRAwDgYD
|
||||
VQQHDAdCZWlqaW5nMQ0wCwYDVQQLDARUVU5BMRIwEAYDVQQDDAlsb2NhbGhvc3Qw
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCexn2BKhy7DGoFNNi05DOr
|
||||
AZg/JITCxWJzrGMT0Ca5twP7yYTsrLDlbYhy2FwVQ45D1OycKKiOuzyxqV7lvgDI
|
||||
iNtf3LYeEKImsuMxcjkDncQ1eY5kcNG/e0sAj9FyoK/pPbjbEzzfj5z5FqDxtYkf
|
||||
4y5DR1pUf5SfQEJ0n5AclcXY8PrUwzA6MD6sAs4SZopQPunx3m0b1uYPACBIKiY0
|
||||
wZiUhrjoPCqR0orj8ZLDO0pGDFh8jmFFQMHNpwad37K3MXWkpAsR+MUXckocQ8O/
|
||||
6vIgFFDoqYxOuS3GkQ/Dh7dNaPhJ86OFJ+A8C0BDqHNYvkVVvA2gPmHN+8LFJHat
|
||||
AgMBAAGjQjBAMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMCYGA1UdEQQfMB2CCWxv
|
||||
Y2FsaG9zdIIQd29ya2VyLmxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAQEAECje
|
||||
0jI8cp5eQyDzuHbbVcl8jZXMn/UVuHOQ/VLcaBOUWHDl7QANTTtyyYT+2Q+CdpyJ
|
||||
Gn+fUB4tQP7naGR4bNpVytdttOlNZ89scZ3O74GX0vcAPvr62MxeASw44WuT6ir3
|
||||
zSTrww3qvvExG22atRIyGIFKLgmMMyzMskUFjELq80/nY55bCbStvhMJ0GHsC22n
|
||||
2YRYD8+gyCJUT3hYjXymaPojvE9Cq6zBOUUP2yIwId2LQev2UNvJaEVvphmYtS08
|
||||
VVLiXy9ye6pc+0cZonJ4aTESRIgv53pPoHNhhRkR1xbdojUKhk0Fq8NKi2bPZVzQ
|
||||
zVC9pCxHNGqRIcctzA==
|
||||
-----END CERTIFICATE-----
|
18
tests/worker.csr
Normal file
@ -0,0 +1,18 @@
|
||||
-----BEGIN CERTIFICATE REQUEST-----
|
||||
MIIC5TCCAc0CAQAwTzELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkJKMRAwDgYDVQQH
|
||||
DAdCZWlqaW5nMQ0wCwYDVQQLDARUVU5BMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEi
|
||||
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCexn2BKhy7DGoFNNi05DOrAZg/
|
||||
JITCxWJzrGMT0Ca5twP7yYTsrLDlbYhy2FwVQ45D1OycKKiOuzyxqV7lvgDIiNtf
|
||||
3LYeEKImsuMxcjkDncQ1eY5kcNG/e0sAj9FyoK/pPbjbEzzfj5z5FqDxtYkf4y5D
|
||||
R1pUf5SfQEJ0n5AclcXY8PrUwzA6MD6sAs4SZopQPunx3m0b1uYPACBIKiY0wZiU
|
||||
hrjoPCqR0orj8ZLDO0pGDFh8jmFFQMHNpwad37K3MXWkpAsR+MUXckocQ8O/6vIg
|
||||
FFDoqYxOuS3GkQ/Dh7dNaPhJ86OFJ+A8C0BDqHNYvkVVvA2gPmHN+8LFJHatAgMB
|
||||
AAGgUTBPBgkqhkiG9w0BCQ4xQjBAMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMCYG
|
||||
A1UdEQQfMB2CCWxvY2FsaG9zdIIQd29ya2VyLmxvY2FsaG9zdDANBgkqhkiG9w0B
|
||||
AQsFAAOCAQEAjiJVwuZFuuNvVTGwiLxJgqGKCp2NMPFtlqD4snpTVzSgzJLSqBvl
|
||||
d4CoF+ayW+4tY3HTmjUmWKuVZ/PC+MMWXd5LxfZC06u8uLXp2liUmD1NGqK1u6VD
|
||||
gVcS2NyX/BhIYWp3ey61i25dHDcaY1MHto6zJ2kfnt0RunvaKr3jVKsZTrfqypfz
|
||||
1AQ/E4SwdWRKaG1RorYgIs+G51oizCLoPIxMcipM+ub0Z00jfS7jFyPqtxcrtM+v
|
||||
fpRIGlqW0jBWxJUQKpds7TkPrxVojZINaANsVk3Zw+TYvmurRyU8WPoilIyQ7vxF
|
||||
tUSyxm2ss2B0tEqQZQytnNQut9G4s6svZg==
|
||||
-----END CERTIFICATE REQUEST-----
|
27
tests/worker.key
Normal file
@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEAnsZ9gSocuwxqBTTYtOQzqwGYPySEwsVic6xjE9AmubcD+8mE
|
||||
7Kyw5W2IcthcFUOOQ9TsnCiojrs8sale5b4AyIjbX9y2HhCiJrLjMXI5A53ENXmO
|
||||
ZHDRv3tLAI/RcqCv6T242xM834+c+Rag8bWJH+MuQ0daVH+Un0BCdJ+QHJXF2PD6
|
||||
1MMwOjA+rALOEmaKUD7p8d5tG9bmDwAgSComNMGYlIa46DwqkdKK4/GSwztKRgxY
|
||||
fI5hRUDBzacGnd+ytzF1pKQLEfjFF3JKHEPDv+ryIBRQ6KmMTrktxpEPw4e3TWj4
|
||||
SfOjhSfgPAtAQ6hzWL5FVbwNoD5hzfvCxSR2rQIDAQABAoIBAG37hrJzTmWPSt2C
|
||||
Zt6e+N9rAmAy1rfobLM95X+y/zBEA0FlrWsYkIzMW+lZ0Cd2nVSFaMUfMOt17awP
|
||||
a8nu3LIMgxGbXJfk4720ysXUnoPPxDtakXUn5VMjf6fK98XUYyZI+AThBZjC7XRp
|
||||
5WCpZGwvPTujcIH5wiSyKZaJdRUm3wpoZ1NB3VcmxoQM72yleU2t79YsNyFavbcn
|
||||
z6/1zaz4q1BVGZpioD9WBPGAhktrwmgYL3xcrqvMeGSY281bbXgV/YySIxibBa9z
|
||||
bTq4dImT4CxNzx2y2A+b9n/zR7TBitww1yvCf7OPJ0NK5keEVtef0p2TscjOlndk
|
||||
mv9/NQECgYEAy+2rdapdTgafYu1tM9lhx1VJjQZ8jpjkYKVzoknQ/m/4c2adYsnz
|
||||
LsowkCo/0DpjxVPE/yo6wEBUct0A7/dbQCSXhx/XStjuIUT4mZjOXtBtLKrJSF8y
|
||||
WzhFyiPv3+wdbxCmrbfK8/z+UWa+rcIV7saCbDJJTTkT6E32dBNW0O0CgYEAx1FF
|
||||
Eg+5SeqYQM9i8A708ySxPrFsRY1i2MVIiSkLiN7MEJAJKgAl8xn0/0pGDD/qjWlc
|
||||
2nL7YzYoWOGnJAfqUF5OlWZ3+VOBYEHJIrA2ajgdjVYhnfz7zCZy51OanoVJDBjw
|
||||
2gQWnBC0ISeygf4NhyvLianwoc1cp+BgVQm6RMECgYEAnF3ldxfm64lQdb6wWW15
|
||||
+CqBd01d/MlndGPpQqtvQWoCDBrG25UWju4iRqjevX/IOOp+x1lOK1QobNrheR8m
|
||||
LQzh046quo2UKpaEOOJee309+V4LcR7tsdx4RwM/T2fxOdR+uf2P9X4sU6aA1yNX
|
||||
RfuYzfXRFxGJHjuJmn+pthECgYEAvf1jv3GphyHNe4mzn2xCZTpGkaIBuNKqtEJp
|
||||
gATV7+Of1PHXKmf1xKKrfGVKHAcZBy61yazsn4dSMlb2QUwiN/WNJrAEEG9e1Wgf
|
||||
16bsV5eh48WESdqKEfFcedChhBU8qgFkJAzdmGn7qdbzOyH1tzEx1MlejHz6ozMn
|
||||
4CdjnIECgYBAEquvEj6eptAx+tVk4bk/XE0XT2qC6kYCB3U08hhlSTCb2EoDPm+n
|
||||
/gEpvHH3+pz4jvUDoBMvL4uncoUQQuVP4rvv3PoElAtl1bT1mKovqqUFJTXqZEK9
|
||||
bBgGkvCi5HpeCocIFgLxyjajnhBEeMEBkcfkG7SNrOtMTUc/dUWKaA==
|
||||
-----END RSA PRIVATE KEY-----
|
19
tests/workerMain.go
Normal file
@ -0,0 +1,19 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tuna/tunasync/worker"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg, err := worker.LoadConfig("worker.conf")
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
m := worker.GetTUNASyncWorker(cfg)
|
||||
m.Run()
|
||||
}
|
28
tunasync.py
@ -1,28 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import os
|
||||
import argparse
|
||||
|
||||
from tunasync import TUNASync
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
parser = argparse.ArgumentParser(prog="tunasync")
|
||||
parser.add_argument("-c", "--config",
|
||||
default="tunasync.ini", help="config file")
|
||||
parser.add_argument("--pidfile", default="/run/tunasync/tunasync.pid",
|
||||
help="pidfile")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
with open(args.pidfile, 'w') as f:
|
||||
f.write("{}".format(os.getpid()))
|
||||
|
||||
tunaSync = TUNASync()
|
||||
tunaSync.read_config(args.config)
|
||||
|
||||
tunaSync.run_jobs()
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,4 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
from .tunasync import TUNASync
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,62 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import sh
|
||||
import os
|
||||
from datetime import datetime
|
||||
from .hook import JobHook
|
||||
|
||||
|
||||
class BtrfsVolumeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class BtrfsHook(JobHook):
|
||||
|
||||
def __init__(self, service_dir, working_dir, gc_dir):
|
||||
self.service_dir = service_dir
|
||||
self.working_dir = working_dir
|
||||
self.gc_dir = gc_dir
|
||||
|
||||
def before_job(self, ctx={}, *args, **kwargs):
|
||||
self._create_working_snapshot()
|
||||
ctx['current_dir'] = self.working_dir
|
||||
|
||||
def after_job(self, status=None, ctx={}, *args, **kwargs):
|
||||
if status == "success":
|
||||
self._commit_changes()
|
||||
ctx['current_dir'] = self.service_dir
|
||||
|
||||
def _ensure_subvolume(self):
|
||||
# print(self.service_dir)
|
||||
try:
|
||||
ret = sh.btrfs("subvolume", "show", self.service_dir)
|
||||
except Exception, e:
|
||||
print(e)
|
||||
raise BtrfsVolumeError("Invalid subvolume")
|
||||
|
||||
if ret.stderr != '':
|
||||
raise BtrfsVolumeError("Invalid subvolume")
|
||||
|
||||
def _create_working_snapshot(self):
|
||||
self._ensure_subvolume()
|
||||
if os.path.exists(self.working_dir):
|
||||
print("Warning: working dir existed, are you sure no rsync job is running?")
|
||||
else:
|
||||
# print("btrfs subvolume snapshot {} {}".format(self.service_dir, self.working_dir))
|
||||
sh.btrfs("subvolume", "snapshot", self.service_dir, self.working_dir)
|
||||
|
||||
def _commit_changes(self):
|
||||
self._ensure_subvolume()
|
||||
self._ensure_subvolume()
|
||||
gc_dir = self.gc_dir.format(timestamp=datetime.now().strftime("%s"))
|
||||
|
||||
out = sh.mv(self.service_dir, gc_dir)
|
||||
assert out.exit_code == 0 and out.stderr == ""
|
||||
out = sh.mv(self.working_dir, self.service_dir)
|
||||
assert out.exit_code == 0 and out.stderr == ""
|
||||
# print("btrfs subvolume delete {}".format(self.tmp_dir))
|
||||
# sh.sleep(3)
|
||||
# out = sh.btrfs("subvolume", "delete", self.tmp_dir)
|
||||
# assert out.exit_code == 0 and out.stderr == ""
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,57 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import socket
|
||||
import os
|
||||
import json
|
||||
import struct
|
||||
|
||||
|
||||
class ControlServer(object):
|
||||
|
||||
valid_commands = set((
|
||||
"start", "stop", "restart", "status", "log",
|
||||
))
|
||||
|
||||
def __init__(self, address, mgr_chan, cld_chan):
|
||||
self.address = address
|
||||
self.mgr_chan = mgr_chan
|
||||
self.cld_chan = cld_chan
|
||||
try:
|
||||
os.unlink(self.address)
|
||||
except OSError:
|
||||
if os.path.exists(self.address):
|
||||
raise Exception("file exists: {}".format(self.address))
|
||||
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
self.sock.bind(self.address)
|
||||
os.chmod(address, 0o700)
|
||||
|
||||
print("Control Server listening on: {}".format(self.address))
|
||||
self.sock.listen(1)
|
||||
|
||||
def serve_forever(self):
|
||||
while 1:
|
||||
conn, _ = self.sock.accept()
|
||||
|
||||
try:
|
||||
length = struct.unpack('!H', conn.recv(2))[0]
|
||||
content = conn.recv(length)
|
||||
cmd = json.loads(content)
|
||||
if cmd['cmd'] not in self.valid_commands:
|
||||
raise Exception("Invalid Command")
|
||||
self.mgr_chan.put(("CMD", (cmd['cmd'], cmd['target'], cmd["kwargs"])))
|
||||
except Exception as e:
|
||||
print(e)
|
||||
res = "Invalid Command"
|
||||
else:
|
||||
res = self.cld_chan.get()
|
||||
|
||||
conn.sendall(struct.pack('!H', len(res)))
|
||||
conn.sendall(res)
|
||||
conn.close()
|
||||
|
||||
|
||||
def run_control_server(address, mgr_chan, cld_chan):
|
||||
cs = ControlServer(address, mgr_chan, cld_chan)
|
||||
cs.serve_forever()
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,36 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import os
|
||||
import sh
|
||||
import shlex
|
||||
from .hook import JobHook
|
||||
|
||||
|
||||
class CmdExecHook(JobHook):
|
||||
POST_SYNC = "post_sync"
|
||||
PRE_SYNC = "pre_sync"
|
||||
|
||||
def __init__(self, command, exec_at=POST_SYNC):
|
||||
self.command = shlex.split(command)
|
||||
if exec_at == self.POST_SYNC:
|
||||
self.before_job = self._keep_calm
|
||||
self.after_job = self._exec
|
||||
elif exec_at == self.PRE_SYNC:
|
||||
self.before_job = self._exec
|
||||
self.after_job = self._keep_calm
|
||||
|
||||
def _keep_calm(self, ctx={}, **kwargs):
|
||||
pass
|
||||
|
||||
def _exec(self, ctx={}, **kwargs):
|
||||
new_env = os.environ.copy()
|
||||
new_env["TUNASYNC_MIRROR_NAME"] = ctx["mirror_name"]
|
||||
new_env["TUNASYNC_WORKING_DIR"] = ctx["current_dir"]
|
||||
new_env["TUNASYNC_JOB_EXIT_STATUS"] = kwargs.get("status", "")
|
||||
|
||||
_cmd = self.command[0]
|
||||
_args = [] if len(self.command) == 1 else self.command[1:]
|
||||
cmd = sh.Command(_cmd)
|
||||
cmd(*_args, _env=new_env)
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,19 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
|
||||
|
||||
class JobHook(object):
|
||||
|
||||
def before_job(self, *args, **kwargs):
|
||||
raise NotImplementedError("")
|
||||
|
||||
def after_job(self, *args, **kwargs):
|
||||
raise NotImplementedError("")
|
||||
|
||||
def before_exec(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def after_exec(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
135
tunasync/jobs.py
@ -1,135 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import sh
|
||||
import sys
|
||||
from setproctitle import setproctitle
|
||||
import signal
|
||||
import Queue
|
||||
import traceback
|
||||
|
||||
|
||||
def run_job(sema, child_q, manager_q, provider, **settings):
|
||||
aquired = False
|
||||
setproctitle("tunasync-{}".format(provider.name))
|
||||
|
||||
def before_quit(*args):
|
||||
provider.terminate()
|
||||
if aquired:
|
||||
print("{} release semaphore".format(provider.name))
|
||||
sema.release()
|
||||
sys.exit(0)
|
||||
|
||||
def sleep_wait(timeout):
|
||||
try:
|
||||
msg = child_q.get(timeout=timeout)
|
||||
if msg == "terminate":
|
||||
manager_q.put(("CONFIG_ACK", (provider.name, "QUIT")))
|
||||
return True
|
||||
except Queue.Empty:
|
||||
return False
|
||||
|
||||
signal.signal(signal.SIGTERM, before_quit)
|
||||
|
||||
if provider.delay > 0:
|
||||
if sleep_wait(provider.delay):
|
||||
return
|
||||
|
||||
max_retry = settings.get("max_retry", 1)
|
||||
|
||||
def _real_run(idx=0, stage="job_hook", ctx=None):
|
||||
"""\
|
||||
4 stages:
|
||||
0 -> job_hook, 1 -> set_retry, 2 -> exec_hook, 3 -> exec
|
||||
"""
|
||||
|
||||
assert(ctx is not None)
|
||||
|
||||
if stage == "exec":
|
||||
# exec_job
|
||||
try:
|
||||
provider.run(ctx=ctx)
|
||||
provider.wait()
|
||||
except sh.ErrorReturnCode:
|
||||
status = "fail"
|
||||
else:
|
||||
status = "success"
|
||||
return status
|
||||
|
||||
elif stage == "set_retry":
|
||||
# enter stage 3 with retry
|
||||
for retry in range(max_retry):
|
||||
status = "syncing"
|
||||
manager_q.put(("UPDATE", (provider.name, status, ctx)))
|
||||
print("start syncing {}, retry: {}".format(provider.name, retry))
|
||||
status = _real_run(idx=0, stage="exec_hook", ctx=ctx)
|
||||
if status == "success":
|
||||
break
|
||||
return status
|
||||
|
||||
# job_hooks
|
||||
elif stage == "job_hook":
|
||||
if idx == len(provider.hooks):
|
||||
return _real_run(idx=idx, stage="set_retry", ctx=ctx)
|
||||
hook = provider.hooks[idx]
|
||||
hook_before, hook_after = hook.before_job, hook.after_job
|
||||
status = "pre-syncing"
|
||||
|
||||
elif stage == "exec_hook":
|
||||
if idx == len(provider.hooks):
|
||||
return _real_run(idx=idx, stage="exec", ctx=ctx)
|
||||
hook = provider.hooks[idx]
|
||||
hook_before, hook_after = hook.before_exec, hook.after_exec
|
||||
status = "syncing"
|
||||
|
||||
try:
|
||||
# print("%s run before_%s, %d" % (provider.name, stage, idx))
|
||||
hook_before(provider=provider, ctx=ctx)
|
||||
status = _real_run(idx=idx+1, stage=stage, ctx=ctx)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
status = "fail"
|
||||
finally:
|
||||
# print("%s run after_%s, %d" % (provider.name, stage, idx))
|
||||
# job may break when syncing
|
||||
if status != "success":
|
||||
status = "fail"
|
||||
try:
|
||||
hook_after(provider=provider, status=status, ctx=ctx)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
|
||||
return status
|
||||
|
||||
while 1:
|
||||
try:
|
||||
sema.acquire(True)
|
||||
except:
|
||||
break
|
||||
aquired = True
|
||||
|
||||
ctx = {} # put context info in it
|
||||
ctx['current_dir'] = provider.local_dir
|
||||
ctx['mirror_name'] = provider.name
|
||||
status = "pre-syncing"
|
||||
manager_q.put(("UPDATE", (provider.name, status, ctx)))
|
||||
|
||||
try:
|
||||
status = _real_run(idx=0, stage="job_hook", ctx=ctx)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
status = "fail"
|
||||
finally:
|
||||
sema.release()
|
||||
aquired = False
|
||||
|
||||
print("syncing {} finished, sleep {} minutes for the next turn".format(
|
||||
provider.name, provider.interval
|
||||
))
|
||||
|
||||
manager_q.put(("UPDATE", (provider.name, status, ctx)))
|
||||
|
||||
if sleep_wait(timeout=provider.interval * 60):
|
||||
break
|
||||
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,88 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import sh
|
||||
import os
|
||||
from .hook import JobHook
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class LogLimitHook(JobHook):
|
||||
|
||||
def __init__(self, limit=10):
|
||||
self.limit = limit
|
||||
|
||||
def before_job(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def after_job(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def before_exec(self, provider, ctx={}, *args, **kwargs):
|
||||
log_dir = provider.log_dir
|
||||
self.ensure_log_dir(log_dir)
|
||||
log_file = provider.log_file.format(
|
||||
date=datetime.now().strftime("%Y-%m-%d_%H-%M"))
|
||||
ctx['log_file'] = log_file
|
||||
if log_file == "/dev/null":
|
||||
return
|
||||
|
||||
log_link = os.path.join(log_dir, "latest")
|
||||
ctx['log_link'] = log_link
|
||||
|
||||
lfiles = [os.path.join(log_dir, lfile)
|
||||
for lfile in os.listdir(log_dir)
|
||||
if lfile.startswith(provider.name)]
|
||||
|
||||
lfiles_set = set(lfiles)
|
||||
# sort to get the newest 10 files
|
||||
lfiles_ts = sorted(
|
||||
[(os.path.getmtime(lfile), lfile) for lfile in lfiles],
|
||||
key=lambda x: x[0],
|
||||
reverse=True)
|
||||
lfiles_keep = set([x[1] for x in lfiles_ts[:self.limit]])
|
||||
lfiles_rm = lfiles_set - lfiles_keep
|
||||
# remove old files
|
||||
for lfile in lfiles_rm:
|
||||
try:
|
||||
sh.rm(lfile)
|
||||
except:
|
||||
pass
|
||||
|
||||
# create a soft link
|
||||
self.create_link(log_link, log_file)
|
||||
|
||||
def after_exec(self, status=None, ctx={}, *args, **kwargs):
|
||||
log_file = ctx.get('log_file', None)
|
||||
log_link = ctx.get('log_link', None)
|
||||
if log_file == "/dev/null":
|
||||
return
|
||||
if status == "fail":
|
||||
log_file_save = log_file + ".fail"
|
||||
try:
|
||||
sh.mv(log_file, log_file_save)
|
||||
except:
|
||||
pass
|
||||
self.create_link(log_link, log_file_save)
|
||||
|
||||
def ensure_log_dir(self, log_dir):
|
||||
if not os.path.exists(log_dir):
|
||||
sh.mkdir("-p", log_dir)
|
||||
|
||||
def create_link(self, log_link, log_file):
|
||||
if log_link == log_file:
|
||||
return
|
||||
if not (log_link and log_file):
|
||||
return
|
||||
|
||||
if os.path.lexists(log_link):
|
||||
try:
|
||||
sh.rm(log_link)
|
||||
except:
|
||||
return
|
||||
try:
|
||||
sh.ln('-s', log_file, log_link)
|
||||
except:
|
||||
return
|
||||
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,156 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import os
|
||||
from datetime import datetime
|
||||
from .mirror_provider import RsyncProvider, TwoStageRsyncProvider, ShellProvider
|
||||
from .btrfs_snapshot import BtrfsHook
|
||||
from .loglimit import LogLimitHook
|
||||
from .exec_pre_post import CmdExecHook
|
||||
|
||||
|
||||
class MirrorConfig(object):
|
||||
|
||||
_valid_providers = set(("rsync", "two-stage-rsync", "shell", ))
|
||||
|
||||
def __init__(self, parent, options):
|
||||
self._parent = parent
|
||||
self._popt = self._parent._settings
|
||||
self.options = dict(options.items()) # copy
|
||||
self._validate()
|
||||
|
||||
def _validate(self):
|
||||
provider = self.options.get("provider", None)
|
||||
assert provider in self._valid_providers
|
||||
|
||||
if provider == "rsync":
|
||||
assert "upstream" in self.options
|
||||
|
||||
elif provider == "shell":
|
||||
assert "command" in self.options
|
||||
|
||||
local_dir_tmpl = self.options.get(
|
||||
"local_dir", self._popt["global"]["local_dir"])
|
||||
|
||||
self.options["local_dir"] = local_dir_tmpl.format(
|
||||
mirror_root=self._popt["global"]["mirror_root"],
|
||||
mirror_name=self.name,
|
||||
)
|
||||
|
||||
if "interval" not in self.options:
|
||||
self.options["interval"] = self._popt["global"]["interval"]
|
||||
|
||||
assert isinstance(self.options["interval"], int)
|
||||
|
||||
log_dir = self.options.get(
|
||||
"log_dir", self._popt["global"]["log_dir"])
|
||||
if "log_file" not in self.options:
|
||||
self.options["log_file"] = os.path.join(
|
||||
log_dir, self.name, self.name + "_{date}.log")
|
||||
|
||||
self.log_dir = os.path.dirname(self.log_file)
|
||||
|
||||
if "use_btrfs" not in self.options:
|
||||
self.options["use_btrfs"] = self._parent.use_btrfs
|
||||
assert self.options["use_btrfs"] in (True, False)
|
||||
|
||||
if "env" in self.options:
|
||||
assert isinstance(self.options["env"], dict)
|
||||
|
||||
def __getattr__(self, key):
|
||||
if key in self.__dict__:
|
||||
return self.__dict__[key]
|
||||
else:
|
||||
return self.__dict__["options"].get(key, None)
|
||||
|
||||
def to_provider(self, hooks=[], no_delay=False):
|
||||
|
||||
kwargs = {
|
||||
'name': self.name,
|
||||
'upstream_url': self.upstream,
|
||||
'local_dir': self.local_dir,
|
||||
'log_dir': self.log_dir,
|
||||
'log_file': self.log_file,
|
||||
'interval': self.interval,
|
||||
'env': self.env,
|
||||
'hooks': hooks,
|
||||
}
|
||||
|
||||
if self.provider == "rsync":
|
||||
kwargs.update({
|
||||
'useIPv6': self.use_ipv6,
|
||||
'password': self.password,
|
||||
'exclude_file': self.exclude_file,
|
||||
})
|
||||
provider = RsyncProvider(**kwargs)
|
||||
|
||||
elif self.provider == "two-stage-rsync":
|
||||
kwargs.update({
|
||||
'useIPv6': self.use_ipv6,
|
||||
'password': self.password,
|
||||
'exclude_file': self.exclude_file,
|
||||
})
|
||||
provider = TwoStageRsyncProvider(**kwargs)
|
||||
provider.set_stage1_profile(self.stage1_profile)
|
||||
|
||||
elif self.options["provider"] == "shell":
|
||||
kwargs.update({
|
||||
'command': self.command,
|
||||
'log_stdout': self.options.get("log_stdout", True),
|
||||
})
|
||||
|
||||
provider = ShellProvider(**kwargs)
|
||||
|
||||
if not no_delay:
|
||||
sm = self._parent.status_manager
|
||||
last_update = sm.get_info(self.name, 'last_update')
|
||||
if last_update not in (None, '-'):
|
||||
last_update = datetime.strptime(
|
||||
last_update, '%Y-%m-%d %H:%M:%S')
|
||||
delay = int(last_update.strftime("%s")) \
|
||||
+ self.interval * 60 - int(datetime.now().strftime("%s"))
|
||||
if delay < 0:
|
||||
delay = 0
|
||||
provider.set_delay(delay)
|
||||
|
||||
return provider
|
||||
|
||||
def compare(self, other):
|
||||
assert self.name == other.name
|
||||
|
||||
for key, val in self.options.iteritems():
|
||||
if other.options.get(key, None) != val:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def hooks(self):
|
||||
hooks = []
|
||||
parent = self._parent
|
||||
if self.options["use_btrfs"]:
|
||||
working_dir = parent.btrfs_working_dir_tmpl.format(
|
||||
mirror_root=parent.mirror_root,
|
||||
mirror_name=self.name
|
||||
)
|
||||
service_dir = parent.btrfs_service_dir_tmpl.format(
|
||||
mirror_root=parent.mirror_root,
|
||||
mirror_name=self.name
|
||||
)
|
||||
gc_dir = parent.btrfs_gc_dir_tmpl.format(
|
||||
mirror_root=parent.mirror_root,
|
||||
mirror_name=self.name
|
||||
)
|
||||
hooks.append(BtrfsHook(service_dir, working_dir, gc_dir))
|
||||
|
||||
hooks.append(LogLimitHook())
|
||||
|
||||
if self.exec_pre_sync:
|
||||
hooks.append(
|
||||
CmdExecHook(self.exec_pre_sync, CmdExecHook.PRE_SYNC))
|
||||
|
||||
if self.exec_post_sync:
|
||||
hooks.append(
|
||||
CmdExecHook(self.exec_post_sync, CmdExecHook.POST_SYNC))
|
||||
|
||||
return hooks
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,226 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import sh
|
||||
import os
|
||||
import shlex
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class MirrorProvider(object):
|
||||
'''
|
||||
Mirror method class, can be `rsync', `debmirror', etc.
|
||||
'''
|
||||
|
||||
def __init__(self, name, local_dir, log_dir, log_file="/dev/null",
|
||||
interval=120, hooks=[]):
|
||||
self.name = name
|
||||
self.local_dir = local_dir
|
||||
self.log_file = log_file
|
||||
self.log_dir = log_dir
|
||||
self.interval = interval
|
||||
self.hooks = hooks
|
||||
self.p = None
|
||||
self.delay = 0
|
||||
|
||||
# deprecated
|
||||
def ensure_log_dir(self):
|
||||
log_dir = os.path.dirname(self.log_file)
|
||||
if not os.path.exists(log_dir):
|
||||
sh.mkdir("-p", log_dir)
|
||||
|
||||
def get_log_file(self, ctx={}):
|
||||
if 'log_file' in ctx:
|
||||
log_file = ctx['log_file']
|
||||
else:
|
||||
now = datetime.now().strftime("%Y-%m-%d_%H")
|
||||
log_file = self.log_file.format(date=now)
|
||||
ctx['log_file'] = log_file
|
||||
return log_file
|
||||
|
||||
def set_delay(self, sec):
|
||||
''' Set start delay '''
|
||||
self.delay = sec
|
||||
|
||||
def run(self, ctx={}):
|
||||
raise NotImplementedError("run method should be implemented")
|
||||
|
||||
def terminate(self):
|
||||
if self.p is not None:
|
||||
self.p.process.terminate()
|
||||
print("{} terminated".format(self.name))
|
||||
self.p = None
|
||||
|
||||
def wait(self):
|
||||
if self.p is not None:
|
||||
self.p.wait()
|
||||
self.p = None
|
||||
|
||||
|
||||
class RsyncProvider(MirrorProvider):
|
||||
|
||||
_default_options = ['-aHvh', '--no-o', '--no-g', '--stats',
|
||||
'--exclude', '.~tmp~/',
|
||||
'--delete', '--delete-after', '--delay-updates',
|
||||
'--safe-links', '--timeout=120', '--contimeout=120']
|
||||
|
||||
def __init__(self, name, upstream_url, local_dir, log_dir,
|
||||
useIPv6=True, password=None, exclude_file=None,
|
||||
log_file="/dev/null", interval=120, env=None, hooks=[]):
|
||||
super(RsyncProvider, self).__init__(name, local_dir, log_dir, log_file,
|
||||
interval, hooks)
|
||||
|
||||
self.upstream_url = upstream_url
|
||||
self.useIPv6 = useIPv6
|
||||
self.exclude_file = exclude_file
|
||||
self.password = password
|
||||
self.env = env
|
||||
|
||||
@property
|
||||
def options(self):
|
||||
|
||||
_options = [o for o in self._default_options] # copy
|
||||
|
||||
if self.useIPv6:
|
||||
_options.append("-6")
|
||||
|
||||
if self.exclude_file:
|
||||
_options.append("--exclude-from")
|
||||
_options.append(self.exclude_file)
|
||||
|
||||
return _options
|
||||
|
||||
def run(self, ctx={}):
|
||||
_args = self.options
|
||||
_args.append(self.upstream_url)
|
||||
|
||||
working_dir = ctx.get("current_dir", self.local_dir)
|
||||
_args.append(working_dir)
|
||||
|
||||
log_file = self.get_log_file(ctx)
|
||||
new_env = os.environ.copy()
|
||||
if self.password is not None:
|
||||
new_env["RSYNC_PASSWORD"] = self.password
|
||||
if self.env is not None and isinstance(self.env, dict):
|
||||
for k, v in self.env.items():
|
||||
new_env[k] = v
|
||||
|
||||
self.p = sh.rsync(*_args, _env=new_env, _out=log_file,
|
||||
_err_to_out=True, _out_bufsize=1, _bg=True)
|
||||
|
||||
|
||||
class TwoStageRsyncProvider(RsyncProvider):
|
||||
|
||||
_stage1_options = ['-aHvh', '--no-o', '--no-g',
|
||||
'--exclude', '.~tmp~/',
|
||||
'--safe-links', '--timeout=120', '--contimeout=120']
|
||||
|
||||
_stage2_options = ['-aHvh', '--no-o', '--no-g', '--stats',
|
||||
'--exclude', '.~tmp~/',
|
||||
'--delete', '--delete-after', '--delay-updates',
|
||||
'--safe-links', '--timeout=120', '--contimeout=120']
|
||||
|
||||
_stage1_profiles = {
|
||||
"debian": [
|
||||
'dists/',
|
||||
],
|
||||
"debian-oldstyle": [
|
||||
'Packages*', 'Sources*', 'Release*',
|
||||
'InRelease', 'i18n/*', 'ls-lR*', 'dep11/*',
|
||||
]
|
||||
}
|
||||
|
||||
def set_stage1_profile(self, profile):
|
||||
if profile not in self._stage1_profiles:
|
||||
raise Exception("Profile Undefined: %s, %s" % (profile, self.name))
|
||||
|
||||
self._stage1_excludes = self._stage1_profiles[profile]
|
||||
|
||||
def options(self, stage):
|
||||
_default_options = self._stage1_options \
|
||||
if stage == 1 else self._stage2_options
|
||||
_options = [o for o in _default_options] # copy
|
||||
|
||||
if stage == 1:
|
||||
for _exc in self._stage1_excludes:
|
||||
_options.append("--exclude")
|
||||
_options.append(_exc)
|
||||
|
||||
if self.useIPv6:
|
||||
_options.append("-6")
|
||||
|
||||
if self.exclude_file:
|
||||
_options.append("--exclude-from")
|
||||
_options.append(self.exclude_file)
|
||||
|
||||
return _options
|
||||
|
||||
def run(self, ctx={}):
|
||||
working_dir = ctx.get("current_dir", self.local_dir)
|
||||
log_file = self.get_log_file(ctx)
|
||||
new_env = os.environ.copy()
|
||||
if self.password is not None:
|
||||
new_env["RSYNC_PASSWORD"] = self.password
|
||||
if self.env is not None and isinstance(self.env, dict):
|
||||
for k, v in self.env.items():
|
||||
new_env[k] = v
|
||||
|
||||
with open(log_file, 'w', buffering=1) as f:
|
||||
def log_output(line):
|
||||
f.write(line)
|
||||
|
||||
for stage in (1, 2):
|
||||
|
||||
_args = self.options(stage)
|
||||
_args.append(self.upstream_url)
|
||||
_args.append(working_dir)
|
||||
f.write("==== Stage {} Begins ====\n\n".format(stage))
|
||||
|
||||
self.p = sh.rsync(
|
||||
*_args, _env=new_env, _out=log_output,
|
||||
_err_to_out=True, _out_bufsize=1, _bg=False
|
||||
)
|
||||
self.p.wait()
|
||||
|
||||
|
||||
class ShellProvider(MirrorProvider):
|
||||
|
||||
def __init__(self, name, command, upstream_url, local_dir, log_dir,
|
||||
log_file="/dev/null", log_stdout=True, interval=120, env=None,
|
||||
hooks=[]):
|
||||
|
||||
super(ShellProvider, self).__init__(name, local_dir, log_dir, log_file,
|
||||
interval, hooks)
|
||||
self.upstream_url = str(upstream_url)
|
||||
self.command = shlex.split(command)
|
||||
self.log_stdout = log_stdout
|
||||
self.env = env
|
||||
|
||||
def run(self, ctx={}):
|
||||
|
||||
log_file = self.get_log_file(ctx)
|
||||
|
||||
new_env = os.environ.copy()
|
||||
new_env["TUNASYNC_MIRROR_NAME"] = self.name
|
||||
new_env["TUNASYNC_LOCAL_DIR"] = self.local_dir
|
||||
new_env["TUNASYNC_WORKING_DIR"] = ctx.get("current_dir", self.local_dir)
|
||||
new_env["TUNASYNC_UPSTREAM_URL"] = self.upstream_url
|
||||
new_env["TUNASYNC_LOG_FILE"] = log_file
|
||||
|
||||
if self.env is not None and isinstance(self.env, dict):
|
||||
for k, v in self.env.items():
|
||||
new_env[k] = v
|
||||
|
||||
_cmd = self.command[0]
|
||||
_args = [] if len(self.command) == 1 else self.command[1:]
|
||||
|
||||
cmd = sh.Command(_cmd)
|
||||
|
||||
if self.log_stdout:
|
||||
self.p = cmd(*_args, _env=new_env, _out=log_file,
|
||||
_err_to_out=True, _out_bufsize=1, _bg=True)
|
||||
else:
|
||||
self.p = cmd(*_args, _env=new_env, _out='/dev/null',
|
||||
_err='/dev/null', _out_bufsize=1, _bg=True)
|
||||
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,123 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class StatusManager(object):
|
||||
|
||||
def __init__(self, parent, dbfile):
|
||||
self.parent = parent
|
||||
self.dbfile = dbfile
|
||||
self.init_mirrors()
|
||||
|
||||
def init_mirrors(self):
|
||||
mirrors = {}
|
||||
for name, cfg in self.parent.mirrors.iteritems():
|
||||
mirrors[name] = {
|
||||
'name': name,
|
||||
'last_update': '-',
|
||||
'status': 'unknown',
|
||||
'upstream': cfg.upstream or '-',
|
||||
}
|
||||
|
||||
try:
|
||||
with open(self.dbfile) as f:
|
||||
_mirrors = json.load(f)
|
||||
for m in _mirrors:
|
||||
name = m["name"]
|
||||
mirrors[name]["last_update"] = m["last_update"]
|
||||
mirrors[name]["status"] = m["status"]
|
||||
except:
|
||||
pass
|
||||
|
||||
self.mirrors = mirrors
|
||||
self.mirrors_ctx = {key: {} for key in self.mirrors}
|
||||
|
||||
def get_info(self, name, key):
|
||||
if key == "ctx":
|
||||
return self.mirrors_ctx.get(name, {})
|
||||
_m = self.mirrors.get(name, {})
|
||||
return _m.get(key, None)
|
||||
|
||||
def refresh_mirror(self, name):
|
||||
cfg = self.parent.mirrors.get(name, None)
|
||||
if cfg is None:
|
||||
return
|
||||
_m = self.mirrors.get(name, {
|
||||
'name': name,
|
||||
'last_update': '-',
|
||||
'status': '-',
|
||||
})
|
||||
_m['upstream'] = cfg.upstream or '-'
|
||||
self.mirrors[name] = dict(_m.items())
|
||||
self.commit_db()
|
||||
|
||||
def update_status(self, name, status, ctx={}):
|
||||
|
||||
_m = self.mirrors.get(name, {
|
||||
'name': name,
|
||||
'last_update': '-',
|
||||
'status': '-',
|
||||
})
|
||||
|
||||
if status in ("syncing", "fail", "pre-syncing"):
|
||||
update_time = _m["last_update"]
|
||||
elif status == "success":
|
||||
update_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
else:
|
||||
print("Invalid status: {}, from {}".format(status, name))
|
||||
|
||||
_m['last_update'] = update_time
|
||||
_m['status'] = status
|
||||
self.mirrors[name] = dict(_m.items())
|
||||
self.mirrors_ctx[name] = ctx
|
||||
|
||||
self.commit_db()
|
||||
print("Updated status file, {}:{}".format(name, status))
|
||||
|
||||
def list_status(self, _format=False):
|
||||
_mirrors = sorted(
|
||||
[m for _, m in self.mirrors.items()],
|
||||
key=lambda x: x['name']
|
||||
)
|
||||
if not _format:
|
||||
return _mirrors
|
||||
|
||||
name_len = max([len(_m['name']) for _m in _mirrors])
|
||||
update_len = max([len(_m['last_update']) for _m in _mirrors])
|
||||
status_len = max([len(_m['status']) for _m in _mirrors])
|
||||
heading = ' '.join([
|
||||
'name'.ljust(name_len),
|
||||
'last update'.ljust(update_len),
|
||||
'status'.ljust(status_len)
|
||||
])
|
||||
line = ' '.join(['-'*name_len, '-'*update_len, '-'*status_len])
|
||||
tabular = '\n'.join(
|
||||
[
|
||||
' '.join(
|
||||
(_m['name'].ljust(name_len),
|
||||
_m['last_update'].ljust(update_len),
|
||||
_m['status'].ljust(status_len))
|
||||
) for _m in _mirrors
|
||||
]
|
||||
)
|
||||
return '\n'.join((heading, line, tabular))
|
||||
|
||||
def get_status(self, name, _format=False):
|
||||
if name not in self.mirrors:
|
||||
return None
|
||||
|
||||
mir = self.mirrors[name]
|
||||
if not _format:
|
||||
return mir
|
||||
|
||||
tmpl = "{name} last_update: {last_update} status: {status}"
|
||||
return tmpl.format(**mir)
|
||||
|
||||
def commit_db(self):
|
||||
with open(self.dbfile, 'wb') as f:
|
||||
_mirrors = self.list_status()
|
||||
json.dump(_mirrors, f, indent=2, separators=(',', ':'))
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,279 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import signal
|
||||
import sys
|
||||
import toml
|
||||
|
||||
from multiprocessing import Process, Semaphore, Queue
|
||||
from . import jobs
|
||||
from .hook import JobHook
|
||||
from .mirror_config import MirrorConfig
|
||||
from .status_manager import StatusManager
|
||||
from .clt_server import run_control_server
|
||||
|
||||
|
||||
class TUNASync(object):
|
||||
|
||||
_instance = None
|
||||
_settings = None
|
||||
_inited = False
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if not cls._instance:
|
||||
cls._instance = super(TUNASync, cls).__new__(cls, *args, **kwargs)
|
||||
|
||||
return cls._instance
|
||||
|
||||
def read_config(self, config_file):
|
||||
self._config_file = config_file
|
||||
with open(self._config_file) as f:
|
||||
self._settings = toml.loads(f.read())
|
||||
|
||||
self._inited = True
|
||||
self._mirrors = {}
|
||||
self._providers = {}
|
||||
self.processes = {}
|
||||
self.semaphore = Semaphore(self._settings["global"]["concurrent"])
|
||||
self.channel = Queue()
|
||||
self._hooks = []
|
||||
|
||||
self.mirror_root = self._settings["global"]["mirror_root"]
|
||||
|
||||
self.use_btrfs = self._settings["global"]["use_btrfs"]
|
||||
self.btrfs_service_dir_tmpl = self._settings["btrfs"]["service_dir"]
|
||||
self.btrfs_working_dir_tmpl = self._settings["btrfs"]["working_dir"]
|
||||
self.btrfs_gc_dir_tmpl = self._settings["btrfs"]["gc_dir"]
|
||||
|
||||
self.status_file = self._settings["global"]["status_file"]
|
||||
self.status_manager = StatusManager(self, self.status_file)
|
||||
|
||||
self.ctrl_addr = self._settings["global"]["ctrl_addr"]
|
||||
self.ctrl_channel = Queue()
|
||||
p = Process(
|
||||
target=run_control_server,
|
||||
args=(self.ctrl_addr, self.channel, self.ctrl_channel),
|
||||
)
|
||||
p.start()
|
||||
self.processes["CTRL_SERVER"] = (self.ctrl_channel, p)
|
||||
|
||||
def add_hook(self, h):
|
||||
assert isinstance(h, JobHook)
|
||||
self._hooks.append(h)
|
||||
|
||||
def hooks(self):
|
||||
return self._hooks
|
||||
|
||||
@property
|
||||
def mirrors(self):
|
||||
if self._mirrors:
|
||||
return self._mirrors
|
||||
|
||||
for mirror_opt in self._settings["mirrors"]:
|
||||
name = mirror_opt["name"]
|
||||
self._mirrors[name] = \
|
||||
MirrorConfig(self, mirror_opt)
|
||||
|
||||
return self._mirrors
|
||||
|
||||
@property
|
||||
def providers(self):
|
||||
if self._providers:
|
||||
return self._providers
|
||||
|
||||
for name, mirror in self.mirrors.iteritems():
|
||||
hooks = mirror.hooks() + self.hooks()
|
||||
provider = mirror.to_provider(hooks, no_delay=mirror.no_delay)
|
||||
self._providers[name] = provider
|
||||
|
||||
return self._providers
|
||||
|
||||
def run_jobs(self):
|
||||
for name in self.providers:
|
||||
self.run_provider(name)
|
||||
|
||||
def sig_handler(*args):
|
||||
print("terminate subprocesses")
|
||||
for _, np in self.processes.iteritems():
|
||||
_, p = np
|
||||
p.terminate()
|
||||
print("Good Bye")
|
||||
sys.exit(0)
|
||||
|
||||
signal.signal(signal.SIGINT, sig_handler)
|
||||
signal.signal(signal.SIGTERM, sig_handler)
|
||||
signal.signal(signal.SIGUSR1, self.reload_mirrors)
|
||||
signal.signal(signal.SIGUSR2, self.reload_mirrors_force)
|
||||
|
||||
self.run_forever()
|
||||
|
||||
def run_provider(self, name):
|
||||
if name not in self.providers:
|
||||
print("{} doesnot exist".format(name))
|
||||
return
|
||||
|
||||
provider = self.providers[name]
|
||||
child_queue = Queue()
|
||||
p = Process(
|
||||
target=jobs.run_job,
|
||||
args=(self.semaphore, child_queue, self.channel, provider, ),
|
||||
kwargs={
|
||||
'max_retry': self._settings['global']['max_retry']}
|
||||
)
|
||||
p.start()
|
||||
provider.set_delay(0) # clear delay after first start
|
||||
self.processes[name] = (child_queue, p)
|
||||
|
||||
def reload_mirrors(self, signum, frame):
|
||||
try:
|
||||
return self._reload_mirrors(signum, frame, force=False)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
def reload_mirrors_force(self, signum, frame):
|
||||
try:
|
||||
return self._reload_mirrors(signum, frame, force=True)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
def _reload_mirrors(self, signum, frame, force=False):
|
||||
print("reload mirror configs, force restart: {}".format(force))
|
||||
|
||||
with open(self._config_file) as f:
|
||||
self._settings = toml.loads(f.read())
|
||||
|
||||
for mirror_opt in self._settings["mirrors"]:
|
||||
name = mirror_opt["name"]
|
||||
newMirCfg = MirrorConfig(self, mirror_opt)
|
||||
|
||||
if name in self._mirrors:
|
||||
if newMirCfg.compare(self._mirrors[name]):
|
||||
continue
|
||||
|
||||
self._mirrors[name] = newMirCfg
|
||||
|
||||
hooks = newMirCfg.hooks() + self.hooks()
|
||||
newProvider = newMirCfg.to_provider(hooks, no_delay=True)
|
||||
self._providers[name] = newProvider
|
||||
|
||||
if name in self.processes:
|
||||
q, p = self.processes[name]
|
||||
|
||||
if force:
|
||||
p.terminate()
|
||||
print("Terminated Job: {}".format(name))
|
||||
self.run_provider(name)
|
||||
else:
|
||||
q.put("terminate")
|
||||
print("New configuration queued to {}".format(name))
|
||||
else:
|
||||
print("New mirror: {}".format(name))
|
||||
self.run_provider(name)
|
||||
|
||||
self.status_manager.refresh_mirror(name)
|
||||
|
||||
def run_forever(self):
|
||||
while 1:
|
||||
try:
|
||||
msg_hdr, msg_body = self.channel.get()
|
||||
except IOError:
|
||||
continue
|
||||
|
||||
if msg_hdr == "UPDATE":
|
||||
mirror_name, status, ctx = msg_body
|
||||
try:
|
||||
self.status_manager.update_status(
|
||||
mirror_name, status, dict(ctx.items()))
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
elif msg_hdr == "CONFIG_ACK":
|
||||
mirror_name, status = msg_body
|
||||
if status == "QUIT":
|
||||
print("New configuration applied to {}".format(mirror_name))
|
||||
self.run_provider(mirror_name)
|
||||
|
||||
elif msg_hdr == "CMD":
|
||||
cmd, mirror_name, kwargs = msg_body
|
||||
if (mirror_name not in self.mirrors) and (mirror_name != "__ALL__"):
|
||||
self.ctrl_channel.put("Invalid target")
|
||||
continue
|
||||
res = self.handle_cmd(cmd, mirror_name, kwargs)
|
||||
self.ctrl_channel.put(res)
|
||||
|
||||
def handle_cmd(self, cmd, mirror_name, kwargs):
|
||||
if cmd == "restart":
|
||||
if mirror_name not in self.providers:
|
||||
res = "Invalid job: {}".format(mirror_name)
|
||||
return res
|
||||
|
||||
if mirror_name in self.processes:
|
||||
_, p = self.processes[mirror_name]
|
||||
p.terminate()
|
||||
self.providers[mirror_name].set_delay(0)
|
||||
self.run_provider(mirror_name)
|
||||
res = "Restarted Job: {}".format(mirror_name)
|
||||
|
||||
elif cmd == "stop":
|
||||
if mirror_name not in self.processes:
|
||||
res = "{} not running".format(mirror_name)
|
||||
return res
|
||||
|
||||
_, p = self.processes.pop(mirror_name)
|
||||
p.terminate()
|
||||
res = "Stopped Job: {}".format(mirror_name)
|
||||
|
||||
elif cmd == "start":
|
||||
if mirror_name in self.processes:
|
||||
res = "{} already running".format(mirror_name)
|
||||
return res
|
||||
|
||||
self.run_provider(mirror_name)
|
||||
res = "Started Job: {}".format(mirror_name)
|
||||
|
||||
elif cmd == "status":
|
||||
if mirror_name == "__ALL__":
|
||||
res = self.status_manager.list_status(_format=True)
|
||||
else:
|
||||
res = self.status_manager.get_status(mirror_name, _format=True)
|
||||
|
||||
elif cmd == "log":
|
||||
job_ctx = self.status_manager.get_info(mirror_name, "ctx")
|
||||
n = kwargs.get("n", 0)
|
||||
if n == 0:
|
||||
res = job_ctx.get(
|
||||
"log_link",
|
||||
job_ctx.get("log_file", "/dev/null"),
|
||||
)
|
||||
else:
|
||||
import os
|
||||
log_file = job_ctx.get("log_file", None)
|
||||
if log_file is None:
|
||||
return "/dev/null"
|
||||
|
||||
log_dir = os.path.dirname(log_file)
|
||||
lfiles = [
|
||||
os.path.join(log_dir, lfile)
|
||||
for lfile in os.listdir(log_dir)
|
||||
if lfile.startswith(mirror_name) and lfile != "latest"
|
||||
]
|
||||
|
||||
if len(lfiles) <= n:
|
||||
res = "Only {} log files available".format(len(lfiles))
|
||||
return res
|
||||
|
||||
lfiles_set = set(lfiles)
|
||||
# sort to get the newest 10 files
|
||||
lfiles_ts = sorted(
|
||||
[(os.path.getmtime(lfile), lfile) for lfile in lfiles_set],
|
||||
key=lambda x: x[0],
|
||||
reverse=True,
|
||||
)
|
||||
return lfiles_ts[n][1]
|
||||
|
||||
else:
|
||||
res = "Invalid command"
|
||||
|
||||
return res
|
||||
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,43 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import re
|
||||
import sh
|
||||
import os
|
||||
import argparse
|
||||
import toml
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(prog="tunasync_snapshot_gc")
|
||||
parser.add_argument("--max-level", type=int, default=1, help="max walk level to find garbage snapshots")
|
||||
parser.add_argument("--pattern", default=r"^_gc_.+_\d+", help="pattern to match garbage snapshots")
|
||||
parser.add_argument("-c", "--config", help="tunasync config file")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
pattern = re.compile(args.pattern)
|
||||
|
||||
def walk(_dir, level=1):
|
||||
if level > args.max_level:
|
||||
return
|
||||
|
||||
for fname in os.listdir(_dir):
|
||||
abs_fname = os.path.join(_dir, fname)
|
||||
if os.path.isdir(abs_fname):
|
||||
if pattern.match(fname):
|
||||
print("GC: {}".format(abs_fname))
|
||||
try:
|
||||
sh.btrfs("subvolume", "delete", abs_fname)
|
||||
except sh.ErrorReturnCode as e:
|
||||
print("Error: {}".format(e.stderr))
|
||||
else:
|
||||
walk(abs_fname, level+1)
|
||||
|
||||
with open(args.config) as f:
|
||||
settings = toml.loads(f.read())
|
||||
|
||||
mirror_root = settings["global"]["mirror_root"]
|
||||
gc_root = settings["btrfs"]["gc_root"].format(mirror_root=mirror_root)
|
||||
|
||||
walk(gc_root)
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
@ -1,64 +0,0 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
import sys
|
||||
import socket
|
||||
import argparse
|
||||
import json
|
||||
import struct
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(prog="tunasynctl")
|
||||
parser.add_argument("-s", "--socket",
|
||||
default="/run/tunasync/tunasync.sock", help="socket file")
|
||||
|
||||
subparsers = parser.add_subparsers(dest="command", help='sub-command help')
|
||||
|
||||
sp = subparsers.add_parser('start', help="start job")
|
||||
sp.add_argument("target", help="mirror job name")
|
||||
|
||||
sp = subparsers.add_parser('stop', help="stop job")
|
||||
sp.add_argument("target", help="mirror job name")
|
||||
|
||||
sp = subparsers.add_parser('restart', help="restart job")
|
||||
sp.add_argument("target", help="mirror job name")
|
||||
|
||||
sp = subparsers.add_parser('status', help="show mirror status")
|
||||
sp.add_argument("target", nargs="?", default="__ALL__", help="mirror job name")
|
||||
|
||||
sp = subparsers.add_parser('log', help="return log file path")
|
||||
sp.add_argument("-n", type=int, default=0, help="last n-th log, default 0 (latest)")
|
||||
sp.add_argument("target", help="mirror job name")
|
||||
|
||||
sp = subparsers.add_parser('help', help="show help message")
|
||||
|
||||
args = vars(parser.parse_args())
|
||||
|
||||
if args['command'] == "help":
|
||||
parser.print_help()
|
||||
sys.exit(0)
|
||||
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
|
||||
try:
|
||||
sock.connect(args.pop("socket"))
|
||||
except socket.error as msg:
|
||||
print(msg)
|
||||
sys.exit(1)
|
||||
|
||||
pack = json.dumps({
|
||||
"cmd": args.pop("command"),
|
||||
"target": args.pop("target"),
|
||||
"kwargs": args,
|
||||
})
|
||||
|
||||
try:
|
||||
sock.sendall(struct.pack('!H', len(pack)) + pack)
|
||||
length = struct.unpack('!H', sock.recv(2))[0]
|
||||
print(sock.recv(length))
|
||||
|
||||
except Exception as e:
|
||||
print(e)
|
||||
finally:
|
||||
sock.close()
|
||||
|
||||
# vim: ts=4 sw=4 sts=4 expandtab
|
83
worker/cgroup.go
Normal file
@ -0,0 +1,83 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/codeskyblue/go-sh"
|
||||
)
|
||||
|
||||
type cgroupHook struct {
|
||||
emptyHook
|
||||
provider mirrorProvider
|
||||
basePath string
|
||||
baseGroup string
|
||||
created bool
|
||||
}
|
||||
|
||||
func newCgroupHook(p mirrorProvider, basePath, baseGroup string) *cgroupHook {
|
||||
if basePath == "" {
|
||||
basePath = "/sys/fs/cgroup"
|
||||
}
|
||||
if baseGroup == "" {
|
||||
baseGroup = "tunasync"
|
||||
}
|
||||
return &cgroupHook{
|
||||
provider: p,
|
||||
basePath: basePath,
|
||||
baseGroup: baseGroup,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cgroupHook) preExec() error {
|
||||
c.created = true
|
||||
return sh.Command("cgcreate", "-g", c.Cgroup()).Run()
|
||||
}
|
||||
|
||||
func (c *cgroupHook) postExec() error {
|
||||
err := c.killAll()
|
||||
if err != nil {
|
||||
logger.Errorf("Error killing tasks: %s", err.Error())
|
||||
}
|
||||
|
||||
c.created = false
|
||||
return sh.Command("cgdelete", c.Cgroup()).Run()
|
||||
}
|
||||
|
||||
func (c *cgroupHook) Cgroup() string {
|
||||
name := c.provider.Name()
|
||||
return fmt.Sprintf("cpu:%s/%s", c.baseGroup, name)
|
||||
}
|
||||
|
||||
func (c *cgroupHook) killAll() error {
|
||||
if !c.created {
|
||||
return nil
|
||||
}
|
||||
name := c.provider.Name()
|
||||
taskFile, err := os.Open(filepath.Join(c.basePath, "cpu", c.baseGroup, name, "tasks"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer taskFile.Close()
|
||||
taskList := []int{}
|
||||
scanner := bufio.NewScanner(taskFile)
|
||||
for scanner.Scan() {
|
||||
pid, err := strconv.Atoi(scanner.Text())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
taskList = append(taskList, pid)
|
||||
}
|
||||
for _, pid := range taskList {
|
||||
logger.Debugf("Killing process: %d", pid)
|
||||
unix.Kill(pid, syscall.SIGKILL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
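The hook above only creates and deletes the control group; the sync command itself still has to be started inside it so that its child processes show up in the group's `tasks` file that `killAll()` walks. A minimal sketch of that step, assuming (this is not shown in this diff) that the runner wraps the command with `cgexec` from cgroup-bin; the group name and command are illustrative only:

```go
package main

import (
	"os"
	"os/exec"
)

// runInCgroup launches a command inside an existing cgroup via cgexec,
// e.g. group = "cpu:tunasync/debian", matching what cgroupHook.Cgroup()
// returns. This is an assumption for illustration, not code from the commit.
func runInCgroup(group string, name string, args ...string) error {
	cgArgs := append([]string{"-g", group, name}, args...)
	cmd := exec.Command("cgexec", cgArgs...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	_ = runInCgroup("cpu:tunasync/debian", "rsync", "--version")
}
```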
108
worker/cgroup_test.go
Normal file
@ -0,0 +1,108 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestCgroup(t *testing.T) {
|
||||
Convey("Cgroup Should Work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
cmdScript := filepath.Join(tmpDir, "cmd.sh")
|
||||
daemonScript := filepath.Join(tmpDir, "daemon.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
bgPidfile := filepath.Join(tmpDir, "bg.pid")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-cgroup",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: cmdScript + " " + daemonScript,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
env: map[string]string{
|
||||
"BG_PIDFILE": bgPidfile,
|
||||
},
|
||||
}
|
||||
cmdScriptContent := `#!/bin/bash
|
||||
redirect-std() {
|
||||
[[ -t 0 ]] && exec </dev/null
|
||||
[[ -t 1 ]] && exec >/dev/null
|
||||
[[ -t 2 ]] && exec 2>/dev/null
|
||||
}
|
||||
|
||||
# close all non-std* fds
|
||||
close-fds() {
|
||||
eval exec {3..255}\>\&-
|
||||
}
|
||||
|
||||
# full daemonization of external command with setsid
|
||||
daemonize() {
|
||||
(
|
||||
redirect-std
|
||||
cd /
|
||||
close-fds
|
||||
exec setsid "$@"
|
||||
) &
|
||||
}
|
||||
|
||||
echo $$
|
||||
daemonize $@
|
||||
sleep 5
|
||||
`
|
||||
daemonScriptContent := `#!/bin/bash
|
||||
echo $$ > $BG_PIDFILE
|
||||
sleep 30
|
||||
`
|
||||
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
err = ioutil.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
|
||||
provider.AddHook(cg)
|
||||
|
||||
err = cg.preExec()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go func() {
|
||||
err = provider.Run()
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
// Daemon should be started
|
||||
daemonPidBytes, err := ioutil.ReadFile(bgPidfile)
|
||||
So(err, ShouldBeNil)
|
||||
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
|
||||
logger.Debug("daemon pid: %s", daemonPid)
|
||||
procDir := filepath.Join("/proc", daemonPid)
|
||||
_, err = os.Stat(procDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// Daemon won't be killed
|
||||
_, err = os.Stat(procDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// Daemon can be killed by the cgroup killer
|
||||
cg.postExec()
|
||||
_, err = os.Stat(procDir)
|
||||
So(os.IsNotExist(err), ShouldBeTrue)
|
||||
|
||||
})
|
||||
}
|
78
worker/cmd_provider.go
Normal file
@ -0,0 +1,78 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/anmitsu/go-shlex"
|
||||
)
|
||||
|
||||
type cmdConfig struct {
|
||||
name string
|
||||
upstreamURL, command string
|
||||
workingDir, logDir, logFile string
|
||||
interval time.Duration
|
||||
env map[string]string
|
||||
}
|
||||
|
||||
type cmdProvider struct {
|
||||
baseProvider
|
||||
cmdConfig
|
||||
command []string
|
||||
}
|
||||
|
||||
func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
|
||||
// TODO: check config options
|
||||
provider := &cmdProvider{
|
||||
baseProvider: baseProvider{
|
||||
name: c.name,
|
||||
ctx: NewContext(),
|
||||
interval: c.interval,
|
||||
},
|
||||
cmdConfig: c,
|
||||
}
|
||||
|
||||
provider.ctx.Set(_WorkingDirKey, c.workingDir)
|
||||
provider.ctx.Set(_LogDirKey, c.logDir)
|
||||
provider.ctx.Set(_LogFileKey, c.logFile)
|
||||
|
||||
cmd, err := shlex.Split(c.command, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
provider.command = cmd
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (p *cmdProvider) Upstream() string {
|
||||
return p.upstreamURL
|
||||
}
|
||||
|
||||
func (p *cmdProvider) Run() error {
|
||||
if err := p.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Wait()
|
||||
}
|
||||
|
||||
func (p *cmdProvider) Start() error {
|
||||
env := map[string]string{
|
||||
"TUNASYNC_MIRROR_NAME": p.Name(),
|
||||
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
|
||||
"TUNASYNC_UPSTREAM_URL": p.upstreamURL,
|
||||
"TUNASYNC_LOG_FILE": p.LogFile(),
|
||||
}
|
||||
for k, v := range p.env {
|
||||
env[k] = v
|
||||
}
|
||||
p.cmd = newCmdJob(p, p.command, p.WorkingDir(), env)
|
||||
if err := p.prepareLogFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := p.cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
p.isRunning.Store(true)
|
||||
return nil
|
||||
}
|
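Condensed, wiring up a command provider looks like the sketch below (written as if inside package worker; the mirror name, command and paths are made up for illustration):

```go
package worker

import "time"

// exampleCmdProvider is a sketch: configure and run a command provider.
// Run() is Start() followed by Wait(); Start() injects the TUNASYNC_*
// environment variables shown above before launching the command.
func exampleCmdProvider() error {
	c := cmdConfig{
		name:        "example-mirror",
		upstreamURL: "http://example.com/",
		command:     "bash /path/to/sync.sh",
		workingDir:  "/data/mirrors/example",
		logDir:      "/var/log/tunasync/example",
		logFile:     "/var/log/tunasync/example/latest.log",
		interval:    600 * time.Second,
		env:         map[string]string{"EXTRA_FLAG": "1"},
	}
	provider, err := newCmdProvider(c)
	if err != nil {
		return err
	}
	return provider.Run()
}
```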
13
worker/common.go
Normal file
@ -0,0 +1,13 @@
|
||||
package worker
|
||||
|
||||
// put global variables and types here
|
||||
|
||||
import (
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
type empty struct{}
|
||||
|
||||
const maxRetry = 2
|
||||
|
||||
var logger = logging.MustGetLogger("tunasync")
|
102
worker/config.go
Normal file
@ -0,0 +1,102 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
type ProviderEnum uint8
|
||||
|
||||
const (
|
||||
ProvRsync ProviderEnum = iota
|
||||
ProvTwoStageRsync
|
||||
ProvCommand
|
||||
)
|
||||
|
||||
func (p *ProviderEnum) UnmarshalText(text []byte) error {
|
||||
s := string(text)
|
||||
switch s {
|
||||
case `command`:
|
||||
*p = ProvCommand
|
||||
case `rsync`:
|
||||
*p = ProvRsync
|
||||
case `two-stage-rsync`:
|
||||
*p = ProvTwoStageRsync
|
||||
default:
|
||||
return errors.New("Invalid value to provierEnum")
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Global globalConfig `toml:"global"`
|
||||
Manager managerConfig `toml:"manager"`
|
||||
Server serverConfig `toml:"server"`
|
||||
Cgroup cgroupConfig `toml:"cgroup"`
|
||||
Mirrors []mirrorConfig `toml:"mirrors"`
|
||||
}
|
||||
|
||||
type globalConfig struct {
|
||||
Name string `toml:"name"`
|
||||
LogDir string `toml:"log_dir"`
|
||||
MirrorDir string `toml:"mirror_dir"`
|
||||
Concurrent int `toml:"concurrent"`
|
||||
Interval int `toml:"interval"`
|
||||
}
|
||||
|
||||
type managerConfig struct {
|
||||
APIBase string `toml:"api_base"`
|
||||
CACert string `toml:"ca_cert"`
|
||||
Token string `toml:"token"`
|
||||
}
|
||||
|
||||
type serverConfig struct {
|
||||
Hostname string `toml:"hostname"`
|
||||
Addr string `toml:"listen_addr"`
|
||||
Port int `toml:"listen_port"`
|
||||
SSLCert string `toml:"ssl_cert"`
|
||||
SSLKey string `toml:"ssl_key"`
|
||||
}
|
||||
|
||||
type cgroupConfig struct {
|
||||
Enable bool `toml:"enable"`
|
||||
BasePath string `toml:"base_path"`
|
||||
Group string `toml:"group"`
|
||||
}
|
||||
|
||||
type mirrorConfig struct {
|
||||
Name string `toml:"name"`
|
||||
Provider ProviderEnum `toml:"provider"`
|
||||
Upstream string `toml:"upstream"`
|
||||
Interval int `toml:"interval"`
|
||||
MirrorDir string `toml:"mirror_dir"`
|
||||
LogDir string `toml:"log_dir"`
|
||||
Env map[string]string `toml:"env"`
|
||||
Role string `toml:"role"`
|
||||
|
||||
ExecOnSuccess string `toml:"exec_on_success"`
|
||||
ExecOnFailure string `toml:"exec_on_failure"`
|
||||
|
||||
Command string `toml:"command"`
|
||||
UseIPv6 bool `toml:"use_ipv6"`
|
||||
ExcludeFile string `toml:"exclude_file"`
|
||||
Password string `toml:"password"`
|
||||
Stage1Profile string `toml:"stage1_profile"`
|
||||
}
|
||||
|
||||
// LoadConfig loads configuration
|
||||
func LoadConfig(cfgFile string) (*Config, error) {
|
||||
if _, err := os.Stat(cfgFile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := new(Config)
|
||||
if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
|
||||
logger.Errorf(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
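LoadConfig only checks that the file exists and that the TOML decodes into the structs above; turning each `[[mirrors]]` entry into a provider happens elsewhere in the worker. A short sketch of reading a config and walking the mirror list (assuming it lives in package worker):

```go
package worker

// loadAndList is a sketch: read a worker config and log its mirrors.
func loadAndList(path string) error {
	cfg, err := LoadConfig(path)
	if err != nil {
		return err
	}
	logger.Noticef("worker %s with %d mirror(s)", cfg.Global.Name, len(cfg.Mirrors))
	for _, m := range cfg.Mirrors {
		// the provider field is decoded from "command" / "rsync" /
		// "two-stage-rsync" by ProviderEnum.UnmarshalText above
		logger.Noticef("  %s (provider %d) <- %s", m.Name, m.Provider, m.Upstream)
	}
	return nil
}
```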
154
worker/config_test.go
Normal file
@ -0,0 +1,154 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
var cfgBlob = `
|
||||
[global]
|
||||
name = "test_worker"
|
||||
log_dir = "/var/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/data/mirrors"
|
||||
concurrent = 10
|
||||
interval = 240
|
||||
|
||||
[manager]
|
||||
api_base = "https://127.0.0.1:5000"
|
||||
token = "some_token"
|
||||
|
||||
[server]
|
||||
hostname = "worker1.example.com"
|
||||
listen_addr = "127.0.0.1"
|
||||
listen_port = 6000
|
||||
ssl_cert = "/etc/tunasync.d/worker1.cert"
|
||||
ssl_key = "/etc/tunasync.d/worker1.key"
|
||||
|
||||
[[mirrors]]
|
||||
name = "AOSP"
|
||||
provider = "command"
|
||||
upstream = "https://aosp.google.com/"
|
||||
interval = 720
|
||||
mirror_dir = "/data/git/AOSP"
|
||||
exec_on_success = "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
|
||||
[mirrors.env]
|
||||
REPO = "/usr/local/bin/aosp-repo"
|
||||
|
||||
[[mirrors]]
|
||||
name = "debian"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://ftp.debian.org/debian/"
|
||||
use_ipv6 = true
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "fedora"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://ftp.fedoraproject.org/fedora/"
|
||||
use_ipv6 = true
|
||||
exclude_file = "/etc/tunasync.d/fedora-exclude.txt"
|
||||
exec_on_failure = "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
|
||||
`
|
||||
|
||||
Convey("When giving invalid file", t, func() {
|
||||
cfg, err := LoadConfig("/path/to/invalid/file")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(cfg, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("Everything should work on valid config file", t, func() {
|
||||
tmpfile, err := ioutil.TempFile("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Global.Name, ShouldEqual, "test_worker")
|
||||
So(cfg.Global.Interval, ShouldEqual, 240)
|
||||
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
|
||||
|
||||
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
|
||||
So(cfg.Server.Hostname, ShouldEqual, "worker1.example.com")
|
||||
|
||||
m := cfg.Mirrors[0]
|
||||
So(m.Name, ShouldEqual, "AOSP")
|
||||
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
|
||||
So(m.Provider, ShouldEqual, ProvCommand)
|
||||
So(m.Interval, ShouldEqual, 720)
|
||||
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
|
||||
|
||||
m = cfg.Mirrors[1]
|
||||
So(m.Name, ShouldEqual, "debian")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, ProvTwoStageRsync)
|
||||
|
||||
m = cfg.Mirrors[2]
|
||||
So(m.Name, ShouldEqual, "fedora")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, ProvRsync)
|
||||
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
|
||||
|
||||
So(len(cfg.Mirrors), ShouldEqual, 3)
|
||||
})
|
||||
|
||||
Convey("Providers can be inited from a valid config file", t, func() {
|
||||
tmpfile, err := ioutil.TempFile("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
w := &Worker{
|
||||
cfg: cfg,
|
||||
providers: make(map[string]mirrorProvider),
|
||||
}
|
||||
|
||||
w.initProviders()
|
||||
|
||||
p := w.providers["AOSP"]
|
||||
So(p.Name(), ShouldEqual, "AOSP")
|
||||
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/AOSP")
|
||||
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/AOSP/latest.log")
|
||||
_, ok := p.(*cmdProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
for _, hook := range p.Hooks() {
|
||||
switch h := hook.(type) {
|
||||
case *execPostHook:
|
||||
So(h.command, ShouldResemble, []string{"bash", "-c", `echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status`})
|
||||
}
|
||||
}
|
||||
|
||||
p = w.providers["debian"]
|
||||
So(p.Name(), ShouldEqual, "debian")
|
||||
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian")
|
||||
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian/latest.log")
|
||||
r2p, ok := p.(*twoStageRsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(r2p.stage1Profile, ShouldEqual, "debian")
|
||||
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian")
|
||||
|
||||
p = w.providers["fedora"]
|
||||
So(p.Name(), ShouldEqual, "fedora")
|
||||
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/fedora")
|
||||
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/fedora/latest.log")
|
||||
rp, ok := p.(*rsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/fedora")
|
||||
So(rp.excludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
|
||||
|
||||
})
|
||||
}
|
61
worker/context.go
Normal file
@ -0,0 +1,61 @@
|
||||
package worker
|
||||
|
||||
// Context object aims to store runtime configurations
|
||||
|
||||
import "errors"
|
||||
|
||||
// A Context object is a layered key-value storage
|
||||
// when entering a context, changes to the storage are stored
|
||||
// in a new layer; when exiting, the top layer is popped and the storage
|
||||
// returns to the state before entering this context
|
||||
type Context struct {
|
||||
parent *Context
|
||||
store map[string]interface{}
|
||||
}
|
||||
|
||||
// NewContext returns a new context object
|
||||
func NewContext() *Context {
|
||||
return &Context{
|
||||
parent: nil,
|
||||
store: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Enter generates a new layer of context
|
||||
func (ctx *Context) Enter() *Context {
|
||||
|
||||
return &Context{
|
||||
parent: ctx,
|
||||
store: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Exit returns the upper layer of context
|
||||
func (ctx *Context) Exit() (*Context, error) {
|
||||
if ctx.parent == nil {
|
||||
return nil, errors.New("Cannot exit the bottom layer context")
|
||||
}
|
||||
return ctx.parent, nil
|
||||
}
|
||||
|
||||
// Get returns the value corresponding to key; if it's
|
||||
// not found in the current layer, the parent layer
|
||||
// context's value is returned
|
||||
func (ctx *Context) Get(key string) (interface{}, bool) {
|
||||
if ctx.parent == nil {
|
||||
if value, ok := ctx.store[key]; ok {
|
||||
return value, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
if value, ok := ctx.store[key]; ok {
|
||||
return value, true
|
||||
}
|
||||
return ctx.parent.Get(key)
|
||||
}
|
||||
|
||||
// Set sets the value for the key at the current layer
|
||||
func (ctx *Context) Set(key string, value interface{}) {
|
||||
ctx.store[key] = value
|
||||
}
|
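The test file below exercises this in detail; condensed, the layered lifecycle is just push, shadow, pop. A sketch in package worker, using the _LogFileKey constant referenced by the providers (the paths are illustrative):

```go
package worker

// contextSketch shows the enter/shadow/exit cycle of the layered Context.
func contextSketch() {
	ctx := NewContext()
	ctx.Set(_LogFileKey, "/var/log/tunasync/foo/latest.log")

	ctx = ctx.Enter() // push a new layer
	ctx.Set(_LogFileKey, "/var/log/tunasync/foo/foo_2016-04-01.log")
	v, _ := ctx.Get(_LogFileKey) // the shadowing value from the top layer
	logger.Debugf("current log file: %v", v)

	ctx, _ = ctx.Exit() // pop; the original value is visible again
	v, _ = ctx.Get(_LogFileKey)
	logger.Debugf("after exit: %v", v)
}
```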
64
worker/context_test.go
Normal file
@ -0,0 +1,64 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestContext(t *testing.T) {
|
||||
Convey("Context should work", t, func() {
|
||||
|
||||
ctx := NewContext()
|
||||
So(ctx, ShouldNotBeNil)
|
||||
So(ctx.parent, ShouldBeNil)
|
||||
|
||||
ctx.Set("logdir1", "logdir_value_1")
|
||||
ctx.Set("logdir2", "logdir_value_2")
|
||||
logdir, ok := ctx.Get("logdir1")
|
||||
So(ok, ShouldBeTrue)
|
||||
So(logdir, ShouldEqual, "logdir_value_1")
|
||||
|
||||
Convey("When entering a new context", func() {
|
||||
ctx = ctx.Enter()
|
||||
logdir, ok = ctx.Get("logdir1")
|
||||
So(ok, ShouldBeTrue)
|
||||
So(logdir, ShouldEqual, "logdir_value_1")
|
||||
|
||||
ctx.Set("logdir1", "new_value_1")
|
||||
|
||||
logdir, ok = ctx.Get("logdir1")
|
||||
So(ok, ShouldBeTrue)
|
||||
So(logdir, ShouldEqual, "new_value_1")
|
||||
|
||||
logdir, ok = ctx.Get("logdir2")
|
||||
So(ok, ShouldBeTrue)
|
||||
So(logdir, ShouldEqual, "logdir_value_2")
|
||||
|
||||
Convey("When accesing invalid key", func() {
|
||||
logdir, ok = ctx.Get("invalid_key")
|
||||
So(ok, ShouldBeFalse)
|
||||
So(logdir, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("When exiting the new context", func() {
|
||||
ctx, err := ctx.Exit()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
logdir, ok = ctx.Get("logdir1")
|
||||
So(ok, ShouldBeTrue)
|
||||
So(logdir, ShouldEqual, "logdir_value_1")
|
||||
|
||||
logdir, ok = ctx.Get("logdir2")
|
||||
So(ok, ShouldBeTrue)
|
||||
So(logdir, ShouldEqual, "logdir_value_2")
|
||||
|
||||
Convey("When exiting from top bottom context", func() {
|
||||
ctx, err := ctx.Exit()
|
||||
So(err, ShouldNotBeNil)
|
||||
So(ctx, ShouldBeNil)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
96
worker/exec_post_hook.go
Normal file
@ -0,0 +1,96 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/anmitsu/go-shlex"
|
||||
"github.com/codeskyblue/go-sh"
|
||||
)
|
||||
|
||||
// hook to execute a command after syncing,
|
||||
// typically for setting a timestamp, etc.
|
||||
|
||||
const (
|
||||
execOnSuccess uint8 = iota
|
||||
execOnFailure
|
||||
)
|
||||
|
||||
type execPostHook struct {
|
||||
emptyHook
|
||||
provider mirrorProvider
|
||||
|
||||
// exec on success or on failure
|
||||
execOn uint8
|
||||
// command
|
||||
command []string
|
||||
}
|
||||
|
||||
func newExecPostHook(provider mirrorProvider, execOn uint8, command string) (*execPostHook, error) {
|
||||
cmd, err := shlex.Split(command, true)
|
||||
if err != nil {
|
||||
// logger.Errorf("Failed to create exec-post-hook for command: %s", command)
|
||||
return nil, err
|
||||
}
|
||||
if execOn != execOnSuccess && execOn != execOnFailure {
|
||||
return nil, fmt.Errorf("Invalid option for exec-on: %d", execOn)
|
||||
}
|
||||
|
||||
return &execPostHook{
|
||||
provider: provider,
|
||||
execOn: execOn,
|
||||
command: cmd,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *execPostHook) postSuccess() error {
|
||||
if h.execOn == execOnSuccess {
|
||||
return h.Do()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *execPostHook) postFail() error {
|
||||
if h.execOn == execOnFailure {
|
||||
return h.Do()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *execPostHook) Do() error {
|
||||
p := h.provider
|
||||
|
||||
exitStatus := ""
|
||||
if h.execOn == execOnSuccess {
|
||||
exitStatus = "success"
|
||||
} else {
|
||||
exitStatus = "failure"
|
||||
}
|
||||
|
||||
env := map[string]string{
|
||||
"TUNASYNC_MIRROR_NAME": p.Name(),
|
||||
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
|
||||
"TUNASYNC_UPSTREAM_URL": p.Upstream(),
|
||||
"TUNASYNC_LOG_FILE": p.LogFile(),
|
||||
"TUNASYNC_JOB_EXIT_STATUS": exitStatus,
|
||||
}
|
||||
|
||||
session := sh.NewSession()
|
||||
for k, v := range env {
|
||||
session.SetEnv(k, v)
|
||||
}
|
||||
|
||||
var cmd string
|
||||
args := []interface{}{}
|
||||
if len(h.command) == 1 {
|
||||
cmd = h.command[0]
|
||||
} else if len(h.command) > 1 {
|
||||
cmd = h.command[0]
|
||||
for _, arg := range h.command[1:] {
|
||||
args = append(args, arg)
|
||||
}
|
||||
} else {
|
||||
return errors.New("Invalid Command")
|
||||
}
|
||||
return session.Command(cmd, args...).Run()
|
||||
}
|
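The test below drives this hook through a full job; in worker code the pattern is simply to build the hook from the configured command string and attach it to the provider. A sketch (the command mirrors the exec_on_success / exec_on_failure examples used in the config test):

```go
package worker

// addExitStatusHooks is a sketch: attach an on-success and an on-failure
// hook to a command provider so every run records its exit status.
func addExitStatusHooks(p *cmdProvider) error {
	for _, on := range []uint8{execOnSuccess, execOnFailure} {
		h, err := newExecPostHook(
			p, on,
			"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'",
		)
		if err != nil {
			return err
		}
		p.AddHook(h)
	}
	return nil
}
```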
113
worker/exec_post_test.go
Normal file
@ -0,0 +1,113 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
func TestExecPost(t *testing.T) {
|
||||
Convey("ExecPost should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-exec-post",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: filepath.Join(tmpDir, "latest.log"),
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("On success", func() {
|
||||
hook, err := newExecPostHook(provider, execOnSuccess, "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'")
|
||||
So(err, ShouldBeNil)
|
||||
provider.AddHook(hook)
|
||||
managerChan := make(chan jobMessage)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_MIRROR_NAME
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
|
||||
expectedOutput := "success\n"
|
||||
|
||||
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(outputContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
||||
Convey("On failure", func() {
|
||||
hook, err := newExecPostHook(provider, execOnFailure, "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'")
|
||||
So(err, ShouldBeNil)
|
||||
provider.AddHook(hook)
|
||||
managerChan := make(chan jobMessage)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_MIRROR_NAME
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
exit 1
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
for i := 0; i < maxRetry; i++ {
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
}
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
|
||||
expectedOutput := "failure\n"
|
||||
|
||||
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(outputContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
})
|
||||
}
|
42
worker/hooks.go
Normal file
@ -0,0 +1,42 @@
|
||||
package worker
|
||||
|
||||
/*
|
||||
hooks to exec before/after syncing
|
||||
failed
|
||||
+------------------ post-fail hooks -------------------+
|
||||
| |
|
||||
job start -> pre-job hooks --v-> pre-exec hooks --> (syncing) --> post-exec hooks --+---------> post-success --> end
|
||||
success
|
||||
*/
|
||||
|
||||
type jobHook interface {
|
||||
preJob() error
|
||||
preExec() error
|
||||
postExec() error
|
||||
postSuccess() error
|
||||
postFail() error
|
||||
}
|
||||
|
||||
type emptyHook struct {
|
||||
provider mirrorProvider
|
||||
}
|
||||
|
||||
func (h *emptyHook) preJob() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *emptyHook) preExec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *emptyHook) postExec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *emptyHook) postSuccess() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *emptyHook) postFail() error {
|
||||
return nil
|
||||
}
|
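Because emptyHook supplies no-op implementations for every stage, a concrete hook only needs to embed it and override the stages it cares about. A hypothetical example (not part of this commit) that records the finish time of each successful sync:

```go
package worker

import (
	"io/ioutil"
	"path/filepath"
	"time"
)

// lastSyncHook is a hypothetical jobHook: on post-success it writes a
// ".lastsync" timestamp file into the provider's working directory.
type lastSyncHook struct {
	emptyHook
	provider mirrorProvider
}

func (h *lastSyncHook) postSuccess() error {
	ts := time.Now().UTC().Format(time.RFC3339) + "\n"
	return ioutil.WriteFile(
		filepath.Join(h.provider.WorkingDir(), ".lastsync"),
		[]byte(ts), 0644,
	)
}
```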
262
worker/job.go
Normal file
@ -0,0 +1,262 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
tunasync "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
// this file contains the workflow of a mirror job
|
||||
|
||||
type ctrlAction uint8
|
||||
|
||||
const (
|
||||
jobStart ctrlAction = iota
|
||||
jobStop // stop syncing but keep the job
|
||||
jobDisable // disable the job (stops goroutine)
|
||||
jobRestart // restart syncing
|
||||
jobPing // ensure the goroutine is alive
|
||||
)
|
||||
|
||||
type jobMessage struct {
|
||||
status tunasync.SyncStatus
|
||||
name string
|
||||
msg string
|
||||
schedule bool
|
||||
}
|
||||
|
||||
const (
|
||||
// empty state
|
||||
stateNone uint32 = iota
|
||||
// ready to run, able to schedule
|
||||
stateReady
|
||||
// paused by jobStop
|
||||
statePaused
|
||||
// disabled by jobDisable
|
||||
stateDisabled
|
||||
)
|
||||
|
||||
type mirrorJob struct {
|
||||
provider mirrorProvider
|
||||
ctrlChan chan ctrlAction
|
||||
disabled chan empty
|
||||
state uint32
|
||||
}
|
||||
|
||||
func newMirrorJob(provider mirrorProvider) *mirrorJob {
|
||||
return &mirrorJob{
|
||||
provider: provider,
|
||||
ctrlChan: make(chan ctrlAction, 1),
|
||||
state: stateNone,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mirrorJob) Name() string {
|
||||
return m.provider.Name()
|
||||
}
|
||||
|
||||
func (m *mirrorJob) State() uint32 {
|
||||
return atomic.LoadUint32(&(m.state))
|
||||
}
|
||||
|
||||
func (m *mirrorJob) SetState(state uint32) {
|
||||
atomic.StoreUint32(&(m.state), state)
|
||||
}
|
||||
|
||||
// Run is the goroutine where the syncing job runs
|
||||
// arguments:
|
||||
// provider: mirror provider object
|
||||
// ctrlChan: receives messages from the manager
|
||||
// managerChan: pushes messages to the manager; this channel should have a large buffer
|
||||
// semaphore: limits the number of syncing jobs running concurrently
|
||||
// TODO: message struct for managerChan
|
||||
func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) error {
|
||||
|
||||
m.disabled = make(chan empty)
|
||||
defer func() {
|
||||
close(m.disabled)
|
||||
m.SetState(stateDisabled)
|
||||
}()
|
||||
|
||||
provider := m.provider
|
||||
|
||||
// to make code shorter
|
||||
runHooks := func(Hooks []jobHook, action func(h jobHook) error, hookname string) error {
|
||||
for _, hook := range Hooks {
|
||||
if err := action(hook); err != nil {
|
||||
logger.Errorf(
|
||||
"failed at %s hooks for %s: %s",
|
||||
hookname, m.Name(), err.Error(),
|
||||
)
|
||||
managerChan <- jobMessage{
|
||||
tunasync.Failed, m.Name(),
|
||||
fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
|
||||
false,
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
runJobWrapper := func(kill <-chan empty, jobDone chan<- empty) error {
|
||||
defer close(jobDone)
|
||||
|
||||
managerChan <- jobMessage{tunasync.PreSyncing, m.Name(), "", false}
|
||||
logger.Noticef("start syncing: %s", m.Name())
|
||||
|
||||
Hooks := provider.Hooks()
|
||||
rHooks := []jobHook{}
|
||||
for i := len(Hooks); i > 0; i-- {
|
||||
rHooks = append(rHooks, Hooks[i-1])
|
||||
}
|
||||
|
||||
logger.Debug("hooks: pre-job")
|
||||
err := runHooks(Hooks, func(h jobHook) error { return h.preJob() }, "pre-job")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for retry := 0; retry < maxRetry; retry++ {
|
||||
stopASAP := false // stop job as soon as possible
|
||||
|
||||
if retry > 0 {
|
||||
logger.Noticef("retry syncing: %s, retry: %d", m.Name(), retry)
|
||||
}
|
||||
err := runHooks(Hooks, func(h jobHook) error { return h.preExec() }, "pre-exec")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// start syncing
|
||||
managerChan <- jobMessage{tunasync.Syncing, m.Name(), "", false}
|
||||
|
||||
var syncErr error
|
||||
syncDone := make(chan error, 1)
|
||||
go func() {
|
||||
err := provider.Run()
|
||||
if !stopASAP {
|
||||
syncDone <- err
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case syncErr = <-syncDone:
|
||||
logger.Debug("syncing done")
|
||||
case <-kill:
|
||||
logger.Debug("received kill")
|
||||
stopASAP = true
|
||||
err := provider.Terminate()
|
||||
if err != nil {
|
||||
logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
syncErr = errors.New("killed by manager")
|
||||
}
|
||||
|
||||
// post-exec hooks
|
||||
herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
|
||||
if herr != nil {
|
||||
return herr
|
||||
}
|
||||
|
||||
if syncErr == nil {
|
||||
// syncing success
|
||||
logger.Noticef("succeeded syncing %s", m.Name())
|
||||
managerChan <- jobMessage{tunasync.Success, m.Name(), "", true}
|
||||
// post-success hooks
|
||||
err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// syncing failed
|
||||
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
|
||||
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), retry == maxRetry-1}
|
||||
|
||||
// post-fail hooks
|
||||
logger.Debug("post-fail hooks")
|
||||
err = runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// gracefully exit
|
||||
if stopASAP {
|
||||
logger.Debug("No retry, exit directly")
|
||||
return nil
|
||||
}
|
||||
// continue to next retry
|
||||
} // for retry
|
||||
return nil
|
||||
}
|
||||
|
||||
runJob := func(kill <-chan empty, jobDone chan<- empty) {
|
||||
select {
|
||||
case semaphore <- empty{}:
|
||||
defer func() { <-semaphore }()
|
||||
runJobWrapper(kill, jobDone)
|
||||
case <-kill:
|
||||
jobDone <- empty{}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
if m.State() == stateReady {
|
||||
kill := make(chan empty)
|
||||
jobDone := make(chan empty)
|
||||
go runJob(kill, jobDone)
|
||||
|
||||
_wait_for_job:
|
||||
select {
|
||||
case <-jobDone:
|
||||
logger.Debug("job done")
|
||||
case ctrl := <-m.ctrlChan:
|
||||
switch ctrl {
|
||||
case jobStop:
|
||||
m.SetState(statePaused)
|
||||
close(kill)
|
||||
<-jobDone
|
||||
case jobDisable:
|
||||
m.SetState(stateDisabled)
|
||||
close(kill)
|
||||
<-jobDone
|
||||
return nil
|
||||
case jobRestart:
|
||||
m.SetState(stateReady)
|
||||
close(kill)
|
||||
<-jobDone
|
||||
continue
|
||||
case jobStart:
|
||||
m.SetState(stateReady)
|
||||
goto _wait_for_job
|
||||
default:
|
||||
// TODO: implement this
|
||||
close(kill)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ctrl := <-m.ctrlChan
|
||||
switch ctrl {
|
||||
case jobStop:
|
||||
m.SetState(statePaused)
|
||||
case jobDisable:
|
||||
m.SetState(stateDisabled)
|
||||
return nil
|
||||
case jobRestart:
|
||||
m.SetState(stateReady)
|
||||
case jobStart:
|
||||
m.SetState(stateReady)
|
||||
default:
|
||||
// TODO
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
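The job_test.go file that follows drives this state machine at length; stripped down, the worker's side of the conversation is just a goroutine plus the control channel. A sketch in package worker:

```go
package worker

// driveJob is a sketch of how the worker runs one mirror job:
// Run() blocks in its own goroutine and is steered through ctrlChan.
func driveJob(provider mirrorProvider, managerChan chan jobMessage, semaphore chan empty) {
	job := newMirrorJob(provider)
	go job.Run(managerChan, semaphore)

	job.ctrlChan <- jobStart // schedule the first sync
	// ... later, on shutdown or config reload:
	job.ctrlChan <- jobDisable
	<-job.disabled // wait until the goroutine has exited
}
```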
177
worker/job_test.go
Normal file
@ -0,0 +1,177 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
func TestMirrorJob(t *testing.T) {
|
||||
|
||||
InitLogger(true, true, false)
|
||||
|
||||
Convey("MirrorJob should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-cmd-jobtest",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "bash " + scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 1 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
|
||||
Convey("For a normal mirror job", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_MIRROR_NAME
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
`
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n%s\n%s\n",
|
||||
provider.WorkingDir(),
|
||||
provider.Name(),
|
||||
provider.upstreamURL,
|
||||
provider.LogFile(),
|
||||
)
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readScriptContent, err := ioutil.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
Convey("If we let it run several times", func(ctx C) {
|
||||
managerChan := make(chan jobMessage, 10)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
// job should not start if we don't start it
|
||||
select {
|
||||
case <-managerChan:
|
||||
So(0, ShouldEqual, 1) // make this fail
|
||||
case <-time.After(1 * time.Second):
|
||||
So(0, ShouldEqual, 0)
|
||||
}
|
||||
|
||||
job.ctrlChan <- jobStart
|
||||
for i := 0; i < 2; i++ {
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobStart
|
||||
}
|
||||
select {
|
||||
case msg := <-managerChan:
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
|
||||
case <-time.After(2 * time.Second):
|
||||
So(0, ShouldEqual, 1)
|
||||
}
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
select {
|
||||
case <-managerChan:
|
||||
So(0, ShouldEqual, 1) // make this fail
|
||||
case <-job.disabled:
|
||||
So(0, ShouldEqual, 0)
|
||||
}
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
Convey("When running long jobs", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
sleep 5
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
managerChan := make(chan jobMessage, 10)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
Convey("If we kill it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobStop
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
|
||||
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
Convey("If we don't kill it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n",
|
||||
provider.WorkingDir(), provider.WorkingDir(),
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
}
|
108
worker/loglimit_hook.go
Normal file
@ -0,0 +1,108 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// logLimiter keeps only the most recent log files for each mirror and maintains the "latest" symlink
|
||||
|
||||
type logLimiter struct {
|
||||
emptyHook
|
||||
provider mirrorProvider
|
||||
}
|
||||
|
||||
func newLogLimiter(provider mirrorProvider) *logLimiter {
|
||||
return &logLimiter{
|
||||
provider: provider,
|
||||
}
|
||||
}
|
||||
|
||||
type fileSlice []os.FileInfo
|
||||
|
||||
func (f fileSlice) Len() int { return len(f) }
|
||||
func (f fileSlice) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
|
||||
func (f fileSlice) Less(i, j int) bool { return f[i].ModTime().Before(f[j].ModTime()) }
|
||||
|
||||
func (l *logLimiter) preExec() error {
|
||||
logger.Debugf("executing log limitter for %s", l.provider.Name())
|
||||
|
||||
p := l.provider
|
||||
if p.LogFile() == "/dev/null" {
|
||||
return nil
|
||||
}
|
||||
|
||||
logDir := p.LogDir()
|
||||
files, err := ioutil.ReadDir(logDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
os.MkdirAll(logDir, 0755)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
matchedFiles := []os.FileInfo{}
|
||||
for _, f := range files {
|
||||
if strings.HasPrefix(f.Name(), p.Name()) {
|
||||
matchedFiles = append(matchedFiles, f)
|
||||
}
|
||||
}
|
||||
|
||||
// sort the file list by modification time
|
||||
// newest first, so the oldest logs end up at the tail and get removed
|
||||
sort.Sort(
|
||||
sort.Reverse(
|
||||
fileSlice(matchedFiles),
|
||||
),
|
||||
)
|
||||
// remove old files
|
||||
if len(matchedFiles) > 9 {
|
||||
for _, f := range matchedFiles[9:] {
|
||||
// logger.Debug(f.Name())
|
||||
os.Remove(filepath.Join(logDir, f.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
logFile := filepath.Join(
|
||||
logDir,
|
||||
fmt.Sprintf(
|
||||
"%s_%s.log",
|
||||
p.Name(),
|
||||
time.Now().Format("2006-01-02_15_04"),
|
||||
),
|
||||
)
|
||||
|
||||
logLink := filepath.Join(logDir, "latest")
|
||||
|
||||
if _, err = os.Stat(logLink); err == nil {
|
||||
os.Remove(logLink)
|
||||
}
|
||||
os.Symlink(logFile, logLink)
|
||||
|
||||
ctx := p.EnterContext()
|
||||
ctx.Set(_LogFileKey, logFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *logLimiter) postSuccess() error {
|
||||
l.provider.ExitContext()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *logLimiter) postFail() error {
|
||||
logFile := l.provider.LogFile()
|
||||
logFileFail := logFile + ".fail"
|
||||
logDir := l.provider.LogDir()
|
||||
logLink := filepath.Join(logDir, "latest")
|
||||
os.Rename(logFile, logFileFail)
|
||||
os.Remove(logLink)
|
||||
os.Symlink(logFileFail, logLink)
|
||||
|
||||
l.provider.ExitContext()
|
||||
return nil
|
||||
}
|
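Taken together, `preExec` above removes all but the nine newest existing logs whose names begin with the mirror name, then points the job at a fresh timestamped log and repoints a `latest` symlink at it; `postFail` renames that log to `*.fail` and moves `latest` accordingly. A minimal Go sketch of the naming convention only (the directory and mirror name below are placeholders):

```go
// Illustrative sketch of the log naming used by logLimiter.preExec.
// The directory and mirror name are placeholders, not values from this file.
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	logDir := "/var/log/tunasync/tuna" // placeholder log dir
	mirror := "tuna"                   // placeholder mirror name

	// per-run log: <mirror>_<YYYY-MM-DD_HH_MM>.log
	logFile := filepath.Join(logDir,
		fmt.Sprintf("%s_%s.log", mirror, time.Now().Format("2006-01-02_15_04")))
	// "latest" always points at the most recent run's log
	logLink := filepath.Join(logDir, "latest")

	fmt.Printf("log file: %s\nsymlink:  %s -> %s\n", logFile, logLink, logFile)
}
```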
146
worker/loglimit_test.go
Normal file
@ -0,0 +1,146 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
func TestLogLimiter(t *testing.T) {
|
||||
Convey("LogLimiter should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpLogDir, err := ioutil.TempDir("", "tunasync-log")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
defer os.RemoveAll(tmpLogDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-loglimit",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpLogDir,
|
||||
logFile: filepath.Join(tmpLogDir, "latest.log"),
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
limiter := newLogLimiter(provider)
|
||||
provider.AddHook(limiter)
|
||||
|
||||
Convey("If logs are created simply", func() {
|
||||
for i := 0; i < 15; i++ {
|
||||
fn := filepath.Join(tmpLogDir, fmt.Sprintf("%s-%d.log", provider.Name(), i))
|
||||
f, _ := os.Create(fn)
|
||||
// time.Sleep(1 * time.Second)
|
||||
f.Close()
|
||||
}
|
||||
|
||||
matches, _ := filepath.Glob(filepath.Join(tmpLogDir, "*.log"))
|
||||
So(len(matches), ShouldEqual, 15)
|
||||
|
||||
managerChan := make(chan jobMessage)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_MIRROR_NAME
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
logFile := provider.LogFile()
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
|
||||
So(logFile, ShouldNotEqual, provider.LogFile())
|
||||
|
||||
matches, _ = filepath.Glob(filepath.Join(tmpLogDir, "*.log"))
|
||||
So(len(matches), ShouldEqual, 10)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n%s\n%s\n",
|
||||
provider.WorkingDir(),
|
||||
provider.Name(),
|
||||
provider.upstreamURL,
|
||||
logFile,
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
||||
Convey("If job failed simply", func() {
|
||||
managerChan := make(chan jobMessage)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_MIRROR_NAME
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
sleep 5
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
logFile := provider.LogFile()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
job.ctrlChan <- jobStop
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
|
||||
So(logFile, ShouldNotEqual, provider.LogFile())
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n%s\n%s\n",
|
||||
provider.WorkingDir(),
|
||||
provider.Name(),
|
||||
provider.upstreamURL,
|
||||
logFile,
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
loggedContent, err = ioutil.ReadFile(logFile + ".fail")
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
||||
})
|
||||
}
|
203
worker/provider.go
Normal file
@ -0,0 +1,203 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// a mirror provider wraps the configuration and execution of a mirror job
|
||||
|
||||
type providerType uint8
|
||||
|
||||
const (
|
||||
_WorkingDirKey = "working_dir"
|
||||
_LogDirKey = "log_dir"
|
||||
_LogFileKey = "log_file"
|
||||
)
|
||||
|
||||
// mirrorProvider is the interface that all mirror providers implement
|
||||
type mirrorProvider interface {
|
||||
// name
|
||||
Name() string
|
||||
Upstream() string
|
||||
|
||||
// run the mirror job and wait for it to finish
|
||||
Run() error
|
||||
// start the mirror job in the background
|
||||
Start() error
|
||||
// Wait job to finish
|
||||
Wait() error
|
||||
// terminate mirror job
|
||||
Terminate() error
|
||||
// whether the mirror job is currently running
|
||||
IsRunning() bool
|
||||
// Cgroup
|
||||
Cgroup() *cgroupHook
|
||||
|
||||
AddHook(hook jobHook)
|
||||
Hooks() []jobHook
|
||||
|
||||
Interval() time.Duration
|
||||
|
||||
WorkingDir() string
|
||||
LogDir() string
|
||||
LogFile() string
|
||||
IsMaster() bool
|
||||
|
||||
// enter context
|
||||
EnterContext() *Context
|
||||
// exit context
|
||||
ExitContext() *Context
|
||||
// return context
|
||||
Context() *Context
|
||||
}
|
||||
|
||||
type baseProvider struct {
|
||||
sync.Mutex
|
||||
|
||||
ctx *Context
|
||||
name string
|
||||
interval time.Duration
|
||||
isMaster bool
|
||||
|
||||
cmd *cmdJob
|
||||
isRunning atomic.Value
|
||||
|
||||
logFile *os.File
|
||||
|
||||
cgroup *cgroupHook
|
||||
hooks []jobHook
|
||||
}
|
||||
|
||||
func (p *baseProvider) Name() string {
|
||||
return p.name
|
||||
}
|
||||
|
||||
func (p *baseProvider) EnterContext() *Context {
|
||||
p.ctx = p.ctx.Enter()
|
||||
return p.ctx
|
||||
}
|
||||
|
||||
func (p *baseProvider) ExitContext() *Context {
|
||||
p.ctx, _ = p.ctx.Exit()
|
||||
return p.ctx
|
||||
}
|
||||
|
||||
func (p *baseProvider) Context() *Context {
|
||||
return p.ctx
|
||||
}
|
||||
|
||||
func (p *baseProvider) Interval() time.Duration {
|
||||
// logger.Debug("interval for %s: %v", p.Name(), p.interval)
|
||||
return p.interval
|
||||
}
|
||||
|
||||
func (p *baseProvider) IsMaster() bool {
|
||||
return p.isMaster
|
||||
}
|
||||
|
||||
func (p *baseProvider) WorkingDir() string {
|
||||
if v, ok := p.ctx.Get(_WorkingDirKey); ok {
|
||||
if s, ok := v.(string); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
panic("working dir is impossible to be non-exist")
|
||||
}
|
||||
|
||||
func (p *baseProvider) LogDir() string {
|
||||
if v, ok := p.ctx.Get(_LogDirKey); ok {
|
||||
if s, ok := v.(string); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
panic("log dir is impossible to be unavailable")
|
||||
}
|
||||
|
||||
func (p *baseProvider) LogFile() string {
|
||||
if v, ok := p.ctx.Get(_LogFileKey); ok {
|
||||
if s, ok := v.(string); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
panic("log dir is impossible to be unavailable")
|
||||
}
|
||||
|
||||
func (p *baseProvider) AddHook(hook jobHook) {
|
||||
if cg, ok := hook.(*cgroupHook); ok {
|
||||
p.cgroup = cg
|
||||
}
|
||||
p.hooks = append(p.hooks, hook)
|
||||
}
|
||||
|
||||
func (p *baseProvider) Hooks() []jobHook {
|
||||
return p.hooks
|
||||
}
|
||||
|
||||
func (p *baseProvider) Cgroup() *cgroupHook {
|
||||
return p.cgroup
|
||||
}
|
||||
|
||||
func (p *baseProvider) prepareLogFile() error {
|
||||
if p.LogFile() == "/dev/null" {
|
||||
p.cmd.SetLogFile(nil)
|
||||
return nil
|
||||
}
|
||||
if p.logFile == nil {
|
||||
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
|
||||
return err
|
||||
}
|
||||
p.logFile = logFile
|
||||
}
|
||||
p.cmd.SetLogFile(p.logFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *baseProvider) Run() error {
|
||||
panic("Not Implemented")
|
||||
}
|
||||
|
||||
func (p *baseProvider) Start() error {
|
||||
panic("Not Implemented")
|
||||
}
|
||||
|
||||
func (p *baseProvider) IsRunning() bool {
|
||||
isRunning, _ := p.isRunning.Load().(bool)
|
||||
return isRunning
|
||||
}
|
||||
|
||||
func (p *baseProvider) Wait() error {
|
||||
defer func() {
|
||||
p.Lock()
|
||||
p.isRunning.Store(false)
|
||||
if p.logFile != nil {
|
||||
p.logFile.Close()
|
||||
p.logFile = nil
|
||||
}
|
||||
p.Unlock()
|
||||
}()
|
||||
return p.cmd.Wait()
|
||||
}
|
||||
|
||||
func (p *baseProvider) Terminate() error {
|
||||
logger.Debugf("terminating provider: %s", p.Name())
|
||||
if !p.IsRunning() {
|
||||
return nil
|
||||
}
|
||||
|
||||
p.Lock()
|
||||
if p.logFile != nil {
|
||||
p.logFile.Close()
|
||||
p.logFile = nil
|
||||
}
|
||||
p.Unlock()
|
||||
|
||||
err := p.cmd.Terminate()
|
||||
p.isRunning.Store(false)
|
||||
|
||||
return err
|
||||
}
|
301
worker/provider_test.go
Normal file
@ -0,0 +1,301 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestRsyncProvider(t *testing.T) {
|
||||
Convey("Rsync Provider should work", t, func() {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := rsyncConfig{
|
||||
name: "tuna",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
|
||||
Convey("When entering a context (auto exit)", func() {
|
||||
func() {
|
||||
ctx := provider.EnterContext()
|
||||
defer provider.ExitContext()
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
newWorkingDir := "/srv/mirror/working/tuna"
|
||||
ctx.Set(_WorkingDirKey, newWorkingDir)
|
||||
So(provider.WorkingDir(), ShouldEqual, newWorkingDir)
|
||||
}()
|
||||
|
||||
Convey("After context is done", func() {
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("When entering a context (manually exit)", func() {
|
||||
ctx := provider.EnterContext()
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
newWorkingDir := "/srv/mirror/working/tuna"
|
||||
ctx.Set(_WorkingDirKey, newWorkingDir)
|
||||
So(provider.WorkingDir(), ShouldEqual, newWorkingDir)
|
||||
|
||||
Convey("After context is done", func() {
|
||||
provider.ExitContext()
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Let's try a run", func() {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo "syncing to $(pwd)"
|
||||
echo $@
|
||||
sleep 1
|
||||
echo "Done"
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
"Done\n",
|
||||
provider.WorkingDir(),
|
||||
fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
|
||||
"--delete --delete-after --delay-updates --safe-links "+
|
||||
"--timeout=120 --contimeout=120 -6 %s %s",
|
||||
provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
)
|
||||
|
||||
err = provider.Run()
|
||||
So(err, ShouldBeNil)
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func TestCmdProvider(t *testing.T) {
|
||||
Convey("Command Provider should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-cmd",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "bash " + scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
env: map[string]string{
|
||||
"AOSP_REPO_BIN": "/usr/local/bin/repo",
|
||||
},
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
|
||||
Convey("Let's try to run a simple command", func() {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo $TUNASYNC_MIRROR_NAME
|
||||
echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
echo $AOSP_REPO_BIN
|
||||
`
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n%s\n%s\n%s\n",
|
||||
provider.WorkingDir(),
|
||||
provider.Name(),
|
||||
provider.upstreamURL,
|
||||
provider.LogFile(),
|
||||
"/usr/local/bin/repo",
|
||||
)
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := ioutil.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
err = provider.Run()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
||||
Convey("If a command fails", func() {
|
||||
scriptContent := `exit 1`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := ioutil.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
err = provider.Run()
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
})
|
||||
|
||||
Convey("If a long job is killed", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
sleep 5
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go func() {
|
||||
err = provider.Run()
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestTwoStageRsyncProvider(t *testing.T) {
|
||||
Convey("TwoStageRsync Provider should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := twoStageRsyncConfig{
|
||||
name: "tuna-two-stage-rsync",
|
||||
upstreamURL: "rsync://mirrors.tuna.moe/",
|
||||
stage1Profile: "debian",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
excludeFile: tmpFile,
|
||||
}
|
||||
|
||||
provider, err := newTwoStageRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
|
||||
Convey("Try a command", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo "syncing to $(pwd)"
|
||||
echo $@
|
||||
sleep 1
|
||||
echo "Done"
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
"Done\n"+
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
"Done\n",
|
||||
provider.WorkingDir(),
|
||||
fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
|
||||
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
|
||||
"--exclude-from %s %s %s",
|
||||
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
provider.WorkingDir(),
|
||||
fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
|
||||
"--delete --delete-after --delay-updates --safe-links "+
|
||||
"--timeout=120 --contimeout=120 -6 --exclude-from %s %s %s",
|
||||
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
|
||||
})
|
||||
Convey("Try terminating", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $@
|
||||
sleep 4
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go func() {
|
||||
err = provider.Run()
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
|
||||
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
|
||||
"--exclude-from %s %s %s\n",
|
||||
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
})
|
||||
})
|
||||
}
|
97
worker/rsync_provider.go
Normal file
@ -0,0 +1,97 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type rsyncConfig struct {
|
||||
name string
|
||||
rsyncCmd string
|
||||
upstreamURL, password, excludeFile string
|
||||
workingDir, logDir, logFile string
|
||||
useIPv6 bool
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
// An rsyncProvider provides the implementation of rsync-based syncing jobs
|
||||
type rsyncProvider struct {
|
||||
baseProvider
|
||||
rsyncConfig
|
||||
options []string
|
||||
}
|
||||
|
||||
func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
|
||||
// TODO: check config options
|
||||
if !strings.HasSuffix(c.upstreamURL, "/") {
|
||||
return nil, errors.New("rsync upstream URL should ends with /")
|
||||
}
|
||||
provider := &rsyncProvider{
|
||||
baseProvider: baseProvider{
|
||||
name: c.name,
|
||||
ctx: NewContext(),
|
||||
interval: c.interval,
|
||||
},
|
||||
rsyncConfig: c,
|
||||
}
|
||||
|
||||
if c.rsyncCmd == "" {
|
||||
provider.rsyncCmd = "rsync"
|
||||
}
|
||||
|
||||
options := []string{
|
||||
"-aHvh", "--no-o", "--no-g", "--stats",
|
||||
"--exclude", ".~tmp~/",
|
||||
"--delete", "--delete-after", "--delay-updates",
|
||||
"--safe-links", "--timeout=120", "--contimeout=120",
|
||||
}
|
||||
|
||||
if c.useIPv6 {
|
||||
options = append(options, "-6")
|
||||
}
|
||||
|
||||
if c.excludeFile != "" {
|
||||
options = append(options, "--exclude-from", c.excludeFile)
|
||||
}
|
||||
provider.options = options
|
||||
|
||||
provider.ctx.Set(_WorkingDirKey, c.workingDir)
|
||||
provider.ctx.Set(_LogDirKey, c.logDir)
|
||||
provider.ctx.Set(_LogFileKey, c.logFile)
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (p *rsyncProvider) Upstream() string {
|
||||
return p.upstreamURL
|
||||
}
|
||||
|
||||
func (p *rsyncProvider) Run() error {
|
||||
if err := p.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Wait()
|
||||
}
|
||||
|
||||
func (p *rsyncProvider) Start() error {
|
||||
|
||||
env := map[string]string{}
|
||||
if p.password != "" {
|
||||
env["RSYNC_PASSWORD"] = p.password
|
||||
}
|
||||
command := []string{p.rsyncCmd}
|
||||
command = append(command, p.options...)
|
||||
command = append(command, p.upstreamURL, p.WorkingDir())
|
||||
|
||||
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
|
||||
if err := p.prepareLogFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := p.cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
p.isRunning.Store(true)
|
||||
return nil
|
||||
}
|
118
worker/runner.go
Normal file
@ -0,0 +1,118 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// runner runs OS commands given a command line, an environment and a log file
|
||||
// it is an alternative to python-sh or go-sh
|
||||
|
||||
var errProcessNotStarted = errors.New("Process Not Started")
|
||||
|
||||
type cmdJob struct {
|
||||
cmd *exec.Cmd
|
||||
workingDir string
|
||||
env map[string]string
|
||||
logFile *os.File
|
||||
finished chan empty
|
||||
provider mirrorProvider
|
||||
}
|
||||
|
||||
func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, env map[string]string) *cmdJob {
|
||||
var cmd *exec.Cmd
|
||||
|
||||
if provider.Cgroup() != nil {
|
||||
c := "cgexec"
|
||||
args := []string{"-g", provider.Cgroup().Cgroup()}
|
||||
args = append(args, cmdAndArgs...)
|
||||
cmd = exec.Command(c, args...)
|
||||
} else {
|
||||
if len(cmdAndArgs) == 1 {
|
||||
cmd = exec.Command(cmdAndArgs[0])
|
||||
} else if len(cmdAndArgs) > 1 {
|
||||
c := cmdAndArgs[0]
|
||||
args := cmdAndArgs[1:]
|
||||
cmd = exec.Command(c, args...)
|
||||
} else if len(cmdAndArgs) == 0 {
|
||||
panic("Command length should be at least 1!")
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
|
||||
if _, err := os.Stat(workingDir); os.IsNotExist(err) {
|
||||
logger.Debugf("Making dir %s", workingDir)
|
||||
if err = os.MkdirAll(workingDir, 0755); err != nil {
|
||||
logger.Errorf("Error making dir %s", workingDir)
|
||||
}
|
||||
}
|
||||
|
||||
cmd.Dir = workingDir
|
||||
cmd.Env = newEnviron(env, true)
|
||||
|
||||
return &cmdJob{
|
||||
cmd: cmd,
|
||||
workingDir: workingDir,
|
||||
env: env,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cmdJob) Start() error {
|
||||
c.finished = make(chan empty, 1)
|
||||
return c.cmd.Start()
|
||||
}
|
||||
|
||||
func (c *cmdJob) Wait() error {
|
||||
err := c.cmd.Wait()
|
||||
close(c.finished)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *cmdJob) SetLogFile(logFile *os.File) {
|
||||
c.cmd.Stdout = logFile
|
||||
c.cmd.Stderr = logFile
|
||||
}
|
||||
|
||||
func (c *cmdJob) Terminate() error {
|
||||
if c.cmd == nil || c.cmd.Process == nil {
|
||||
return errProcessNotStarted
|
||||
}
|
||||
err := unix.Kill(c.cmd.Process.Pid, syscall.SIGTERM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(2 * time.Second):
|
||||
unix.Kill(c.cmd.Process.Pid, syscall.SIGKILL)
|
||||
return errors.New("SIGTERM failed to kill the job")
|
||||
case <-c.finished:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Copied from go-sh
|
||||
func newEnviron(env map[string]string, inherit bool) []string {
|
||||
environ := make([]string, 0, len(env))
|
||||
if inherit {
|
||||
for _, line := range os.Environ() {
|
||||
// if an inherited OS variable collides with an entry in env,
|
||||
// omit the OS one (env takes precedence)
|
||||
k := strings.Split(line, "=")[0]
|
||||
if _, ok := env[k]; ok {
|
||||
continue
|
||||
}
|
||||
environ = append(environ, line)
|
||||
}
|
||||
}
|
||||
for k, v := range env {
|
||||
environ = append(environ, k+"="+v)
|
||||
}
|
||||
return environ
|
||||
}
|
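As a usage note for the helper above: the slice returned by `newEnviron` is assigned directly to `exec.Cmd.Env`, so per-job entries such as an `RSYNC_PASSWORD` override inherited variables of the same name. A hedged sketch, assuming it is called from the same `worker` package (the function name and command below are illustrative only):

```go
// Hedged sketch: exampleEnvUsage and the rsync invocation are hypothetical.
package worker

import "os/exec"

func exampleEnvUsage() *exec.Cmd {
	cmd := exec.Command("rsync", "--version")
	// entries in the map shadow any identically named inherited variables
	cmd.Env = newEnviron(map[string]string{"RSYNC_PASSWORD": "secret"}, true)
	return cmd
}
```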
72
worker/schedule.go
Normal file
@ -0,0 +1,72 @@
|
||||
package worker
|
||||
|
||||
// schedule queue for jobs
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ryszard/goskiplist/skiplist"
|
||||
)
|
||||
|
||||
type scheduleQueue struct {
|
||||
sync.Mutex
|
||||
list *skiplist.SkipList
|
||||
}
|
||||
|
||||
func timeLessThan(l, r interface{}) bool {
|
||||
tl := l.(time.Time)
|
||||
tr := r.(time.Time)
|
||||
return tl.Before(tr)
|
||||
}
|
||||
|
||||
func newScheduleQueue() *scheduleQueue {
|
||||
queue := new(scheduleQueue)
|
||||
queue.list = skiplist.NewCustomMap(timeLessThan)
|
||||
return queue
|
||||
}
|
||||
|
||||
func (q *scheduleQueue) AddJob(schedTime time.Time, job *mirrorJob) {
|
||||
q.Lock()
|
||||
defer q.Unlock()
|
||||
q.list.Set(schedTime, job)
|
||||
}
|
||||
|
||||
// pop out the first job if it's time to run it
|
||||
func (q *scheduleQueue) Pop() *mirrorJob {
|
||||
q.Lock()
|
||||
defer q.Unlock()
|
||||
|
||||
first := q.list.SeekToFirst()
|
||||
if first == nil {
|
||||
return nil
|
||||
}
|
||||
defer first.Close()
|
||||
|
||||
t := first.Key().(time.Time)
|
||||
// logger.Debug("First job should run @%v", t)
|
||||
if t.Before(time.Now()) {
|
||||
job := first.Value().(*mirrorJob)
|
||||
q.list.Delete(first.Key())
|
||||
return job
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// remove job
|
||||
func (q *scheduleQueue) Remove(name string) bool {
|
||||
q.Lock()
|
||||
defer q.Unlock()
|
||||
|
||||
cur := q.list.Iterator()
|
||||
defer cur.Close()
|
||||
|
||||
for cur.Next() {
|
||||
cj := cur.Value().(*mirrorJob)
|
||||
if cj.Name() == name {
|
||||
q.list.Delete(cur.Key())
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
50
worker/schedule_test.go
Normal file
@ -0,0 +1,50 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestSchedule(t *testing.T) {
|
||||
|
||||
Convey("MirrorJobSchedule should work", t, func(ctx C) {
|
||||
schedule := newScheduleQueue()
|
||||
|
||||
Convey("When poping on empty schedule", func() {
|
||||
job := schedule.Pop()
|
||||
So(job, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("When adding some jobs", func() {
|
||||
c := cmdConfig{
|
||||
name: "schedule_test",
|
||||
}
|
||||
provider, _ := newCmdProvider(c)
|
||||
job := newMirrorJob(provider)
|
||||
sched := time.Now().Add(1 * time.Second)
|
||||
|
||||
schedule.AddJob(sched, job)
|
||||
So(schedule.Pop(), ShouldBeNil)
|
||||
time.Sleep(1200 * time.Millisecond)
|
||||
So(schedule.Pop(), ShouldEqual, job)
|
||||
|
||||
})
|
||||
Convey("When removing jobs", func() {
|
||||
c := cmdConfig{
|
||||
name: "schedule_test",
|
||||
}
|
||||
provider, _ := newCmdProvider(c)
|
||||
job := newMirrorJob(provider)
|
||||
sched := time.Now().Add(1 * time.Second)
|
||||
|
||||
schedule.AddJob(sched, job)
|
||||
So(schedule.Remove("something"), ShouldBeFalse)
|
||||
So(schedule.Remove("schedule_test"), ShouldBeTrue)
|
||||
time.Sleep(1200 * time.Millisecond)
|
||||
So(schedule.Pop(), ShouldBeNil)
|
||||
})
|
||||
|
||||
})
|
||||
}
|
140
worker/two_stage_rsync_provider.go
Normal file
@ -0,0 +1,140 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type twoStageRsyncConfig struct {
|
||||
name string
|
||||
rsyncCmd string
|
||||
stage1Profile string
|
||||
upstreamURL, password, excludeFile string
|
||||
workingDir, logDir, logFile string
|
||||
useIPv6 bool
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
// A twoStageRsyncProvider provides the implementation of two-stage rsync-based syncing jobs
|
||||
type twoStageRsyncProvider struct {
|
||||
baseProvider
|
||||
twoStageRsyncConfig
|
||||
stage1Options []string
|
||||
stage2Options []string
|
||||
}
|
||||
|
||||
var rsyncStage1Profiles = map[string]([]string){
|
||||
"debian": []string{"dists/"},
|
||||
"debian-oldstyle": []string{
|
||||
"Packages*", "Sources*", "Release*",
|
||||
"InRelease", "i18n/*", "ls-lR*", "dep11/*",
|
||||
},
|
||||
}
|
||||
|
||||
func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, error) {
|
||||
// TODO: check config options
|
||||
if !strings.HasSuffix(c.upstreamURL, "/") {
|
||||
return nil, errors.New("rsync upstream URL should ends with /")
|
||||
}
|
||||
|
||||
provider := &twoStageRsyncProvider{
|
||||
baseProvider: baseProvider{
|
||||
name: c.name,
|
||||
ctx: NewContext(),
|
||||
interval: c.interval,
|
||||
},
|
||||
twoStageRsyncConfig: c,
|
||||
stage1Options: []string{
|
||||
"-aHvh", "--no-o", "--no-g", "--stats",
|
||||
"--exclude", ".~tmp~/",
|
||||
"--safe-links", "--timeout=120", "--contimeout=120",
|
||||
},
|
||||
stage2Options: []string{
|
||||
"-aHvh", "--no-o", "--no-g", "--stats",
|
||||
"--exclude", ".~tmp~/",
|
||||
"--delete", "--delete-after", "--delay-updates",
|
||||
"--safe-links", "--timeout=120", "--contimeout=120",
|
||||
},
|
||||
}
|
||||
|
||||
if c.rsyncCmd == "" {
|
||||
provider.rsyncCmd = "rsync"
|
||||
}
|
||||
|
||||
provider.ctx.Set(_WorkingDirKey, c.workingDir)
|
||||
provider.ctx.Set(_LogDirKey, c.logDir)
|
||||
provider.ctx.Set(_LogFileKey, c.logFile)
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (p *twoStageRsyncProvider) Upstream() string {
|
||||
return p.upstreamURL
|
||||
}
|
||||
|
||||
func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
|
||||
var options []string
|
||||
if stage == 1 {
|
||||
options = append(options, p.stage1Options...)
|
||||
stage1Excludes, ok := rsyncStage1Profiles[p.stage1Profile]
|
||||
if !ok {
|
||||
return nil, errors.New("Invalid Stage 1 Profile")
|
||||
}
|
||||
for _, exc := range stage1Excludes {
|
||||
options = append(options, "--exclude", exc)
|
||||
}
|
||||
|
||||
} else if stage == 2 {
|
||||
options = append(options, p.stage2Options...)
|
||||
} else {
|
||||
return []string{}, fmt.Errorf("Invalid stage: %d", stage)
|
||||
}
|
||||
|
||||
if p.useIPv6 {
|
||||
options = append(options, "-6")
|
||||
}
|
||||
|
||||
if p.excludeFile != "" {
|
||||
options = append(options, "--exclude-from", p.excludeFile)
|
||||
}
|
||||
|
||||
return options, nil
|
||||
}
|
||||
|
||||
func (p *twoStageRsyncProvider) Run() error {
|
||||
|
||||
env := map[string]string{}
|
||||
if p.password != "" {
|
||||
env["RSYNC_PASSWORD"] = p.password
|
||||
}
|
||||
|
||||
stages := []int{1, 2}
|
||||
for _, stage := range stages {
|
||||
command := []string{p.rsyncCmd}
|
||||
options, err := p.Options(stage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
command = append(command, options...)
|
||||
command = append(command, p.upstreamURL, p.WorkingDir())
|
||||
|
||||
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
|
||||
if err := p.prepareLogFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = p.cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
p.isRunning.Store(true)
|
||||
|
||||
err = p.cmd.Wait()
|
||||
p.isRunning.Store(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
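To make the two passes concrete: with the `debian` stage-1 profile, stage 1 syncs everything except `dists/` and performs no deletions, while stage 2 repeats the transfer with `--delete --delete-after --delay-updates`. A hedged sketch that prints the per-stage option lists (all configuration values below are placeholders):

```go
// Sketch: inspect the options produced for each stage of the provider above.
// All configuration values here are placeholders.
package worker

import "fmt"

func exampleTwoStageOptions() {
	p, err := newTwoStageRsyncProvider(twoStageRsyncConfig{
		name:          "debian-example",
		upstreamURL:   "rsync://example.org/debian/",
		stage1Profile: "debian",
		workingDir:    "/srv/mirror/debian",
		logDir:        "/var/log/tunasync",
		logFile:       "/dev/null",
	})
	if err != nil {
		panic(err)
	}
	for _, stage := range []int{1, 2} {
		opts, err := p.Options(stage)
		if err != nil {
			panic(err)
		}
		fmt.Println("stage", stage, opts)
	}
}
```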
437
worker/worker.go
Normal file
@ -0,0 +1,437 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
var tunasyncWorker *Worker
|
||||
|
||||
// A Worker is an instance of the tunasync worker
|
||||
type Worker struct {
|
||||
cfg *Config
|
||||
providers map[string]mirrorProvider
|
||||
jobs map[string]*mirrorJob
|
||||
|
||||
managerChan chan jobMessage
|
||||
semaphore chan empty
|
||||
|
||||
schedule *scheduleQueue
|
||||
httpEngine *gin.Engine
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// GetTUNASyncWorker returns a singleton Worker
|
||||
func GetTUNASyncWorker(cfg *Config) *Worker {
|
||||
if tunasyncWorker != nil {
|
||||
return tunasyncWorker
|
||||
}
|
||||
|
||||
w := &Worker{
|
||||
cfg: cfg,
|
||||
providers: make(map[string]mirrorProvider),
|
||||
jobs: make(map[string]*mirrorJob),
|
||||
|
||||
managerChan: make(chan jobMessage, 32),
|
||||
semaphore: make(chan empty, cfg.Global.Concurrent),
|
||||
|
||||
schedule: newScheduleQueue(),
|
||||
}
|
||||
|
||||
if cfg.Manager.CACert != "" {
|
||||
httpClient, err := CreateHTTPClient(cfg.Manager.CACert)
|
||||
if err != nil {
|
||||
logger.Errorf("Error initializing HTTP client: %s", err.Error())
|
||||
return nil
|
||||
}
|
||||
w.httpClient = httpClient
|
||||
}
|
||||
|
||||
w.initJobs()
|
||||
w.makeHTTPServer()
|
||||
tunasyncWorker = w
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *Worker) initProviders() {
|
||||
c := w.cfg
|
||||
|
||||
formatLogDir := func(logDir string, m mirrorConfig) string {
|
||||
tmpl, err := template.New("logDirTmpl-" + m.Name).Parse(logDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var formatedLogDir bytes.Buffer
|
||||
tmpl.Execute(&formatedLogDir, m)
|
||||
return formatedLogDir.String()
|
||||
}
|
||||
|
||||
for _, mirror := range c.Mirrors {
|
||||
logDir := mirror.LogDir
|
||||
mirrorDir := mirror.MirrorDir
|
||||
if logDir == "" {
|
||||
logDir = c.Global.LogDir
|
||||
}
|
||||
if mirrorDir == "" {
|
||||
mirrorDir = filepath.Join(
|
||||
c.Global.MirrorDir, mirror.Name,
|
||||
)
|
||||
}
|
||||
if mirror.Interval == 0 {
|
||||
mirror.Interval = c.Global.Interval
|
||||
}
|
||||
logDir = formatLogDir(logDir, mirror)
|
||||
|
||||
// IsMaster
|
||||
isMaster := true
|
||||
if mirror.Role == "slave" {
|
||||
isMaster = false
|
||||
} else {
|
||||
if mirror.Role != "" && mirror.Role != "master" {
|
||||
logger.Warningf("Invalid role configuration for %s", mirror.Name)
|
||||
}
|
||||
}
|
||||
|
||||
var provider mirrorProvider
|
||||
|
||||
switch mirror.Provider {
|
||||
case ProvCommand:
|
||||
pc := cmdConfig{
|
||||
name: mirror.Name,
|
||||
upstreamURL: mirror.Upstream,
|
||||
command: mirror.Command,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
env: mirror.Env,
|
||||
}
|
||||
p, err := newCmdProvider(pc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.isMaster = isMaster
|
||||
provider = p
|
||||
case ProvRsync:
|
||||
rc := rsyncConfig{
|
||||
name: mirror.Name,
|
||||
upstreamURL: mirror.Upstream,
|
||||
rsyncCmd: mirror.Command,
|
||||
password: mirror.Password,
|
||||
excludeFile: mirror.ExcludeFile,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
useIPv6: mirror.UseIPv6,
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
}
|
||||
p, err := newRsyncProvider(rc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.isMaster = isMaster
|
||||
provider = p
|
||||
case ProvTwoStageRsync:
|
||||
rc := twoStageRsyncConfig{
|
||||
name: mirror.Name,
|
||||
stage1Profile: mirror.Stage1Profile,
|
||||
upstreamURL: mirror.Upstream,
|
||||
rsyncCmd: mirror.Command,
|
||||
password: mirror.Password,
|
||||
excludeFile: mirror.ExcludeFile,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
useIPv6: mirror.UseIPv6,
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
}
|
||||
p, err := newTwoStageRsyncProvider(rc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.isMaster = isMaster
|
||||
provider = p
|
||||
default:
|
||||
panic(errors.New("Invalid mirror provider"))
|
||||
|
||||
}
|
||||
|
||||
provider.AddHook(newLogLimiter(provider))
|
||||
|
||||
// Add Cgroup Hook
|
||||
if w.cfg.Cgroup.Enable {
|
||||
provider.AddHook(
|
||||
newCgroupHook(provider, w.cfg.Cgroup.BasePath, w.cfg.Cgroup.Group),
|
||||
)
|
||||
}
|
||||
|
||||
// ExecOnSuccess hook
|
||||
if mirror.ExecOnSuccess != "" {
|
||||
h, err := newExecPostHook(provider, execOnSuccess, mirror.ExecOnSuccess)
|
||||
if err != nil {
|
||||
logger.Errorf("Error initializing mirror %s: %s", mirror.Name, err.Error())
|
||||
panic(err)
|
||||
}
|
||||
provider.AddHook(h)
|
||||
}
|
||||
// ExecOnFailure hook
|
||||
if mirror.ExecOnFailure != "" {
|
||||
h, err := newExecPostHook(provider, execOnFailure, mirror.ExecOnFailure)
|
||||
if err != nil {
|
||||
logger.Errorf("Error initializing mirror %s: %s", mirror.Name, err.Error())
|
||||
panic(err)
|
||||
}
|
||||
provider.AddHook(h)
|
||||
}
|
||||
|
||||
w.providers[provider.Name()] = provider
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Worker) initJobs() {
|
||||
w.initProviders()
|
||||
|
||||
for name, provider := range w.providers {
|
||||
w.jobs[name] = newMirrorJob(provider)
|
||||
}
|
||||
}
|
||||
|
||||
// Ctrl server receives commands from the manager
|
||||
func (w *Worker) makeHTTPServer() {
|
||||
s := gin.New()
|
||||
s.Use(gin.Recovery())
|
||||
|
||||
s.POST("/", func(c *gin.Context) {
|
||||
var cmd WorkerCmd
|
||||
|
||||
if err := c.BindJSON(&cmd); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"msg": "Invalid request"})
|
||||
return
|
||||
}
|
||||
job, ok := w.jobs[cmd.MirrorID]
|
||||
if !ok {
|
||||
c.JSON(http.StatusNotFound, gin.H{"msg": fmt.Sprintf("Mirror ``%s'' not found", cmd.MirrorID)})
|
||||
return
|
||||
}
|
||||
logger.Noticef("Received command: %v", cmd)
|
||||
// if the job is disabled, start its goroutine first
|
||||
switch cmd.Cmd {
|
||||
case CmdStart, CmdRestart:
|
||||
if job.State() == stateDisabled {
|
||||
go job.Run(w.managerChan, w.semaphore)
|
||||
}
|
||||
}
|
||||
switch cmd.Cmd {
|
||||
case CmdStart:
|
||||
job.ctrlChan <- jobStart
|
||||
case CmdRestart:
|
||||
job.ctrlChan <- jobRestart
|
||||
case CmdStop:
|
||||
// if job is disabled, no goroutine would be there
|
||||
// receiving this signal
|
||||
w.schedule.Remove(job.Name())
|
||||
if job.State() != stateDisabled {
|
||||
job.ctrlChan <- jobStop
|
||||
}
|
||||
case CmdDisable:
|
||||
w.schedule.Remove(job.Name())
|
||||
if job.State() != stateDisabled {
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
}
|
||||
case CmdPing:
|
||||
job.ctrlChan <- jobStart
|
||||
default:
|
||||
c.JSON(http.StatusNotAcceptable, gin.H{"msg": "Invalid Command"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"msg": "OK"})
|
||||
})
|
||||
w.httpEngine = s
|
||||
}
|
||||
|
||||
func (w *Worker) runHTTPServer() {
|
||||
addr := fmt.Sprintf("%s:%d", w.cfg.Server.Addr, w.cfg.Server.Port)
|
||||
|
||||
httpServer := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: w.httpEngine,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
if w.cfg.Server.SSLCert == "" && w.cfg.Server.SSLKey == "" {
|
||||
if err := httpServer.ListenAndServe(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
if err := httpServer.ListenAndServeTLS(w.cfg.Server.SSLCert, w.cfg.Server.SSLKey); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run runs the worker forever
|
||||
func (w *Worker) Run() {
|
||||
w.registorWorker()
|
||||
go w.runHTTPServer()
|
||||
w.runSchedule()
|
||||
}
|
||||
|
||||
func (w *Worker) runSchedule() {
|
||||
mirrorList := w.fetchJobStatus()
|
||||
unset := make(map[string]bool)
|
||||
for name := range w.jobs {
|
||||
unset[name] = true
|
||||
}
|
||||
// Fetch mirror list stored in the manager
|
||||
// put it on the scheduled time
|
||||
// if it's disabled, ignore it
|
||||
for _, m := range mirrorList {
|
||||
if job, ok := w.jobs[m.Name]; ok {
|
||||
delete(unset, m.Name)
|
||||
switch m.Status {
|
||||
case Disabled:
|
||||
job.SetState(stateDisabled)
|
||||
continue
|
||||
case Paused:
|
||||
job.SetState(statePaused)
|
||||
go job.Run(w.managerChan, w.semaphore)
|
||||
continue
|
||||
default:
|
||||
job.SetState(stateReady)
|
||||
go job.Run(w.managerChan, w.semaphore)
|
||||
stime := m.LastUpdate.Add(job.provider.Interval())
|
||||
logger.Debugf("Scheduling job %s @%s", job.Name(), stime.Format("2006-01-02 15:04:05"))
|
||||
w.schedule.AddJob(stime, job)
|
||||
}
|
||||
}
|
||||
}
|
||||
// some new jobs may be added
|
||||
// which do not exist in the
|
||||
// manager's mirror list
|
||||
for name := range unset {
|
||||
job := w.jobs[name]
|
||||
job.SetState(stateReady)
|
||||
go job.Run(w.managerChan, w.semaphore)
|
||||
w.schedule.AddJob(time.Now(), job)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case jobMsg := <-w.managerChan:
|
||||
// got status update from job
|
||||
job := w.jobs[jobMsg.name]
|
||||
if job.State() != stateReady {
|
||||
logger.Infof("Job %s state is not ready, skip adding new schedule", jobMsg.name)
|
||||
continue
|
||||
}
|
||||
|
||||
// syncing status is only meaningful when job
|
||||
// is running. If it's paused or disabled
|
||||
// a sync failure signal would be emitted
|
||||
// which needs to be ignored
|
||||
w.updateStatus(jobMsg)
|
||||
|
||||
// only successful or the final failure msg
|
||||
// can trigger scheduling
|
||||
if jobMsg.schedule {
|
||||
schedTime := time.Now().Add(job.provider.Interval())
|
||||
logger.Noticef(
|
||||
"Next scheduled time for %s: %s",
|
||||
job.Name(),
|
||||
schedTime.Format("2006-01-02 15:04:05"),
|
||||
)
|
||||
w.schedule.AddJob(schedTime, job)
|
||||
}
|
||||
|
||||
case <-time.After(5 * time.Second):
|
||||
// check schedule every 5 seconds
|
||||
if job := w.schedule.Pop(); job != nil {
|
||||
job.ctrlChan <- jobStart
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Name returns worker name
|
||||
func (w *Worker) Name() string {
|
||||
return w.cfg.Global.Name
|
||||
}
|
||||
|
||||
// URL returns the URL of the worker's HTTP server
|
||||
func (w *Worker) URL() string {
|
||||
proto := "https"
|
||||
if w.cfg.Server.SSLCert == "" && w.cfg.Server.SSLKey == "" {
|
||||
proto = "http"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s://%s:%d/", proto, w.cfg.Server.Hostname, w.cfg.Server.Port)
|
||||
}
|
||||
|
||||
func (w *Worker) registorWorker() {
|
||||
url := fmt.Sprintf(
|
||||
"%s/workers",
|
||||
w.cfg.Manager.APIBase,
|
||||
)
|
||||
|
||||
msg := WorkerStatus{
|
||||
ID: w.Name(),
|
||||
URL: w.URL(),
|
||||
}
|
||||
|
||||
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
|
||||
logger.Errorf("Failed to register worker")
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Worker) updateStatus(jobMsg jobMessage) {
|
||||
url := fmt.Sprintf(
|
||||
"%s/workers/%s/jobs/%s",
|
||||
w.cfg.Manager.APIBase,
|
||||
w.Name(),
|
||||
jobMsg.name,
|
||||
)
|
||||
p := w.providers[jobMsg.name]
|
||||
smsg := MirrorStatus{
|
||||
Name: jobMsg.name,
|
||||
Worker: w.cfg.Global.Name,
|
||||
IsMaster: p.IsMaster(),
|
||||
Status: jobMsg.status,
|
||||
Upstream: p.Upstream(),
|
||||
Size: "unknown",
|
||||
ErrorMsg: jobMsg.msg,
|
||||
}
|
||||
|
||||
if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
|
||||
logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Worker) fetchJobStatus() []MirrorStatus {
|
||||
var mirrorList []MirrorStatus
|
||||
|
||||
url := fmt.Sprintf(
|
||||
"%s/workers/%s/jobs",
|
||||
w.cfg.Manager.APIBase,
|
||||
w.Name(),
|
||||
)
|
||||
|
||||
if _, err := GetJSON(url, &mirrorList, w.httpClient); err != nil {
|
||||
logger.Errorf("Failed to fetch job status: %s", err.Error())
|
||||
}
|
||||
|
||||
return mirrorList
|
||||
}
|