Compare commits


No commits in common. "master" and "v0.2.0a3" have entirely different histories.

69 changed files with 1184 additions and 6616 deletions


@@ -1,38 +0,0 @@
name: release
on:
push:
tags:
- 'v*'
workflow_dispatch:
jobs:
build:
name: Build
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '^1.23'
id: go
- name: Build
run: |
TAG=$(git describe --tags)
for i in linux-amd64 linux-arm64 linux-riscv64 linux-loong64; do
make ARCH=$i all
tar -cz --numeric-owner --owner root --group root -f tunasync-${TAG}-$i-bin.tar.gz -C build-$i tunasync tunasynctl
done
- name: Create Release
uses: softprops/action-gh-release@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
tag_name: ${{ github.ref_name }}
name: Release ${{ github.ref_name }}
prerelease: false
files: |
tunasync-*.tar.gz


@@ -1,247 +0,0 @@
name: tunasync
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
workflow_dispatch:
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '^1.23'
id: go
- name: Get dependencies
run: |
go get -v -t -d ./cmd/tunasync
go get -v -t -d ./cmd/tunasynctl
- name: Build
run: |
make tunasync
make tunasynctl
- name: Keep artifacts
uses: actions/upload-artifact@v4
with:
name: tunasync-bin
path: build-linux-amd64/
test:
name: Test
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
steps:
- name: Setup test dependencies
run: |
sudo apt-get update
sudo apt-get install -y cgroup-tools
docker pull alpine:3.8
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '^1.22'
id: go
- name: Run Unit tests.
run: |
go install github.com/wadey/gocovmerge@latest
sudo systemd-run --service-type=oneshot --uid="$(id --user)" --pipe --wait \
--property=Delegate=yes --setenv=USECURCGROUP=1 \
--setenv=TERM=xterm-256color --same-dir \
make test
- name: Run Additional Unit tests.
run: |
make build-test-worker
sudo mkdir /sys/fs/cgroup/tunasync
sudo ./worker.test -test.v=true -test.coverprofile profile2.gcov -test.run TestCgroup
sudo rmdir /sys/fs/cgroup/tunasync
touch /tmp/dummy_exec
chmod +x /tmp/dummy_exec
run_test_reexec (){
case="$1"
shift
argv0="$1"
shift
(TESTREEXEC="$case" TERM=xterm-256color exec -a "$argv0" ./worker.test -test.v=true -test.coverprofile "profile5_$case.gcov" -test.run TestReexec -- "$@")
}
run_test_reexec 1 tunasync-exec __dummy__
run_test_reexec 2 tunasync-exec /tmp/dummy_exec
run_test_reexec 3 tunasync-exec /tmp/dummy_exec 3< <(echo -n "abrt")
run_test_reexec 4 tunasync-exec /tmp/dummy_exec 3< <(echo -n "cont")
run_test_reexec 5 tunasync-exec2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: network=host
- name: Cache Docker layers
uses: actions/cache@v4
if: github.event_name == 'push'
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Cache Docker layers
uses: actions/cache@v4
if: github.event_name == 'pull_request'
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-
${{ runner.os }}-buildx-
- name: Cache Docker layers
if: github.event_name != 'push' && github.event_name != 'pull_request'
run: |
echo "I do not know how to setup cache"
exit -1
- name: Prepare cache directory
run: |
mkdir -p /tmp/.buildx-cache
- name: Build Docker image for uml rootfs
uses: docker/build-push-action@v6
with:
context: .umlrootfs
file: .umlrootfs/Dockerfile
push: true
tags: localhost:5000/umlrootfs
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Fetch and install uml package
run: |
sudo apt-get update
sudo apt-get install -y debian-archive-keyring
sudo ln -sf /usr/share/keyrings/debian-archive-keyring.gpg /etc/apt/trusted.gpg.d/
echo "deb http://deb.debian.org/debian bullseye main" | sudo tee /etc/apt/sources.list.d/bullseye.list
sudo apt-get update
apt-get download user-mode-linux/bullseye
sudo rm /etc/apt/sources.list.d/bullseye.list
sudo apt-get update
sudo mv user-mode-linux_*.deb /tmp/uml.deb
sudo apt-get install --no-install-recommends -y /tmp/uml.deb
sudo rm /tmp/uml.deb
sudo apt-get install --no-install-recommends -y rsh-redone-client
- name: Prepare uml environment
run: |
docker container create --name umlrootfs localhost:5000/umlrootfs
sudo mkdir -p umlrootfs
docker container export umlrootfs | sudo tar -xv -C umlrootfs
docker container rm umlrootfs
sudo cp -a --target-directory=umlrootfs/lib/ /usr/lib/uml/modules
/bin/echo -e "127.0.0.1 localhost\n254.255.255.1 host" | sudo tee umlrootfs/etc/hosts
sudo ip tuntap add dev umltap mode tap
sudo ip addr add 254.255.255.1/24 dev umltap
sudo ip link set umltap up
- name: Start Uml
run: |
start_uml () {
sudo bash -c 'linux root=/dev/root rootflags=/ rw rootfstype=hostfs mem=2G eth0=tuntap,umltap hostfs="$PWD/umlrootfs" con1=pts systemd.unified_cgroup_hierarchy=0 & pid=$!; echo "UMLINUX_PID=$pid" >> '"$GITHUB_ENV"
}
( start_uml )
started=0
for i in $(seq 1 60); do
if ping -c 1 -w 1 254.255.255.2; then
started=1
break
fi
done
if [ "$started" != "1" ]; then
echo "Failed to wait Umlinux online"
exit 1
fi
- name: Prepare Uml Environment
run: |
CUSER="$(id --user --name)"
CUID="$(id --user)"
CGID="$(id --group)"
sudo chroot umlrootfs bash --noprofile --norc -eo pipefail << EOF
groupadd --gid "${CGID?}" "${CUSER?}"
useradd --create-home --home-dir "/home/${CUSER}" --gid "${CGID?}" \
--uid "${CUID?}" --shell "\$(which bash)" "${CUSER?}"
EOF
ln ./worker.test "umlrootfs/home/${CUSER}/worker.test"
- name: Run Tests in Cgroupv1
run: |
CUSER="$(id --user --name)"
sudo rsh 254.255.255.2 bash --noprofile --norc -eo pipefail << EOF
exec 2>&1
cd "/home/${CUSER}"
lssubsys -am
cgcreate -a "$CUSER" -t "$CUSER" -g cpu:tunasync
cgcreate -a "$CUSER" -t "$CUSER" -g memory:tunasync
TERM=xterm-256color ./worker.test -test.v=true -test.coverprofile \
profile3.gcov -test.run TestCgroup
cgexec -g "*:/" bash -c "echo 0 > /sys/fs/cgroup/systemd/tasks; exec sudo -u $CUSER env USECURCGROUP=1 TERM=xterm-256color cgexec -g cpu,memory:tunasync ./worker.test -test.v=true -test.coverprofile profile4.gcov -test.run TestCgroup"
EOF
- name: Stop Uml
run: |
sudo rsh 254.255.255.2 systemctl poweroff
sleep 10
if [ -e "/proc/$UMLINUX_PID" ]; then
sleep 10
if [ -e "/proc/$UMLINUX_PID" ]; then
sudo kill -TERM "$UMLINUX_PID" || true
sleep 1
fi
fi
if [ -e "/proc/$UMLINUX_PID" ]; then
sleep 10
if [ -e "/proc/$UMLINUX_PID" ]; then
sudo kill -KILL "$UMLINUX_PID" || true
sleep 1
fi
fi
- name: Combine coverage files
run: |
CUSER="$(id --user --name)"
"${HOME}/go/bin/gocovmerge" profile.gcov profile2.gcov \
"umlrootfs/home/${CUSER}/profile3.gcov" \
"umlrootfs/home/${CUSER}/profile4.gcov" \
profile5_*.gcov > merged.gcov
# remove cmdline tools from coverage statistics
grep -v "cmd/.*\.go" merged.gcov > profile-all.gcov
- name: Convert coverage to lcov
uses: jandelgado/gcov2lcov-action@v1
with:
infile: profile-all.gcov
outfile: coverage.lcov
- name: Coveralls
uses: coverallsapp/github-action@v2
with:
github-token: ${{ secrets.github_token }}
path-to-lcov: coverage.lcov

.gitignore

@@ -1,4 +1 @@
/build
/build-*
worker.test
profile*

.testandcover.bash

@@ -0,0 +1,30 @@
#!/bin/bash
function die() {
echo $*
exit 1
}
export GOPATH=`pwd`:$GOPATH
make travis
# Initialize profile.cov
echo "mode: count" > profile.cov
# Initialize error tracking
ERROR=""
# Test each package and append coverage profile info to profile.cov
for pkg in `cat .testpackages.txt`
do
go test -v -covermode=count -coverprofile=profile_tmp.cov $pkg || ERROR="Error testing $pkg"
[ -f profile_tmp.cov ] && {
tail -n +2 profile_tmp.cov >> profile.cov || die "Unable to append coverage for $pkg"
}
done
if [ ! -z "$ERROR" ]
then
die "Encountered error, last error was: $ERROR"
fi

.travis.yml

@@ -0,0 +1,43 @@
sudo: required
language: go
go:
- 1.6
before_install:
- sudo apt-get install cgroup-bin
- go get github.com/smartystreets/goconvey
- go get golang.org/x/tools/cmd/cover
- go get -v github.com/mattn/goveralls
os:
- linux
services:
- docker
before_script:
- lssubsys -am
- sudo cgcreate -a $USER -t $USER -g cpu:tunasync
- sudo cgcreate -a $USER -t $USER -g memory:tunasync
- docker pull alpine
script:
- ./.testandcover.bash
after_success:
- goveralls -coverprofile=profile.cov -service=travis-ci
before_deploy: "echo 'ready to deploy?'"
deploy:
provider: releases
file:
- "build/tunasync-linux-bin.tar.gz"
api_key:
secure: "F9kaVaR1mxEh2+EL9Nm8GZmbVY98pXCJA0LGDNrq1C2vU61AUNOeX6yI1mMklHNZPLBqoFDvGN1M5HnJ+xWCFH+KnJgLD2GVIAcAxFNpcNWQe8XKE5heklNsIQNQfuh/rJKM6YzeDB9G5RN4Y76iL4WIAXhNnMm48W6jLnWhf70="
skip_cleanup: true
overwrite: true
on:
tags: true
all_branches: true


@@ -1,8 +0,0 @@
FROM debian:bullseye
RUN apt-get update && apt-get install -y systemd rsh-redone-server ifupdown sudo kmod cgroup-tools systemd-sysv
RUN echo "host" > /root/.rhosts && \
chmod 600 /root/.rhosts && \
/bin/echo -e "auto eth0\niface eth0 inet static\naddress 254.255.255.2/24" > /etc/network/interfaces.d/eth0 && \
sed -i '/pam_securetty/d' /etc/pam.d/rlogin && \
cp /usr/share/systemd/tmp.mount /etc/systemd/system && \
systemctl enable tmp.mount

.vscode/settings.json

@@ -1,13 +0,0 @@
{
"cSpell.words": [
"Btrfs",
"Debugf",
"Infof",
"Noticef",
"Warningf",
"cgroup",
"mergo",
"tmpl",
"zpool"
]
}


@@ -1,28 +1,21 @@
LDFLAGS="-X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`"
ARCH ?= linux-amd64
ARCH_LIST = $(subst -, ,$(ARCH))
GOOS = $(word 1, $(ARCH_LIST))
GOARCH = $(word 2, $(ARCH_LIST))
BUILDBIN = tunasync tunasynctl
all: $(BUILDBIN)
all: get tunasync tunasynctl
build-$(ARCH):
mkdir -p $@
travis: get tunasync tunasynctl travis-package
$(BUILDBIN): % : build-$(ARCH) build-$(ARCH)/%
get:
go get ./cmd/tunasync
go get ./cmd/tunasynctl
$(BUILDBIN:%=build-$(ARCH)/%) : build-$(ARCH)/% : cmd/%
GOOS=$(GOOS) GOARCH=$(GOARCH) go get ./$<
GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -o $@ -ldflags ${LDFLAGS} github.com/tuna/tunasync/$<
build:
mkdir -p build
test:
go test -v -covermode=count -coverprofile=profile.gcov ./...
tunasync: build
go build -o build/tunasync -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasync
build-test-worker:
CGO_ENABLED=0 go test -c -covermode=count github.com/tuna/tunasync/worker
tunasynctl: build
go build -o build/tunasynctl -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasynctl
clean:
rm -rf build-$(ARCH)
.PHONY: all test $(BUILDBIN) build-test-worker clean
travis-package: tunasync tunasynctl
tar zcf build/tunasync-linux-bin.tar.gz -C build tunasync tunasynctl

README.md

@@ -1,7 +1,8 @@
# tunasync
tunasync
========
![Build Status](https://github.com/tuna/tunasync/workflows/tunasync/badge.svg)
[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=master)](https://coveralls.io/github/tuna/tunasync?branch=master)
[![Build Status](https://travis-ci.org/tuna/tunasync.svg?branch=dev)](https://travis-ci.org/tuna/tunasync)
[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=dev)](https://coveralls.io/github/tuna/tunasync?branch=dev)
[![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/)
![GPLv3](https://img.shields.io/badge/license-GPLv3-blue.svg)
@@ -11,14 +12,14 @@
## Download
Pre-built binary for Linux x86_64 and ARM64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
Pre-built binary for Linux x86_64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
## Design
```text
```
# Architecture
- Manager: Central instance for status and job management
- Manager: Centural instance on status and job management
- Worker: Runs mirror jobs
+------------+ +---+ +---+
@@ -39,25 +40,84 @@ Pre-built binary for Linux x86_64 and ARM64 is available at [Github releases](ht
# Job Run Process
PreSyncing Syncing Success
+-----------+ +----------+ +-----------+ +-------------+ +--------------+
| pre-job +--+->| pre-exec +--->| job run +--->| post-exec +-+-->| post-success |
+-----------+ ^ +----------+ +-----------+ +-------------+ | +--------------+
| |
| +-----------------+ | Failed
+----------------+ post-fail |<---------------+
+-----------------+
PreSyncing Syncing Success
+-----------+ +-----------+ +-------------+ +--------------+
| pre-job +--+->| job run +--->| post-exec +-+-->| post-success |
+-----------+ ^ +-----------+ +-------------+ | +--------------+
| |
| +-----------------+ | Failed
+------+ post-fail |<---------+
+-----------------+
```
## Generate Self-Signed Certificate
First, create the root CA:
```
openssl genrsa -out rootCA.key 2048
openssl req -x509 -new -nodes -key rootCA.key -days 365 -out rootCA.crt
```
Create the host key:
```
openssl genrsa -out host.key 2048
```
Now create the CSR. Before that, write a `req.cnf`:
```
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
[req_distinguished_name]
countryName = Country Name (2 letter code)
countryName_default = CN
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = BJ
localityName = Locality Name (eg, city)
localityName_default = Beijing
organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_default = TUNA
commonName = Common Name (server FQDN or domain name)
commonName_default = <server_FQDN>
commonName_max = 64
[v3_req]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = <server_FQDN_1>
DNS.2 = <server_FQDN_2>
```
Substitute `<server_FQDN>` with your server's FQDN, then run
```
openssl req -new -key host.key -out host.csr -config req.cnf
```
Finally, generate and sign the host cert with the root CA:
```
openssl x509 -req -in host.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out host.crt -days 365 -extensions v3_req -extfile req.cnf
```
## Building
Go version: 1.22
Setup GOPATH like [this](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
```shell
# for native arch
> make all
# for other arch
> make ARCH=linux-arm64 all
Then:
```
go get -d github.com/tuna/tunasync/cmd/tunasync
cd $GOPATH/src/github.com/tuna/tunasync
make
```
Binaries are in `build-$ARCH/`, e.g., `build-linux-amd64/`.
If you have multiple `GOPATH`s, replace the `$GOPATH` with your first one.


@@ -9,10 +9,9 @@ import (
"time"
"github.com/gin-gonic/gin"
"github.com/moby/sys/reexec"
"github.com/pkg/profile"
"github.com/urfave/cli"
"gopkg.in/op/go-logging.v1"
"gopkg.in/urfave/cli.v1"
tunasync "github.com/tuna/tunasync/internal"
"github.com/tuna/tunasync/manager"
@@ -40,7 +39,7 @@ func startManager(c *cli.Context) error {
m := manager.GetTUNASyncManager(cfg)
if m == nil {
logger.Errorf("Error intializing TUNA sync manager.")
logger.Errorf("Error intializing TUNA sync worker.")
os.Exit(1)
}
@@ -61,7 +60,7 @@ func startWorker(c *cli.Context) error {
os.Exit(1)
}
w := worker.NewTUNASyncWorker(cfg)
w := worker.GetTUNASyncWorker(cfg)
if w == nil {
logger.Errorf("Error intializing TUNA sync worker.")
os.Exit(1)
@@ -110,10 +109,6 @@ func startWorker(c *cli.Context) error {
func main() {
if reexec.Init() {
return
}
cli.VersionPrinter = func(c *cli.Context) {
var builddate string
if buildstamp == "" {


@@ -3,17 +3,16 @@ package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"text/template"
"time"
"github.com/BurntSushi/toml"
"github.com/urfave/cli"
"gopkg.in/op/go-logging.v1"
"gopkg.in/urfave/cli.v1"
tunasync "github.com/tuna/tunasync/internal"
)
@@ -33,7 +32,7 @@ const (
userCfgFile = "$HOME/.config/tunasync/ctl.conf" // user-specific conf
)
var logger = logging.MustGetLogger("tunasynctl")
var logger = logging.MustGetLogger("tunasynctl-cmd")
var baseURL string
var client *http.Client
@@ -42,7 +41,7 @@ func initializeWrapper(handler cli.ActionFunc) cli.ActionFunc {
return func(c *cli.Context) error {
err := initialize(c)
if err != nil {
return cli.NewExitError(err.Error(), 1)
return cli.NewExitError("", 1)
}
return handler(c)
}
@@ -56,9 +55,8 @@ type config struct {
func loadConfig(cfgFile string, cfg *config) error {
if cfgFile != "" {
logger.Infof("Loading config: %s", cfgFile)
if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
// logger.Errorf(err.Error())
logger.Errorf(err.Error())
return err
}
}
@@ -68,7 +66,7 @@ func loadConfig(cfgFile string, cfg *config) error {
func initialize(c *cli.Context) error {
// init logger
tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), false)
tunasync.InitLogger(c.Bool("verbose"), c.Bool("verbose"), false)
cfg := new(config)
@@ -78,23 +76,14 @@ func initialize(c *cli.Context) error {
// find config file and load config
if _, err := os.Stat(systemCfgFile); err == nil {
err = loadConfig(systemCfgFile, cfg)
if err != nil {
return err
}
loadConfig(systemCfgFile, cfg)
}
logger.Debug("user config file: %s", os.ExpandEnv(userCfgFile))
fmt.Println(os.ExpandEnv(userCfgFile))
if _, err := os.Stat(os.ExpandEnv(userCfgFile)); err == nil {
err = loadConfig(os.ExpandEnv(userCfgFile), cfg)
if err != nil {
return err
}
loadConfig(os.ExpandEnv(userCfgFile), cfg)
}
if c.String("config") != "" {
err := loadConfig(c.String("config"), cfg)
if err != nil {
return err
}
loadConfig(c.String("config"), cfg)
}
// override config using the command-line arguments
@@ -122,8 +111,8 @@ func initialize(c *cli.Context) error {
var err error
client, err = tunasync.CreateHTTPClient(cfg.CACert)
if err != nil {
err = fmt.Errorf("error initializing HTTP client: %s", err.Error())
// logger.Error(err.Error())
err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
logger.Error(err.Error())
return err
}
@@ -146,14 +135,14 @@ func listWorkers(c *cli.Context) error {
err.Error()),
1)
}
fmt.Println(string(b))
fmt.Print(string(b))
return nil
}
func listJobs(c *cli.Context) error {
var genericJobs interface{}
// FIXME: there should be an API on manager server side that return MirrorStatus list to tunasynctl
var jobs []tunasync.MirrorStatus
if c.Bool("all") {
var jobs []tunasync.WebMirrorStatus
_, err := tunasync.GetJSON(baseURL+listJobsPath, &jobs, client)
if err != nil {
return cli.NewExitError(
@@ -161,33 +150,8 @@ func listJobs(c *cli.Context) error {
"of all jobs from manager server: %s", err.Error()),
1)
}
if statusStr := c.String("status"); statusStr != "" {
filteredJobs := make([]tunasync.WebMirrorStatus, 0, len(jobs))
var statuses []tunasync.SyncStatus
for _, s := range strings.Split(statusStr, ",") {
var status tunasync.SyncStatus
err = status.UnmarshalJSON([]byte("\"" + strings.TrimSpace(s) + "\""))
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error parsing status: %s", err.Error()),
1)
}
statuses = append(statuses, status)
}
for _, job := range jobs {
for _, s := range statuses {
if job.Status == s {
filteredJobs = append(filteredJobs, job)
break
}
}
}
genericJobs = filteredJobs
} else {
genericJobs = jobs
}
} else {
var jobs []tunasync.MirrorStatus
args := c.Args()
if len(args) == 0 {
return cli.NewExitError(
@@ -201,72 +165,31 @@ func listJobs(c *cli.Context) error {
_, err := tunasync.GetJSON(fmt.Sprintf("%s/workers/%s/jobs",
baseURL, workerID), &workerJobs, client)
if err != nil {
logger.Infof("Failed to correctly get jobs"+
logger.Errorf("Filed to correctly get jobs"+
" for worker %s: %s", workerID, err.Error())
}
ans <- workerJobs
}(workerID)
}
for range args {
job := <-ans
if job == nil {
return cli.NewExitError(
fmt.Sprintf("Failed to correctly get information "+
"of jobs from at least one manager"),
1)
}
jobs = append(jobs, job...)
jobs = append(jobs, <-ans...)
}
genericJobs = jobs
}
if format := c.String("format"); format != "" {
tpl := template.New("")
_, err := tpl.Parse(format)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error parsing format template: %s", err.Error()),
1)
}
switch jobs := genericJobs.(type) {
case []tunasync.WebMirrorStatus:
for _, job := range jobs {
err = tpl.Execute(os.Stdout, job)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out information: %s", err.Error()),
1)
}
fmt.Println()
}
case []tunasync.MirrorStatus:
for _, job := range jobs {
err = tpl.Execute(os.Stdout, job)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out information: %s", err.Error()),
1)
}
fmt.Println()
}
}
} else {
b, err := json.MarshalIndent(genericJobs, "", " ")
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out information: %s", err.Error()),
1)
}
fmt.Println(string(b))
b, err := json.MarshalIndent(jobs, "", " ")
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out informations: %s", err.Error()),
1)
}
fmt.Printf(string(b))
return nil
}
func updateMirrorSize(c *cli.Context) error {
args := c.Args()
if len(args) != 2 {
return cli.NewExitError("Usage: tunasynctl set-size -w <worker-id> <mirror> <size>", 1)
return cli.NewExitError("Usage: tunasynctl -w <worker-id> <mirror> <size>", 1)
}
workerID := c.String("worker")
mirrorID := args.Get(0)
@@ -292,7 +215,7 @@ func updateMirrorSize(c *cli.Context) error {
1)
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
body, _ := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
return cli.NewExitError(
fmt.Sprintf("Manager failed to update mirror size: %s", body), 1,
@@ -310,53 +233,7 @@ func updateMirrorSize(c *cli.Context) error {
)
}
fmt.Printf("Successfully updated mirror size to %s\n", mirrorSize)
return nil
}
func removeWorker(c *cli.Context) error {
args := c.Args()
if len(args) != 0 {
return cli.NewExitError("Usage: tunasynctl -w <worker-id>", 1)
}
workerID := c.String("worker")
if len(workerID) == 0 {
return cli.NewExitError("Please specify the <worker-id>", 1)
}
url := fmt.Sprintf("%s/workers/%s", baseURL, workerID)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
logger.Panicf("Invalid HTTP Request: %s", err.Error())
}
resp, err := client.Do(req)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to send request to manager: %s", err.Error()), 1)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := io.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
1)
}
return cli.NewExitError(fmt.Sprintf("Failed to correctly send"+
" command: HTTP status code is not 200: %s", body),
1)
}
res := map[string]string{}
_ = json.NewDecoder(resp.Body).Decode(&res)
if res["message"] == "deleted" {
fmt.Println("Successfully removed the worker")
} else {
return cli.NewExitError("Failed to remove the worker", 1)
}
logger.Infof("Successfully updated mirror size to %s", mirrorSize)
return nil
}
@@ -376,7 +253,7 @@ func flushDisabledJobs(c *cli.Context) error {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -388,7 +265,7 @@ func flushDisabledJobs(c *cli.Context) error {
1)
}
fmt.Println("Successfully flushed disabled jobs")
logger.Info("Successfully flushed disabled jobs")
return nil
}
@@ -409,16 +286,11 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
"argument WORKER", 1)
}
options := map[string]bool{}
if c.Bool("force") {
options["force"] = true
}
cmd := tunasync.ClientCmd{
Cmd: cmd,
MirrorID: mirrorID,
WorkerID: c.String("worker"),
Args: argsList,
Options: options,
}
resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client)
if err != nil {
@@ -430,7 +302,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -441,7 +313,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
" command: HTTP status code is not 200: %s", body),
1)
}
fmt.Println("Successfully send the command")
logger.Info("Succesfully send command")
return nil
}
@@ -449,11 +321,6 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
return func(c *cli.Context) error {
if c.String("worker") == "" {
return cli.NewExitError("Please specify the worker with -w <worker-id>", 1)
}
cmd := tunasync.ClientCmd{
Cmd: cmd,
WorkerID: c.String("worker"),
@@ -468,7 +335,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -479,7 +346,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
" command: HTTP status code is not 200: %s", body),
1)
}
fmt.Println("Successfully send the command")
logger.Info("Succesfully send command")
return nil
}
@@ -536,10 +403,6 @@ func main() {
Name: "verbose, v",
Usage: "Enable verbosely logging",
},
cli.BoolFlag{
Name: "debug",
Usage: "Enable debugging logging",
},
}
cmdFlags := []cli.Flag{
cli.StringFlag{
@@ -548,11 +411,6 @@ func main() {
},
}
forceStartFlag := cli.BoolFlag{
Name: "force, f",
Usage: "Override the concurrent limit",
}
app.Commands = []cli.Command{
{
Name: "list",
@@ -563,14 +421,6 @@ func main() {
Name: "all, a",
Usage: "List all jobs of all workers",
},
cli.StringFlag{
Name: "status, s",
Usage: "Filter output based on status provided",
},
cli.StringFlag{
Name: "format, f",
Usage: "Pretty-print containers using a Go template",
},
}...),
Action: initializeWrapper(listJobs),
},
@@ -586,18 +436,6 @@ func main() {
Flags: commonFlags,
Action: initializeWrapper(listWorkers),
},
{
Name: "rm-worker",
Usage: "Remove a worker",
Flags: append(
commonFlags,
cli.StringFlag{
Name: "worker, w",
Usage: "worker-id of the worker to be removed",
},
),
Action: initializeWrapper(removeWorker),
},
{
Name: "set-size",
Usage: "Set mirror size",
@@ -613,7 +451,7 @@ func main() {
{
Name: "start",
Usage: "Start a job",
Flags: append(append(commonFlags, cmdFlags...), forceStartFlag),
Flags: append(commonFlags, cmdFlags...),
Action: initializeWrapper(cmdJob(tunasync.CmdStart)),
},
{


@@ -1,141 +0,0 @@
# About Tunasync and cgroup
Optionally, tunasync can be integrated with cgroups for better control and tracking of the processes started by mirror jobs. Limiting the memory usage of a mirror job also requires cgroup support.
## How are cgroups utilized in tunasync?
If cgroups are enabled globally, all mirror jobs, except those running in docker containers, run in separate cgroups. If `mem_limit` is specified, it is applied to the cgroup. For jobs running in docker containers, `mem_limit` is applied via the `docker run` command.
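As an illustration (not part of the original document), a minimal worker configuration enabling cgroups with a per-job memory limit might look like this; the mirror entry is hypothetical, and the key name `memory_limit` follows the option list below and the example workers.conf elsewhere in this diff:
``` toml
[cgroup]
enable = true
group = "tunasync"

[[mirrors]]
name = "example"                      # hypothetical mirror
provider = "rsync"
upstream = "rsync://example.org/pub/"
# applied to the job's cgroup; passed to `docker run` for docker jobs
memory_limit = "256M"
```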
## Tl;dr: What's the recommended configuration?
### If you are using v1 (legacy, hybrid) cgroup hierarchy:
`tunasync-worker.service`:
```
[Unit]
Description = TUNA mirrors sync worker
After=network.target
[Service]
Type=simple
User=tunasync
PermissionsStartOnly=true
ExecStartPre=/usr/bin/cgcreate -t tunasync -a tunasync -g memory:tunasync
ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
ExecReload=/bin/kill -SIGHUP $MAINPID
ExecStopPost=/usr/bin/cgdelete memory:tunasync
[Install]
WantedBy=multi-user.target
```
`worker.conf`:
``` toml
[cgroup]
enable = true
group = "tunasync"
```
### If you are using v2 (unified) cgroup hierarchy:
`tunasync-worker.service`:
```
[Unit]
Description = TUNA mirrors sync worker
After=network.target
[Service]
Type=simple
User=tunasync
ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
ExecReload=/bin/kill -SIGHUP $MAINPID
Delegate=yes
[Install]
WantedBy=multi-user.target
```
`worker.conf`:
``` toml
[cgroup]
enable = true
```
## Two versions of cgroups
For various reasons, there are two versions of cgroups in the kernel, which are incompatible with each other. Most current Linux distributions adopt systemd as the init system, which relies on cgroups and is responsible for initializing them. As a result, the cgroup version in use is mainly decided by systemd; since version 243, the "unified" cgroup hierarchy setup has been the default.
Tunasync automatically detects which version of cgroup is in use and enables the corresponding operating interface, but because systemd behaves slightly differently in the two cases, different tunasync configurations are recommended.
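For reference, a quick way to tell which hierarchy a host is using (an illustrative check, not from the original document) is the filesystem type mounted at `/sys/fs/cgroup`:
``` bash
# prints "cgroup2fs" on a v2 (unified) system, "tmpfs" on v1 (legacy/hybrid)
stat -fc %T /sys/fs/cgroup
```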
## Two modes of group name discovery
Two modes of group name discovery are provided: implicit mode and manual mode.
### Manual Mode
In this mode, the administrator should 1. manually create an empty cgroup (for the cgroup v2 unified hierarchy) or empty cgroups with the same name in certain controller subsystems (for the cgroup v1 hybrid hierarchy); 2. change the ownership of the cgroups to the user running the tunasync worker; and 3. specify the path in the configuration. On start, tunasync automatically detects which controllers are enabled (for v1) or enables the needed controllers (for v2).
Example 1:
``` bash
# suppose we have cgroup v1
sudo mkdir -p /sys/fs/cgroup/cpu/test/tunasync
sudo mkdir -p /sys/fs/cgroup/memory/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/cpu/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/memory/test/tunasync
# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
tunasync worker -c /path/to/worker.conf
```
In the above scenario, tunasync will detect the enabled subsystem controllers are cpu and memory. When running a mirror job named `foo`, sub-cgroups will be created in both `/sys/fs/cgroup/cpu/test/tunasync/foo` and `/sys/fs/cgroup/memory/test/tunasync/foo`.
Example 2 (not recommended):
``` bash
# suppose we have cgroup v2
sudo mkdir -p /sys/fs/cgroup/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/test/tunasync
# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
tunasync worker -c /path/to/worker.conf
```
In the above scenario, tunasync will directly use the cgroup `/sys/fs/cgroup/test/tunasync`. In most cases, because tunasync is not running as root, it won't have permission to move the processes it starts into the correct cgroup: cgroup v2 requires the operating process to also have write permission on the common ancestor of the source and target groups when moving processes between groups. This example therefore only demonstrates the functionality, and you should avoid this setup.
### Implicit mode
In this mode, tunasync uses the cgroup it is currently running in and creates sub-groups for jobs in that group. Tunasync first creates a sub-group named `__worker` in that group and moves itself into it, to avoid leaving processes in non-leaf cgroups.
Typically, this mode is combined with the `Delegate=yes` option in tunasync's systemd service configuration, which permits the running process to self-manage the cgroup the service is running in. For security reasons, systemd does not grant write permission on the service's current cgroup to a non-root user under the v1 (legacy, hybrid) cgroup hierarchy, so this mode is more useful with the v2 hierarchy.
## Configuration
``` toml
[cgroup]
enable = true
base_path = "/sys/fs/cgroup"
group = "tunasync"
subsystem = "memory"
```
The above options are defined as follows:
* `enable`: `Bool`, specifies whether cgroup is enabled. When cgroup is disabled, `memory_limit` for non-docker jobs will be ignored, and the following options are also ignored.
* `group`: `String`, specifies the cgroup tunasync will use. When not provided, or provided as an empty string, cgroup discovery works in "implicit mode", i.e. sub-cgroups are created in the cgroup tunasync is currently running in. Otherwise, cgroup discovery works in "manual mode", where tunasync creates sub-cgroups in the specified cgroup.
* `base_path`: `String`, ignored. It originally specified the mount path of the cgroup filesystem; for everything to work, the cgroup filesystem is now required to be mounted at its default path (`/sys/fs/cgroup`).
* `subsystem`: `String`, ignored. It originally specified which cgroup v1 controller was enabled; it is now meaningless since discovery is automatic.
## References:
* <https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html>
* <https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html>
* <https://systemd.io/CGROUP_DELEGATION/>
* <https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Delegate=>


@@ -1,5 +1,4 @@
# tunasync Getting Started Guide
date: 2016-10-31 00:50:00
[tunasync](https://github.com/tuna/tunasync) is the mirroring solution currently used by the [TUNA mirrors of Tsinghua University](https://mirrors.tuna.tsinghua.edu.cn).
@@ -8,42 +7,42 @@ date: 2016-10-31 00:50:00
In this example:
- only the [elvish](https://elv.sh) project is mirrored
- HTTPS is disabled
- cgroup support is disabled
- only the [elvish](https://elvish.io) project is mirrored
- HTTPS is disabled
- cgroup support is disabled
## Getting tunasync
### Binary package
Download `tunasync-linux-amd64-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
Download `tunasync-linux-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
### Building from source
```shell
> make
```
$ make
```
## Configuration
```shell
> mkdir ~/tunasync_demo
> mkdir /tmp/tunasync
```
$ mkdir ~/tunasync_demo
$ mkdir /tmp/tunasync
```
Edit `~/tunasync_demo/worker.conf`:
`~/tunasync_demo/worker.conf`:
```conf
```
[global]
name = "test_worker"
log_dir = "/tmp/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/tmp/tunasync"
concurrent = 10
interval = 120
interval = 1
[manager]
api_base = "http://localhost:12345"
token = ""
token = "some_token"
ca_cert = ""
[cgroup]
@@ -61,13 +60,13 @@ ssl_key = ""
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elv.sh/elvish/"
upstream = "rsync://rsync.elvish.io/elvish/"
use_ipv6 = false
```
Edit `~/tunasync_demo/manager.conf`:
`~/tunasync_demo/manager.conf`:
```conf
```
debug = false
[server]
@@ -82,52 +81,22 @@ db_file = "/tmp/tunasync/manager.db"
ca_cert = ""
```
Besides bolt, the badger, leveldb, and redis database backends are also supported. For badger and leveldb, only `db_type` needs to be changed. To use redis as the backend, set `db_type` to `redis` and set `db_file` to the address of the redis server: `redis://user:password@host:port/db_number`
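For example, a redis-backed manager would change only these two lines of the config above (address and credentials are placeholders):
```conf
db_type = "redis"
db_file = "redis://user:password@127.0.0.1:6379/0"
```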
### Running
```shell
> tunasync manager --config ~/tunasync_demo/manager.conf
> tunasync worker --config ~/tunasync_demo/worker.conf
```
$ tunasync manager --config ~/tunasync_demo/manager.conf
$ tunasync worker --config ~/tunasync_demo/worker.conf
```
In this example, the mirrored data is under `/tmp/tunasync/`
### Control
Check the sync status:
```shell
> tunasynctl list -p 12345 --all
```
tunasynctl also supports config files. The config file can be placed at `/etc/tunasync/ctl.conf` or `~/.config/tunasync/ctl.conf`; values in the latter override those in the former.
The config file contents are:
```conf
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```
### Security
The worker and manager communicate over http(s). If both run on the same machine, there is no need for HTTPS: leave the manager's `ssl_key` and `ssl_cert` empty, leave the worker's `ca_cert` empty, and start `api_base` with `http://`.
For encrypted communication, the manager needs to specify `ssl_key` and `ssl_cert`, the worker needs to specify `ca_cert`, and `api_base` should start with `https://`.
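A sketch of the corresponding settings (hostname and paths are placeholders, reusing the self-signed certificate names from the README):
```conf
# manager.conf
ssl_cert = "/etc/tunasync/host.crt"
ssl_key = "/etc/tunasync/host.key"

# worker.conf, [manager] section
api_base = "https://manager.example.com:12345"
ca_cert = "/etc/tunasync/rootCA.crt"
```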
In this example, the mirrored data is under `/tmp/tunasync/`
## Going further
See:
```shell
> tunasync manager --help
> tunasync worker --help
```
$ tunasync manager --help
$ tunasync worker --help
```
Take a look at the log directory.
Some example worker config files: [workers.conf](workers.conf).
Operations you may find useful: [tips.md](tips.md).


@@ -1,93 +0,0 @@
## Removing a mirror from a worker
First, make sure the tunasynctl config file `~/.config/tunasync/ctl.conf` is in place:
```toml
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```
Then:
```shell
$ tunasynctl disable -w <worker_id> <mirror_name>
$ tunasynctl flush
```
## Hot-reloading `worker.conf`
```shell
$ tunasynctl reload -w <worker_id>
```
e.g. to remove the `elvish` mirror from `test_worker`:
1. Delete the directory storing the mirror files
2. Delete the corresponding `mirror` section in `worker.conf`
3. Then run:
```shell
$ tunasynctl reload -w test_worker
$ tunasynctl disable -w test_worker elvish
$ tunasynctl flush
```
4. (Optional) Finally, delete the logs in the log directory
## Removing a worker
```shell
$ tunasynctl rm-worker -w <worker_id>
```
e.g.
```shell
$ tunasynctl rm-worker -w test_worker
```
## Updating a mirror's size
```shell
$ tunasynctl set-size -w <worker_id> <mirror_name> <size>
```
The trailing `<size>` argument is set by the operator, or generated by a scheduled script.
Since `du -s` is time-consuming, the mirror size can instead be read directly from the rsync log file.
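A sketch of such a script (the log path, the `latest` log name, and the reliance on rsync's `--stats` line "Total file size" are assumptions; adapt them to the actual log layout):
```shell
#!/bin/bash
# Hypothetical helper: read the size from the newest rsync log and report it.
LOG=/srv/tunasync/log/tunasync/test_worker/elvish/latest
SIZE=$(grep -oP 'Total file size: \K[0-9.,]+[KMGTP]?' "$LOG")
tunasynctl set-size -w test_worker elvish "$SIZE"
```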
## Btrfs filesystem snapshots
If the mirror files are stored on a partition formatted with Btrfs, the snapshot feature provided by Btrfs can be enabled. For each mirror, tunasync updates its snapshot after every successful sync.
Add the following configuration to `worker.conf` to enable Btrfs snapshots:
```toml
[btrfs_snapshot]
enable = true
snapshot_path = "/path/to/snapshot/directory"
```
Here `snapshot_path` is the directory holding the snapshots. If the snapshot is what gets published, each sync is atomic from the mirror users' point of view, so users never receive files that are still in an "intermediate" (not fully synced) state.
A snapshot path can also be specified for an individual mirror in its `[[mirrors]]` section, e.g.:
```toml
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elv.sh/elvish/"
interval = 1440
snapshot_path = "/data/publish/elvish"
```
**Tip:**
If the user running tunasync has no root privileges, make sure that user has write and execute permissions on both the mirror directory and the snapshot directory, and mount the corresponding Btrfs partition with the [`user_subvol_rm_allowed` option](https://btrfs.wiki.kernel.org/index.php/Manpage/btrfs(5)#MOUNT_OPTIONS).
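For example, such a mount could look like the following (device and mount point are placeholders):
```shell
# allow non-root users to delete their own subvolumes/snapshots
$ mount -o user_subvol_rm_allowed /dev/sdb1 /srv/tunasync
```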


@@ -1,822 +0,0 @@
# /home/scripts in this example points to https://github.com/tuna/tunasync-scripts/
[global]
name = "mirror_worker"
log_dir = "/srv/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/srv/tunasync"
concurrent = 10
interval = 120
# ensure the exec user is added to the `docker` group
[docker]
# the `command` provider can use docker_image and docker_volumes
enable = true
[manager]
api_base = "http://localhost:12345"
token = "some_token"
ca_cert = ""
[cgroup]
enable = false
base_path = "/sys/fs/cgroup"
group = "tunasync"
[server]
hostname = "localhost"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = ""
ssl_key = ""
[[mirrors]]
name = "adobe-fonts"
interval = 1440
provider = "command"
upstream = "https://github.com/adobe-fonts"
command = "/home/scripts/adobe-fonts.sh"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "AdoptOpenJDK"
interval = 5760
provider = "command"
command = "/home/scripts/adoptopenjdk.py"
upstream = "https://adoptopenjdk.jfrog.io/adoptopenjdk"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "alpine"
provider = "rsync"
upstream = "rsync://rsync.alpinelinux.org/alpine/"
memory_limit = "256M"
[[mirrors]]
name = "anaconda"
provider = "command"
upstream = "https://repo.continuum.io/"
command = "/home/scripts/anaconda.py --delete"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "apache"
provider = "rsync"
upstream = "rsync://rsync.apache.org/apache-dist/"
use_ipv4 = true
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "armbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/apt/"
memory_limit = "256M"
[[mirrors]]
name = "armbian-releases"
provider = "rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/dl/"
memory_limit = "256M"
[[mirrors]]
name = "bananian"
provider = "command"
upstream = "https://dl.bananian.org/"
command = "/home/scripts/lftp.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "bioconductor"
provider = "rsync"
upstream = "master.bioconductor.org:./"
rsync_options = [ "--rsh=ssh -i /root/id_rsa -o PasswordAuthentication=no -l sync" ]
exclude_file = "/etc/excludes/bioconductor.txt"
memory_limit = "256M"
[[mirrors]]
name = "blender"
provider = "rsync"
upstream = "rsync://mirrors.dotsrc.org/blender/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/blender.txt"
interval = 1440
memory_limit = "256M"
[[mirrors]]
name = "chakra"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/packages/"
memory_limit = "256M"
[[mirrors]]
name = "chakra-releases"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/releases/"
memory_limit = "256M"
[[mirrors]]
name = "chef"
interval = 1440
provider = "command"
upstream = "https://packages.chef.io/repos"
command = "/home/scripts/chef.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "clickhouse"
interval = 2880
provider = "rsync"
upstream = "rsync://repo.yandex.ru/yandexrepo/clickhouse/"
exclude_file = "/etc/excludes/clickhouse.txt"
memory_limit = "256M"
[[mirrors]]
name = "clojars"
provider = "command"
upstream = "s3://clojars-repo-production/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.dualstack.us-east-2.amazonaws.com"
#TUNASYNC_S3_ENDPOINT = "https://s3.us-east-2.amazonaws.com"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "CPAN"
provider = "rsync"
upstream = "rsync://cpan-rsync.perl.org/CPAN/"
memory_limit = "256M"
[[mirrors]]
name = "CRAN"
provider = "rsync"
upstream = "rsync://cran.r-project.org/CRAN/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "CTAN"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/CTAN/"
memory_limit = "256M"
[[mirrors]]
name = "dart-pub"
provider = "command"
upstream = "https://pub.dev/api"
command = "/home/scripts/pub.sh"
interval = 30
docker_image = "tunathu/pub-mirror:latest"
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub"
[[mirrors]]
name = "debian"
provider = "command"
upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
command = "/home/scripts/debian.sh sync:archive:debian"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/ftpsync"
docker_volumes = [
"/etc/misc/ftpsync-debian.conf:/ftpsync/etc/ftpsync-debian.conf:ro",
"/log/ftpsync:/home/log/tunasync/ftpsync",
]
[mirrors.env]
FTPSYNC_LOG_DIR = "/home/log/tunasync/ftpsync"
[[mirrors]]
name = "docker-ce"
provider = "command"
upstream = "https://download.docker.com/"
command = "timeout 3h /home/scripts/docker-ce.py --workers 10 --fast-skip"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ELK"
interval = 1440
provider = "command"
upstream = "https://packages.elastic.co"
command = "/home/scripts/ELK.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
WGET_OPTIONS = "-6"
[[mirrors]]
name = "elasticstack"
interval = 1440
provider = "command"
upstream = "https://artifacts.elastic.co/"
command = "/home/scripts/elastic.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "erlang-solutions"
interval = 1440
provider = "command"
upstream = "https://packages.erlang-solutions.com"
command = "/home/scripts/erlang.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "flutter"
interval = 1440
provider = "command"
upstream = "https://storage.googleapis.com/flutter_infra/"
command = "/home/scripts/flutter.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "github-release"
provider = "command"
upstream = "https://api.github.com/repos/"
command = "/home/scripts/github-release.py --workers 5"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
GITHUB_TOKEN = "xxxxx"
[[mirrors]]
name = "gitlab-ce"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ce/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-ee"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ee/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-runner"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/runner/gitlab-runner"
command = "/home/scripts/gitlab-runner.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "grafana"
interval = 1440
provider = "command"
upstream = "https://packages.grafana.com/oss"
command = "/home/scripts/grafana.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "hackage"
provider = "command"
command = "/home/scripts/hackage.sh"
upstream = "https://hackage.haskell.org/"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew-bottles"
provider = "command"
upstream = "https://homebrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment variables
[mirrors.env]
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "influxdata"
interval = 1440
provider = "command"
upstream = "https://repos.influxdata.com"
command = "/home/scripts/influxdata.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "kali"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ftp.nluug.nl/kali/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "kali-images"
provider = "rsync"
upstream = "rsync://ftp.nluug.nl/kali-images/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "KaOS"
provider = "rsync"
upstream = "rsync://kaosx.tk/kaos/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kernel"
provider = "rsync"
upstream = "rsync://rsync.kernel.org/pub/linux/kernel/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kicad"
provider = "command"
upstream = "s3://kicad-downloads/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.cern.ch"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "kodi"
provider = "rsync"
upstream = "rsync://mirror.yandex.ru/mirrors/xbmc/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
use_ipv6 = true
[[mirrors]]
name = "kubernetes"
interval = 2880
provider = "command"
upstream = "http://packages.cloud.google.com"
command = "/home/scripts/kubernetes.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "linuxbrew-bottles"
provider = "command"
upstream = "https://linuxbrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment variables
[mirrors.env]
RUN_LINUXBREW = "true"
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "linuxmint"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://mirrors.kernel.org/linuxmint-packages/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "lxc-images"
provider = "command"
upstream = "https://us.images.linuxcontainers.org/"
command = "/home/scripts/lxc-images.sh"
docker_image = "tunathu/tunasync-scripts:latest"
interval = 720
[[mirrors]]
name = "lyx"
provider = "command"
upstream = "ftp://ftp.lyx.org/pub/lyx/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "mongodb"
interval = 1440
provider = "command"
upstream = "https://repo.mongodb.org"
command = "/home/scripts/mongodb.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "msys2"
provider = "command"
upstream = "http://repo.msys2.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "mysql"
interval = 30
provider = "command"
upstream = "https://repo.mysql.com"
command = "/home/scripts/mysql.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
USE_IPV6 = "1"
[[mirrors]]
name = "nix"
interval = 1440
provider = "command"
upstream = "s3://nix-releases/nix/"
command = "/home/scripts/nix.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
MIRROR_BASE_URL = 'https://mirrors.tuna.tsinghua.edu.cn/nix/'
[[mirrors]]
name = "nix-channels"
interval = 300
provider = "command"
upstream = "https://nixos.org/channels"
command = "timeout 20h /home/scripts/nix-channels.py"
docker_image = "tunathu/nix-channels:latest"
docker_options = [
"--cpus", "20",
]
[[mirrors]]
name = "nodesource"
provider = "command"
upstream = "https://deb.nodesource.com/"
command = "/home/scripts/nodesource.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "openresty"
provider = "command"
upstream = "https://openresty.org/package/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "packagist"
provider = "command"
upstream = "http://packagist.org/"
command = "/home/scripts/packagist.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "proxmox"
interval = 1440
provider = "command"
upstream = "http://download.proxmox.com"
command = "/home/scripts/proxmox.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "pypi"
provider = "command"
upstream = "https://pypi.python.org/"
command = "/home/scripts/pypi.sh"
docker_image = "tunathu/bandersnatch:latest"
interval = 5
[[mirrors]]
name = "qt"
provider = "rsync"
upstream = "rsync://master.qt-project.org/qt-all/"
exclude_file = "/etc/excludes/qt.txt"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "raspberrypi"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://apt-repo.raspberrypi.org/archive/debian/"
memory_limit = "256M"
[[mirrors]]
name = "raspbian-images"
interval = 5760
provider = "command"
upstream = "https://downloads.raspberrypi.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x ^icons/$ -c --only-missing -v --no-perms"
[[mirrors]]
name = "raspbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.raspbian.org/archive/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "redhat"
provider = "rsync"
upstream = "rsync://ftp.redhat.com/redhat/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
exclude_file = "/etc/excludes/redhat.txt"
interval = 1440
[mirrors.env]
RSYNC_PROXY="127.0.0.1:8123"
[[mirrors]]
name = "remi"
interval = 1440
provider = "command"
upstream = "rsync://rpms.remirepo.net"
command = "/home/scripts/remi.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "repo-ck"
provider = "command"
upstream = "http://repo-ck.com"
command = "/home/scripts/repo-ck.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ros"
provider = "rsync"
upstream = "rsync://mirror.umd.edu/packages.ros.org/ros/"
memory_limit = "256M"
[[mirrors]]
name = "ros2"
interval = 1440
provider = "command"
upstream = "http://packages.ros.org/ros2"
command = "/home/scripts/ros2.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rubygems"
provider = "command"
upstream = "https://rubygems.org"
command = "/home/scripts/rubygems.sh"
docker_image = "tunathu/rubygems-mirror"
interval = 60
# set environment variables
[mirrors.env]
INIT = "0"
[[mirrors]]
name = "rudder"
interval = 2880
provider = "command"
upstream = "https://repository.rudder.io"
command = "/home/scripts/rudder.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rustup"
provider = "command"
upstream = "https://rustup.rs/"
command = "/home/scripts/rustup.sh"
interval = 1440
docker_image = "tunathu/rustup-mirror:latest"
docker_volumes = [
]
docker_options = [
]
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup"
[[mirrors]]
name = "saltstack"
interval = 1440 # required on http://repo.saltstack.com/#mirror
provider = "command"
upstream = "s3://s3/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.repo.saltstack.com"
TUNASYNC_AWS_OPTIONS = "--delete --exact-timestamps"
[[mirrors]]
name = "solus"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/solus/"
rsync_options = [ "--exclude", "/shannon", "--exclude", "/unstable" ]
memory_limit = "256M"
[[mirrors]]
name = "stackage"
provider = "command"
command = "/home/scripts/stackage.py"
upstream = "https://www.stackage.org/"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
GIT_COMMITTER_NAME = "TUNA mirrors"
GIT_COMMITTER_EMAIL = "mirrors@tuna.tsinghua.edu.cn"
[[mirrors]]
name = "steamos"
interval = 1440
provider = "command"
upstream = "http://repo.steampowered.com"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer --exclude icons/ "
[[mirrors]]
name = "termux"
interval = 1440
provider = "command"
upstream = "https://dl.bintray.com/termux/termux-packages-24/"
command = "/home/scripts/termux.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ubuntu"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.ubuntu.com/ubuntu/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "ubuntu-ports"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ports.ubuntu.com/ubuntu-ports/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/ubuntu-ports-exclude.txt"
memory_limit = "256M"
[[mirrors]]
name = "virtualbox"
interval = 1440
provider = "command"
upstream = "http://download.virtualbox.org/virtualbox"
command = "/home/scripts/virtualbox.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "winehq"
provider = "command"
upstream = "ftp://ftp.winehq.org/pub/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x wine-builds.old/ -x /\\..+"
[[mirrors]]
name = "zabbix"
provider = "rsync"
upstream = "rsync://repo.zabbix.com/mirror/"
rsync_options = [ "--delete-excluded", "--chmod=o+r,Do+x,Fa-x" ]
memory_limit = "256M"
[[mirrors]]
name = "AOSP"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://android.googlesource.com/mirror/manifest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "lineageOS"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://github.com/LineageOS/mirror"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "chromiumos"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/cros.sh"
upstream = "https://chromium.googlesource.com"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
USE_BITMAP_INDEX = "1"
CONCURRENT_JOBS = "20"
[[mirrors]]
name = "crates.io-index.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "https://github.com/rust-lang/crates.io-index.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "flutter-sdk.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/flutter/flutter.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gcc.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://gcc.gnu.org/git/gcc.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gentoo-portage.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/gentoo-mirror/gentoo.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "git-repo"
provider = "command"
command = "/home/tunasync-scripts/git-repo.sh"
upstream = "https://gerrit.googlesource.com/git-repo"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew"
provider = "command"
command = "/home/tunasync-scripts/homebrew.sh"
upstream = "https://github.com/Homebrew"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "CocoaPods"
provider = "command"
command = "/home/tunasync-scripts/cocoapods.sh"
upstream = "https://github.com/CocoaPods"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "pybombs"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/pybombs.sh"
upstream = "https://github.com/scateu/pybombs-mirror/"
docker_image = "tunathu/tunasync-scripts:latest"
docker_volumes = ["/home/pybombs-mirror:/opt/pybombs-mirror"]
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[mirrors.env]
PYBOMBS_MIRROR_SCRIPT_PATH = "/opt/pybombs-mirror"
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/pybombs"
[[mirrors]]
name = "llvm"
provider = "command"
command = "/home/tunasync-scripts/llvm.sh"
upstream = "https://git.llvm.org/git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
# vim: ft=toml
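
The size_pattern fields above are regular expressions run over each sync job's log; as the ExtractSizeFromLog helper later in this diff notes, the first capture group of the last match becomes the reported mirror size. A minimal standalone sketch (not part of the diff; the log content is hypothetical):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// size_pattern from the crates.io-index.git mirror above
	re := regexp.MustCompile(`size-pack: ([0-9\.]+[KMGTP])`)
	log := "counting objects...\nsize-pack: 2.1G\n" // hypothetical log output
	matches := re.FindAllStringSubmatch(log, -1)
	if len(matches) > 0 {
		// first capture group of the last occurrence
		fmt.Println(matches[len(matches)-1][1]) // prints "2.1G"
	}
}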

go.mod
View File

@ -1,89 +0,0 @@
module github.com/tuna/tunasync
go 1.23.0
toolchain go1.23.5
require (
github.com/BurntSushi/toml v1.4.0
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/boltdb/bolt v1.3.1
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe
github.com/containerd/cgroups/v3 v3.0.5
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6
github.com/dgraph-io/badger/v2 v2.2007.4
github.com/docker/go-units v0.5.0
github.com/gin-gonic/gin v1.10.0
github.com/go-redis/redis/v8 v8.11.5
github.com/imdario/mergo v0.3.16
github.com/moby/moby v28.0.1+incompatible
github.com/moby/sys/reexec v0.1.0
github.com/opencontainers/runtime-spec v1.2.1
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
github.com/smartystreets/goconvey v1.6.4
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli v1.22.16
golang.org/x/sys v0.30.0
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
)
replace github.com/boltdb/bolt v1.3.1 => go.etcd.io/bbolt v1.3.11
require (
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/bytedance/sonic v1.12.9 // indirect
github.com/bytedance/sonic/loader v0.2.3 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cilium/ebpf v0.17.3 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/dennwc/ioctl v1.0.0 // indirect
github.com/dgraph-io/ristretto v0.2.0 // indirect
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.5 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/gin-contrib/sse v1.0.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.25.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gomodule/redigo v1.8.2 // indirect
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartystreets/assertions v1.2.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
golang.org/x/arch v0.14.0 // indirect
golang.org/x/crypto v0.35.0 // indirect
golang.org/x/net v0.35.0 // indirect
golang.org/x/text v0.22.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum
View File

@ -1,343 +0,0 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bytedance/sonic v1.12.7 h1:CQU8pxOy9HToxhndH0Kx/S1qU/CuS9GnKYrGioDcU1Q=
github.com/bytedance/sonic v1.12.7/go.mod h1:tnbal4mxOMju17EGfknm2XyYcpyCnIROYOEYuemj13I=
github.com/bytedance/sonic v1.12.9 h1:Od1BvK55NnewtGaJsTDeAOSnLVO2BTSLOe0+ooKokmQ=
github.com/bytedance/sonic v1.12.9/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.2 h1:jxAJuN9fOot/cyz5Q6dUuMJF5OqQ6+5GfA8FjjQ0R4o=
github.com/bytedance/sonic/loader v0.2.2/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0=
github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.1 h1:G8mzU81R2JA1nE5/8SRubzqvBMmAmri2VL8BIZPWvV0=
github.com/cilium/ebpf v0.17.1/go.mod h1:vay2FaYSmIlv3r8dNACd4mW/OCaZLJKJOo+IHBvCIO8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe h1:69JI97HlzP+PH5Mi1thcGlDoBr6PS2Oe+l3mNmAkbs4=
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6 h1:fV+JlCY0cCJh3l0jfE7iB3ZmrdfJSgfcjdrCQhPokGg=
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA=
github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg=
github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0=
github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o=
github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.25.0 h1:5Dh7cjvzR7BRZadnsVOzPhWsrwUr0nmsZJxEAnFLNO8=
github.com/go-playground/validator/v10 v10.25.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/moby v27.4.1+incompatible h1:z6detzbcLRt7U+w4ovHV+8oYpJfpHKTmUbFWPG6cudA=
github.com/moby/moby v27.4.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/moby v28.0.1+incompatible h1:10ejBTwFhM3/9p6pSaKrLyXnx7QzzCmCYHAedOp67cQ=
github.com/moby/moby v28.0.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/sys/reexec v0.1.0 h1:RrBi8e0EBTLEgfruBOFcxtElzRGTEUkeIFaVXgU7wok=
github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
golang.org/x/arch v0.13.0 h1:KCkqVVV1kGg0X87TFysjCJ8MxtZEIU4Ja/yXGeoECdA=
golang.org/x/arch v0.13.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=

View File

@ -24,12 +24,9 @@ func InitLogger(verbose, debug, withSystemd bool) {
if debug {
logging.SetLevel(logging.DEBUG, "tunasync")
logging.SetLevel(logging.DEBUG, "tunasynctl")
} else if verbose {
logging.SetLevel(logging.INFO, "tunasync")
logging.SetLevel(logging.INFO, "tunasynctl")
} else {
logging.SetLevel(logging.NOTICE, "tunasync")
logging.SetLevel(logging.NOTICE, "tunasynctl")
}
}

View File

@ -1,8 +1,6 @@
package internal
import (
"bytes"
"encoding/json"
"fmt"
"time"
)
@ -10,36 +8,23 @@ import (
// A MirrorStatus represents a message sent when
// a worker has finished syncing
type MirrorStatus struct {
Name string `json:"name"`
Worker string `json:"worker"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate time.Time `json:"last_update"`
LastStarted time.Time `json:"last_started"`
LastEnded time.Time `json:"last_ended"`
Scheduled time.Time `json:"next_schedule"`
Upstream string `json:"upstream"`
Size string `json:"size"`
ErrorMsg string `json:"error_msg"`
Name string `json:"name"`
Worker string `json:"worker"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate time.Time `json:"last_update"`
Upstream string `json:"upstream"`
Size string `json:"size"`
ErrorMsg string `json:"error_msg"`
}
// A WorkerStatus is the information struct that describes
// a worker, and is sent from the manager to clients.
type WorkerStatus struct {
ID string `json:"id"`
URL string `json:"url"` // worker url
Token string `json:"token"` // session token
LastOnline time.Time `json:"last_online"` // last seen
LastRegister time.Time `json:"last_register"` // last register time
}
type MirrorSchedules struct {
Schedules []MirrorSchedule `json:"schedules"`
}
type MirrorSchedule struct {
MirrorName string `json:"name"`
NextSchedule time.Time `json:"next_schedule"`
ID string `json:"id"`
URL string `json:"url"` // worker url
Token string `json:"token"` // session token
LastOnline time.Time `json:"last_online"` // last seen
}
// A CmdVerb is an action to a job or worker
@ -62,54 +47,29 @@ const (
)
func (c CmdVerb) String() string {
mapping := map[CmdVerb]string{
CmdStart: "start",
CmdStop: "stop",
CmdDisable: "disable",
CmdRestart: "restart",
CmdPing: "ping",
CmdReload: "reload",
switch c {
case CmdStart:
return "start"
case CmdStop:
return "stop"
case CmdDisable:
return "disable"
case CmdRestart:
return "restart"
case CmdPing:
return "ping"
case CmdReload:
return "reload"
}
return mapping[c]
}
func NewCmdVerbFromString(s string) CmdVerb {
mapping := map[string]CmdVerb{
"start": CmdStart,
"stop": CmdStop,
"disable": CmdDisable,
"restart": CmdRestart,
"ping": CmdPing,
"reload": CmdReload,
}
return mapping[s]
}
// Marshal and Unmarshal for CmdVerb
func (s CmdVerb) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString(`"`)
buffer.WriteString(s.String())
buffer.WriteString(`"`)
return buffer.Bytes(), nil
}
func (s *CmdVerb) UnmarshalJSON(b []byte) error {
var j string
err := json.Unmarshal(b, &j)
if err != nil {
return err
}
*s = NewCmdVerbFromString(j)
return nil
return "unknown"
}
// A WorkerCmd is the command message sent from the
// manager to a worker
type WorkerCmd struct {
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
Args []string `json:"args"`
Options map[string]bool `json:"options"`
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
Args []string `json:"args"`
}
func (c WorkerCmd) String() string {
@ -122,9 +82,8 @@ func (c WorkerCmd) String() string {
// A ClientCmd is the command message sent from a client
// to the manager
type ClientCmd struct {
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
WorkerID string `json:"worker_id"`
Args []string `json:"args"`
Options map[string]bool `json:"options"`
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
WorkerID string `json:"worker_id"`
Args []string `json:"args"`
}
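
Both String implementations appear interleaved above (the map-based one with custom JSON marshaling on the master side, the switch-based one on the v0.2.0a3 side). A minimal standalone sketch of the master-side behavior — CmdVerb serializes as its string form, not as a number; the uint8 underlying type is an assumption, since the type declaration is outside this hunk:

package main

import (
	"encoding/json"
	"fmt"
)

type CmdVerb uint8 // underlying type assumed, not shown in the hunk

const (
	CmdStart CmdVerb = iota
	CmdStop
)

func (c CmdVerb) String() string {
	return map[CmdVerb]string{CmdStart: "start", CmdStop: "stop"}[c]
}

// MarshalJSON emits the verb as a JSON string, as in the diff above.
func (c CmdVerb) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.String())
}

type WorkerCmd struct {
	Cmd      CmdVerb         `json:"cmd"`
	MirrorID string          `json:"mirror_id"`
	Args     []string        `json:"args"`
	Options  map[string]bool `json:"options"`
}

func main() {
	b, _ := json.Marshal(WorkerCmd{Cmd: CmdStop, MirrorID: "ubuntu"})
	fmt.Println(string(b))
	// {"cmd":"stop","mirror_id":"ubuntu","args":null,"options":null}
}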

View File

@ -1,72 +0,0 @@
package internal
import (
"encoding/json"
"strconv"
"time"
)
type textTime struct {
time.Time
}
func (t textTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
s := string(b)
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
*t = textTime{t2}
return err
}
type stampTime struct {
time.Time
}
func (t stampTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
ts, err := strconv.Atoi(string(b))
if err != nil {
return err
}
*t = stampTime{time.Unix(int64(ts), 0)}
return err
}
// WebMirrorStatus is the mirror status to be shown in the web page
type WebMirrorStatus struct {
Name string `json:"name"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
LastStarted textTime `json:"last_started"`
LastStartedTs stampTime `json:"last_started_ts"`
LastEnded textTime `json:"last_ended"`
LastEndedTs stampTime `json:"last_ended_ts"`
Scheduled textTime `json:"next_schedule"`
ScheduledTs stampTime `json:"next_schedule_ts"`
Upstream string `json:"upstream"`
Size string `json:"size"` // approximate size
}
func BuildWebMirrorStatus(m MirrorStatus) WebMirrorStatus {
return WebMirrorStatus{
Name: m.Name,
IsMaster: m.IsMaster,
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
LastStarted: textTime{m.LastStarted},
LastStartedTs: stampTime{m.LastStarted},
LastEnded: textTime{m.LastEnded},
LastEndedTs: stampTime{m.LastEnded},
Scheduled: textTime{m.Scheduled},
ScheduledTs: stampTime{m.Scheduled},
Upstream: m.Upstream,
Size: m.Size,
}
}
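
The two wrapper types give every timestamp both a human-readable and a Unix form in the JSON served to the web frontend. A minimal standalone sketch of the resulting output (the marshalers are duplicated from the diff above so the example runs on its own):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type textTime struct{ time.Time }

func (t textTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}

type stampTime struct{ time.Time }

func (t stampTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Unix())
}

func main() {
	ts := time.Date(2016, time.April, 16, 23, 8, 10, 0, time.UTC)
	out := struct {
		LastUpdate   textTime  `json:"last_update"`
		LastUpdateTs stampTime `json:"last_update_ts"`
	}{textTime{ts}, stampTime{ts}}
	b, _ := json.Marshal(out)
	fmt.Println(string(b))
	// {"last_update":"2016-04-16 23:08:10 +0000","last_update_ts":1460848090}
}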

View File

@ -1,97 +0,0 @@
package internal
import (
"encoding/json"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatus(t *testing.T) {
Convey("status json ser-de should work", t, func() {
tz := "Asia/Tokyo"
loc, err := time.LoadLocation(tz)
So(err, ShouldBeNil)
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
m := WebMirrorStatus{
Name: "tunalinux",
Status: Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
LastStarted: textTime{t},
LastStartedTs: stampTime{t},
LastEnded: textTime{t},
LastEndedTs: stampTime{t},
Scheduled: textTime{t},
ScheduledTs: stampTime{t},
Size: "5GB",
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
}
b, err := json.Marshal(m)
So(err, ShouldBeNil)
//fmt.Println(string(b))
var m2 WebMirrorStatus
err = json.Unmarshal(b, &m2)
So(err, ShouldBeNil)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
Convey("BuildWebMirrorStatus should work", t, func() {
m := MirrorStatus{
Name: "arch-sync3",
Worker: "testWorker",
IsMaster: true,
Status: Failed,
LastUpdate: time.Now().Add(-time.Minute * 30),
LastStarted: time.Now().Add(-time.Minute * 1),
LastEnded: time.Now(),
Scheduled: time.Now().Add(time.Minute * 5),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
}
var m2 WebMirrorStatus = BuildWebMirrorStatus(m)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
}

View File

@ -6,53 +6,26 @@ import (
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"regexp"
"time"
)
var rsyncExitValues = map[int]string{
0: "Success",
1: "Syntax or usage error",
2: "Protocol incompatibility",
3: "Errors selecting input/output files, dirs",
4: "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them; or an option was specified that is supported by the client and not by the server.",
5: "Error starting client-server protocol",
6: "Daemon unable to append to log-file",
10: "Error in socket I/O",
11: "Error in file I/O",
12: "Error in rsync protocol data stream",
13: "Errors with program diagnostics",
14: "Error in IPC code",
20: "Received SIGUSR1 or SIGINT",
21: "Some error returned by waitpid()",
22: "Error allocating core memory buffers",
23: "Partial transfer due to error",
24: "Partial transfer due to vanished source files",
25: "The --max-delete limit stopped deletions",
30: "Timeout in data send/receive",
35: "Timeout waiting for daemon connection",
}
// GetTLSConfig generates a tls.Config from the given CAFile
func GetTLSConfig(CAFile string) (*tls.Config, error) {
caCert, err := os.ReadFile(CAFile)
caCert, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
return nil, errors.New("failed to add CA to pool")
return nil, errors.New("Failed to add CA to pool")
}
tlsConfig := &tls.Config{
RootCAs: caCertPool,
}
// tlsConfig.BuildNameToCertificate()
tlsConfig.BuildNameToCertificate()
return tlsConfig, nil
}
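
A minimal sketch (not part of the diff) of how the returned *tls.Config is typically wired into an http.Client; the CA path is hypothetical and the helper is inlined so the example is self-contained:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"net/http"
	"os"
	"time"
)

// Same logic as GetTLSConfig above, inlined for a self-contained sketch.
func getTLSConfig(caFile string) (*tls.Config, error) {
	caCert, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caCert) {
		return nil, errors.New("failed to add CA to pool")
	}
	return &tls.Config{RootCAs: pool}, nil
}

func main() {
	tlsConfig, err := getTLSConfig("/etc/tunasync/ca.pem") // hypothetical path
	if err != nil {
		fmt.Println(err)
		return
	}
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
		Timeout:   5 * time.Second,
	}
	_ = client // use client.Get(...) against the manager's HTTPS API
}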
@ -105,52 +78,9 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
return resp, errors.New("HTTP status code is not 200")
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, err
}
return resp, json.Unmarshal(body, obj)
}
// FindAllSubmatchInFile calls re.FindAllSubmatch to find matches in the given file
func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
if fileName == "/dev/null" {
err = errors.New("invalid log file")
return
}
if content, err := os.ReadFile(fileName); err == nil {
matches = re.FindAllSubmatch(content, -1)
// fmt.Printf("FindAllSubmatchInFile: %q\n", matches)
}
return
}
// ExtractSizeFromLog uses a regexp to extract the size from log files
func ExtractSizeFromLog(logFile string, re *regexp.Regexp) string {
matches, _ := FindAllSubmatchInFile(logFile, re)
if len(matches) == 0 {
return ""
}
// return the first capture group of the last occurrence
return string(matches[len(matches)-1][1])
}
// ExtractSizeFromRsyncLog extracts the size from rsync logs
func ExtractSizeFromRsyncLog(logFile string) string {
// (?m) flag enables multi-line mode
re := regexp.MustCompile(`(?m)^Total file size: ([0-9\.]+[KMGTP]?) bytes`)
return ExtractSizeFromLog(logFile, re)
}
// TranslateRsyncErrorCode translates the exit code of rsync to a message
func TranslateRsyncErrorCode(cmdErr error) (exitCode int, msg string) {
if exiterr, ok := cmdErr.(*exec.ExitError); ok {
exitCode = exiterr.ExitCode()
strerr, valid := rsyncExitValues[exitCode]
if valid {
msg = fmt.Sprintf("rsync error: %s", strerr)
}
}
return
}
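
A minimal sketch (not part of the diff) of how TranslateRsyncErrorCode's lookup works in practice; it assumes rsync is installed, uses a placeholder upstream, and trims the exit-value table above to two entries:

package main

import (
	"fmt"
	"os/exec"
)

// rsyncExitValues as in the diff above, trimmed for brevity.
var rsyncExitValues = map[int]string{
	23: "Partial transfer due to error",
	24: "Partial transfer due to vanished source files",
}

func main() {
	// Placeholder upstream; this command is expected to fail.
	cmd := exec.Command("rsync", "-a", "rsync://example.invalid/", "/tmp/dst")
	if err := cmd.Run(); err != nil {
		// Only *exec.ExitError carries an exit code; other errors
		// (e.g. rsync not found) fall through silently here.
		if exitErr, ok := err.(*exec.ExitError); ok {
			code := exitErr.ExitCode()
			if msg, found := rsyncExitValues[code]; found {
				fmt.Printf("rsync exited %d: %s\n", code, msg)
			} else {
				fmt.Printf("rsync exited %d\n", code)
			}
		}
	}
}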

View File

@ -1,41 +0,0 @@
package internal
import (
"os"
"path/filepath"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func TestExtractSizeFromRsyncLog(t *testing.T) {
realLogContent := `
Number of files: 998,470 (reg: 925,484, dir: 58,892, link: 14,094)
Number of created files: 1,049 (reg: 1,049)
Number of deleted files: 1,277 (reg: 1,277)
Number of regular files transferred: 5,694
Total file size: 1.33T bytes
Total transferred file size: 2.86G bytes
Literal data: 780.62M bytes
Matched data: 2.08G bytes
File list size: 37.55M
File list generation time: 7.845 seconds
File list transfer time: 0.000 seconds
Total bytes sent: 7.55M
Total bytes received: 823.25M
sent 7.55M bytes received 823.25M bytes 5.11M bytes/sec
total size is 1.33T speedup is 1,604.11
`
Convey("Log parser should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
logFile := filepath.Join(tmpDir, "rs.log")
err = os.WriteFile(logFile, []byte(realLogContent), 0755)
So(err, ShouldBeNil)
res := ExtractSizeFromRsyncLog(logFile)
So(res, ShouldEqual, "1.33T")
})
}

View File

@ -1,4 +1,3 @@
package internal
// Version of the program
const Version string = "0.9.3"
const Version string = "0.2-dev"

View File

@ -2,7 +2,7 @@ package manager
import (
"github.com/BurntSushi/toml"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v1"
)
// A Config is the top-level toml-serializable config struct
@ -29,7 +29,6 @@ type FileConfig struct {
CACert string `toml:"ca_cert"`
}
// LoadConfig loads config from the specified file
func LoadConfig(cfgFile string, c *cli.Context) (*Config, error) {
cfg := new(Config)

View File

@ -2,13 +2,14 @@ package manager
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/BurntSushi/toml"
. "github.com/smartystreets/goconvey/convey"
"github.com/urfave/cli"
"gopkg.in/urfave/cli.v1"
)
func TestConfig(t *testing.T) {
@ -36,11 +37,11 @@ func TestConfig(t *testing.T) {
Convey("load Config should work", t, func() {
Convey("create config file & cli context", func() {
tmpfile, err := os.CreateTemp("", "tunasync")
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()

View File

@ -4,13 +4,8 @@ import (
"encoding/json"
"fmt"
"strings"
"time"
bolt "github.com/boltdb/bolt"
"github.com/dgraph-io/badger/v2"
"github.com/go-redis/redis/v8"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb"
"github.com/boltdb/bolt"
. "github.com/tuna/tunasync/internal"
)
@ -19,9 +14,7 @@ type dbAdapter interface {
Init() error
ListWorkers() ([]WorkerStatus, error)
GetWorker(workerID string) (WorkerStatus, error)
DeleteWorker(workerID string) error
CreateWorker(w WorkerStatus) (WorkerStatus, error)
RefreshWorker(workerID string) (WorkerStatus, error)
UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)
ListMirrorStatus(workerID string) ([]MirrorStatus, error)
@ -30,14 +23,21 @@ type dbAdapter interface {
Close() error
}
// interface for a kv database
type kvAdapter interface {
InitBucket(bucket string) error
Get(bucket string, key string) ([]byte, error)
GetAll(bucket string) (map[string][]byte, error)
Put(bucket string, key string, value []byte) error
Delete(bucket string, key string) error
Close() error
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
if dbType == "bolt" {
innerDB, err := bolt.Open(dbFile, 0600, nil)
if err != nil {
return nil, err
}
db := boltAdapter{
db: innerDB,
dbFile: dbFile,
}
err = db.Init()
return &db, err
}
// unsupported db-type
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
}
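
A minimal in-memory sketch (not part of the diff) of the kvAdapter interface introduced above; the real adapters back it with bolt, redis, badger, or leveldb:

package main

import "fmt"

// memKV is a toy kvAdapter; buckets must be created via InitBucket
// before use, mirroring the adapters in the diff.
type memKV struct {
	buckets map[string]map[string][]byte
}

func newMemKV() *memKV {
	return &memKV{buckets: map[string]map[string][]byte{}}
}

func (m *memKV) InitBucket(bucket string) error {
	if _, ok := m.buckets[bucket]; !ok {
		m.buckets[bucket] = map[string][]byte{}
	}
	return nil
}

func (m *memKV) Get(bucket string, key string) ([]byte, error) {
	return m.buckets[bucket][key], nil
}

func (m *memKV) GetAll(bucket string) (map[string][]byte, error) {
	return m.buckets[bucket], nil
}

func (m *memKV) Put(bucket string, key string, value []byte) error {
	m.buckets[bucket][key] = value
	return nil
}

func (m *memKV) Delete(bucket string, key string) error {
	delete(m.buckets[bucket], key)
	return nil
}

func (m *memKV) Close() error { return nil }

func main() {
	kv := newMemKV()
	kv.InitBucket("workers")
	kv.Put("workers", "w1", []byte(`{"id":"w1"}`))
	v, _ := kv.Get("workers", "w1")
	fmt.Println(string(v)) // {"id":"w1"}
}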
const (
@ -45,222 +45,153 @@ const (
_statusBucketKey = "mirror_status"
)
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
if dbType == "bolt" {
innerDB, err := bolt.Open(dbFile, 0600, &bolt.Options{
Timeout: 5 * time.Second,
})
if err != nil {
return nil, err
}
db := boltAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "redis" {
opt, err := redis.ParseURL(dbFile)
if err != nil {
return nil, fmt.Errorf("bad redis url: %s", err)
}
innerDB := redis.NewClient(opt)
db := redisAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "badger" {
innerDB, err := badger.Open(badger.DefaultOptions(dbFile))
if err != nil {
return nil, err
}
db := badgerAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "leveldb" {
innerDB, err := leveldb.OpenFile(dbFile, nil)
if err != nil {
return nil, err
}
db := leveldbAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
}
// unsupported db-type
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
type boltAdapter struct {
db *bolt.DB
dbFile string
}
// use the underlying kv database to store data
type kvDBAdapter struct {
db kvAdapter
}
func (b *kvDBAdapter) Init() error {
err := b.db.InitBucket(_workerBucketKey)
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
err = b.db.InitBucket(_statusBucketKey)
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
return err
}
func (b *kvDBAdapter) ListWorkers() (ws []WorkerStatus, err error) {
var workers map[string][]byte
workers, err = b.db.GetAll(_workerBucketKey)
var w WorkerStatus
for _, v := range workers {
jsonErr := json.Unmarshal(v, &w)
if jsonErr != nil {
err = errors.Wrap(err, jsonErr.Error())
continue
func (b *boltAdapter) Init() (err error) {
return b.db.Update(func(tx *bolt.Tx) error {
_, err = tx.CreateBucketIfNotExists([]byte(_workerBucketKey))
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
ws = append(ws, w)
}
_, err = tx.CreateBucketIfNotExists([]byte(_statusBucketKey))
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _statusBucketKey, err.Error())
}
return nil
})
}
func (b *boltAdapter) ListWorkers() (ws []WorkerStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
c := bucket.Cursor()
var w WorkerStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
jsonErr := json.Unmarshal(v, &w)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
continue
}
ws = append(ws, w)
}
return err
})
return
}
func (b *kvDBAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
var v []byte
v, _ = b.db.Get(_workerBucketKey, workerID)
if v == nil {
err = fmt.Errorf("invalid workerID %s", workerID)
} else {
err = json.Unmarshal(v, &w)
}
func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
v := bucket.Get([]byte(workerID))
if v == nil {
return fmt.Errorf("invalid workerID %s", workerID)
}
err := json.Unmarshal(v, &w)
return err
})
return
}
func (b *kvDBAdapter) DeleteWorker(workerID string) error {
v, _ := b.db.Get(_workerBucketKey, workerID)
if v == nil {
return fmt.Errorf("invalid workerID %s", workerID)
}
return b.db.Delete(_workerBucketKey, workerID)
}
func (b *kvDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
v, err := json.Marshal(w)
if err == nil {
err = b.db.Put(_workerBucketKey, w.ID, v)
}
func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
v, err := json.Marshal(w)
if err != nil {
return err
}
err = bucket.Put([]byte(w.ID), v)
return err
})
return w, err
}
func (b *kvDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
w, err = b.GetWorker(workerID)
if err == nil {
w.LastOnline = time.Now()
w, err = b.CreateWorker(w)
}
return w, err
}
func (b *kvDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
func (b *boltAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
id := mirrorID + "/" + workerID
v, err := json.Marshal(status)
if err == nil {
err = b.db.Put(_statusBucketKey, id, v)
}
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
v, err := json.Marshal(status)
err = bucket.Put([]byte(id), v)
return err
})
return status, err
}
func (b *kvDBAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
id := mirrorID + "/" + workerID
var v []byte
v, err = b.db.Get(_statusBucketKey, id)
if v == nil {
err = fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
} else if err == nil {
err = json.Unmarshal(v, &m)
}
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
v := bucket.Get([]byte(id))
if v == nil {
return fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
}
err := json.Unmarshal(v, &m)
return err
})
return
}
func (b *kvDBAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
var vals map[string][]byte
vals, err = b.db.GetAll(_statusBucketKey)
if err != nil {
return
}
func (b *boltAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
c := bucket.Cursor()
var m MirrorStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
if wID := strings.Split(string(k), "/")[1]; wID == workerID {
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
continue
}
ms = append(ms, m)
}
}
return err
})
return
}
for k, v := range vals {
if wID := strings.Split(k, "/")[1]; wID == workerID {
var m MirrorStatus
func (b *boltAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
c := bucket.Cursor()
var m MirrorStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = errors.Wrap(err, jsonErr.Error())
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
continue
}
ms = append(ms, m)
}
}
return err
})
return
}
func (b *kvDBAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
var vals map[string][]byte
vals, err = b.db.GetAll(_statusBucketKey)
if err != nil {
return
}
for _, v := range vals {
func (b *boltAdapter) FlushDisabledJobs() (err error) {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
c := bucket.Cursor()
var m MirrorStatus
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = errors.Wrap(err, jsonErr.Error())
continue
}
ms = append(ms, m)
}
return
}
func (b *kvDBAdapter) FlushDisabledJobs() (err error) {
var vals map[string][]byte
vals, err = b.db.GetAll(_statusBucketKey)
if err != nil {
return
}
for k, v := range vals {
var m MirrorStatus
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = errors.Wrap(err, jsonErr.Error())
continue
}
if m.Status == Disabled || len(m.Name) == 0 {
deleteErr := b.db.Delete(_statusBucketKey, k)
if deleteErr != nil {
err = errors.Wrap(err, deleteErr.Error())
for k, v := c.First(); k != nil; k, v = c.Next() {
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
continue
}
if m.Status == Disabled || len(m.Name) == 0 {
err = c.Delete()
}
}
}
return err
})
return
}
func (b *kvDBAdapter) Close() error {
func (b *boltAdapter) Close() error {
if b.db != nil {
	return b.db.Close()
}
return nil
}
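Taken together, makeDBAdapter is the single entry point for all four backends; the second argument is a filesystem path for bolt, badger and leveldb, but a connection URL for redis. A minimal sketch of the call for each backend (the paths and the helper function are illustrative, not taken from this diff):

// Sketch: opening the status database; dbFile doubles as a redis URL.
//   makeDBAdapter("bolt",    "/var/lib/tunasync/manager.db")
//   makeDBAdapter("badger",  "/var/lib/tunasync/badger")
//   makeDBAdapter("leveldb", "/var/lib/tunasync/leveldb")
//   makeDBAdapter("redis",   "redis://user:pass@localhost:6379/0")
func openStatusDB(dbType, dbFile string) (dbAdapter, error) {
	db, err := makeDBAdapter(dbType, dbFile)
	if err != nil {
		return nil, fmt.Errorf("open %s database %q: %w", dbType, dbFile, err)
	}
	return db, nil
}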


@ -1,67 +0,0 @@
package manager
import (
"github.com/dgraph-io/badger/v2"
)
// implement kv interface backed by badger
type badgerAdapter struct {
db *badger.DB
}
func (b *badgerAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *badgerAdapter) Get(bucket string, key string) (v []byte, err error) {
b.db.View(func(tx *badger.Txn) error {
var item *badger.Item
item, err = tx.Get([]byte(bucket + key))
if item != nil {
v, err = item.ValueCopy(nil)
}
return nil
})
return
}
func (b *badgerAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
b.db.View(func(tx *badger.Txn) error {
it := tx.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
prefix := []byte(bucket)
m = make(map[string][]byte)
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
k := string(item.Key())
actualKey := k[len(bucket):]
var v []byte
v, err = item.ValueCopy(nil)
m[actualKey] = v
}
return nil
})
return
}
func (b *badgerAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Update(func(tx *badger.Txn) error {
err := tx.Set([]byte(bucket+key), value)
return err
})
return err
}
func (b *badgerAdapter) Delete(bucket string, key string) error {
err := b.db.Update(func(tx *badger.Txn) error {
err := tx.Delete([]byte(bucket + key))
return err
})
return err
}
func (b *badgerAdapter) Close() error {
return b.db.Close()
}
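Badger has no native buckets, so the adapter above emulates them by concatenating bucket and key into one flat keyspace ("workers" + "w1" becomes the literal key "workersw1"), and GetAll recovers the per-bucket view with a prefix scan. A runnable sketch of that layout against an in-memory instance (values illustrative):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"
)

func main() {
	// In-memory DB, v2 API; illustrates the bucket-as-prefix layout.
	db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Put("workers", "w1", ...) above boils down to one flat key.
	_ = db.Update(func(tx *badger.Txn) error {
		return tx.Set([]byte("workers"+"w1"), []byte(`{"id":"w1"}`))
	})
	_ = db.View(func(tx *badger.Txn) error {
		item, err := tx.Get([]byte("workersw1"))
		if err != nil {
			return err
		}
		v, _ := item.ValueCopy(nil)
		fmt.Printf("workersw1 -> %s\n", v)
		return nil
	})
}

A consequence of this scheme is that no bucket name may be a prefix of another, or GetAll would leak keys across buckets.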


@ -1,66 +0,0 @@
package manager
import (
"fmt"
bolt "github.com/boltdb/bolt"
)
// implement kv interface backed by boltdb
type boltAdapter struct {
db *bolt.DB
}
func (b *boltAdapter) InitBucket(bucket string) (err error) {
return b.db.Update(func(tx *bolt.Tx) error {
_, err = tx.CreateBucketIfNotExists([]byte(bucket))
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
return nil
})
}
func (b *boltAdapter) Get(bucket string, key string) (v []byte, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
v = bucket.Get([]byte(key))
return nil
})
return
}
func (b *boltAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
c := bucket.Cursor()
m = make(map[string][]byte)
for k, v := c.First(); k != nil; k, v = c.Next() {
m[string(k)] = v
}
return nil
})
return
}
func (b *boltAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
err := bucket.Put([]byte(key), value)
return err
})
return err
}
func (b *boltAdapter) Delete(bucket string, key string) error {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
err := bucket.Delete([]byte(key))
return err
})
return err
}
func (b *boltAdapter) Close() error {
return b.db.Close()
}


@ -1,51 +0,0 @@
package manager
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
)
// implement kv interface backed by leveldb
type leveldbAdapter struct {
db *leveldb.DB
}
func (b *leveldbAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *leveldbAdapter) Get(bucket string, key string) (v []byte, err error) {
v, err = b.db.Get([]byte(bucket+key), nil)
return
}
func (b *leveldbAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
it := b.db.NewIterator(util.BytesPrefix([]byte(bucket)), nil)
defer it.Release()
m = make(map[string][]byte)
for it.Next() {
k := string(it.Key())
actualKey := k[len(bucket):]
// it.Value() changes on next iteration
val := it.Value()
v := make([]byte, len(val))
copy(v, val)
m[actualKey] = v
}
return
}
func (b *leveldbAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Put([]byte(bucket+key), []byte(value), nil)
return err
}
func (b *leveldbAdapter) Delete(bucket string, key string) error {
err := b.db.Delete([]byte(bucket+key), nil)
return err
}
func (b *leveldbAdapter) Close() error {
return b.db.Close()
}


@ -1,54 +0,0 @@
package manager
import (
"context"
"github.com/go-redis/redis/v8"
)
// implement kv interface backed by redis
type redisAdapter struct {
db *redis.Client
}
var ctx = context.Background()
func (b *redisAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *redisAdapter) Get(bucket string, key string) (v []byte, err error) {
var val string
val, err = b.db.HGet(ctx, bucket, key).Result()
if err == nil {
v = []byte(val)
}
return
}
func (b *redisAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
var val map[string]string
val, err = b.db.HGetAll(ctx, bucket).Result()
if err == nil && val != nil {
m = make(map[string][]byte)
for k, v := range val {
m[k] = []byte(v)
}
}
return
}
func (b *redisAdapter) Put(bucket string, key string, value []byte) error {
_, err := b.db.HSet(ctx, bucket, key, string(value)).Result()
return err
}
func (b *redisAdapter) Delete(bucket string, key string) error {
_, err := b.db.HDel(ctx, bucket, key).Result()
return err
}
func (b *redisAdapter) Close() error {
return b.db.Close()
}
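For the redis backend, the dbFile setting is parsed by redis.ParseURL, so it must be a URL rather than a path. A small sketch of the accepted forms (hosts and credentials are placeholders):

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Forms understood by redis.ParseURL in go-redis v8.
	for _, u := range []string{
		"redis://localhost:6379/0",
		"redis://user:password@localhost:6379/1",
		"rediss://localhost:6380/0", // TLS
	} {
		opt, err := redis.ParseURL(u)
		if err != nil {
			fmt.Println(u, "->", err)
			continue
		}
		fmt.Println(u, "-> addr:", opt.Addr, "db:", opt.DB)
	}
}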


@ -2,167 +2,19 @@ package manager
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/alicebob/miniredis"
. "github.com/smartystreets/goconvey/convey"
. "github.com/tuna/tunasync/internal"
)
func SortMirrorStatus(status []MirrorStatus) {
sort.Slice(status, func(l, r int) bool {
return status[l].Name < status[r].Name
})
}
func DBAdapterTest(db dbAdapter) {
var err error
testWorkerIDs := []string{"test_worker1", "test_worker2"}
Convey("create worker", func() {
for _, id := range testWorkerIDs {
w := WorkerStatus{
ID: id,
Token: "token_" + id,
LastOnline: time.Now(),
LastRegister: time.Now(),
}
_, err = db.CreateWorker(w)
So(err, ShouldBeNil)
}
Convey("get existent worker", func() {
_, err := db.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
})
Convey("list existent workers", func() {
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
Convey("get non-existent worker", func() {
_, err := db.GetWorker("invalid workerID")
So(err, ShouldNotBeNil)
})
Convey("delete existent worker", func() {
err := db.DeleteWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
_, err = db.GetWorker(testWorkerIDs[0])
So(err, ShouldNotBeNil)
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 1)
})
Convey("delete non-existent worker", func() {
err := db.DeleteWorker("invalid workerID")
So(err, ShouldNotBeNil)
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
})
Convey("update mirror status", func() {
status := []MirrorStatus{
{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now().Add(-time.Hour),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now().Add(-time.Minute),
LastStarted: time.Now().Add(-time.Second),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}
SortMirrorStatus(status)
for _, s := range status {
_, err := db.UpdateMirrorStatus(s.Worker, s.Name, s)
So(err, ShouldBeNil)
}
Convey("get mirror status", func() {
m, err := db.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status[0])
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(m)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list mirror status", func() {
ms, err := db.ListMirrorStatus(testWorkerIDs[0])
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list all mirror status", func() {
ms, err := db.ListAllMirrorStatus()
So(err, ShouldBeNil)
SortMirrorStatus(ms)
expectedJSON, err := json.Marshal(status)
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("flush disabled jobs", func() {
ms, err := db.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 3)
err = db.FlushDisabledJobs()
So(err, ShouldBeNil)
ms, err = db.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 2)
})
})
}
func TestDBAdapter(t *testing.T) {
func TestBoltAdapter(t *testing.T) {
Convey("boltAdapter should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
@ -176,60 +28,114 @@ func TestDBAdapter(t *testing.T) {
So(err, ShouldBeNil)
}()
DBAdapterTest(boltDB)
})
testWorkerIDs := []string{"test_worker1", "test_worker2"}
Convey("create worker", func() {
for _, id := range testWorkerIDs {
w := WorkerStatus{
ID: id,
Token: "token_" + id,
LastOnline: time.Now(),
}
w, err = boltDB.CreateWorker(w)
So(err, ShouldBeNil)
}
Convey("redisAdapter should work", t, func() {
mr, err := miniredis.Run()
So(err, ShouldBeNil)
Convey("get exists worker", func() {
_, err := boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
})
addr := fmt.Sprintf("redis://%s", mr.Addr())
redisDB, err := makeDBAdapter("redis", addr)
So(err, ShouldBeNil)
Convey("list exist worker", func() {
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
defer func() {
// close redisDB
err := redisDB.Close()
So(err, ShouldBeNil)
mr.Close()
}()
Convey("get inexist worker", func() {
_, err := boltDB.GetWorker("invalid workerID")
So(err, ShouldNotBeNil)
})
})
DBAdapterTest(redisDB)
})
Convey("update mirror status", func() {
status := []MirrorStatus{
MirrorStatus{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
MirrorStatus{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
MirrorStatus{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}
Convey("badgerAdapter should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
for _, s := range status {
_, err := boltDB.UpdateMirrorStatus(s.Worker, s.Name, s)
So(err, ShouldBeNil)
dbType, dbFile := "badger", filepath.Join(tmpDir, "badger.db")
badgerDB, err := makeDBAdapter(dbType, dbFile)
So(err, ShouldBeNil)
}
defer func() {
// close badgerDB
err := badgerDB.Close()
So(err, ShouldBeNil)
}()
Convey("get mirror status", func() {
m, err := boltDB.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status[0])
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(m)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
DBAdapterTest(badgerDB)
})
Convey("list mirror status", func() {
ms, err := boltDB.ListMirrorStatus(testWorkerIDs[0])
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("leveldbAdapter should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
Convey("list all mirror status", func() {
ms, err := boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status)
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
dbType, dbFile := "leveldb", filepath.Join(tmpDir, "leveldb.db")
leveldbDB, err := makeDBAdapter(dbType, dbFile)
So(err, ShouldBeNil)
Convey("flush disabled jobs", func() {
ms, err := boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 3)
err = boltDB.FlushDisabledJobs()
So(err, ShouldBeNil)
ms, err = boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 2)
})
defer func() {
// close leveldbDB
err := leveldbDB.Close()
So(err, ShouldBeNil)
}()
})
DBAdapterTest(leveldbDB)
})
}


@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/gin-gonic/gin"
@ -24,7 +23,6 @@ type Manager struct {
cfg *Config
engine *gin.Engine
adapter dbAdapter
rwmu sync.RWMutex
httpClient *http.Client
}
@ -86,14 +84,11 @@ func GetTUNASyncManager(cfg *Config) *Manager {
// workerID should be valid in this route group
workerValidateGroup := s.engine.Group("/workers", s.workerIDValidator)
{
// delete specified worker
workerValidateGroup.DELETE(":id", s.deleteWorker)
// get job list
workerValidateGroup.GET(":id/jobs", s.listJobsOfWorker)
// post job status
workerValidateGroup.POST(":id/jobs/:job", s.updateJobOfWorker)
workerValidateGroup.POST(":id/jobs/:job/size", s.updateMirrorSize)
workerValidateGroup.POST(":id/schedules", s.updateSchedulesOfWorker)
}
// for tunasynctl to post commands
@ -129,11 +124,9 @@ func (s *Manager) Run() {
}
}
// listAllJobs respond with all jobs of specified workers
// listAllJobs repond with all jobs of specified workers
func (s *Manager) listAllJobs(c *gin.Context) {
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListAllMirrorStatus()
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list all mirror status: %s",
err.Error(),
@ -142,11 +135,11 @@ func (s *Manager) listAllJobs(c *gin.Context) {
s.returnErrJSON(c, http.StatusInternalServerError, err)
return
}
webMirStatusList := []WebMirrorStatus{}
webMirStatusList := []webMirrorStatus{}
for _, m := range mirrorStatusList {
webMirStatusList = append(
webMirStatusList,
BuildWebMirrorStatus(m),
convertMirrorStatus(m),
)
}
c.JSON(http.StatusOK, webMirStatusList)
@ -154,9 +147,7 @@ func (s *Manager) listAllJobs(c *gin.Context) {
// flushDisabledJobs deletes all jobs that marks as deleted
func (s *Manager) flushDisabledJobs(c *gin.Context) {
s.rwmu.Lock()
err := s.adapter.FlushDisabledJobs()
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to flush disabled jobs: %s",
err.Error(),
@ -168,30 +159,10 @@ func (s *Manager) flushDisabledJobs(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{_infoKey: "flushed"})
}
// deleteWorker deletes one worker by id
func (s *Manager) deleteWorker(c *gin.Context) {
workerID := c.Param("id")
s.rwmu.Lock()
err := s.adapter.DeleteWorker(workerID)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to delete worker: %s",
err.Error(),
)
c.Error(err)
s.returnErrJSON(c, http.StatusInternalServerError, err)
return
}
logger.Noticef("Worker <%s> deleted", workerID)
c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"})
}
// listWorkers respond with information of all the workers
// listWrokers respond with informations of all the workers
func (s *Manager) listWorkers(c *gin.Context) {
var workerInfos []WorkerStatus
s.rwmu.RLock()
workers, err := s.adapter.ListWorkers()
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list workers: %s",
err.Error(),
@ -203,11 +174,8 @@ func (s *Manager) listWorkers(c *gin.Context) {
for _, w := range workers {
workerInfos = append(workerInfos,
WorkerStatus{
ID: w.ID,
URL: w.URL,
Token: "REDACTED",
LastOnline: w.LastOnline,
LastRegister: w.LastRegister,
ID: w.ID,
LastOnline: w.LastOnline,
})
}
c.JSON(http.StatusOK, workerInfos)
@ -218,7 +186,6 @@ func (s *Manager) registerWorker(c *gin.Context) {
var _worker WorkerStatus
c.BindJSON(&_worker)
_worker.LastOnline = time.Now()
_worker.LastRegister = time.Now()
newWorker, err := s.adapter.CreateWorker(_worker)
if err != nil {
err := fmt.Errorf("failed to register worker: %s",
@ -237,9 +204,7 @@ func (s *Manager) registerWorker(c *gin.Context) {
// listJobsOfWorker respond with all the jobs of the specified worker
func (s *Manager) listJobsOfWorker(c *gin.Context) {
workerID := c.Param("id")
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID)
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list jobs of worker %s: %s",
workerID, err.Error(),
@ -257,53 +222,6 @@ func (s *Manager) returnErrJSON(c *gin.Context, code int, err error) {
})
}
func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
workerID := c.Param("id")
var schedules MirrorSchedules
c.BindJSON(&schedules)
for _, schedule := range schedules.Schedules {
mirrorName := schedule.MirrorName
if len(mirrorName) == 0 {
s.returnErrJSON(
c, http.StatusBadRequest,
errors.New("mirror Name should not be empty"),
)
}
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil {
logger.Errorf("failed to get job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
)
continue
}
if curStatus.Scheduled == schedule.NextSchedule {
// no changes, skip update
continue
}
curStatus.Scheduled = schedule.NextSchedule
s.rwmu.Lock()
_, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
)
c.Error(err)
s.returnErrJSON(c, http.StatusInternalServerError, err)
return
}
}
type empty struct{}
c.JSON(http.StatusOK, empty{})
}
func (s *Manager) updateJobOfWorker(c *gin.Context) {
workerID := c.Param("id")
var status MirrorStatus
@ -312,33 +230,18 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
if len(mirrorName) == 0 {
s.returnErrJSON(
c, http.StatusBadRequest,
errors.New("mirror Name should not be empty"),
errors.New("Mirror Name should not be empty"),
)
}
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
curTime := time.Now()
if status.Status == PreSyncing && curStatus.Status != PreSyncing {
status.LastStarted = curTime
} else {
status.LastStarted = curStatus.LastStarted
}
// Only successful syncing needs last_update
if status.Status == Success {
status.LastUpdate = curTime
status.LastUpdate = time.Now()
} else {
status.LastUpdate = curStatus.LastUpdate
}
if status.Status == Success || status.Status == Failed {
status.LastEnded = curTime
} else {
status.LastEnded = curStatus.LastEnded
}
// Only message with meaningful size updates the mirror size
if len(curStatus.Size) > 0 && curStatus.Size != "unknown" {
@ -355,9 +258,7 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
}
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@ -379,10 +280,7 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
c.BindJSON(&msg)
mirrorName := msg.Name
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil {
logger.Errorf(
"Failed to get status of mirror %s @<%s>: %s",
@ -399,9 +297,7 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@ -424,9 +320,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
return
}
s.rwmu.RLock()
w, err := s.adapter.GetWorker(workerID)
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("worker %s is not registered yet", workerID)
s.returnErrJSON(c, http.StatusBadRequest, err)
@ -438,14 +332,11 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
Cmd: clientCmd.Cmd,
MirrorID: clientCmd.MirrorID,
Args: clientCmd.Args,
Options: clientCmd.Options,
}
// update job status, even if the job did not disable successfully,
// this status should be set as disabled
s.rwmu.RLock()
curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID)
s.rwmu.RUnlock()
changed := false
switch clientCmd.Cmd {
case CmdDisable:
@ -456,9 +347,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
changed = true
}
if changed {
s.rwmu.Lock()
s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
s.rwmu.Unlock()
}
logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)


@ -3,12 +3,10 @@ package manager
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
@ -23,32 +21,29 @@ const (
)
func TestHTTPServer(t *testing.T) {
var listenPort = 5000
Convey("HTTP server should work", t, func(ctx C) {
listenPort++
port := listenPort
addr := "127.0.0.1"
baseURL := fmt.Sprintf("http://%s:%d", addr, port)
InitLogger(true, true, false)
s := GetTUNASyncManager(&Config{Debug: true})
s.cfg.Server.Addr = addr
s.cfg.Server.Port = port
s := GetTUNASyncManager(&Config{Debug: false})
So(s, ShouldNotBeNil)
s.setDBAdapter(&mockDBAdapter{
workerStore: map[string]WorkerStatus{
_magicBadWorkerID: {
_magicBadWorkerID: WorkerStatus{
ID: _magicBadWorkerID,
}},
statusStore: make(map[string]MirrorStatus),
})
go s.Run()
time.Sleep(50 * time.Millisecond)
port := rand.Intn(10000) + 20000
baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
go func() {
s.engine.Run(fmt.Sprintf("127.0.0.1:%d", port))
}()
time.Sleep(50 * time.Microsecond)
resp, err := http.Get(baseURL + "/ping")
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
So(err, ShouldBeNil)
var p map[string]string
err = json.Unmarshal(body, &p)
@ -66,34 +61,6 @@ func TestHTTPServer(t *testing.T) {
So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail"))
})
Convey("when register multiple workers", func(ctx C) {
N := 10
var cnt uint32
for i := 0; i < N; i++ {
go func(id int) {
w := WorkerStatus{
ID: fmt.Sprintf("worker%d", id),
}
resp, err := PostJSON(baseURL+"/workers", w, nil)
ctx.So(err, ShouldBeNil)
ctx.So(resp.StatusCode, ShouldEqual, http.StatusOK)
atomic.AddUint32(&cnt, 1)
}(i)
}
time.Sleep(2 * time.Second)
So(cnt, ShouldEqual, N)
Convey("list all workers", func(ctx C) {
resp, err := http.Get(baseURL + "/workers")
So(err, ShouldBeNil)
defer resp.Body.Close()
var actualResponseObj []WorkerStatus
err = json.NewDecoder(resp.Body).Decode(&actualResponseObj)
So(err, ShouldBeNil)
So(len(actualResponseObj), ShouldEqual, N+1)
})
})
Convey("when register a worker", func(ctx C) {
w := WorkerStatus{
ID: "test_worker1",
@ -112,34 +79,7 @@ func TestHTTPServer(t *testing.T) {
So(len(actualResponseObj), ShouldEqual, 2)
})
Convey("delete an existent worker", func(ctx C) {
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, w.ID), nil)
So(err, ShouldBeNil)
clt := &http.Client{}
resp, err := clt.Do(req)
So(err, ShouldBeNil)
defer resp.Body.Close()
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
So(err, ShouldBeNil)
So(res[_infoKey], ShouldEqual, "deleted")
})
Convey("delete non-existent worker", func(ctx C) {
invalidWorker := "test_worker233"
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, invalidWorker), nil)
So(err, ShouldBeNil)
clt := &http.Client{}
resp, err := clt.Do(req)
So(err, ShouldBeNil)
defer resp.Body.Close()
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
So(err, ShouldBeNil)
So(res[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
})
Convey("flush disabled jobs", func(ctx C) {
Convey("flush disabled jobs", func(ctx C) {
req, err := http.NewRequest("DELETE", baseURL+"/jobs/disabled", nil)
So(err, ShouldBeNil)
clt := &http.Client{}
@ -162,8 +102,8 @@ func TestHTTPServer(t *testing.T) {
Size: "unknown",
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("list mirror status of an existed worker", func(ctx C) {
@ -180,44 +120,12 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet
So(time.Since(m.LastEnded), ShouldBeLessThan, 1*time.Second)
})
// start syncing
status.Status = PreSyncing
time.Sleep(1 * time.Second)
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("update mirror status to PreSync - starting sync", func(ctx C) {
var ms []MirrorStatus
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
m := ms[0]
So(m.Name, ShouldEqual, status.Name)
So(m.Worker, ShouldEqual, status.Worker)
So(m.Status, ShouldEqual, status.Status)
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second)
So(time.Since(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Since(m.LastEnded), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastEnded), ShouldBeGreaterThan, 1*time.Second)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
})
Convey("list all job status of all workers", func(ctx C) {
var ms []WebMirrorStatus
var ms []webMirrorStatus
resp, err := GetJSON(baseURL+"/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
@ -228,9 +136,7 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second)
So(time.Since(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
})
@ -259,26 +165,10 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, "5GB")
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Since(m.LastEnded), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
})
})
Convey("Update schedule of valid mirrors", func(ctx C) {
msg := MirrorSchedules{
Schedules: []MirrorSchedule{
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
},
}
url := fmt.Sprintf("%s/workers/%s/schedules", baseURL, status.Worker)
resp, err := PostJSON(url, msg, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
})
Convey("Update size of an invalid mirror", func(ctx C) {
msg := struct {
Name string `json:"name"`
@ -290,47 +180,18 @@ func TestHTTPServer(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusInternalServerError)
})
// what if status changed to failed
status.Status = Failed
time.Sleep(3 * time.Second)
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("What if syncing job failed", func(ctx C) {
var ms []MirrorStatus
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
m := ms[0]
So(m.Name, ShouldEqual, status.Name)
So(m.Worker, ShouldEqual, status.Worker)
So(m.Status, ShouldEqual, status.Status)
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second)
So(time.Since(m.LastStarted), ShouldBeGreaterThan, 3*time.Second)
So(time.Since(m.LastEnded), ShouldBeLessThan, 1*time.Second)
})
})
Convey("update mirror status of an inexisted worker", func(ctx C) {
invalidWorker := "test_worker2"
status := MirrorStatus{
Name: "arch-sync2",
Worker: invalidWorker,
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now(),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
Name: "arch-sync2",
Worker: invalidWorker,
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s",
baseURL, status.Worker, status.Name), status, nil)
@ -342,24 +203,6 @@ func TestHTTPServer(t *testing.T) {
So(err, ShouldBeNil)
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
})
Convey("update schedule of an non-existent worker", func(ctx C) {
invalidWorker := "test_worker2"
sch := MirrorSchedules{
Schedules: []MirrorSchedule{
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
},
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules",
baseURL, invalidWorker), sch, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
defer resp.Body.Close()
var msg map[string]string
err = json.NewDecoder(resp.Body).Decode(&msg)
So(err, ShouldBeNil)
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
})
Convey("handle client command", func(ctx C) {
cmdChan := make(chan WorkerCmd, 1)
workerServer := makeMockWorkerServer(cmdChan)
@ -378,11 +221,11 @@ func TestHTTPServer(t *testing.T) {
// run the mock worker server
workerServer.Run(bindAddress)
}()
time.Sleep(50 * time.Millisecond)
time.Sleep(50 * time.Microsecond)
// verify the worker mock server is running
workerResp, err := http.Get(workerBaseURL + "/ping")
So(err, ShouldBeNil)
defer workerResp.Body.Close()
So(err, ShouldBeNil)
So(workerResp.StatusCode, ShouldEqual, http.StatusOK)
Convey("when client send wrong cmd", func(ctx C) {
@ -392,8 +235,8 @@ func TestHTTPServer(t *testing.T) {
WorkerID: "not_exist_worker",
}
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
})
@ -405,8 +248,9 @@ func TestHTTPServer(t *testing.T) {
}
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
time.Sleep(50 * time.Microsecond)
select {
@ -425,8 +269,6 @@ func TestHTTPServer(t *testing.T) {
type mockDBAdapter struct {
workerStore map[string]WorkerStatus
statusStore map[string]MirrorStatus
workerLock sync.RWMutex
statusLock sync.RWMutex
}
func (b *mockDBAdapter) Init() error {
@ -434,60 +276,35 @@ func (b *mockDBAdapter) Init() error {
}
func (b *mockDBAdapter) ListWorkers() ([]WorkerStatus, error) {
b.workerLock.RLock()
workers := make([]WorkerStatus, len(b.workerStore))
idx := 0
for _, w := range b.workerStore {
workers[idx] = w
idx++
}
b.workerLock.RUnlock()
return workers, nil
}
func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
b.workerLock.RLock()
defer b.workerLock.RUnlock()
w, ok := b.workerStore[workerID]
if !ok {
return WorkerStatus{}, fmt.Errorf("invalid workerId")
}
return w, nil
}
func (b *mockDBAdapter) DeleteWorker(workerID string) error {
b.workerLock.Lock()
delete(b.workerStore, workerID)
b.workerLock.Unlock()
return nil
}
func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
// _, ok := b.workerStore[w.ID]
// if ok {
// return workerStatus{}, fmt.Errorf("duplicate worker name")
// }
b.workerLock.Lock()
b.workerStore[w.ID] = w
b.workerLock.Unlock()
return w, nil
}
func (b *mockDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
w, err = b.GetWorker(workerID)
if err == nil {
w.LastOnline = time.Now()
w, err = b.CreateWorker(w)
}
return w, err
}
func (b *mockDBAdapter) GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error) {
id := mirrorID + "/" + workerID
b.statusLock.RLock()
status, ok := b.statusStore[id]
b.statusLock.RUnlock()
if !ok {
return MirrorStatus{}, fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
}
@ -501,9 +318,7 @@ func (b *mockDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status Mir
// }
id := mirrorID + "/" + workerID
b.statusLock.Lock()
b.statusStore[id] = status
b.statusLock.Unlock()
return status, nil
}
@ -513,23 +328,19 @@ func (b *mockDBAdapter) ListMirrorStatus(workerID string) ([]MirrorStatus, error
if workerID == _magicBadWorkerID {
return []MirrorStatus{}, fmt.Errorf("database fail")
}
b.statusLock.RLock()
for k, v := range b.statusStore {
if wID := strings.Split(k, "/")[1]; wID == workerID {
mirrorStatusList = append(mirrorStatusList, v)
}
}
b.statusLock.RUnlock()
return mirrorStatusList, nil
}
func (b *mockDBAdapter) ListAllMirrorStatus() ([]MirrorStatus, error) {
var mirrorStatusList []MirrorStatus
b.statusLock.RLock()
for _, v := range b.statusStore {
mirrorStatusList = append(mirrorStatusList, v)
}
b.statusLock.RUnlock()
return mirrorStatusList, nil
}

manager/status.go Normal file (62 lines)

@ -0,0 +1,62 @@
package manager
import (
"encoding/json"
"strconv"
"time"
. "github.com/tuna/tunasync/internal"
)
type textTime struct {
time.Time
}
func (t textTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
s := string(b)
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
*t = textTime{t2}
return err
}
type stampTime struct {
time.Time
}
func (t stampTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
ts, err := strconv.Atoi(string(b))
if err != nil {
return err
}
*t = stampTime{time.Unix(int64(ts), 0)}
return err
}
// webMirrorStatus is the mirror status to be shown in the web page
type webMirrorStatus struct {
Name string `json:"name"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
Upstream string `json:"upstream"`
Size string `json:"size"` // approximate size
}
func convertMirrorStatus(m MirrorStatus) webMirrorStatus {
return webMirrorStatus{
Name: m.Name,
IsMaster: m.IsMaster,
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
Upstream: m.Upstream,
Size: m.Size,
}
}
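The two wrappers serialize the same instant twice: textTime as a human-readable string, stampTime as a Unix timestamp. A self-contained sketch of the resulting JSON (the encoder methods are copied from above so the demo compiles on its own):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local copies of the encoders above, for a standalone demo.
type textTime struct{ time.Time }

func (t textTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}

type stampTime struct{ time.Time }

func (t stampTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Unix())
}

func main() {
	loc, err := time.LoadLocation("Asia/Tokyo")
	if err != nil {
		panic(err)
	}
	ts := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
	b, _ := json.Marshal(struct {
		LastUpdate   textTime  `json:"last_update"`
		LastUpdateTs stampTime `json:"last_update_ts"`
	}{textTime{ts}, stampTime{ts}})
	fmt.Println(string(b))
	// {"last_update":"2016-04-16 23:08:10 +0900","last_update_ts":1460815690}
}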

manager/status_test.go Normal file (44 lines)

@ -0,0 +1,44 @@
package manager
import (
"encoding/json"
"testing"
"time"
tunasync "github.com/tuna/tunasync/internal"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatus(t *testing.T) {
Convey("status json ser-de should work", t, func() {
tz := "Asia/Tokyo"
loc, err := time.LoadLocation(tz)
So(err, ShouldBeNil)
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
m := webMirrorStatus{
Name: "tunalinux",
Status: tunasync.Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
Size: "5GB",
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
}
b, err := json.Marshal(m)
So(err, ShouldBeNil)
//fmt.Println(string(b))
var m2 webMirrorStatus
err = json.Unmarshal(b, &m2)
So(err, ShouldBeNil)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
}


@ -1,4 +1,3 @@
//go:build ignore
// +build ignore
package main


@ -1,4 +1,3 @@
//go:build ignore
// +build ignore
package main


@ -1,4 +1,3 @@
//go:build ignore
// +build ignore
package main


@ -1,4 +1,3 @@
//go:build ignore
// +build ignore
package main


@ -15,14 +15,12 @@ type baseProvider struct {
ctx *Context
name string
interval time.Duration
retry int
timeout time.Duration
isMaster bool
cmd *cmdJob
logFileFd *os.File
isRunning atomic.Value
successExitCodes []int
cmd *cmdJob
isRunning atomic.Value
logFile *os.File
cgroup *cgroupHook
zfs *zfsHook
@ -54,14 +52,6 @@ func (p *baseProvider) Interval() time.Duration {
return p.interval
}
func (p *baseProvider) Retry() int {
return p.retry
}
func (p *baseProvider) Timeout() time.Duration {
return p.timeout
}
func (p *baseProvider) IsMaster() bool {
return p.isMaster
}
@ -121,34 +111,24 @@ func (p *baseProvider) Docker() *dockerHook {
return p.docker
}
func (p *baseProvider) prepareLogFile(append bool) error {
func (p *baseProvider) prepareLogFile() error {
if p.LogFile() == "/dev/null" {
p.cmd.SetLogFile(nil)
return nil
}
appendMode := 0
if append {
appendMode = os.O_APPEND
if p.logFile == nil {
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
return err
}
p.logFile = logFile
}
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE|appendMode, 0644)
if err != nil {
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
return err
}
p.logFileFd = logFile
p.cmd.SetLogFile(logFile)
p.cmd.SetLogFile(p.logFile)
return nil
}
func (p *baseProvider) closeLogFile() (err error) {
if p.logFileFd != nil {
err = p.logFileFd.Close()
p.logFileFd = nil
}
return
}
func (p *baseProvider) Run(started chan empty) error {
func (p *baseProvider) Run() error {
panic("Not Implemented")
}
@ -163,42 +143,32 @@ func (p *baseProvider) IsRunning() bool {
func (p *baseProvider) Wait() error {
defer func() {
logger.Debugf("set isRunning to false: %s", p.Name())
p.Lock()
p.isRunning.Store(false)
if p.logFile != nil {
p.logFile.Close()
p.logFile = nil
}
p.Unlock()
}()
logger.Debugf("calling Wait: %s", p.Name())
return p.cmd.Wait()
}
func (p *baseProvider) Terminate() error {
p.Lock()
defer p.Unlock()
logger.Debugf("terminating provider: %s", p.Name())
if !p.IsRunning() {
logger.Warningf("Terminate() called while IsRunning is false: %s", p.Name())
return nil
}
p.Lock()
if p.logFile != nil {
p.logFile.Close()
p.logFile = nil
}
p.Unlock()
err := p.cmd.Terminate()
p.isRunning.Store(false)
return err
}
func (p *baseProvider) DataSize() string {
return ""
}
func (p *baseProvider) SetSuccessExitCodes(codes []int) {
if codes == nil {
p.successExitCodes = []int{}
} else {
p.successExitCodes = codes
}
}
func (p *baseProvider) GetSuccessExitCodes() []int {
if p.successExitCodes == nil {
return []int{}
}
return p.successExitCodes
}
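successExitCodes lets a mirror treat selected non-zero exit statuses as success (for example rsync's 24, "some files vanished"). A sketch of the check a job runner could apply; the helper name and wiring are assumptions, only the accessor is from the code above:

// Sketch: decide whether a finished command counts as a success.
func exitedSuccessfully(p *baseProvider, code int) bool {
	if code == 0 {
		return true
	}
	for _, c := range p.GetSuccessExitCodes() {
		if code == c {
			return true
		}
	}
	return false
}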


@ -1,93 +0,0 @@
//go:build linux
// +build linux
package worker
import (
"fmt"
"os"
"path/filepath"
"github.com/dennwc/btrfs"
)
type btrfsSnapshotHook struct {
provider mirrorProvider
mirrorSnapshotPath string
}
// the user who runs the jobs (typically `tunasync`) should be granted the permission to run btrfs commands
// TODO: check if the filesystem is Btrfs
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
mirrorSnapshotPath := mirror.SnapshotPath
if mirrorSnapshotPath == "" {
mirrorSnapshotPath = filepath.Join(snapshotPath, provider.Name())
}
return &btrfsSnapshotHook{
provider: provider,
mirrorSnapshotPath: mirrorSnapshotPath,
}
}
// check if path `snapshotPath/providerName` exists
// Case 1: Not exists => create a new subvolume
// Case 2: Exists as a subvolume => nothing to do
// Case 3: Exists as a directory => error detected
func (h *btrfsSnapshotHook) preJob() error {
path := h.provider.WorkingDir()
if _, err := os.Stat(path); os.IsNotExist(err) {
// create subvolume
err := btrfs.CreateSubVolume(path)
if err != nil {
logger.Errorf("failed to create Btrfs subvolume %s: %s", path, err.Error())
return err
}
logger.Noticef("created new Btrfs subvolume %s", path)
} else {
if is, err := btrfs.IsSubVolume(path); err != nil {
return err
} else if !is {
return fmt.Errorf("path %s exists but isn't a Btrfs subvolume", path)
}
}
return nil
}
func (h *btrfsSnapshotHook) preExec() error {
return nil
}
func (h *btrfsSnapshotHook) postExec() error {
return nil
}
// delete old snapshot if exists, then create a new snapshot
func (h *btrfsSnapshotHook) postSuccess() error {
if _, err := os.Stat(h.mirrorSnapshotPath); !os.IsNotExist(err) {
isSubVol, err := btrfs.IsSubVolume(h.mirrorSnapshotPath)
if err != nil {
return err
} else if !isSubVol {
return fmt.Errorf("path %s exists and isn't a Btrfs snapshot", h.mirrorSnapshotPath)
}
// is old snapshot => delete it
if err := btrfs.DeleteSubVolume(h.mirrorSnapshotPath); err != nil {
logger.Errorf("failed to delete old Btrfs snapshot %s", h.mirrorSnapshotPath)
return err
}
logger.Noticef("deleted old snapshot %s", h.mirrorSnapshotPath)
}
// create a new writable snapshot
// (the snapshot is writable so that it can be deleted easily)
if err := btrfs.SnapshotSubVolume(h.provider.WorkingDir(), h.mirrorSnapshotPath, false); err != nil {
logger.Errorf("failed to create new Btrfs snapshot %s", h.mirrorSnapshotPath)
return err
}
logger.Noticef("created new Btrfs snapshot %s", h.mirrorSnapshotPath)
return nil
}
// keep the old snapshot => nothing to do
func (h *btrfsSnapshotHook) postFail() error {
return nil
}
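The hook's methods map onto the job lifecycle: preJob ensures the working directory is a subvolume before anything runs, postSuccess replaces the snapshot after a good sync, and postFail leaves the previous snapshot in place. A comment sketch of the assumed ordering (not a verbatim excerpt of the job runner):

// Assumed hook ordering over one job run:
//
//	preJob()      // once: create the Btrfs subvolume if missing
//	preExec()     // before each sync attempt (no-op here)
//	postExec()    // after each sync attempt (no-op here)
//	postSuccess() // delete the old snapshot, snapshot the fresh tree
//	postFail()    // keep the last good snapshot untouched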


@ -1,31 +0,0 @@
//go:build !linux
// +build !linux
package worker
type btrfsSnapshotHook struct {
}
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
return &btrfsSnapshotHook{}
}
func (h *btrfsSnapshotHook) postExec() error {
return nil
}
func (h *btrfsSnapshotHook) postFail() error {
return nil
}
func (h *btrfsSnapshotHook) postSuccess() error {
return nil
}
func (h *btrfsSnapshotHook) preExec() error {
return nil
}
func (h *btrfsSnapshotHook) preJob() error {
return nil
}


@ -1,297 +1,63 @@
package worker
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
"golang.org/x/sys/unix"
cgroups "github.com/containerd/cgroups/v3"
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
"github.com/moby/sys/reexec"
contspecs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/codeskyblue/go-sh"
)
type cgroupHook struct {
emptyHook
cgCfg cgroupConfig
memLimit MemBytes
cgMgrV1 cgv1.Cgroup
cgMgrV2 *cgv2.Manager
provider mirrorProvider
basePath string
baseGroup string
created bool
subsystem string
memLimit string
}
type execCmd string
const (
cmdCont execCmd = "cont"
cmdAbrt execCmd = "abrt"
)
func init() {
reexec.Register("tunasync-exec", waitExec)
}
func waitExec() {
binary, err := exec.LookPath(os.Args[1])
if err != nil {
panic(err)
func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook {
if basePath == "" {
basePath = "/sys/fs/cgroup"
}
pipe := os.NewFile(3, "pipe")
if pipe != nil {
if _, err := pipe.Stat(); err == nil {
cmdBytes, err := io.ReadAll(pipe)
if err != nil {
panic(err)
}
if err := pipe.Close(); err != nil {
}
cmd := execCmd(string(cmdBytes))
switch cmd {
case cmdAbrt:
fallthrough
default:
panic("Exited on request")
case cmdCont:
}
}
if baseGroup == "" {
baseGroup = "tunasync"
}
args := os.Args[1:]
env := os.Environ()
if err := syscall.Exec(binary, args, env); err != nil {
panic(err)
if subsystem == "" {
subsystem = "cpu"
}
panic("Exec failed.")
}
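waitExec is the child side of the reexec pattern: the worker re-executes its own binary under the registered name tunasync-exec, hands it a control pipe as fd 3, and the child either execs the real command ("cont") or panics ("abrt"). A hedged sketch of the parent side; moby/sys/reexec's Command and exec.Cmd's ExtraFiles are real APIs, but the command and wiring here are illustrative:

package main

import (
	"os"

	"github.com/moby/sys/reexec"
)

func main() {
	// Run "tunasync-exec /bin/true" with a control pipe as fd 3.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := reexec.Command("tunasync-exec", "/bin/true")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	r.Close()
	w.Write([]byte("cont")) // "abrt" would make the child panic instead
	w.Close()               // child reads until EOF before deciding
	_ = cmd.Wait()
}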
func initCgroup(cfg *cgroupConfig) error {
logger.Debugf("Initializing cgroup")
baseGroup := cfg.Group
//subsystem := cfg.Subsystem
// If baseGroup is empty, it implies using the cgroup of the current process
// otherwise, it refers to an absolute group path
if baseGroup != "" {
baseGroup = filepath.Join("/", baseGroup)
}
cfg.isUnified = cgroups.Mode() == cgroups.Unified
if cfg.isUnified {
logger.Debugf("Cgroup V2 detected")
g := baseGroup
if g == "" {
logger.Debugf("Detecting my cgroup path")
var err error
if g, err = cgv2.NestedGroupPath(""); err != nil {
return err
}
}
logger.Infof("Using cgroup path: %s", g)
var err error
if cfg.cgMgrV2, err = cgv2.Load(g); err != nil {
return err
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil)
if err != nil {
return err
}
for {
logger.Debugf("Reading pids")
procs, err := cfg.cgMgrV2.Procs(false)
if err != nil {
logger.Errorf("Cannot read pids in that group")
return err
}
if len(procs) == 0 {
break
}
for _, p := range procs {
if err := wkrMgr.AddProc(p); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
return err
}
}
}
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV2.NewChild("__test", nil)
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
}
if err := testMgr.Delete(); err != nil {
return err
}
procs, err := cfg.cgMgrV2.Procs(false)
if err != nil {
logger.Errorf("Cannot read pids in that group")
return err
}
if len(procs) != 0 {
return fmt.Errorf("There are remaining processes in cgroup %s", baseGroup)
}
}
} else {
logger.Debugf("Cgroup V1 detected")
var pather cgv1.Path
if baseGroup != "" {
pather = cgv1.StaticPath(baseGroup)
} else {
pather = (func(p cgv1.Path) cgv1.Path {
return func(subsys cgv1.Name) (string, error) {
path, err := p(subsys)
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
}
logger.Infof("Loading cgroup")
var err error
if cfg.cgMgrV1, err = cgv1.Load(pather, func(cfg *cgv1.InitConfig) error {
cfg.InitCheck = cgv1.AllowAny
return nil
}); err != nil {
return err
}
logger.Debugf("Available subsystems:")
for _, subsys := range cfg.cgMgrV1.Subsystems() {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Debugf("%s: %s", subsys.Name(), p)
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{})
if err != nil {
return err
}
for _, subsys := range cfg.cgMgrV1.Subsystems() {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
for {
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
return err
}
if len(procs) == 0 {
break
}
for _, proc := range procs {
if err := wkrMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
return err
}
}
}
}
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{})
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
}
if err := testMgr.Delete(); err != nil {
return err
}
for _, subsys := range cfg.cgMgrV1.Subsystems() {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
return err
}
if len(procs) != 0 {
p, err := pather(subsys.Name())
if err != nil {
return err
}
return fmt.Errorf("There are remaining processes in cgroup %s of subsystem %s", p, subsys.Name())
}
}
}
}
return nil
}
func newCgroupHook(p mirrorProvider, cfg cgroupConfig, memLimit MemBytes) *cgroupHook {
return &cgroupHook{
emptyHook: emptyHook{
provider: p,
},
cgCfg: cfg,
memLimit: memLimit,
provider: p,
basePath: basePath,
baseGroup: baseGroup,
subsystem: subsystem,
}
}
func (c *cgroupHook) preExec() error {
if c.cgCfg.isUnified {
logger.Debugf("Creating v2 cgroup for task %s", c.provider.Name())
var resSet *cgv2.Resources
if c.memLimit != 0 {
resSet = &cgv2.Resources{
Memory: &cgv2.Memory{
Max: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
}
}
subMgr, err := c.cgCfg.cgMgrV2.NewChild(c.provider.Name(), resSet)
if err != nil {
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV2 = subMgr
} else {
logger.Debugf("Creating v1 cgroup for task %s", c.provider.Name())
var resSet contspecs.LinuxResources
if c.memLimit != 0 {
resSet = contspecs.LinuxResources{
Memory: &contspecs.LinuxMemory{
Limit: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
}
}
subMgr, err := c.cgCfg.cgMgrV1.New(c.provider.Name(), &resSet)
if err != nil {
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV1 = subMgr
c.created = true
if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
return err
}
if c.subsystem != "memory" {
return nil
}
if c.memLimit != "" {
gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
return sh.Command(
"cgset", "-r",
fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit),
gname,
).Run()
}
return nil
}
@ -302,59 +68,36 @@ func (c *cgroupHook) postExec() error {
logger.Errorf("Error killing tasks: %s", err.Error())
}
if c.cgCfg.isUnified {
logger.Debugf("Deleting v2 cgroup for task %s", c.provider.Name())
if err := c.cgMgrV2.Delete(); err != nil {
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV2 = nil
} else {
logger.Debugf("Deleting v1 cgroup for task %s", c.provider.Name())
if err := c.cgMgrV1.Delete(); err != nil {
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV1 = nil
}
return nil
c.created = false
return sh.Command("cgdelete", c.Cgroup()).Run()
}
func (c *cgroupHook) Cgroup() string {
name := c.provider.Name()
return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name)
}
func (c *cgroupHook) killAll() error {
if c.cgCfg.isUnified {
if c.cgMgrV2 == nil {
return nil
}
} else {
if c.cgMgrV1 == nil {
return nil
}
if !c.created {
return nil
}
name := c.provider.Name()
readTaskList := func() ([]int, error) {
taskList := []int{}
if c.cgCfg.isUnified {
procs, err := c.cgMgrV2.Procs(false)
taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks"))
if err != nil {
return taskList, err
}
defer taskFile.Close()
scanner := bufio.NewScanner(taskFile)
for scanner.Scan() {
pid, err := strconv.Atoi(scanner.Text())
if err != nil {
return []int{}, err
}
for _, proc := range procs {
taskList = append(taskList, int(proc))
}
} else {
taskSet := make(map[int]struct{})
for _, subsys := range c.cgMgrV1.Subsystems() {
procs, err := c.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
return []int{}, err
}
for _, proc := range procs {
taskSet[proc.Pid] = struct{}{}
}
}
for proc := range taskSet {
taskList = append(taskList, proc)
return taskList, err
}
taskList = append(taskList, pid)
}
return taskList, nil
}

View File

@ -1,124 +1,40 @@
package worker
import (
"errors"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"testing"
"time"
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
units "github.com/docker/go-units"
"github.com/moby/sys/reexec"
. "github.com/smartystreets/goconvey/convey"
)
func init() {
_, testReexec := os.LookupEnv("TESTREEXEC")
if !testReexec {
reexec.Init()
}
}
func TestReexec(t *testing.T) {
testCase, testReexec := os.LookupEnv("TESTREEXEC")
if !testReexec {
return
}
for len(os.Args) > 1 {
thisArg := os.Args[1]
os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
if thisArg == "--" {
break
}
}
switch testCase {
case "1":
Convey("Reexec should panic when command not found", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, exec.ErrNotFound)
})
case "2":
Convey("Reexec should run when fd 3 is not open", t, func(ctx C) {
So((func() error {
pipe := os.NewFile(3, "pipe")
if pipe == nil {
return errors.New("pipe is nil")
} else {
_, err := pipe.Stat()
return err
}
})(), ShouldNotBeNil)
So(func() {
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "3":
Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, "Exited on request")
})
case "4":
Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "5":
Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldNotPanic)
})
}
}
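
The cases above are easier to follow with the basic moby/sys/reexec pattern in view; here is a hedged, standalone sketch. The name "tunasync-exec" matches the argv[0] used by the test harness, but the initializer body is invented for illustration; in tunasync's worker it is the registered initializer that panics or exits, not the library itself.

package main

import (
	"fmt"
	"os"

	"github.com/moby/sys/reexec"
)

func init() {
	// runs in the child: matched against argv[0] by reexec.Init()
	reexec.Register("tunasync-exec", func() {
		fmt.Println("running as re-executed child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() { // true only when argv[0] names a registered initializer
		return
	}
	cmd := reexec.Command("tunasync-exec") // re-runs this binary with argv[0] rewritten
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}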
func TestCgroup(t *testing.T) {
var cgcf *cgroupConfig
Convey("init cgroup", t, func(ctx C) {
_, useCurrentCgroup := os.LookupEnv("USECURCGROUP")
cgcf = &cgroupConfig{BasePath: "/sys/fs/cgroup", Group: "tunasync", Subsystem: "cpu"}
if useCurrentCgroup {
cgcf.Group = ""
}
err := initCgroup(cgcf)
Convey("Cgroup Should Work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
if cgcf.isUnified {
So(cgcf.cgMgrV2, ShouldNotBeNil)
} else {
So(cgcf.cgMgrV1, ShouldNotBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
daemonScript := filepath.Join(tmpDir, "daemon.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
bgPidfile := filepath.Join(tmpDir, "bg.pid")
c := cmdConfig{
name: "tuna-cgroup",
upstreamURL: "http://mirrors.tuna.moe/",
command: cmdScript + " " + daemonScript,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"BG_PIDFILE": bgPidfile,
},
}
Convey("Cgroup Should Work", func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
daemonScript := filepath.Join(tmpDir, "daemon.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
bgPidfile := filepath.Join(tmpDir, "bg.pid")
c := cmdConfig{
name: "tuna-cgroup",
upstreamURL: "http://mirrors.tuna.moe/",
command: cmdScript + " " + daemonScript,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"BG_PIDFILE": bgPidfile,
},
}
cmdScriptContent := `#!/bin/bash
cmdScriptContent := `#!/bin/bash
redirect-std() {
[[ -t 0 ]] && exec </dev/null
[[ -t 1 ]] && exec >/dev/null
@ -129,13 +45,13 @@ redirect-std() {
close-fds() {
eval exec {3..255}\>\&-
}
# full daemonization of external command with setsid
daemonize() {
(
redirect-std
cd /
close-fds
redirect-std
cd /
close-fds
exec setsid "$@"
) &
}
@ -144,180 +60,91 @@ echo $$
daemonize $@
sleep 5
`
daemonScriptContent := `#!/bin/bash
daemonScriptContent := `#!/bin/bash
echo $$ > $BG_PIDFILE
sleep 30
`
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = ioutil.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "")
provider.AddHook(cg)
err = cg.preExec()
if err != nil {
logger.Errorf("Failed to create cgroup")
return
}
So(err, ShouldBeNil)
go func() {
err = provider.Run()
ctx.So(err, ShouldNotBeNil)
}()
time.Sleep(1 * time.Second)
// Daemon should be started
daemonPidBytes, err := ioutil.ReadFile(bgPidfile)
So(err, ShouldBeNil)
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
logger.Debugf("daemon pid: %s", daemonPid)
procDir := filepath.Join("/proc", daemonPid)
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
err = provider.Terminate()
So(err, ShouldBeNil)
// Daemon won't be killed
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
// Daemon can be killed by cgroup killer
cg.postExec()
_, err = os.Stat(procDir)
So(os.IsNotExist(err), ShouldBeTrue)
})
Convey("Rsync Memory Should Be Limited", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna-cgroup",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M")
provider.AddHook(cg)
err = cg.preExec()
if err != nil {
logger.Errorf("Failed to create cgroup")
return
}
if cg.subsystem == "memory" {
memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
So(err, ShouldBeNil)
err = os.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 0)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
go func() {
err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()
time.Sleep(1 * time.Second)
// Daemon should be started
daemonPidBytes, err := os.ReadFile(bgPidfile)
So(err, ShouldBeNil)
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
logger.Debugf("daemon pid: %s", daemonPid)
procDir := filepath.Join("/proc", daemonPid)
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
err = provider.Terminate()
So(err, ShouldBeNil)
// Daemon won't be killed
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
// Daemon can be killed by cgroup killer
cg.postExec()
_, err = os.Stat(procDir)
So(os.IsNotExist(err), ShouldBeTrue)
})
Convey("Rsync Memory Should Be Limited", func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna-cgroup",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 512*units.MiB)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
if cgcf.isUnified {
cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name())
if useCurrentCgroup {
group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name()))
So(err, ShouldBeNil)
cgpath = filepath.Join(cgcf.BasePath, group)
}
memoLimit, err := os.ReadFile(filepath.Join(cgpath, "memory.max"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
} else {
for _, subsys := range cg.cgMgrV1.Subsystems() {
if subsys.Name() == cgv1.Memory {
cgpath := filepath.Join(cgcf.Group, provider.Name())
if useCurrentCgroup {
p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory)
So(err, ShouldBeNil)
cgpath = p
}
memoLimit, err := os.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
}
}
cg.postExec()
So(cg.cgMgrV1, ShouldBeNil)
})
Reset(func() {
if cgcf.isUnified {
if cgcf.Group == "" {
wkrg, err := cgv2.NestedGroupPath("")
So(err, ShouldBeNil)
wkrMgr, _ := cgv2.Load(wkrg)
allCtrls, err := wkrMgr.Controllers()
So(err, ShouldBeNil)
err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable)
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV2
for {
logger.Debugf("Restoring pids")
procs, err := wkrMgr.Procs(false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, p := range procs {
if err := origMgr.AddProc(p); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
} else {
if cgcf.Group == "" {
pather := (func(p cgv1.Path) cgv1.Path {
return func(subsys cgv1.Name) (string, error) {
path, err := p(subsys)
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
wkrMgr, err := cgv1.Load(pather, func(cfg *cgv1.InitConfig) error {
cfg.InitCheck = cgv1.AllowAny
return nil
})
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV1
for _, subsys := range wkrMgr.Subsystems() {
for {
procs, err := wkrMgr.Processes(subsys.Name(), false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, proc := range procs {
if err := origMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
}
})
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
cg.postExec()
})
}

View File

@ -1,13 +1,9 @@
package worker
import (
"errors"
"fmt"
"regexp"
"time"
"github.com/anmitsu/go-shlex"
"github.com/tuna/tunasync/internal"
)
type cmdConfig struct {
@ -15,34 +11,22 @@ type cmdConfig struct {
upstreamURL, command string
workingDir, logDir, logFile string
interval time.Duration
retry int
timeout time.Duration
env map[string]string
failOnMatch string
sizePattern string
}
type cmdProvider struct {
baseProvider
cmdConfig
command []string
dataSize string
failOnMatch *regexp.Regexp
sizePattern *regexp.Regexp
command []string
}
func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
// TODO: check config options
if c.retry == 0 {
c.retry = defaultMaxRetry
}
provider := &cmdProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
cmdConfig: c,
}
@ -56,22 +40,6 @@ func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
return nil, err
}
provider.command = cmd
if len(c.failOnMatch) > 0 {
var err error
failOnMatch, err := regexp.Compile(c.failOnMatch)
if err != nil {
return nil, errors.New("fail-on-match regexp error: " + err.Error())
}
provider.failOnMatch = failOnMatch
}
if len(c.sizePattern) > 0 {
var err error
sizePattern, err := regexp.Compile(c.sizePattern)
if err != nil {
return nil, errors.New("size-pattern regexp error: " + err.Error())
}
provider.sizePattern = sizePattern
}
return provider, nil
}
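
For context, a self-contained sketch of what the removed fail_on_match machinery does conceptually: compile the configured pattern once at construction time (as above), then scan the finished log for matches. The file handling here is illustrative, not the internal.FindAllSubmatchInFile implementation.

package main

import (
	"fmt"
	"os"
	"regexp"
)

func checkLog(logFile, pattern string) error {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return fmt.Errorf("fail-on-match regexp error: %s", err)
	}
	data, err := os.ReadFile(logFile)
	if err != nil {
		return err
	}
	if m := re.FindAllSubmatch(data, -1); len(m) != 0 {
		return fmt.Errorf("fail-on-match regexp found %d matches", len(m))
	}
	return nil
}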
@ -84,45 +52,14 @@ func (p *cmdProvider) Upstream() string {
return p.upstreamURL
}
func (p *cmdProvider) DataSize() string {
return p.dataSize
}
func (p *cmdProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
func (p *cmdProvider) Run() error {
if err := p.Start(); err != nil {
return err
}
started <- empty{}
if err := p.Wait(); err != nil {
return err
}
if p.failOnMatch != nil {
matches, err := internal.FindAllSubmatchInFile(p.LogFile(), p.failOnMatch)
logger.Infof("FindAllSubmatchInFile: %q\n", matches)
if err != nil {
return err
}
if len(matches) != 0 {
logger.Debug("Fail-on-match: %r", matches)
return fmt.Errorf("Fail-on-match regexp found %d matches", len(matches))
}
}
if p.sizePattern != nil {
p.dataSize = internal.ExtractSizeFromLog(p.LogFile(), p.sizePattern)
}
return nil
return p.Wait()
}
func (p *cmdProvider) Start() error {
p.Lock()
defer p.Unlock()
if p.IsRunning() {
return errors.New("provider is currently running")
}
env := map[string]string{
"TUNASYNC_MIRROR_NAME": p.Name(),
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
@ -134,7 +71,7 @@ func (p *cmdProvider) Start() error {
env[k] = v
}
p.cmd = newCmdJob(p, p.command, p.WorkingDir(), env)
if err := p.prepareLogFile(false); err != nil {
if err := p.prepareLogFile(); err != nil {
return err
}
@ -142,6 +79,5 @@ func (p *cmdProvider) Start() error {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}
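
The env map built in Start eventually reaches the child process; a minimal sketch of the usual flattening step, since exec.Cmd wants KEY=VALUE strings rather than a map. Names are illustrative and this is not the newCmdJob implementation.

package main

import (
	"os"
	"os/exec"
)

func runWithEnv(command string, args []string, env map[string]string) error {
	cmd := exec.Command(command, args...)
	cmd.Env = os.Environ() // inherit the parent environment, then append overrides
	for k, v := range env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}
	return cmd.Run()
}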

View File

@ -1,6 +1,6 @@
package worker
// put global variables and types here
// put global viables and types here
import (
"gopkg.in/op/go-logging.v1"
@ -8,6 +8,6 @@ import (
type empty struct{}
const defaultMaxRetry = 2
const maxRetry = 2
var logger = logging.MustGetLogger("tunasync")

View File

@ -6,10 +6,6 @@ import (
"path/filepath"
"github.com/BurntSushi/toml"
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
units "github.com/docker/go-units"
"github.com/imdario/mergo"
)
type providerEnum uint8
@ -37,16 +33,14 @@ func (p *providerEnum) UnmarshalText(text []byte) error {
// Config represents worker config options
type Config struct {
Global globalConfig `toml:"global"`
Manager managerConfig `toml:"manager"`
Server serverConfig `toml:"server"`
Cgroup cgroupConfig `toml:"cgroup"`
ZFS zfsConfig `toml:"zfs"`
BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
Docker dockerConfig `toml:"docker"`
Include includeConfig `toml:"include"`
MirrorsConf []mirrorConfig `toml:"mirrors"`
Mirrors []mirrorConfig
Global globalConfig `toml:"global"`
Manager managerConfig `toml:"manager"`
Server serverConfig `toml:"server"`
Cgroup cgroupConfig `toml:"cgroup"`
ZFS zfsConfig `toml:"zfs"`
Docker dockerConfig `toml:"docker"`
Include includeConfig `toml:"include"`
Mirrors []mirrorConfig `toml:"mirrors"`
}
type globalConfig struct {
@ -55,17 +49,9 @@ type globalConfig struct {
MirrorDir string `toml:"mirror_dir"`
Concurrent int `toml:"concurrent"`
Interval int `toml:"interval"`
Retry int `toml:"retry"`
Timeout int `toml:"timeout"`
// appended to the options generated by rsync_provider, but before mirror-specific options
RsyncOptions []string `toml:"rsync_options"`
ExecOnSuccess []string `toml:"exec_on_success"`
ExecOnFailure []string `toml:"exec_on_failure"`
// merged with mirror-specific options. make sure you know what you are doing!
SuccessExitCodes []int `toml:"dangerous_global_success_exit_codes"`
}
type managerConfig struct {
@ -96,9 +82,6 @@ type cgroupConfig struct {
BasePath string `toml:"base_path"`
Group string `toml:"group"`
Subsystem string `toml:"subsystem"`
isUnified bool
cgMgrV1 cgv1.Cgroup
cgMgrV2 *cgv2.Manager
}
type dockerConfig struct {
@ -112,11 +95,6 @@ type zfsConfig struct {
Zpool string `toml:"zpool"`
}
type btrfsSnapshotConfig struct {
Enable bool `toml:"enable"`
SnapshotPath string `toml:"snapshot_path"`
}
type includeConfig struct {
IncludeMirrors string `toml:"include_mirrors"`
}
@ -125,80 +103,36 @@ type includedMirrorConfig struct {
Mirrors []mirrorConfig `toml:"mirrors"`
}
type MemBytes int64
// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
val, err := units.RAMInBytes(value)
*m = MemBytes(val)
return err
}
// Type returns the type
func (m *MemBytes) Type() string {
return "bytes"
}
// Value returns the value in int64
func (m *MemBytes) Value() int64 {
return int64(*m)
}
// UnmarshalJSON is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalText(s []byte) error {
val, err := units.RAMInBytes(string(s))
*m = MemBytes(val)
return err
}
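
A short usage sketch for MemBytes: both the TOML path (UnmarshalText) and the flag path (Set) delegate to units.RAMInBytes, which parses human-readable sizes into a byte count using binary (1024-based) multiples.

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"128M", "256MiB", "1G"} {
		n, err := units.RAMInBytes(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %d bytes\n", s, n)
	}
}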
type mirrorConfig struct {
Name string `toml:"name"`
Provider providerEnum `toml:"provider"`
Upstream string `toml:"upstream"`
Interval int `toml:"interval"`
Retry int `toml:"retry"`
Timeout int `toml:"timeout"`
MirrorDir string `toml:"mirror_dir"`
MirrorSubDir string `toml:"mirror_subdir"`
LogDir string `toml:"log_dir"`
Env map[string]string `toml:"env"`
Role string `toml:"role"`
Name string `toml:"name"`
Provider providerEnum `toml:"provider"`
Upstream string `toml:"upstream"`
Interval int `toml:"interval"`
MirrorDir string `toml:"mirror_dir"`
LogDir string `toml:"log_dir"`
Env map[string]string `toml:"env"`
Role string `toml:"role"`
// These two options over-write the global options
ExecOnSuccess []string `toml:"exec_on_success"`
ExecOnFailure []string `toml:"exec_on_failure"`
// These two options are appended to the global options
// These two options the global options
ExecOnSuccessExtra []string `toml:"exec_on_success_extra"`
ExecOnFailureExtra []string `toml:"exec_on_failure_extra"`
// will be merged with global option
SuccessExitCodes []int `toml:"success_exit_codes"`
Command string `toml:"command"`
UseIPv6 bool `toml:"use_ipv6"`
ExcludeFile string `toml:"exclude_file"`
Username string `toml:"username"`
Password string `toml:"password"`
Stage1Profile string `toml:"stage1_profile"`
Command string `toml:"command"`
FailOnMatch string `toml:"fail_on_match"`
SizePattern string `toml:"size_pattern"`
UseIPv6 bool `toml:"use_ipv6"`
UseIPv4 bool `toml:"use_ipv4"`
ExcludeFile string `toml:"exclude_file"`
Username string `toml:"username"`
Password string `toml:"password"`
RsyncNoTimeo bool `toml:"rsync_no_timeout"`
RsyncTimeout int `toml:"rsync_timeout"`
RsyncOptions []string `toml:"rsync_options"`
RsyncOverride []string `toml:"rsync_override"`
RsyncOverrideOnly bool `toml:"rsync_override_only"` // only use provided overridden options if true
Stage1Profile string `toml:"stage1_profile"`
MemoryLimit MemBytes `toml:"memory_limit"`
MemoryLimit string `toml:"memory_limit"`
DockerImage string `toml:"docker_image"`
DockerVolumes []string `toml:"docker_volumes"`
DockerOptions []string `toml:"docker_options"`
SnapshotPath string `toml:"snapshot_path"`
ChildMirrors []mirrorConfig `toml:"mirrors"`
}
// LoadConfig loads configuration
@ -225,36 +159,9 @@ func LoadConfig(cfgFile string) (*Config, error) {
logger.Errorf(err.Error())
return nil, err
}
cfg.MirrorsConf = append(cfg.MirrorsConf, incMirCfg.Mirrors...)
}
}
for _, m := range cfg.MirrorsConf {
if err := recursiveMirrors(cfg, nil, m); err != nil {
return nil, err
cfg.Mirrors = append(cfg.Mirrors, incMirCfg.Mirrors...)
}
}
return cfg, nil
}
func recursiveMirrors(cfg *Config, parent *mirrorConfig, mirror mirrorConfig) error {
var curMir mirrorConfig
if parent != nil {
curMir = *parent
}
curMir.ChildMirrors = nil
if err := mergo.Merge(&curMir, mirror, mergo.WithOverride); err != nil {
return err
}
if mirror.ChildMirrors == nil {
cfg.Mirrors = append(cfg.Mirrors, curMir)
} else {
for _, m := range mirror.ChildMirrors {
if err := recursiveMirrors(cfg, &curMir, m); err != nil {
return err
}
}
}
return nil
}
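
An illustrative reduction of the inheritance rule recursiveMirrors implements: a child mirrorConfig is merged over a copy of its parent with mergo.WithOverride, so fields the child sets win and unset fields are inherited. The miniMirror type is invented for this sketch.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type miniMirror struct {
	Name     string
	Provider string
	UseIPv6  bool
}

func main() {
	parent := miniMirror{Name: "ipv6s", Provider: "two-stage-rsync", UseIPv6: true}
	child := miniMirror{Name: "debian-security"}

	merged := parent // start from the parent's settings
	if err := mergo.Merge(&merged, child, mergo.WithOverride); err != nil {
		panic(err)
	}
	// child's non-zero fields override; the rest is inherited:
	// {Name:debian-security Provider:two-stage-rsync UseIPv6:true}
	fmt.Printf("%+v\n", merged)
}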

View File

@ -53,43 +53,34 @@ func diffMirrorConfig(oldList, newList []mirrorConfig) []mirrorCfgTrans {
sort.Sort(sortableMirrorList(oList))
sort.Sort(sortableMirrorList(nList))
if len(oList) != 0 && len(nList) != 0 {
// insert a tail node to both lists
// as the maximum node
lastOld, lastNew := oList[len(oList)-1], nList[len(nList)-1]
maxName := lastOld.Name
if lastNew.Name > lastOld.Name {
maxName = lastNew.Name
}
Nil := mirrorConfig{Name: "~" + maxName}
if Nil.Name <= maxName {
panic("Nil.Name should be larger than maxName")
}
oList, nList = append(oList, Nil), append(nList, Nil)
// insert a tail node to both lists
// as the maximum node
lastOld, lastNew := oList[len(oList)-1], nList[len(nList)-1]
maxName := lastOld.Name
if lastNew.Name > lastOld.Name {
maxName = lastNew.Name
}
Nil := mirrorConfig{Name: "~" + maxName}
if Nil.Name <= maxName {
panic("Nil.Name should be larger than maxName")
}
oList, nList = append(oList, Nil), append(nList, Nil)
// iterate over both lists to find the difference
for i, j := 0, 0; i < len(oList) && j < len(nList); {
o, n := oList[i], nList[j]
if n.Name < o.Name {
operations = append(operations, mirrorCfgTrans{diffAdd, n})
j++
} else if o.Name < n.Name {
operations = append(operations, mirrorCfgTrans{diffDelete, o})
i++
} else {
if !reflect.DeepEqual(o, n) {
operations = append(operations, mirrorCfgTrans{diffModify, n})
}
i++
j++
// iterate over both lists to find the difference
for i, j := 0, 0; i < len(oList) && j < len(nList); {
o, n := oList[i], nList[j]
if n.Name < o.Name {
operations = append(operations, mirrorCfgTrans{diffAdd, n})
j++
} else if o.Name < n.Name {
operations = append(operations, mirrorCfgTrans{diffDelete, o})
i++
} else {
if !reflect.DeepEqual(o, n) {
operations = append(operations, mirrorCfgTrans{diffModify, n})
}
}
} else {
for i := 0; i < len(oList); i++ {
operations = append(operations, mirrorCfgTrans{diffDelete, oList[i]})
}
for i := 0; i < len(nList); i++ {
operations = append(operations, mirrorCfgTrans{diffAdd, nList[i]})
i++
j++
}
}
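
The sentinel trick above is easier to see stripped of the mirrorConfig details. A compact restatement (illustrative, not the production code): sort both name lists, append a sentinel that sorts after every real name, then walk both lists with two cursors. The production code builds its sentinel as "~"+maxName so it also beats names containing '~'.

package main

import (
	"fmt"
	"sort"
)

func diffNames(oldL, newL []string) (added, removed []string) {
	sort.Strings(oldL)
	sort.Strings(newL)
	// assumes plain ASCII names below '~'; see the "~"+maxName construction above
	oldL, newL = append(oldL, "~"), append(newL, "~")
	for i, j := 0, 0; i < len(oldL) && j < len(newL); {
		switch {
		case newL[j] < oldL[i]:
			added = append(added, newL[j])
			j++
		case oldL[i] < newL[j]:
			removed = append(removed, oldL[i])
			i++
		default: // same name in both lists; the sentinels meet here last
			i++
			j++
		}
	}
	return
}

func main() {
	a, r := diffNames([]string{"debian", "fedora"}, []string{"debian", "ubuntu"})
	fmt.Println(a, r) // [ubuntu] [fedora]
}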

View File

@ -10,12 +10,12 @@ import (
func TestConfigDiff(t *testing.T) {
Convey("When old and new configs are equal", t, func() {
oldList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-security"},
{Name: "fedora"},
{Name: "archlinux"},
{Name: "AOSP"},
{Name: "ubuntu"},
mirrorConfig{Name: "debian"},
mirrorConfig{Name: "debian-security"},
mirrorConfig{Name: "fedora"},
mirrorConfig{Name: "archlinux"},
mirrorConfig{Name: "AOSP"},
mirrorConfig{Name: "ubuntu"},
}
newList := make([]mirrorConfig, len(oldList))
copy(newList, oldList)
@ -23,49 +23,21 @@ func TestConfigDiff(t *testing.T) {
difference := diffMirrorConfig(oldList, newList)
So(len(difference), ShouldEqual, 0)
})
Convey("When old config is empty", t, func() {
newList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-security"},
{Name: "fedora"},
{Name: "archlinux"},
{Name: "AOSP"},
{Name: "ubuntu"},
}
oldList := make([]mirrorConfig, 0)
difference := diffMirrorConfig(oldList, newList)
So(len(difference), ShouldEqual, len(newList))
})
Convey("When new config is empty", t, func() {
oldList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-security"},
{Name: "fedora"},
{Name: "archlinux"},
{Name: "AOSP"},
{Name: "ubuntu"},
}
newList := make([]mirrorConfig, 0)
difference := diffMirrorConfig(oldList, newList)
So(len(difference), ShouldEqual, len(oldList))
})
Convey("When giving two config lists with different names", t, func() {
oldList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-security"},
{Name: "fedora"},
{Name: "archlinux"},
{Name: "AOSP", Env: map[string]string{"REPO": "/usr/bin/repo"}},
{Name: "ubuntu"},
mirrorConfig{Name: "debian"},
mirrorConfig{Name: "debian-security"},
mirrorConfig{Name: "fedora"},
mirrorConfig{Name: "archlinux"},
mirrorConfig{Name: "AOSP", Env: map[string]string{"REPO": "/usr/bin/repo"}},
mirrorConfig{Name: "ubuntu"},
}
newList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-cd"},
{Name: "archlinuxcn"},
{Name: "AOSP", Env: map[string]string{"REPO": "/usr/local/bin/aosp-repo"}},
{Name: "ubuntu-ports"},
mirrorConfig{Name: "debian"},
mirrorConfig{Name: "debian-cd"},
mirrorConfig{Name: "archlinuxcn"},
mirrorConfig{Name: "AOSP", Env: map[string]string{"REPO": "/usr/local/bin/aosp-repo"}},
mirrorConfig{Name: "ubuntu-ports"},
}
difference := diffMirrorConfig(oldList, newList)

View File

@ -2,12 +2,10 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
units "github.com/docker/go-units"
. "github.com/smartystreets/goconvey/convey"
)
@ -20,8 +18,6 @@ log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
[manager]
api_base = "https://127.0.0.1:5000"
@ -39,8 +35,6 @@ name = "AOSP"
provider = "command"
upstream = "https://aosp.google.com/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/git/AOSP"
exec_on_success = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@ -54,15 +48,12 @@ provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ftp.debian.org/debian/"
use_ipv6 = true
memory_limit = "256MiB"
[[mirrors]]
name = "fedora"
provider = "rsync"
upstream = "rsync://ftp.fedoraproject.org/fedora/"
use_ipv6 = true
memory_limit = "128M"
exclude_file = "/etc/tunasync.d/fedora-exclude.txt"
exec_on_failure = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@ -76,11 +67,11 @@ exec_on_failure = [
})
Convey("Everything should work on valid config file", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
@ -90,9 +81,9 @@ exec_on_failure = [
tmpDir,
)
curCfgBlob := cfgBlob + incSection
cfgBlob = cfgBlob + incSection
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@ -116,17 +107,15 @@ provider = "two-stage-rsync"
stage1_profile = "debian"
use_ipv6 = true
`
err = os.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
err = ioutil.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil)
err = os.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
err = ioutil.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.Timeout, ShouldEqual, 86400)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
@ -137,100 +126,6 @@ use_ipv6 = true
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2)
So(m.Timeout, ShouldEqual, 3600)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
So(m.Name, ShouldEqual, "debian")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 256*units.MiB)
m = cfg.Mirrors[2]
So(m.Name, ShouldEqual, "fedora")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provRsync)
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
So(m.MemoryLimit.Value(), ShouldEqual, 128*units.MiB)
m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-cd")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 0)
m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "debian-security")
m = cfg.Mirrors[5]
So(m.Name, ShouldEqual, "ubuntu")
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Everything should work on nested config file", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
tmpDir, err := os.MkdirTemp("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
incSection := fmt.Sprintf(
"\n[include]\n"+
"include_mirrors = \"%s/*.conf\"",
tmpDir,
)
curCfgBlob := cfgBlob + incSection
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
incBlob1 := `
[[mirrors]]
name = "ipv6s"
use_ipv6 = true
[[mirrors.mirrors]]
name = "debians"
mirror_subdir = "debian"
provider = "two-stage-rsync"
stage1_profile = "debian"
[[mirrors.mirrors.mirrors]]
name = "debian-security"
upstream = "rsync://test.host/debian-security/"
[[mirrors.mirrors.mirrors]]
name = "ubuntu"
stage1_profile = "ubuntu"
upstream = "rsync://test.host2/ubuntu/"
[[mirrors.mirrors]]
name = "debian-cd"
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = os.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
So(cfg.Server.Hostname, ShouldEqual, "worker1.example.com")
m := cfg.Mirrors[0]
So(m.Name, ShouldEqual, "AOSP")
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
@ -245,32 +140,25 @@ use_ipv6 = true
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-security")
So(m.Name, ShouldEqual, "debian-cd")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.UseIPv6, ShouldEqual, true)
So(m.Stage1Profile, ShouldEqual, "debian")
m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "ubuntu")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.UseIPv6, ShouldEqual, true)
So(m.Stage1Profile, ShouldEqual, "ubuntu")
So(m.Name, ShouldEqual, "debian-security")
m = cfg.Mirrors[5]
So(m.Name, ShouldEqual, "debian-cd")
So(m.UseIPv6, ShouldEqual, true)
So(m.Provider, ShouldEqual, provRsync)
So(m.Name, ShouldEqual, "ubuntu")
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Providers can be inited from a valid config file", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@ -315,266 +203,4 @@ use_ipv6 = true
So(rp.excludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
})
Convey("MirrorSubdir should work", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
timeout = 86400
retry = 3
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "ipv6s"
use_ipv6 = true
[[mirrors.mirrors]]
name = "debians"
mirror_subdir = "debian"
provider = "two-stage-rsync"
stage1_profile = "debian"
[[mirrors.mirrors.mirrors]]
name = "debian-security"
upstream = "rsync://test.host/debian-security/"
[[mirrors.mirrors.mirrors]]
name = "ubuntu"
stage1_profile = "ubuntu"
upstream = "rsync://test.host2/ubuntu/"
[[mirrors.mirrors]]
name = "debian-cd"
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p := providers["debian-security"]
So(p.Name(), ShouldEqual, "debian-security")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-security")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-security/latest.log")
r2p, ok := p.(*twoStageRsyncProvider)
So(ok, ShouldBeTrue)
So(r2p.stage1Profile, ShouldEqual, "debian")
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/debian-security")
p = providers["ubuntu"]
So(p.Name(), ShouldEqual, "ubuntu")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/ubuntu")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/ubuntu/latest.log")
r2p, ok = p.(*twoStageRsyncProvider)
So(ok, ShouldBeTrue)
So(r2p.stage1Profile, ShouldEqual, "ubuntu")
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/ubuntu")
p = providers["debian-cd"]
So(p.Name(), ShouldEqual, "debian-cd")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-cd")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-cd/latest.log")
rp, ok := p.(*rsyncProvider)
So(ok, ShouldBeTrue)
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd")
So(p.Timeout(), ShouldEqual, 86400*time.Second)
})
Convey("rsync_override_only should work", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "foo"
provider = "rsync"
upstream = "rsync://foo.bar/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/foo"
rsync_override = ["--bar", "baz"]
rsync_override_only = true
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p, ok := providers["foo"].(*rsyncProvider)
So(ok, ShouldBeTrue)
So(p.options, ShouldResemble, []string{"--bar", "baz"})
})
Convey("rsync global options should work", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
rsync_options = ["--global"]
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "foo"
provider = "rsync"
upstream = "rsync://foo.bar/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/foo"
rsync_override = ["--override"]
rsync_options = ["--local"]
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p, ok := providers["foo"].(*rsyncProvider)
So(ok, ShouldBeTrue)
So(p.options, ShouldResemble, []string{
"--override", // from mirror.rsync_override
"--timeout=120", // generated by newRsyncProvider
"--global", // from global.rsync_options
"--local", // from mirror.rsync_options
})
})
Convey("success_exit_codes should work globally and per mirror", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
dangerous_global_success_exit_codes = [10, 20]
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "foo"
provider = "rsync"
upstream = "rsync://foo.bar/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/foo"
success_exit_codes = [30, 40]
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p, ok := providers["foo"].(*rsyncProvider)
So(ok, ShouldBeTrue)
So(p.successExitCodes, ShouldResemble, []int{10, 20, 30, 40})
})
}
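
An illustrative sketch of the merged success_exit_codes semantics the last test asserts: the global list and the per-mirror list are concatenated, and a non-zero exit status is still treated as success if it appears in the merged list. This is not tunasync's internal API, just the pattern.

package main

import (
	"errors"
	"os/exec"
	"slices"
)

func runAllowed(cmd *exec.Cmd, globalCodes, mirrorCodes []int) error {
	allowed := append(append([]int{}, globalCodes...), mirrorCodes...) // e.g. [10 20 30 40]
	err := cmd.Run()
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && slices.Contains(allowed, exitErr.ExitCode()) {
		return nil // non-zero exit, but whitelisted
	}
	return err
}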

View File

@ -3,40 +3,30 @@ package worker
import (
"fmt"
"os"
"time"
"github.com/codeskyblue/go-sh"
)
type dockerHook struct {
emptyHook
image string
volumes []string
options []string
memoryLimit MemBytes
provider mirrorProvider
image string
volumes []string
options []string
}
func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
volumes := []string{}
volumes = append(volumes, gCfg.Volumes...)
volumes = append(volumes, mCfg.DockerVolumes...)
if len(mCfg.ExcludeFile) > 0 {
arg := fmt.Sprintf("%s:%s:ro", mCfg.ExcludeFile, mCfg.ExcludeFile)
volumes = append(volumes, arg)
}
options := []string{}
options = append(options, gCfg.Options...)
options = append(options, mCfg.DockerOptions...)
return &dockerHook{
emptyHook: emptyHook{
provider: p,
},
image: mCfg.DockerImage,
volumes: volumes,
options: options,
memoryLimit: mCfg.MemoryLimit,
provider: p,
image: mCfg.DockerImage,
volumes: volumes,
options: options,
}
}
@ -69,27 +59,6 @@ func (d *dockerHook) postExec() error {
// sh.Command(
// "docker", "rm", "-f", d.Name(),
// ).Run()
name := d.Name()
retry := 10
for ; retry > 0; retry-- {
out, err := sh.Command(
"docker", "ps", "-a",
"--filter", "name=^"+name+"$",
"--format", "{{.Status}}",
).Output()
if err != nil {
logger.Errorf("docker ps failed: %v", err)
break
}
if len(out) == 0 {
break
}
logger.Debugf("container %s still exists: '%s'", name, string(out))
time.Sleep(1 * time.Second)
}
if retry == 0 {
logger.Warningf("container %s not removed automatically, next sync may fail", name)
}
d.provider.ExitContext()
return nil
}

View File

@ -2,45 +2,29 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
units "github.com/docker/go-units"
"github.com/codeskyblue/go-sh"
. "github.com/smartystreets/goconvey/convey"
)
func cmdRun(p string, args []string) {
cmd := exec.Command(p, args...)
out, err := cmd.CombinedOutput()
if err != nil {
logger.Debugf("cmdRun failed %s", err)
return
}
logger.Debugf("cmdRun: ", string(out))
}
func getDockerByName(name string) (string, error) {
// docker ps -f 'name=$name' --format '{{.Names}}'
out, err := sh.Command(
"docker", "ps", "-a",
"docker", "ps",
"--filter", "name="+name,
"--format", "{{.Names}}",
).Output()
if err == nil {
logger.Debugf("docker ps: '%s'", string(out))
}
return string(out), err
}
func TestDocker(t *testing.T) {
Convey("Docker Should Work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
@ -62,23 +46,20 @@ func TestDocker(t *testing.T) {
cmdScriptContent := `#!/bin/sh
echo ${TEST_CONTENT}
sleep 20
sleep 10
`
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
d := &dockerHook{
emptyHook: emptyHook{
provider: provider,
},
image: "alpine:3.8",
provider: provider,
image: "alpine",
volumes: []string{
fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
},
memoryLimit: 512 * units.MiB,
}
provider.AddHook(d)
So(provider.Docker(), ShouldNotBeNil)
@ -86,27 +67,12 @@ sleep 20
err = d.preExec()
So(err, ShouldBeNil)
cmdRun("docker", []string{"images"})
exitedErr := make(chan error, 1)
go func() {
err = provider.Run(make(chan empty, 1))
logger.Debugf("provider.Run() exited")
if err != nil {
logger.Errorf("provider.Run() failed: %v", err)
}
exitedErr <- err
err = provider.Run()
ctx.So(err, ShouldNotBeNil)
}()
// Wait for docker running
for wait := 0; wait < 8; wait++ {
names, err := getDockerByName(d.Name())
So(err, ShouldBeNil)
if names != "" {
break
}
time.Sleep(1 * time.Second)
}
// cmdRun("ps", []string{"aux"})
time.Sleep(1 * time.Second)
// assert container running
names, err := getDockerByName(d.Name())
@ -116,16 +82,13 @@ sleep 20
err = provider.Terminate()
So(err, ShouldBeNil)
// cmdRun("ps", []string{"aux"})
<-exitedErr
// container should be terminated and removed
names, err = getDockerByName(d.Name())
So(err, ShouldBeNil)
So(names, ShouldEqual, "")
// check log content
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput+"\n")

View File

@ -18,6 +18,7 @@ const (
type execPostHook struct {
emptyHook
provider mirrorProvider
// exec on success or on failure
execOn uint8
@ -36,11 +37,9 @@ func newExecPostHook(provider mirrorProvider, execOn uint8, command string) (*ex
}
return &execPostHook{
emptyHook: emptyHook{
provider: provider,
},
execOn: execOn,
command: cmd,
provider: provider,
execOn: execOn,
command: cmd,
}, nil
}
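
The command string from the config presumably reaches this hook pre-tokenized; a sketch of the shlex step used elsewhere in this codebase (go-shlex is imported by cmd.go above), which splits a shell-style string into argv while respecting quotes. The command string is an example only.

package main

import (
	"fmt"

	shlex "github.com/anmitsu/go-shlex"
)

func main() {
	cmd, err := shlex.Split(`bash -c 'echo $TUNASYNC_JOB_EXIT_STATUS > status'`, true) // posix mode
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", cmd) // ["bash" "-c" "echo $TUNASYNC_JOB_EXIT_STATUS > status"]
}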

View File

@ -1,6 +1,7 @@
package worker
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -12,7 +13,7 @@ import (
func TestExecPost(t *testing.T) {
Convey("ExecPost should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -45,7 +46,7 @@ echo $TUNASYNC_UPSTREAM_URL
echo $TUNASYNC_LOG_FILE
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -63,7 +64,7 @@ echo $TUNASYNC_LOG_FILE
expectedOutput := "success\n"
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
So(err, ShouldBeNil)
So(string(outputContent), ShouldEqual, expectedOutput)
})
@ -84,14 +85,14 @@ echo $TUNASYNC_LOG_FILE
exit 1
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
for i := 0; i < defaultMaxRetry; i++ {
for i := 0; i < maxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
@ -104,7 +105,7 @@ exit 1
expectedOutput := "failure\n"
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
So(err, ShouldBeNil)
So(string(outputContent), ShouldEqual, expectedOutput)
})

View File

@ -5,7 +5,6 @@ import (
"fmt"
"sync"
"sync/atomic"
"time"
tunasync "github.com/tuna/tunasync/internal"
)
@ -15,13 +14,12 @@ import (
type ctrlAction uint8
const (
jobStart ctrlAction = iota
jobStop // stop syncing, keep the job
jobDisable // disable the job (stops goroutine)
jobRestart // restart syncing
jobPing // ensure the goroutine is alive
jobHalt // worker halts
jobForceStart // ignore concurrent limit
jobStart ctrlAction = iota
jobStop // stop syncing, keep the job
jobDisable // disable the job (stops goroutine)
jobRestart // restart syncing
jobPing // ensure the goroutine is alive
jobHalt // worker halts
)
type jobMessage struct {
@ -53,7 +51,6 @@ type mirrorJob struct {
ctrlChan chan ctrlAction
disabled chan empty
state uint32
size string
}
func newMirrorJob(provider mirrorProvider) *mirrorJob {
@ -87,12 +84,10 @@ func (m *mirrorJob) SetProvider(provider mirrorProvider) error {
// runMirrorJob is the goroutine where syncing job runs in
// arguments:
//
// provider: mirror provider object
// ctrlChan: receives messages from the manager
// managerChan: push messages to the manager, this channel should have a larger buffer
// semaphore: make sure the concurrently running syncing jobs won't explode
//
// provider: mirror provider object
// ctrlChan: receives messages from the manager
// managerChan: push messages to the manager, this channel should have a larger buffer
// semaphore: make sure the concurrently running syncing jobs won't explode
// TODO: message struct for managerChan
func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) error {
jobsDone.Add(1)
@ -115,7 +110,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
managerChan <- jobMessage{
tunasync.Failed, m.Name(),
fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
true,
false,
}
return err
}
@ -141,7 +136,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
return err
}
for retry := 0; retry < provider.Retry(); retry++ {
for retry := 0; retry < maxRetry; retry++ {
stopASAP := false // stop job as soon as possible
if retry > 0 {
@ -157,43 +152,26 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
var syncErr error
syncDone := make(chan error, 1)
started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
go func() {
err := provider.Run(started)
syncDone <- err
err := provider.Run()
if !stopASAP {
syncDone <- err
}
}()
select { // Wait until provider started or error happened
case err := <-syncDone:
logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
syncDone <- err // it will be read again later
case <-started:
logger.Debug("provider started")
}
// Now terminating the provider is feasible
var termErr error
timeout := provider.Timeout()
if timeout <= 0 {
timeout = 100000 * time.Hour // never time out
}
select {
case syncErr = <-syncDone:
logger.Debug("syncing done")
case <-time.After(timeout):
logger.Notice("provider timeout")
termErr = provider.Terminate()
syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
case <-kill:
logger.Debug("received kill")
stopASAP = true
termErr = provider.Terminate()
err := provider.Terminate()
if err != nil {
logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
return err
}
syncErr = errors.New("killed by manager")
}
if termErr != nil {
logger.Errorf("failed to terminate provider %s: %s", m.Name(), termErr.Error())
return termErr
}
// post-exec hooks
herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
@ -204,33 +182,26 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
if syncErr == nil {
// syncing success
logger.Noticef("succeeded syncing %s", m.Name())
managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
// post-success hooks
logger.Debug("post-success hooks")
err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
if err != nil {
return err
}
} else {
// syncing failed
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
// post-fail hooks
logger.Debug("post-fail hooks")
err := runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
if err != nil {
return err
}
}
if syncErr == nil {
// syncing success
m.size = provider.DataSize()
managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
return nil
}
// syncing failed
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == provider.Retry()-1) && (m.State() == stateReady)}
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == maxRetry-1) && (m.State() == stateReady)}
// post-fail hooks
logger.Debug("post-fail hooks")
err = runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
if err != nil {
return err
}
// gracefully exit
if stopASAP {
logger.Debug("No retry, exit directly")
@ -241,26 +212,22 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
return nil
}
runJob := func(kill <-chan empty, jobDone chan<- empty, bypassSemaphore <-chan empty) {
runJob := func(kill <-chan empty, jobDone chan<- empty) {
select {
case semaphore <- empty{}:
defer func() { <-semaphore }()
runJobWrapper(kill, jobDone)
case <-bypassSemaphore:
logger.Noticef("Concurrent limit ignored by %s", m.Name())
runJobWrapper(kill, jobDone)
case <-kill:
jobDone <- empty{}
return
}
}
bypassSemaphore := make(chan empty, 1)
for {
if m.State() == stateReady {
kill := make(chan empty)
jobDone := make(chan empty)
go runJob(kill, jobDone, bypassSemaphore)
go runJob(kill, jobDone)
_wait_for_job:
select {
@ -281,14 +248,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
m.SetState(stateReady)
close(kill)
<-jobDone
time.Sleep(time.Second) // Restart may fail if the process has not exited yet
continue
case jobForceStart:
select { //non-blocking
default:
case bypassSemaphore <- empty{}:
}
fallthrough
case jobStart:
m.SetState(stateReady)
goto _wait_for_job
@ -312,14 +272,8 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
case jobDisable:
m.SetState(stateDisabled)
return nil
case jobForceStart:
select { //non-blocking
default:
case bypassSemaphore <- empty{}:
}
fallthrough
case jobRestart:
fallthrough
m.SetState(stateReady)
case jobStart:
m.SetState(stateReady)
default:
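
Both jobForceStart arms above use the same non-blocking send: queue a bypass token only if one is not already pending, then fall through to the start logic. A stripped-down illustration of that select/default idiom:

package main

import "fmt"

func main() {
	bypass := make(chan struct{}, 1) // capacity 1: at most one pending token
	for i := 0; i < 3; i++ {
		select {
		case bypass <- struct{}{}:
			fmt.Println("token queued")
		default:
			fmt.Println("token already pending, skipped")
		}
	}
}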

View File

@ -2,6 +2,7 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -16,7 +17,7 @@ func TestMirrorJob(t *testing.T) {
InitLogger(true, true, false)
Convey("MirrorJob should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -30,7 +31,6 @@ func TestMirrorJob(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
timeout: 7 * time.Second,
}
provider, err := newCmdProvider(c)
@ -41,7 +41,6 @@ func TestMirrorJob(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("For a normal mirror job", func(ctx C) {
scriptContent := `#!/bin/bash
@ -57,9 +56,9 @@ func TestMirrorJob(t *testing.T) {
provider.upstreamURL,
provider.LogFile(),
)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
readedScriptContent, err := ioutil.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
@ -85,7 +84,7 @@ func TestMirrorJob(t *testing.T) {
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobStart
@ -113,81 +112,13 @@ func TestMirrorJob(t *testing.T) {
})
Convey("When running long jobs with post-fail hook", func(ctx C) {
scriptContent := `#!/bin/bash
echo '++++++'
echo $TUNASYNC_WORKING_DIR
echo $0 sleeping
sleep 3
echo $TUNASYNC_WORKING_DIR
echo '------'
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
hookScriptFile := filepath.Join(tmpDir, "hook.sh")
err = os.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
h, err := newExecPostHook(provider, execOnFailure, hookScriptFile)
So(err, ShouldBeNil)
provider.AddHook(h)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("If we kill it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we kill it then start it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
time.Sleep(2 * time.Second)
logger.Debugf("Now starting...\n")
job.ctrlChan <- jobStart
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
job.ctrlChan <- jobDisable
<-job.disabled
})
})
Convey("When running long jobs", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
sleep 5
echo $TUNASYNC_WORKING_DIR
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
@ -204,15 +135,13 @@ echo $TUNASYNC_WORKING_DIR
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
@ -235,305 +164,14 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we restart it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobRestart
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
msg = <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
expectedOutput := fmt.Sprintf(
"%s\n%s\n",
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we disable it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobDisable
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
<-job.disabled
})
Convey("If we stop it twice, than start it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
job.ctrlChan <- jobStop // should be ignored
job.ctrlChan <- jobStart
msg = <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
expectedOutput := fmt.Sprintf(
"%s\n%s\n",
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
})
Convey("When a job timed out", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
sleep 10
echo $TUNASYNC_WORKING_DIR
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("It should be automatically terminated", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("It should be retried", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
for i := 0; i < defaultMaxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldContainSubstring, "timeout after")
// re-schedule after last try
So(msg.schedule, ShouldEqual, i == defaultMaxRetry-1)
}
job.ctrlChan <- jobDisable
<-job.disabled
})
})
})
}
func TestConcurrentMirrorJobs(t *testing.T) {
InitLogger(true, true, false)
Convey("Concurrent MirrorJobs should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
const CONCURRENT = 5
var providers [CONCURRENT]*cmdProvider
var jobs [CONCURRENT]*mirrorJob
for i := 0; i < CONCURRENT; i++ {
c := cmdConfig{
name: fmt.Sprintf("job-%d", i),
upstreamURL: "http://mirrors.tuna.moe/",
command: "sleep 2",
workingDir: tmpDir,
logDir: tmpDir,
logFile: "/dev/null",
interval: 10 * time.Second,
}
var err error
providers[i], err = newCmdProvider(c)
So(err, ShouldBeNil)
jobs[i] = newMirrorJob(providers[i])
}
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, CONCURRENT-2)
countingJobs := func(managerChan chan jobMessage, totalJobs, concurrentCheck int) (peakConcurrent, counterFailed int) {
counterEnded := 0
counterRunning := 0
peakConcurrent = 0
counterFailed = 0
for counterEnded < totalJobs {
msg := <-managerChan
switch msg.status {
case PreSyncing:
counterRunning++
case Syncing:
case Failed:
counterFailed++
fallthrough
case Success:
counterEnded++
counterRunning--
default:
So(0, ShouldEqual, 1)
}
// Test if semaphore works
So(counterRunning, ShouldBeLessThanOrEqualTo, concurrentCheck)
if counterRunning > peakConcurrent {
peakConcurrent = counterRunning
}
}
// select {
// case msg := <-managerChan:
// logger.Errorf("extra message received: %v", msg)
// So(0, ShouldEqual, 1)
// case <-time.After(2 * time.Second):
// }
return
}
Convey("When we run them all", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
}
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
Convey("If we cancel one job", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobRestart
time.Sleep(200 * time.Millisecond)
}
// Cancel the one waiting for semaphore
jobs[len(jobs)-1].ctrlChan <- jobStop
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT-1, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
Convey("If we override the concurrent limit", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(200 * time.Millisecond)
}
jobs[len(jobs)-1].ctrlChan <- jobForceStart
jobs[len(jobs)-2].ctrlChan <- jobForceStart
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT)
So(peakConcurrent, ShouldEqual, CONCURRENT)
So(counterFailed, ShouldEqual, 0)
time.Sleep(1 * time.Second)
// fmt.Println("Restart them")
for _, job := range jobs {
job.ctrlChan <- jobStart
}
peakConcurrent, counterFailed = countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
})
}
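A minimal standalone sketch of the buffered-channel semaphore pattern these concurrency tests exercise (names here are illustrative, not tunasync APIs):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const total, limit = 5, 3
	sem := make(chan struct{}, limit) // at most `limit` concurrent holders
	var wg sync.WaitGroup
	for i := 0; i < total; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire: blocks once `limit` jobs are running
			defer func() { <-sem }() // release when done
			fmt.Println("job", id, "syncing")
		}(i)
	}
	wg.Wait()
}

A force-started job skips the acquire step, which is consistent with the jobForceStart case above peaking at CONCURRENT rather than CONCURRENT-2.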

View File

@ -2,6 +2,7 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -13,13 +14,12 @@ import (
type logLimiter struct {
emptyHook
provider mirrorProvider
}
func newLogLimiter(provider mirrorProvider) *logLimiter {
return &logLimiter{
emptyHook: emptyHook{
provider: provider,
},
provider: provider,
}
}
@ -38,7 +38,7 @@ func (l *logLimiter) preExec() error {
}
logDir := p.LogDir()
files, err := os.ReadDir(logDir)
files, err := ioutil.ReadDir(logDir)
if err != nil {
if os.IsNotExist(err) {
os.MkdirAll(logDir, 0755)
@ -49,8 +49,7 @@ func (l *logLimiter) preExec() error {
matchedFiles := []os.FileInfo{}
for _, f := range files {
if strings.HasPrefix(f.Name(), p.Name()) {
info, _ := f.Info()
matchedFiles = append(matchedFiles, info)
matchedFiles = append(matchedFiles, f)
}
}
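This hunk is part of the io/ioutil deprecation cleanup: os.ReadDir returns []os.DirEntry rather than []os.FileInfo, so the master side has to recover a FileInfo via Info(). A hedged sketch of the migration, assuming logDir is in scope:

entries, err := os.ReadDir(logDir) // replaces ioutil.ReadDir
if err != nil {
	return err
}
matched := make([]os.FileInfo, 0, len(entries))
for _, e := range entries {
	info, err := e.Info() // may fail if the entry vanished in the meantime
	if err != nil {
		continue
	}
	matched = append(matched, info)
}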

View File

@ -2,6 +2,7 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -13,8 +14,8 @@ import (
func TestLogLimiter(t *testing.T) {
Convey("LogLimiter should work", t, func(ctx C) {
tmpDir, _ := os.MkdirTemp("", "tunasync")
tmpLogDir, err := os.MkdirTemp("", "tunasync-log")
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpLogDir, err := ioutil.TempDir("", "tunasync-log")
defer os.RemoveAll(tmpDir)
defer os.RemoveAll(tmpLogDir)
So(err, ShouldBeNil)
@ -57,7 +58,7 @@ echo $TUNASYNC_UPSTREAM_URL
echo $TUNASYNC_LOG_FILE
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -85,7 +86,7 @@ echo $TUNASYNC_LOG_FILE
logFile,
)
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest"))
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
})
@ -103,7 +104,7 @@ echo $TUNASYNC_LOG_FILE
sleep 5
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -133,10 +134,10 @@ sleep 5
logFile,
)
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest"))
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
loggedContent, err = os.ReadFile(logFile + ".fail")
loggedContent, err = ioutil.ReadFile(logFile + ".fail")
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
})

View File

@ -24,9 +24,9 @@ type mirrorProvider interface {
Type() providerEnum
// Start then Wait
Run(started chan empty) error
// Start the job
// run mirror job in background
Run() error
// run mirror job in background
Start() error
// Wait job to finish
Wait() error
@ -45,14 +45,11 @@ type mirrorProvider interface {
Hooks() []jobHook
Interval() time.Duration
Retry() int
Timeout() time.Duration
WorkingDir() string
LogDir() string
LogFile() string
IsMaster() bool
DataSize() string
// enter context
EnterContext() *Context
@ -60,10 +57,6 @@ type mirrorProvider interface {
ExitContext() *Context
// return context
Context() *Context
// set in newMirrorProvider, used by cmdJob.Wait
SetSuccessExitCodes(codes []int)
GetSuccessExitCodes() []int
}
// newProvider creates a mirrorProvider instance
@ -87,18 +80,12 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
}
if mirrorDir == "" {
mirrorDir = filepath.Join(
cfg.Global.MirrorDir, mirror.MirrorSubDir, mirror.Name,
cfg.Global.MirrorDir, mirror.Name,
)
}
if mirror.Interval == 0 {
mirror.Interval = cfg.Global.Interval
}
if mirror.Retry == 0 {
mirror.Retry = cfg.Global.Retry
}
if mirror.Timeout == 0 {
mirror.Timeout = cfg.Global.Timeout
}
logDir = formatLogDir(logDir, mirror)
// IsMaster
@ -120,78 +107,57 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
upstreamURL: mirror.Upstream,
command: mirror.Command,
workingDir: mirrorDir,
failOnMatch: mirror.FailOnMatch,
sizePattern: mirror.SizePattern,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
env: mirror.Env,
}
p, err := newCmdProvider(pc)
p.isMaster = isMaster
if err != nil {
panic(err)
}
p.isMaster = isMaster
provider = p
case provRsync:
rc := rsyncConfig{
name: mirror.Name,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
globalOptions: cfg.Global.RsyncOptions,
overriddenOptions: mirror.RsyncOverride,
useOverrideOnly: mirror.RsyncOverrideOnly,
rsyncEnv: mirror.Env,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
useIPv4: mirror.UseIPv4,
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
name: mirror.Name,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
interval: time.Duration(mirror.Interval) * time.Minute,
}
p, err := newRsyncProvider(rc)
p.isMaster = isMaster
if err != nil {
panic(err)
}
p.isMaster = isMaster
provider = p
case provTwoStageRsync:
rc := twoStageRsyncConfig{
name: mirror.Name,
stage1Profile: mirror.Stage1Profile,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
rsyncEnv: mirror.Env,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
useIPv4: mirror.UseIPv4,
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
name: mirror.Name,
stage1Profile: mirror.Stage1Profile,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
interval: time.Duration(mirror.Interval) * time.Minute,
}
p, err := newTwoStageRsyncProvider(rc)
p.isMaster = isMaster
if err != nil {
panic(err)
}
p.isMaster = isMaster
provider = p
default:
panic(errors.New("Invalid mirror provider"))
@ -205,11 +171,6 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
}
// Add Btrfs Snapshot Hook
if cfg.BtrfsSnapshot.Enable {
provider.AddHook(newBtrfsSnapshotHook(provider, cfg.BtrfsSnapshot.SnapshotPath, mirror))
}
// Add Docker Hook
if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))
@ -218,7 +179,8 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
// Add Cgroup Hook
provider.AddHook(
newCgroupHook(
provider, cfg.Cgroup, mirror.MemoryLimit,
provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group,
cfg.Cgroup.Subsystem, mirror.MemoryLimit,
),
)
}
@ -253,17 +215,5 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
}
addHookFromCmdList(mirror.ExecOnFailureExtra, execOnFailure)
successExitCodes := []int{}
if cfg.Global.SuccessExitCodes != nil {
successExitCodes = append(successExitCodes, cfg.Global.SuccessExitCodes...)
}
if mirror.SuccessExitCodes != nil {
successExitCodes = append(successExitCodes, mirror.SuccessExitCodes...)
}
if len(successExitCodes) > 0 {
logger.Infof("Non-zero success exit codes set for mirror %s: %v", mirror.Name, successExitCodes)
provider.SetSuccessExitCodes(successExitCodes)
}
return provider
}
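The success-exit-code plumbing on the master side of this file pairs with the cmdJob.Wait change further down: codes merged from the global and per-mirror config are later consulted to forgive non-zero exits. A reduced sketch, with cfg, mirror, and err assumed in scope:

allowed := append(append([]int{}, cfg.Global.SuccessExitCodes...),
	mirror.SuccessExitCodes...)
var exitErr *exec.ExitError
if errors.As(err, &exitErr) && slices.Contains(allowed, exitErr.ExitCode()) {
	err = nil // whitelisted exit code: treat the run as successful
}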

View File

@ -2,9 +2,9 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"time"
@ -13,7 +13,7 @@ import (
func TestRsyncProvider(t *testing.T) {
Convey("Rsync Provider should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
@ -27,7 +27,6 @@ func TestRsyncProvider(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
timeout: 100 * time.Second,
interval: 600 * time.Second,
}
@ -40,7 +39,6 @@ func TestRsyncProvider(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("When entering a context (auto exit)", func() {
func() {
@ -75,91 +73,55 @@ func TestRsyncProvider(t *testing.T) {
echo "syncing to $(pwd)"
echo $RSYNC_PASSWORD $@
sleep 1
echo "Total file size: 1.33T bytes"
echo "Done"
exit 0
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"%s\n"+
"Total file size: 1.33T bytes\n"+
"Done\n",
targetDir,
provider.WorkingDir(),
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 -6 %s %s",
"--timeout=120 --contimeout=120 -6 %s %s",
provider.upstreamURL, provider.WorkingDir(),
),
)
err = provider.Run(make(chan empty, 1))
err = provider.Run()
So(err, ShouldBeNil)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
So(provider.DataSize(), ShouldEqual, "1.33T")
})
})
Convey("If the rsync program fails", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
Convey("in the rsyncProvider", func() {
c := rsyncConfig{
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
extraOptions: []string{"--somethine-invalid"},
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error")
})
})
}
func TestRsyncProviderWithAuthentication(t *testing.T) {
Convey("Rsync Provider with password should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
proxyAddr := "127.0.0.1:1233"
c := rsyncConfig{
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
username: "tunasync",
password: "tunasyncpassword",
workingDir: tmpDir,
extraOptions: []string{"--delete-excluded"},
rsyncTimeoutValue: 30,
rsyncEnv: map[string]string{"RSYNC_PROXY": proxyAddr},
logDir: tmpDir,
logFile: tmpFile,
useIPv4: true,
interval: 600 * time.Second,
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
username: "tunasync",
password: "tunasyncpassword",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
@ -174,32 +136,30 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
Convey("Let's try a run", func() {
scriptContent := `#!/bin/bash
echo "syncing to $(pwd)"
echo $USER $RSYNC_PASSWORD $RSYNC_PROXY $@
echo $USER $RSYNC_PASSWORD $@
sleep 1
echo "Done"
exit 0
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"%s\n"+
"Done\n",
targetDir,
provider.WorkingDir(),
fmt.Sprintf(
"%s %s %s -aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
"%s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=30 -4 --delete-excluded %s %s",
provider.username, provider.password, proxyAddr,
provider.upstreamURL, provider.WorkingDir(),
"--timeout=120 --contimeout=120 -6 %s %s",
provider.username, provider.password, provider.upstreamURL, provider.WorkingDir(),
),
)
err = provider.Run(make(chan empty, 1))
err = provider.Run()
So(err, ShouldBeNil)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
@ -208,144 +168,9 @@ exit 0
})
}
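As the expected command lines in this test show, the username, password, and proxy never appear as rsync arguments; they travel through the environment. A minimal sketch of that mechanism (URL and values invented for illustration; imports os and os/exec assumed):

cmd := exec.Command("rsync", "-aHvh", "rsync://example.org/pub/", "/srv/mirror/pub/")
cmd.Env = append(os.Environ(),
	"USER=tunasync",              // daemon-mode username
	"RSYNC_PASSWORD=secret",      // read by rsync for daemon authentication
	"RSYNC_PROXY=127.0.0.1:1233", // tunnel the connection through a proxy
)
err := cmd.Run() // credentials stay out of `ps` output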
func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
Convey("Rsync Provider with overridden options should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
rsyncNeverTimeout: true,
overriddenOptions: []string{"-aHvh", "--no-o", "--no-g", "--stats"},
extraOptions: []string{"--delete-excluded"},
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
So(provider.Name(), ShouldEqual, c.name)
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
Convey("Let's try a run", func() {
scriptContent := `#!/bin/bash
echo "syncing to $(pwd)"
echo $@
sleep 1
echo "Done"
exit 0
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"-aHvh --no-o --no-g --stats -6 --delete-excluded %s %s\n"+
"Done\n",
targetDir,
provider.upstreamURL,
provider.WorkingDir(),
)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
})
})
}
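The removed test pins down the option-assembly rule on the master side: overridden options replace the built-in set wholesale, while address-family flags and extras are still appended afterwards. In reduced form (variable names illustrative):

options := defaultOptions // "-aHvh --no-o --no-g --stats ..."
if c.overriddenOptions != nil {
	options = c.overriddenOptions // wholesale replacement
}
if c.useIPv6 {
	options = append(options, "-6")
}
options = append(options, c.extraOptions...) // extras always apply on top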
func TestRsyncProviderWithDocker(t *testing.T) {
Convey("Rsync in Docker should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
excludeFile := filepath.Join(tmpDir, "exclude.txt")
g := &Config{
Global: globalConfig{
Retry: 2,
},
Docker: dockerConfig{
Enable: true,
Volumes: []string{
scriptFile + ":/bin/myrsync",
"/etc/gai.conf:/etc/gai.conf:ro",
},
},
}
c := mirrorConfig{
Name: "tuna",
Provider: provRsync,
Upstream: "rsync://rsync.tuna.moe/tuna/",
Command: "/bin/myrsync",
ExcludeFile: excludeFile,
DockerImage: "alpine:3.8",
LogDir: tmpDir,
MirrorDir: tmpDir,
UseIPv6: true,
Timeout: 100,
Interval: 600,
}
provider := newMirrorProvider(c, g)
So(provider.Type(), ShouldEqual, provRsync)
So(provider.Name(), ShouldEqual, c.Name)
So(provider.WorkingDir(), ShouldEqual, c.MirrorDir)
So(provider.LogDir(), ShouldEqual, c.LogDir)
cmdScriptContent := `#!/bin/sh
#echo "$@"
while [[ $# -gt 0 ]]; do
if [[ "$1" = "--exclude-from" ]]; then
cat "$2"
shift
fi
shift
done
`
err = os.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = os.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
err = hook.preExec()
So(err, ShouldBeNil)
}
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
err = hook.postExec()
So(err, ShouldBeNil)
}
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, "__some_pattern")
})
}
func TestCmdProvider(t *testing.T) {
Convey("Command Provider should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -390,245 +215,71 @@ echo $AOSP_REPO_BIN
provider.LogFile(),
"/usr/local/bin/repo",
)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
readedScriptContent, err := ioutil.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
err = provider.Run()
So(err, ShouldBeNil)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
})
Convey("If a command fails", func() {
scriptContent := `exit 1`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
readedScriptContent, err := ioutil.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
err = provider.Run()
So(err, ShouldNotBeNil)
})
Convey("If a long job is killed", func(ctx C) {
scriptContent := `#!/bin/bash
sleep 10
sleep 5
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 1)
go func() {
err := provider.Run(started)
err = provider.Run()
ctx.So(err, ShouldNotBeNil)
}()
<-started
So(provider.IsRunning(), ShouldBeTrue)
time.Sleep(1 * time.Second)
err = provider.Terminate()
So(err, ShouldBeNil)
})
})
Convey("Command Provider without log file should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
c := cmdConfig{
name: "run-ls",
upstreamURL: "http://mirrors.tuna.moe/",
command: "ls",
workingDir: tmpDir,
logDir: tmpDir,
logFile: "/dev/null",
interval: 600 * time.Second,
}
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
So(provider.IsMaster(), ShouldEqual, false)
So(provider.ZFS(), ShouldBeNil)
So(provider.Type(), ShouldEqual, provCommand)
So(provider.Name(), ShouldEqual, c.name)
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
Convey("Run the command", func() {
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
})
Convey("Command Provider with RegExprs should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "run-uptime",
upstreamURL: "http://mirrors.tuna.moe/",
command: "uptime",
failOnMatch: "",
sizePattern: "",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
}
Convey("when fail-on-match regexp matches", func() {
c.failOnMatch = `[a-z]+`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
Convey("when fail-on-match regexp does not match", func() {
c.failOnMatch = `load average_`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
Convey("when fail-on-match regexp meets /dev/null", func() {
c.failOnMatch = `load average_`
c.logFile = "/dev/null"
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
Convey("when size-pattern regexp matches", func() {
c.sizePattern = `load average: ([\d\.]+)`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
So(provider.DataSize(), ShouldNotBeEmpty)
_, err = strconv.ParseFloat(provider.DataSize(), 32)
So(err, ShouldBeNil)
})
Convey("when size-pattern regexp does not match", func() {
c.sizePattern = `load ave: ([\d\.]+)`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
Convey("when size-pattern regexp meets /dev/null", func() {
c.sizePattern = `load ave: ([\d\.]+)`
c.logFile = "/dev/null"
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
})
Convey("Command Provider with successExitCodes should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "tuna-cmd",
upstreamURL: "http://mirrors.tuna.moe/",
command: "bash " + scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
}
provider, err := newCmdProvider(c)
provider.SetSuccessExitCodes([]int{199, 200})
So(err, ShouldBeNil)
So(provider.Type(), ShouldEqual, provCommand)
So(provider.Name(), ShouldEqual, c.name)
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.GetSuccessExitCodes(), ShouldResemble, []int{199, 200})
Convey("Command exits with configured successExitCodes", func() {
scriptContent := `exit 199`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
Convey("Command exits with unknown exit code", func() {
scriptContent := `exit 201`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
})
}
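The regexp cases above reduce to two checks applied to the captured log after a run. A hedged sketch, assuming logFile, failRe, and sizeRe are in scope; the /dev/null cases suggest the real provider additionally refuses to pair these checks with a discarded log, which is elided here:

content, err := os.ReadFile(logFile)
if err != nil {
	return err
}
if failRe != nil && failRe.Match(content) {
	return errors.New("fail-on-match regexp matched; sync treated as failed")
}
if sizeRe != nil {
	if m := sizeRe.FindSubmatch(content); m != nil {
		dataSize = string(m[1]) // first capture group becomes DataSize()
	}
}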
func TestTwoStageRsyncProvider(t *testing.T) {
Convey("TwoStageRsync Provider should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := twoStageRsyncConfig{
name: "tuna-two-stage-rsync",
upstreamURL: "rsync://mirrors.tuna.moe/",
stage1Profile: "debian",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
excludeFile: tmpFile,
rsyncTimeoutValue: 30,
extraOptions: []string{"--delete-excluded", "--cache"},
username: "hello",
password: "world",
name: "tuna-two-stage-rsync",
upstreamURL: "rsync://mirrors.tuna.moe/",
stage1Profile: "debian",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
excludeFile: tmpFile,
}
provider, err := newTwoStageRsyncProvider(c)
@ -649,13 +300,12 @@ sleep 1
echo "Done"
exit 0
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 2))
err = provider.Run()
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"%s\n"+
@ -663,23 +313,23 @@ exit 0
"syncing to %s\n"+
"%s\n"+
"Done\n",
targetDir,
provider.WorkingDir(),
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
"--include=*.diff/ --include=by-hash/ --exclude=*.diff/Index --exclude=Contents* --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --exclude=i18n/* --exclude=dep11/* --exclude=installer-*/current --exclude=ls-lR* --timeout=30 -6 "+
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
"--exclude-from %s %s %s",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
),
targetDir,
provider.WorkingDir(),
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--delete-excluded --cache --timeout=30 -6 --exclude-from %s %s %s",
"--timeout=120 --contimeout=120 -6 --exclude-from %s %s %s",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
),
)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
@ -688,65 +338,32 @@ exit 0
Convey("Try terminating", func(ctx C) {
scriptContent := `#!/bin/bash
echo $@
sleep 10
sleep 4
exit 0
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 2)
go func() {
err := provider.Run(started)
err = provider.Run()
ctx.So(err, ShouldNotBeNil)
}()
<-started
So(provider.IsRunning(), ShouldBeTrue)
time.Sleep(1 * time.Second)
err = provider.Terminate()
So(err, ShouldBeNil)
expectedOutput := fmt.Sprintf(
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
"--include=*.diff/ --include=by-hash/ --exclude=*.diff/Index --exclude=Contents* --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --exclude=i18n/* --exclude=dep11/* --exclude=installer-*/current --exclude=ls-lR* --timeout=30 -6 "+
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
"--exclude-from %s %s %s\n",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
)
loggedContent, err := os.ReadFile(provider.LogFile())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldStartWith, expectedOutput)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
})
})
Convey("If the rsync program fails", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
Convey("in the twoStageRsyncProvider", func() {
c := twoStageRsyncConfig{
name: "tuna-two-stage-rsync",
upstreamURL: "rsync://0.0.0.1/",
stage1Profile: "debian",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
excludeFile: tmpFile,
}
provider, err := newTwoStageRsyncProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 2))
So(err, ShouldNotBeNil)
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Error in socket I/O")
})
})
}
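For orientation, the scheme this test encodes: stage 1 transfers with repository metadata excluded (the debian profile) so package files land first, then stage 2 re-runs with deletion enabled so indices never reference missing files. A sketch under those assumptions, with runRsync a hypothetical helper:

base := []string{"-aHvh", "--no-o", "--no-g", "--stats", "--safe-links"}
stage1 := append(append([]string{}, base...), "--exclude", "dists/")
stage2 := append(append([]string{}, base...),
	"--delete", "--delete-after", "--delay-updates")
for _, opts := range [][]string{stage1, stage2} {
	if err := runRsync(upstream, workingDir, opts); err != nil {
		return err // a failed stage aborts the whole sync
	}
}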

View File

@ -2,37 +2,24 @@ package worker
import (
"errors"
"fmt"
"strings"
"time"
"github.com/tuna/tunasync/internal"
)
type rsyncConfig struct {
name string
rsyncCmd string
upstreamURL, username, password, excludeFile string
extraOptions []string
globalOptions []string
overriddenOptions []string
useOverrideOnly bool
rsyncNeverTimeout bool
rsyncTimeoutValue int
rsyncEnv map[string]string
workingDir, logDir, logFile string
useIPv6, useIPv4 bool
useIPv6 bool
interval time.Duration
retry int
timeout time.Duration
}
// An RsyncProvider provides the implementation to rsync-based syncing jobs
type rsyncProvider struct {
baseProvider
rsyncConfig
options []string
dataSize string
options []string
}
func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
@ -40,16 +27,11 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
if !strings.HasSuffix(c.upstreamURL, "/") {
return nil, errors.New("rsync upstream URL should ends with /")
}
if c.retry == 0 {
c.retry = defaultMaxRetry
}
provider := &rsyncProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
rsyncConfig: c,
}
@ -57,56 +39,20 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
if c.rsyncCmd == "" {
provider.rsyncCmd = "rsync"
}
if c.rsyncEnv == nil {
provider.rsyncEnv = map[string]string{}
}
if c.username != "" {
provider.rsyncEnv["USER"] = c.username
}
if c.password != "" {
provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
}
options := []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
"--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links",
}
if c.overriddenOptions != nil {
options = c.overriddenOptions
"--safe-links", "--timeout=120", "--contimeout=120",
}
if c.useOverrideOnly {
if c.overriddenOptions == nil {
return nil, errors.New("rsync_override_only is set but no rsync_override provided")
}
// use overridden options only
} else {
if !c.rsyncNeverTimeout {
timeo := 120
if c.rsyncTimeoutValue > 0 {
timeo = c.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}
if c.useIPv6 {
options = append(options, "-6")
}
if c.useIPv6 {
options = append(options, "-6")
} else if c.useIPv4 {
options = append(options, "-4")
}
if c.excludeFile != "" {
options = append(options, "--exclude-from", c.excludeFile)
}
if c.globalOptions != nil {
options = append(options, c.globalOptions...)
}
if c.extraOptions != nil {
options = append(options, c.extraOptions...)
}
if c.excludeFile != "" {
options = append(options, "--exclude-from", c.excludeFile)
}
provider.options = options
@ -125,45 +71,28 @@ func (p *rsyncProvider) Upstream() string {
return p.upstreamURL
}
func (p *rsyncProvider) DataSize() string {
return p.dataSize
}
func (p *rsyncProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
func (p *rsyncProvider) Run() error {
if err := p.Start(); err != nil {
return err
}
started <- empty{}
if err := p.Wait(); err != nil {
code, msg := internal.TranslateRsyncErrorCode(err)
if code != 0 {
logger.Debug("Rsync exitcode %d (%s)", code, msg)
if p.logFileFd != nil {
p.logFileFd.WriteString(msg + "\n")
}
}
return err
}
p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
return nil
return p.Wait()
}
func (p *rsyncProvider) Start() error {
p.Lock()
defer p.Unlock()
if p.IsRunning() {
return errors.New("provider is currently running")
env := map[string]string{}
if p.username != "" {
env["USER"] = p.username
}
if p.password != "" {
env["RSYNC_PASSWORD"] = p.password
}
command := []string{p.rsyncCmd}
command = append(command, p.options...)
command = append(command, p.upstreamURL, p.WorkingDir())
p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
if err := p.prepareLogFile(false); err != nil {
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
if err := p.prepareLogFile(); err != nil {
return err
}
@ -171,6 +100,5 @@ func (p *rsyncProvider) Start() error {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}
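The dataSize plumbing on the master side of this hunk is fed by internal.ExtractSizeFromRsyncLog after a successful run. A plausible reduction of that helper (the real implementation may differ; imports regexp and os assumed), keyed to the "Total file size: 1.33T bytes" line asserted in the tests above:

var sizeRe = regexp.MustCompile(`Total file size: ([0-9.]+[KMGT]?) bytes`)

func extractSize(logFile string) string {
	content, err := os.ReadFile(logFile)
	if err != nil {
		return ""
	}
	if m := sizeRe.FindSubmatch(content); m != nil {
		return string(m[1]) // e.g. "1.33T"
	}
	return ""
}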

View File

@ -5,15 +5,12 @@ import (
"fmt"
"os"
"os/exec"
"slices"
"strings"
"sync"
"syscall"
"time"
"github.com/codeskyblue/go-sh"
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
"github.com/moby/sys/reexec"
"golang.org/x/sys/unix"
)
@ -59,10 +56,6 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
kv := fmt.Sprintf("%s=%s", k, v)
args = append(args, "-e", kv)
}
// set memlimit
if d.memoryLimit != 0 {
args = append(args, "-m", fmt.Sprint(d.memoryLimit.Value()))
}
// apply options
args = append(args, d.options...)
// apply image and command
@ -73,7 +66,10 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
cmd = exec.Command(c, args...)
} else if provider.Cgroup() != nil {
cmd = reexec.Command(append([]string{"tunasync-exec"}, cmdAndArgs...)...)
c := "cgexec"
args := []string{"-g", provider.Cgroup().Cgroup()}
args = append(args, cmdAndArgs...)
cmd = exec.Command(c, args...)
} else {
if len(cmdAndArgs) == 1 {
@ -108,59 +104,9 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
}
func (c *cmdJob) Start() error {
cg := c.provider.Cgroup()
var (
pipeR *os.File
pipeW *os.File
)
if cg != nil {
logger.Debugf("Preparing cgroup sync pipes for job %s", c.provider.Name())
var err error
pipeR, pipeW, err = os.Pipe()
if err != nil {
return err
}
c.cmd.ExtraFiles = []*os.File{pipeR}
defer pipeR.Close()
defer pipeW.Close()
}
logger.Debugf("Command start: %v", c.cmd.Args)
// logger.Debugf("Command start: %v", c.cmd.Args)
c.finished = make(chan empty, 1)
if err := c.cmd.Start(); err != nil {
return err
}
if cg != nil {
if err := pipeR.Close(); err != nil {
return err
}
if c.cmd == nil || c.cmd.Process == nil {
return errProcessNotStarted
}
pid := c.cmd.Process.Pid
if cg.cgCfg.isUnified {
if err := cg.cgMgrV2.AddProc(uint64(pid)); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring")
} else {
return err
}
}
} else {
if err := cg.cgMgrV1.Add(cgv1.Process{Pid: pid}); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring")
} else {
return err
}
}
}
if _, err := pipeW.WriteString(string(cmdCont)); err != nil {
return err
}
}
return nil
return c.cmd.Start()
}
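The master-side Start above implements a pipe handshake: the reexec'ed child blocks on the inherited pipe until the parent has written its pid into the cgroup, then receives a continue token. A hedged sketch of the child half (token values such as "cont"/"abrt" are assumptions; the actual init hook may differ):

pipe := os.NewFile(3, "sync-pipe") // fd 3 is the inherited read end
buf := make([]byte, 16)
n, err := pipe.Read(buf)
if err != nil || string(buf[:n]) != "cont" {
	os.Exit(1) // parent aborted: cgroup placement failed
}
// ...now exec the wrapped command, with the pid already in the cgroup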
func (c *cmdJob) Wait() error {
@ -172,18 +118,9 @@ func (c *cmdJob) Wait() error {
return c.retErr
default:
err := c.cmd.Wait()
c.retErr = err
close(c.finished)
if err != nil {
code := err.(*exec.ExitError).ExitCode()
allowedCodes := c.provider.GetSuccessExitCodes()
if slices.Contains(allowedCodes, code) {
// non-zero exit code is on the allow list: treat the run as success
logger.Infof("Command %s exited with code %d: treated as success (allowed: %v)", c.cmd.Args, code, allowedCodes)
} else {
c.retErr = err
}
}
return c.retErr
return err
}
}
@ -212,10 +149,10 @@ func (c *cmdJob) Terminate() error {
select {
case <-time.After(2 * time.Second):
unix.Kill(c.cmd.Process.Pid, syscall.SIGKILL)
logger.Warningf("SIGTERM failed to kill the job in 2s. SIGKILL sent")
return errors.New("SIGTERM failed to kill the job")
case <-c.finished:
return nil
}
return nil
}
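Both sides of this hunk implement the same escalating shutdown; they differ only in reporting, and the master side returns nil after the SIGKILL since the process is then guaranteed dead. The pattern in isolation, with p an *os.Process, done a channel closed by the reaping goroutine, and grace a time.Duration:

p.Signal(syscall.SIGTERM) // polite request first
select {
case <-done:
	return nil // exited within the grace period
case <-time.After(grace):
	_ = p.Kill() // SIGKILL cannot be caught or ignored
	return nil
}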
// Copied from go-sh

View File

@ -15,11 +15,6 @@ type scheduleQueue struct {
jobs map[string]bool
}
type jobScheduleInfo struct {
jobName string
nextScheduled time.Time
}
func timeLessThan(l, r interface{}) bool {
tl := l.(time.Time)
tr := r.(time.Time)
@ -33,20 +28,6 @@ func newScheduleQueue() *scheduleQueue {
return queue
}
func (q *scheduleQueue) GetJobs() (jobs []jobScheduleInfo) {
cur := q.list.Iterator()
defer cur.Close()
for cur.Next() {
cj := cur.Value().(*mirrorJob)
jobs = append(jobs, jobScheduleInfo{
cj.Name(),
cur.Key().(time.Time),
})
}
return
}
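jobScheduleInfo and GetJobs exist on the master side so the queue contents can be reported to the manager (see updateSchedInfo in the worker diff below). The queue's basic contract as a usage sketch; the 5-second cadence and Pop come from runSchedule further down:

q.AddJob(time.Now().Add(job.provider.Interval()), job)
ticker := time.NewTicker(5 * time.Second)
for range ticker.C {
	if job := q.Pop(); job != nil { // due jobs come back in time order
		job.ctrlChan <- jobStart
	}
}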
func (q *scheduleQueue) AddJob(schedTime time.Time, job *mirrorJob) {
q.Lock()
defer q.Unlock()

View File

@ -5,8 +5,6 @@ import (
"fmt"
"strings"
"time"
"github.com/tuna/tunasync/internal"
)
type twoStageRsyncConfig struct {
@ -14,15 +12,9 @@ type twoStageRsyncConfig struct {
rsyncCmd string
stage1Profile string
upstreamURL, username, password, excludeFile string
extraOptions []string
rsyncNeverTimeout bool
rsyncTimeoutValue int
rsyncEnv map[string]string
workingDir, logDir, logFile string
useIPv6, useIPv4 bool
useIPv6 bool
interval time.Duration
retry int
timeout time.Duration
}
// An RsyncProvider provides the implementation to rsync-based syncing jobs
@ -31,28 +23,13 @@ type twoStageRsyncProvider struct {
twoStageRsyncConfig
stage1Options []string
stage2Options []string
dataSize string
}
// ref: https://salsa.debian.org/mirror-team/archvsync/-/blob/master/bin/ftpsync#L431
var rsyncStage1Profiles = map[string]([]string){
"debian": []string{
"--include=*.diff/",
"--include=by-hash/",
"--exclude=*.diff/Index",
"--exclude=Contents*",
"--exclude=Packages*",
"--exclude=Sources*",
"--exclude=Release*",
"--exclude=InRelease",
"--exclude=i18n/*",
"--exclude=dep11/*",
"--exclude=installer-*/current",
"--exclude=ls-lR*",
},
"debian": []string{"dists/"},
"debian-oldstyle": []string{
"--exclude=Packages*", "--exclude=Sources*", "--exclude=Release*",
"--exclude=InRelease", "--exclude=i18n/*", "--exclude=ls-lR*", "--exclude=dep11/*",
"Packages*", "Sources*", "Release*",
"InRelease", "i18n/*", "ls-lR*", "dep11/*",
},
}
@ -61,41 +38,27 @@ func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, er
if !strings.HasSuffix(c.upstreamURL, "/") {
return nil, errors.New("rsync upstream URL should ends with /")
}
if c.retry == 0 {
c.retry = defaultMaxRetry
}
provider := &twoStageRsyncProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
twoStageRsyncConfig: c,
stage1Options: []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
"--safe-links",
"--exclude", ".~tmp~/",
"--safe-links", "--timeout=120", "--contimeout=120",
},
stage2Options: []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
"--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links",
"--safe-links", "--timeout=120", "--contimeout=120",
},
}
if c.rsyncEnv == nil {
provider.rsyncEnv = map[string]string{}
}
if c.username != "" {
provider.rsyncEnv["USER"] = c.username
}
if c.password != "" {
provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
}
if c.rsyncCmd == "" {
provider.rsyncCmd = "rsync"
}
@ -115,41 +78,26 @@ func (p *twoStageRsyncProvider) Upstream() string {
return p.upstreamURL
}
func (p *twoStageRsyncProvider) DataSize() string {
return p.dataSize
}
func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
var options []string
if stage == 1 {
options = append(options, p.stage1Options...)
stage1Profile, ok := rsyncStage1Profiles[p.stage1Profile]
stage1Excludes, ok := rsyncStage1Profiles[p.stage1Profile]
if !ok {
return nil, errors.New("Invalid Stage 1 Profile")
}
options = append(options, stage1Profile...)
for _, exc := range stage1Excludes {
options = append(options, "--exclude", exc)
}
} else if stage == 2 {
options = append(options, p.stage2Options...)
if p.extraOptions != nil {
options = append(options, p.extraOptions...)
}
} else {
return []string{}, fmt.Errorf("Invalid stage: %d", stage)
}
if !p.rsyncNeverTimeout {
timeo := 120
if p.rsyncTimeoutValue > 0 {
timeo = p.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}
if p.useIPv6 {
options = append(options, "-6")
} else if p.useIPv4 {
options = append(options, "-4")
}
if p.excludeFile != "" {
@ -159,15 +107,17 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
return options, nil
}
func (p *twoStageRsyncProvider) Run(started chan empty) error {
p.Lock()
defer p.Unlock()
func (p *twoStageRsyncProvider) Run() error {
defer p.Wait()
if p.IsRunning() {
return errors.New("provider is currently running")
env := map[string]string{}
if p.username != "" {
env["USER"] = p.username
}
if p.password != "" {
env["RSYNC_PASSWORD"] = p.password
}
p.dataSize = ""
stages := []int{1, 2}
for _, stage := range stages {
command := []string{p.rsyncCmd}
@ -178,33 +128,21 @@ func (p *twoStageRsyncProvider) Run(started chan empty) error {
command = append(command, options...)
command = append(command, p.upstreamURL, p.WorkingDir())
p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
if err := p.prepareLogFile(stage > 1); err != nil {
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
if err := p.prepareLogFile(); err != nil {
return err
}
defer p.closeLogFile()
if err = p.cmd.Start(); err != nil {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
started <- empty{}
p.Unlock()
err = p.Wait()
p.Lock()
err = p.cmd.Wait()
p.isRunning.Store(false)
if err != nil {
code, msg := internal.TranslateRsyncErrorCode(err)
if code != 0 {
logger.Debug("Rsync exitcode %d (%s)", code, msg)
if p.logFileFd != nil {
p.logFileFd.WriteString(msg + "\n")
}
}
return err
}
}
p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
return nil
}

View File

@ -12,6 +12,8 @@ import (
. "github.com/tuna/tunasync/internal"
)
var tunasyncWorker *Worker
// A Worker is an instance of a tunasync worker
type Worker struct {
L sync.Mutex
@ -27,11 +29,10 @@ type Worker struct {
httpClient *http.Client
}
// NewTUNASyncWorker creates a worker
func NewTUNASyncWorker(cfg *Config) *Worker {
if cfg.Global.Retry == 0 {
cfg.Global.Retry = defaultMaxRetry
// GetTUNASyncWorker returns a singleton worker
func GetTUNASyncWorker(cfg *Config) *Worker {
if tunasyncWorker != nil {
return tunasyncWorker
}
w := &Worker{
@ -54,20 +55,15 @@ func NewTUNASyncWorker(cfg *Config) *Worker {
w.httpClient = httpClient
}
if cfg.Cgroup.Enable {
if err := initCgroup(&cfg.Cgroup); err != nil {
logger.Errorf("Error initializing Cgroup: %s", err.Error())
return nil
}
}
w.initJobs()
w.makeHTTPServer()
tunasyncWorker = w
return w
}
// Run runs worker forever
func (w *Worker) Run() {
w.registerWorker()
w.registorWorker()
go w.runHTTPServer()
w.runSchedule()
}
@ -223,11 +219,7 @@ func (w *Worker) makeHTTPServer() {
}
switch cmd.Cmd {
case CmdStart:
if cmd.Options["force"] {
job.ctrlChan <- jobForceStart
} else {
job.ctrlChan <- jobStart
}
job.ctrlChan <- jobStart
case CmdRestart:
job.ctrlChan <- jobRestart
case CmdStop:
@ -314,10 +306,7 @@ func (w *Worker) runSchedule() {
w.L.Unlock()
schedInfo := w.schedule.GetJobs()
w.updateSchedInfo(schedInfo)
tick := time.NewTicker(5 * time.Second).C
tick := time.Tick(5 * time.Second)
for {
select {
case jobMsg := <-w.managerChan:
@ -353,9 +342,6 @@ func (w *Worker) runSchedule() {
w.schedule.AddJob(schedTime, job)
}
schedInfo = w.schedule.GetJobs()
w.updateSchedInfo(schedInfo)
case <-tick:
// check schedule every 5 seconds
if job := w.schedule.Pop(); job != nil {
@ -399,7 +385,7 @@ func (w *Worker) URL() string {
return fmt.Sprintf("%s://%s:%d/", proto, w.cfg.Server.Hostname, w.cfg.Server.Port)
}
func (w *Worker) registerWorker() {
func (w *Worker) registorWorker() {
msg := WorkerStatus{
ID: w.Name(),
URL: w.URL(),
@ -408,17 +394,8 @@ func (w *Worker) registerWorker() {
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf("%s/workers", root)
logger.Debugf("register on manager url: %s", url)
for retry := 10; retry > 0; {
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to register worker")
retry--
if retry > 0 {
time.Sleep(1 * time.Second)
logger.Noticef("Retrying... (%d)", retry)
}
} else {
break
}
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to register worker")
}
}
}
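The master side hardens registration with a bounded retry, useful when the manager comes up after the worker. Reduced to its shape (url and msg as above; the original skips the sleep after the final failure):

for retry := 10; retry > 0; retry-- {
	if _, err := PostJSON(url, msg, w.httpClient); err == nil {
		break // registered
	}
	logger.Errorf("Failed to register worker")
	time.Sleep(1 * time.Second) // back off before the next attempt
}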
@ -435,12 +412,6 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
ErrorMsg: jobMsg.msg,
}
// Certain Providers (rsync for example) may know the size of mirror,
// so we report it to Manager here
if len(job.size) != 0 {
smsg.Size = job.size
}
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf(
"%s/workers/%s/jobs/%s", root, w.Name(), jobMsg.name,
@ -452,27 +423,6 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
}
}
func (w *Worker) updateSchedInfo(schedInfo []jobScheduleInfo) {
var s []MirrorSchedule
for _, sched := range schedInfo {
s = append(s, MirrorSchedule{
MirrorName: sched.jobName,
NextSchedule: sched.nextScheduled,
})
}
msg := MirrorSchedules{Schedules: s}
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf(
"%s/workers/%s/schedules", root, w.Name(),
)
logger.Debugf("reporting on manager url: %s", url)
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to upload schedules: %s", err.Error())
}
}
}
func (w *Worker) fetchJobStatus() []MirrorStatus {
var mirrorList []MirrorStatus
apiBase := w.cfg.Manager.APIBaseList()[0]

View File

@ -1,256 +0,0 @@
package worker
import (
"net/http"
"strconv"
"testing"
"time"
"github.com/gin-gonic/gin"
. "github.com/smartystreets/goconvey/convey"
. "github.com/tuna/tunasync/internal"
)
type workTestFunc func(*Worker)
var managerPort = 5001
var workerPort = 5002
func makeMockManagerServer(recvData chan interface{}) *gin.Engine {
r := gin.Default()
r.GET("/ping", func(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"_infoKey": "pong"})
})
r.POST("/workers", func(c *gin.Context) {
var _worker WorkerStatus
c.BindJSON(&_worker)
_worker.LastOnline = time.Now()
_worker.LastRegister = time.Now()
recvData <- _worker
c.JSON(http.StatusOK, _worker)
})
r.POST("/workers/dut/schedules", func(c *gin.Context) {
var _sch MirrorSchedules
c.BindJSON(&_sch)
recvData <- _sch
c.JSON(http.StatusOK, empty{})
})
r.POST("/workers/dut/jobs/:job", func(c *gin.Context) {
var status MirrorStatus
c.BindJSON(&status)
recvData <- status
c.JSON(http.StatusOK, status)
})
r.GET("/workers/dut/jobs", func(c *gin.Context) {
mirrorStatusList := []MirrorStatus{}
c.JSON(http.StatusOK, mirrorStatusList)
})
return r
}
func startWorkerThenStop(cfg *Config, tester workTestFunc) {
exitedChan := make(chan int)
w := NewTUNASyncWorker(cfg)
So(w, ShouldNotBeNil)
go func() {
w.Run()
exitedChan <- 1
}()
tester(w)
w.Halt()
select {
case exited := <-exitedChan:
So(exited, ShouldEqual, 1)
case <-time.After(2 * time.Second):
So(0, ShouldEqual, 1)
}
}
func sendCommandToWorker(workerURL string, httpClient *http.Client, cmd CmdVerb, mirror string) {
workerCmd := WorkerCmd{
Cmd: cmd,
MirrorID: mirror,
}
logger.Debugf("POST to %s with cmd %s", workerURL, cmd)
_, err := PostJSON(workerURL, workerCmd, httpClient)
So(err, ShouldBeNil)
}
func TestWorker(t *testing.T) {
InitLogger(false, true, false)
recvDataChan := make(chan interface{})
_s := makeMockManagerServer(recvDataChan)
httpServer := &http.Server{
Addr: "localhost:" + strconv.Itoa(managerPort),
Handler: _s,
ReadTimeout: 2 * time.Second,
WriteTimeout: 2 * time.Second,
}
go func() {
err := httpServer.ListenAndServe()
So(err, ShouldBeNil)
}()
// Wait for http server starting
time.Sleep(500 * time.Millisecond)
Convey("Worker should work", t, func(ctx C) {
httpClient, err := CreateHTTPClient("")
So(err, ShouldBeNil)
workerPort++
workerCfg := Config{
Global: globalConfig{
Name: "dut",
LogDir: "/tmp",
MirrorDir: "/tmp",
Concurrent: 2,
Interval: 1,
},
Server: serverConfig{
Hostname: "localhost",
Addr: "127.0.0.1",
Port: workerPort,
},
Manager: managerConfig{
APIBase: "http://localhost:" + strconv.Itoa(managerPort),
},
}
logger.Debugf("worker port %d", workerPort)
Convey("with no job", func(ctx C) {
dummyTester := func(*Worker) {
registered := false
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
registered = true
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(reg.URL, httpClient, CmdStart, "foobar")
} else if sch, ok := data.(MirrorSchedules); ok {
So(len(sch.Schedules), ShouldEqual, 0)
}
case <-time.After(2 * time.Second):
So(registered, ShouldBeTrue)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
Convey("with one job", func(ctx C) {
workerCfg.Mirrors = []mirrorConfig{
{
Name: "job-ls",
Provider: provCommand,
Command: "ls",
},
}
dummyTester := func(*Worker) {
url := ""
jobRunning := false
lastStatus := SyncStatus(None)
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
url = reg.URL
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(url, httpClient, CmdStart, "job-ls")
} else if sch, ok := data.(MirrorSchedules); ok {
if !jobRunning {
So(len(sch.Schedules), ShouldEqual, 1)
So(sch.Schedules[0].MirrorName, ShouldEqual, "job-ls")
So(sch.Schedules[0].NextSchedule,
ShouldHappenBetween,
time.Now().Add(-2*time.Second),
time.Now().Add(1*time.Minute))
}
} else if status, ok := data.(MirrorStatus); ok {
logger.Noticef("Job %s status %s", status.Name, status.Status.String())
jobRunning = status.Status == PreSyncing || status.Status == Syncing
So(status.Status, ShouldNotEqual, Failed)
lastStatus = status.Status
}
case <-time.After(2 * time.Second):
So(url, ShouldNotEqual, "")
So(jobRunning, ShouldBeFalse)
So(lastStatus, ShouldEqual, Success)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
Convey("with several jobs", func(ctx C) {
workerCfg.Mirrors = []mirrorConfig{
{
Name: "job-ls-1",
Provider: provCommand,
Command: "ls",
},
{
Name: "job-fail",
Provider: provCommand,
Command: "non-existent-command-xxxx",
},
{
Name: "job-ls-2",
Provider: provCommand,
Command: "ls",
},
}
dummyTester := func(*Worker) {
url := ""
lastStatus := make(map[string]SyncStatus)
nextSch := make(map[string]time.Time)
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
url = reg.URL
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(url, httpClient, CmdStart, "job-fail")
sendCommandToWorker(url, httpClient, CmdStart, "job-ls-1")
sendCommandToWorker(url, httpClient, CmdStart, "job-ls-2")
} else if sch, ok := data.(MirrorSchedules); ok {
//So(len(sch.Schedules), ShouldEqual, 3)
for _, item := range sch.Schedules {
nextSch[item.MirrorName] = item.NextSchedule
}
} else if status, ok := data.(MirrorStatus); ok {
logger.Noticef("Job %s status %s", status.Name, status.Status.String())
jobRunning := status.Status == PreSyncing || status.Status == Syncing
if !jobRunning {
if status.Name == "job-fail" {
So(status.Status, ShouldEqual, Failed)
} else {
So(status.Status, ShouldNotEqual, Failed)
}
}
lastStatus[status.Name] = status.Status
}
case <-time.After(2 * time.Second):
So(len(lastStatus), ShouldEqual, 3)
So(len(nextSch), ShouldEqual, 3)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
})
}

View File

@ -3,7 +3,6 @@ package worker
import (
"fmt"
"os"
"os/user"
"strings"
"github.com/codeskyblue/go-sh"
@ -11,44 +10,36 @@ import (
type zfsHook struct {
emptyHook
zpool string
provider mirrorProvider
zpool string
}
func newZfsHook(provider mirrorProvider, zpool string) *zfsHook {
return &zfsHook{
emptyHook: emptyHook{
provider: provider,
},
zpool: zpool,
provider: provider,
zpool: zpool,
}
}
func (z *zfsHook) printHelpMessage() {
zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
zfsDataset = strings.ToLower(zfsDataset)
workingDir := z.provider.WorkingDir()
logger.Infof("You may create the ZFS dataset with:")
logger.Infof(" zfs create '%s'", zfsDataset)
logger.Infof(" zfs set mountpoint='%s' '%s'", workingDir, zfsDataset)
usr, err := user.Current()
if err != nil || usr.Uid == "0" {
return
}
logger.Infof(" chown %s '%s'", usr.Uid, workingDir)
}
// check if working directory is a zfs dataset
// create zfs dataset for a new mirror
func (z *zfsHook) preJob() error {
workingDir := z.provider.WorkingDir()
if _, err := os.Stat(workingDir); os.IsNotExist(err) {
logger.Errorf("Directory %s doesn't exist", workingDir)
z.printHelpMessage()
return err
}
if err := sh.Command("mountpoint", "-q", workingDir).Run(); err != nil {
logger.Errorf("%s is not a mount point", workingDir)
z.printHelpMessage()
return err
// sudo zfs create $zfsDataset
// sudo zfs set mountpoint=${absPath} ${zfsDataset}
zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
// Unknown issue of ZFS:
// dataset name should not contain upper case letters
zfsDataset = strings.ToLower(zfsDataset)
logger.Infof("Creating ZFS dataset %s", zfsDataset)
if err := sh.Command("sudo", "zfs", "create", zfsDataset).Run(); err != nil {
return err
}
logger.Infof("Mount ZFS dataset %s to %s", zfsDataset, workingDir)
if err := sh.Command("sudo", "zfs", "set", "mountpoint="+workingDir, zfsDataset).Run(); err != nil {
return err
}
}
return nil
}
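The master-side hook is deliberately passive: rather than creating datasets with sudo as the older code does, it only verifies that the directory exists and is a mount point, printing the zfs commands for the operator to run. The guard in isolation (workingDir assumed; stdlib os/exec here instead of go-sh):

if _, err := os.Stat(workingDir); os.IsNotExist(err) {
	return err // directory missing: the dataset was never created
}
if err := exec.Command("mountpoint", "-q", workingDir).Run(); err != nil {
	return fmt.Errorf("%s is not a mount point", workingDir)
}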

View File

@ -1,47 +0,0 @@
package worker
import (
"os"
"path/filepath"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestZFSHook(t *testing.T) {
Convey("ZFS Hook should work", t, func(ctx C) {
tmpDir, _ := os.MkdirTemp("", "tunasync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "tuna_zfs_hook_test",
upstreamURL: "http://mirrors.tuna.moe/",
command: "ls",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
}
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
Convey("When working directory doesn't exist", func(ctx C) {
errRm := os.RemoveAll(tmpDir)
So(errRm, ShouldBeNil)
hook := newZfsHook(provider, "test_pool")
err := hook.preJob()
So(err, ShouldNotBeNil)
})
Convey("When working directory is not a mount point", func(ctx C) {
defer os.RemoveAll(tmpDir)
hook := newZfsHook(provider, "test_pool")
err := hook.preJob()
So(err, ShouldNotBeNil)
})
})
}