Compare commits


No commits in common. "master" and "v0.6.2" have entirely different histories.

60 changed files with 990 additions and 4108 deletions


@@ -1,38 +1,54 @@
 name: release
 on:
   push:
+    # Sequence of patterns matched against refs/tags
     tags:
-      - 'v*'
-  workflow_dispatch:
+      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
 jobs:
   build:
     name: Build
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     steps:
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
-      - name: Set up Go
-        uses: actions/setup-go@v5
+      - name: Set up Go 1.13
+        uses: actions/setup-go@v1
         with:
-          go-version: '^1.23'
+          go-version: 1.13
        id: go
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
+      - name: Get dependencies
+        run: |
+          go get -v -t -d ./cmd/tunasync
+          go get -v -t -d ./cmd/tunasynctl
       - name: Build
         run: |
-          TAG=$(git describe --tags)
-          for i in linux-amd64 linux-arm64 linux-riscv64 linux-loong64; do
-            make ARCH=$i all
-            tar -cz --numeric-owner --owner root --group root -f tunasync-${TAG}-$i-bin.tar.gz -C build-$i tunasync tunasynctl
-          done
+          make tunasync
+          make tunasynctl
+          tar -jcf build/tunasync-linux-bin.tar.bz2 -C build tunasync tunasynctl
       - name: Create Release
-        uses: softprops/action-gh-release@v2
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          tag_name: ${{ github.ref_name }}
-          name: Release ${{ github.ref_name }}
-          prerelease: false
-          files: |
-            tunasync-*.tar.gz
+          tag_name: ${{ github.ref }}
+          release_name: Release ${{ github.ref }}
+          draft: false
+          prerelease: false
+      - name: Upload Release Asset
+        id: upload-release-asset
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./build/tunasync-linux-bin.tar.bz2
+          asset_name: tunasync-linux-bin.tar.bz2
+          asset_content_type: application/x-bzip2


@@ -1,11 +1,6 @@
 name: tunasync
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    branches: [ master ]
-  workflow_dispatch:
+on: [push]
 jobs:
@@ -14,15 +9,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
-      - name: Set up Go
-        uses: actions/setup-go@v5
+      - name: Set up Go 1.13
+        uses: actions/setup-go@v1
         with:
-          go-version: '^1.23'
+          go-version: 1.13
        id: go
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
       - name: Get dependencies
         run: |
           go get -v -t -d ./cmd/tunasync
@@ -34,214 +29,45 @@ jobs:
           make tunasynctl
       - name: Keep artifacts
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v1
         with:
           name: tunasync-bin
-          path: build-linux-amd64/
+          path: build/
   test:
     name: Test
     runs-on: ubuntu-latest
-    services:
-      registry:
-        image: registry:2
-        ports:
-          - 5000:5000
     steps:
       - name: Setup test dependencies
         run: |
           sudo apt-get update
-          sudo apt-get install -y cgroup-tools
+          sudo apt-get install -y cgroup-bin
           docker pull alpine:3.8
+          lssubsys -am
+          sudo cgcreate -a $USER -t $USER -g cpu:tunasync
+          sudo cgcreate -a $USER -t $USER -g memory:tunasync
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
-      - name: Set up Go
-        uses: actions/setup-go@v5
+      - name: Set up Go 1.13
+        uses: actions/setup-go@v1
         with:
-          go-version: '^1.22'
+          go-version: 1.13
        id: go
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
       - name: Run Unit tests.
-        run: |
-          go install github.com/wadey/gocovmerge@latest
-          sudo systemd-run --service-type=oneshot --uid="$(id --user)" --pipe --wait \
-            --property=Delegate=yes --setenv=USECURCGROUP=1 \
-            --setenv=TERM=xterm-256color --same-dir \
-            make test
+        run: make test
-      - name: Run Additional Unit tests.
-        run: |
-          make build-test-worker
-          sudo mkdir /sys/fs/cgroup/tunasync
-          sudo ./worker.test -test.v=true -test.coverprofile profile2.gcov -test.run TestCgroup
-          sudo rmdir /sys/fs/cgroup/tunasync
-          touch /tmp/dummy_exec
-          chmod +x /tmp/dummy_exec
-          run_test_reexec (){
-            case="$1"
-            shift
-            argv0="$1"
-            shift
-            (TESTREEXEC="$case" TERM=xterm-256color exec -a "$argv0" ./worker.test -test.v=true -test.coverprofile "profile5_$case.gcov" -test.run TestReexec -- "$@")
-          }
-          run_test_reexec 1 tunasync-exec __dummy__
-          run_test_reexec 2 tunasync-exec /tmp/dummy_exec
-          run_test_reexec 3 tunasync-exec /tmp/dummy_exec 3< <(echo -n "abrt")
-          run_test_reexec 4 tunasync-exec /tmp/dummy_exec 3< <(echo -n "cont")
-          run_test_reexec 5 tunasync-exec2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-        with:
-          driver-opts: network=host
-      - name: Cache Docker layers
-        uses: actions/cache@v4
-        if: github.event_name == 'push'
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
-      - name: Cache Docker layers
-        uses: actions/cache@v4
-        if: github.event_name == 'pull_request'
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-
-            ${{ runner.os }}-buildx-
-      - name: Cache Docker layers
-        if: github.event_name != 'push' && github.event_name != 'pull_request'
-        run: |
-          echo "I do not know how to setup cache"
-          exit -1
-      - name: Prepare cache directory
-        run: |
-          mkdir -p /tmp/.buildx-cache
-      - name: Build Docker image for uml rootfs
-        uses: docker/build-push-action@v6
-        with:
-          context: .umlrootfs
-          file: .umlrootfs/Dockerfile
-          push: true
-          tags: localhost:5000/umlrootfs
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
-      - name: Fetch and install uml package
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y debian-archive-keyring
-          sudo ln -sf /usr/share/keyrings/debian-archive-keyring.gpg /etc/apt/trusted.gpg.d/
-          echo "deb http://deb.debian.org/debian bullseye main" | sudo tee /etc/apt/sources.list.d/bullseye.list
-          sudo apt-get update
-          apt-get download user-mode-linux/bullseye
-          sudo rm /etc/apt/sources.list.d/bullseye.list
-          sudo apt-get update
-          sudo mv user-mode-linux_*.deb /tmp/uml.deb
-          sudo apt-get install --no-install-recommends -y /tmp/uml.deb
-          sudo rm /tmp/uml.deb
-          sudo apt-get install --no-install-recommends -y rsh-redone-client
-      - name: Prepare uml environment
-        run: |
-          docker container create --name umlrootfs localhost:5000/umlrootfs
-          sudo mkdir -p umlrootfs
-          docker container export umlrootfs | sudo tar -xv -C umlrootfs
-          docker container rm umlrootfs
-          sudo cp -a --target-directory=umlrootfs/lib/ /usr/lib/uml/modules
-          /bin/echo -e "127.0.0.1 localhost\n254.255.255.1 host" | sudo tee umlrootfs/etc/hosts
-          sudo ip tuntap add dev umltap mode tap
-          sudo ip addr add 254.255.255.1/24 dev umltap
-          sudo ip link set umltap up
-      - name: Start Uml
-        run: |
-          start_uml () {
-            sudo bash -c 'linux root=/dev/root rootflags=/ rw rootfstype=hostfs mem=2G eth0=tuntap,umltap hostfs="$PWD/umlrootfs" con1=pts systemd.unified_cgroup_hierarchy=0 & pid=$!; echo "UMLINUX_PID=$pid" >> '"$GITHUB_ENV"
-          }
-          ( start_uml )
-          started=0
-          for i in $(seq 1 60); do
-            if ping -c 1 -w 1 254.255.255.2; then
-              started=1
-              break
-            fi
-          done
-          if [ "$started" != "1" ]; then
-            echo "Failed to wait Umlinux online"
-            exit 1
-          fi
-      - name: Prepare Uml Environment
-        run: |
-          CUSER="$(id --user --name)"
-          CUID="$(id --user)"
-          CGID="$(id --group)"
-          sudo chroot umlrootfs bash --noprofile --norc -eo pipefail << EOF
-          groupadd --gid "${CGID?}" "${CUSER?}"
-          useradd --create-home --home-dir "/home/${CUSER}" --gid "${CGID?}" \
-            --uid "${CUID?}" --shell "\$(which bash)" "${CUSER?}"
-          EOF
-          ln ./worker.test "umlrootfs/home/${CUSER}/worker.test"
-      - name: Run Tests in Cgroupv1
-        run: |
-          CUSER="$(id --user --name)"
-          sudo rsh 254.255.255.2 bash --noprofile --norc -eo pipefail << EOF
-          exec 2>&1
-          cd "/home/${CUSER}"
-          lssubsys -am
-          cgcreate -a "$CUSER" -t "$CUSER" -g cpu:tunasync
-          cgcreate -a "$CUSER" -t "$CUSER" -g memory:tunasync
-          TERM=xterm-256color ./worker.test -test.v=true -test.coverprofile \
-            profile3.gcov -test.run TestCgroup
-          cgexec -g "*:/" bash -c "echo 0 > /sys/fs/cgroup/systemd/tasks; exec sudo -u $CUSER env USECURCGROUP=1 TERM=xterm-256color cgexec -g cpu,memory:tunasync ./worker.test -test.v=true -test.coverprofile profile4.gcov -test.run TestCgroup"
-          EOF
-      - name: Stop Uml
-        run: |
-          sudo rsh 254.255.255.2 systemctl poweroff
-          sleep 10
-          if [ -e "/proc/$UMLINUX_PID" ]; then
-            sleep 10
-            if [ -e "/proc/$UMLINUX_PID" ]; then
-              sudo kill -TERM "$UMLINUX_PID" || true
-              sleep 1
-            fi
-          fi
-          if [ -e "/proc/$UMLINUX_PID" ]; then
-            sleep 10
-            if [ -e "/proc/$UMLINUX_PID" ]; then
-              sudo kill -KILL "$UMLINUX_PID" || true
-              sleep 1
-            fi
-          fi
-      - name: Combine coverage files
-        run : |
-          CUSER="$(id --user --name)"
-          "${HOME}/go/bin/gocovmerge" profile.gcov profile2.gcov \
-            "umlrootfs/home/${CUSER}/profile3.gcov" \
-            "umlrootfs/home/${CUSER}/profile4.gcov" \
-            profile5_*.gcov > merged.gcov
-          # remove cmdline tools from coverage statistics
-          grep -v "cmd/.*\.go" merged.gcov > profile-all.gcov
       - name: Convert coverage to lcov
-        uses: jandelgado/gcov2lcov-action@v1
+        uses: jandelgado/gcov2lcov-action@v1.0.0
         with:
-          infile: profile-all.gcov
+          infile: profile.cov
           outfile: coverage.lcov
       - name: Coveralls
-        uses: coverallsapp/github-action@v2
+        uses: coverallsapp/github-action@v1.0.1
         with:
           github-token: ${{ secrets.github_token }}
           path-to-lcov: coverage.lcov
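
The `run_test_reexec` helper above re-invokes the compiled test binary under an assumed process name via `exec -a`. A minimal Go sketch of the pattern being exercised; the names `tunasync-exec` and `TESTREEXEC` come from the workflow, while the dispatch logic below is illustrative and not tunasync's actual test code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Branch on the name this process was invoked under, which is what
	// `exec -a tunasync-exec ./worker.test` manipulates in the CI job.
	switch filepath.Base(os.Args[0]) {
	case "tunasync-exec":
		// behave as the re-exec'd helper process
		fmt.Println("running as tunasync-exec, TESTREEXEC =", os.Getenv("TESTREEXEC"))
	default:
		// normal entry point
		fmt.Println("running as", os.Args[0])
	}
}
```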

.gitignore

@@ -1,4 +1 @@
 /build
-/build-*
-worker.test
-profile*

@@ -1,8 +0,0 @@
-FROM debian:bullseye
-RUN apt-get update && apt-get install -y systemd rsh-redone-server ifupdown sudo kmod cgroup-tools systemd-sysv
-RUN echo "host" > /root/.rhosts && \
-    chmod 600 /root/.rhosts && \
-    /bin/echo -e "auto eth0\niface eth0 inet static\naddress 254.255.255.2/24" > /etc/network/interfaces.d/eth0 && \
-    sed -i '/pam_securetty/d' /etc/pam.d/rlogin && \
-    cp /usr/share/systemd/tmp.mount /etc/systemd/system && \
-    systemctl enable tmp.mount

.vscode/settings.json

@@ -1,13 +0,0 @@
-{
-    "cSpell.words": [
-        "Btrfs",
-        "Debugf",
-        "Infof",
-        "Noticef",
-        "Warningf",
-        "cgroup",
-        "mergo",
-        "tmpl",
-        "zpool"
-    ]
-}


@@ -1,28 +1,19 @@
 LDFLAGS="-X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`"
-ARCH ?= linux-amd64
-ARCH_LIST = $(subst -, ,$(ARCH))
-GOOS = $(word 1, $(ARCH_LIST))
-GOARCH = $(word 2, $(ARCH_LIST))
-BUILDBIN = tunasync tunasynctl
-all: $(BUILDBIN)
+all: get tunasync tunasynctl
-build-$(ARCH):
-	mkdir -p $@
+get:
+	go get ./cmd/tunasync
+	go get ./cmd/tunasynctl
-$(BUILDBIN): % : build-$(ARCH) build-$(ARCH)/%
+build:
+	mkdir -p build
-$(BUILDBIN:%=build-$(ARCH)/%) : build-$(ARCH)/% : cmd/%
-	GOOS=$(GOOS) GOARCH=$(GOARCH) go get ./$<
-	GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -o $@ -ldflags ${LDFLAGS} github.com/tuna/tunasync/$<
+tunasync: build
+	go build -o build/tunasync -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasync
+tunasynctl: build
+	go build -o build/tunasynctl -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasynctl
 test:
-	go test -v -covermode=count -coverprofile=profile.gcov ./...
+	go test -v -covermode=count -coverprofile=profile.cov ./...
-build-test-worker:
-	CGO_ENABLED=0 go test -c -covermode=count github.com/tuna/tunasync/worker
-clean:
-	rm -rf build-$(ARCH)
-.PHONY: all test $(BUILDBIN) build-test-worker clean
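
Both versions of the Makefile inject `main.buildstamp` and `main.githash` at link time through `-ldflags "-X ..."`. A minimal sketch of how such link-time variables are typically declared and consumed on the Go side; only the variable names come from the LDFLAGS line above, the formatting logic is illustrative:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// Filled in at link time, e.g.:
//   go build -ldflags "-X main.buildstamp=$(date -u '+%s') -X main.githash=$(git rev-parse HEAD)"
var (
	buildstamp string
	githash    string
)

func main() {
	// buildstamp is a unix timestamp string; parse it back into a time.Time.
	if sec, err := strconv.ParseInt(buildstamp, 10, 64); err == nil {
		fmt.Println("built at:", time.Unix(sec, 0).UTC())
	}
	fmt.Println("git hash:", githash)
}
```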


@@ -1,4 +1,5 @@
-# tunasync
+tunasync
+========
 ![Build Status](https://github.com/tuna/tunasync/workflows/tunasync/badge.svg)
 [![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=master)](https://coveralls.io/github/tuna/tunasync?branch=master)
@@ -11,11 +12,11 @@
 ## Download
-Pre-built binary for Linux x86_64 and ARM64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
+Pre-built binary for Linux x86_64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
 ## Design
-```text
+```
 # Architecture
 - Manager: Central instance for status and job management
@@ -49,15 +50,13 @@ PreSyncing              Syncing                Succe
 +-----------------+
 ```
 ## Building
-Go version: 1.22
+Go version: 1.13
-```shell
-# for native arch
-> make all
-# for other arch
-> make ARCH=linux-arm64 all
+```
+make all
 ```
-Binaries are in `build-$ARCH/`, e.g., `build-linux-amd64/`.
+Binaries in the `build/`.


@@ -9,10 +9,9 @@ import (
 	"time"
 	"github.com/gin-gonic/gin"
-	"github.com/moby/sys/reexec"
 	"github.com/pkg/profile"
-	"github.com/urfave/cli"
 	"gopkg.in/op/go-logging.v1"
+	"github.com/urfave/cli"
 	tunasync "github.com/tuna/tunasync/internal"
 	"github.com/tuna/tunasync/manager"
@@ -40,7 +39,7 @@ func startManager(c *cli.Context) error {
 	m := manager.GetTUNASyncManager(cfg)
 	if m == nil {
-		logger.Errorf("Error intializing TUNA sync manager.")
+		logger.Errorf("Error intializing TUNA sync worker.")
 		os.Exit(1)
 	}
@@ -110,10 +109,6 @@ func startWorker(c *cli.Context) error {
 func main() {
-	if reexec.Init() {
-		return
-	}
 	cli.VersionPrinter = func(c *cli.Context) {
 		var builddate string
 		if buildstamp == "" {


@@ -3,12 +3,11 @@ package main
 import (
 	"encoding/json"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"net/http"
 	"os"
 	"strconv"
 	"strings"
-	"text/template"
 	"time"
 	"github.com/BurntSushi/toml"
@@ -122,7 +121,7 @@ func initialize(c *cli.Context) error {
 	var err error
 	client, err = tunasync.CreateHTTPClient(cfg.CACert)
 	if err != nil {
-		err = fmt.Errorf("error initializing HTTP client: %s", err.Error())
+		err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
 		// logger.Error(err.Error())
 		return err
@@ -161,31 +160,8 @@ func listJobs(c *cli.Context) error {
 				"of all jobs from manager server: %s", err.Error()),
 				1)
 		}
-		if statusStr := c.String("status"); statusStr != "" {
-			filteredJobs := make([]tunasync.WebMirrorStatus, 0, len(jobs))
-			var statuses []tunasync.SyncStatus
-			for _, s := range strings.Split(statusStr, ",") {
-				var status tunasync.SyncStatus
-				err = status.UnmarshalJSON([]byte("\"" + strings.TrimSpace(s) + "\""))
-				if err != nil {
-					return cli.NewExitError(
-						fmt.Sprintf("Error parsing status: %s", err.Error()),
-						1)
-				}
-				statuses = append(statuses, status)
-			}
-			for _, job := range jobs {
-				for _, s := range statuses {
-					if job.Status == s {
-						filteredJobs = append(filteredJobs, job)
-						break
-					}
-				}
-			}
-			genericJobs = filteredJobs
-		} else {
-			genericJobs = jobs
-		}
+		genericJobs = jobs
 	} else {
 		var jobs []tunasync.MirrorStatus
 		args := c.Args()
@@ -220,53 +196,20 @@ func listJobs(c *cli.Context) error {
 		genericJobs = jobs
 	}
-	if format := c.String("format"); format != "" {
-		tpl := template.New("")
-		_, err := tpl.Parse(format)
-		if err != nil {
-			return cli.NewExitError(
-				fmt.Sprintf("Error parsing format template: %s", err.Error()),
-				1)
-		}
-		switch jobs := genericJobs.(type) {
-		case []tunasync.WebMirrorStatus:
-			for _, job := range jobs {
-				err = tpl.Execute(os.Stdout, job)
-				if err != nil {
-					return cli.NewExitError(
-						fmt.Sprintf("Error printing out information: %s", err.Error()),
-						1)
-				}
-				fmt.Println()
-			}
-		case []tunasync.MirrorStatus:
-			for _, job := range jobs {
-				err = tpl.Execute(os.Stdout, job)
-				if err != nil {
-					return cli.NewExitError(
-						fmt.Sprintf("Error printing out information: %s", err.Error()),
-						1)
-				}
-				fmt.Println()
-			}
-		}
-	} else {
-		b, err := json.MarshalIndent(genericJobs, "", "  ")
-		if err != nil {
-			return cli.NewExitError(
-				fmt.Sprintf("Error printing out information: %s", err.Error()),
-				1)
-		}
-		fmt.Println(string(b))
-	}
+	b, err := json.MarshalIndent(genericJobs, "", "  ")
+	if err != nil {
+		return cli.NewExitError(
+			fmt.Sprintf("Error printing out information: %s", err.Error()),
+			1)
+	}
+	fmt.Println(string(b))
 	return nil
 }
 func updateMirrorSize(c *cli.Context) error {
 	args := c.Args()
 	if len(args) != 2 {
-		return cli.NewExitError("Usage: tunasynctl set-size -w <worker-id> <mirror> <size>", 1)
+		return cli.NewExitError("Usage: tunasynctl -w <worker-id> <mirror> <size>", 1)
 	}
 	workerID := c.String("worker")
 	mirrorID := args.Get(0)
@@ -292,7 +235,7 @@ func updateMirrorSize(c *cli.Context) error {
 			1)
 	}
 	defer resp.Body.Close()
-	body, _ := io.ReadAll(resp.Body)
+	body, _ := ioutil.ReadAll(resp.Body)
 	if resp.StatusCode != http.StatusOK {
 		return cli.NewExitError(
 			fmt.Sprintf("Manager failed to update mirror size: %s", body), 1,
@@ -338,7 +281,7 @@ func removeWorker(c *cli.Context) error {
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		body, err := io.ReadAll(resp.Body)
+		body, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return cli.NewExitError(
 				fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -351,7 +294,7 @@ func removeWorker(c *cli.Context) error {
 	}
 	res := map[string]string{}
-	_ = json.NewDecoder(resp.Body).Decode(&res)
+	err = json.NewDecoder(resp.Body).Decode(&res)
 	if res["message"] == "deleted" {
 		fmt.Println("Successfully removed the worker")
 	} else {
@@ -376,7 +319,7 @@ func flushDisabledJobs(c *cli.Context) error {
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		body, err := io.ReadAll(resp.Body)
+		body, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return cli.NewExitError(
 				fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -430,7 +373,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		body, err := io.ReadAll(resp.Body)
+		body, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return cli.NewExitError(
 				fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -468,7 +411,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		body, err := io.ReadAll(resp.Body)
+		body, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return cli.NewExitError(
 				fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -563,14 +506,6 @@ func main() {
 			Name:  "all, a",
 			Usage: "List all jobs of all workers",
 		},
-		cli.StringFlag{
-			Name:  "status, s",
-			Usage: "Filter output based on status provided",
-		},
-		cli.StringFlag{
-			Name:  "format, f",
-			Usage: "Pretty-print containers using a Go template",
-		},
 	}...),
 	Action: initializeWrapper(listJobs),
 },
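
The `--format` flag removed on the v0.6.2 side feeds each job status through Go's `text/template`. A self-contained sketch of that mechanism, with an illustrative two-field struct standing in for tunasync's real status types:

```go
package main

import (
	"os"
	"text/template"
)

// MirrorStatus is a stand-in for tunasync's status structs; the real
// types carry more fields.
type MirrorStatus struct {
	Name   string
	Status string
}

func main() {
	// Parse the user-supplied format string once, then execute it per job.
	tpl := template.Must(template.New("").Parse("{{.Name}}: {{.Status}}\n"))
	jobs := []MirrorStatus{{"elvish", "success"}, {"debian", "syncing"}}
	for _, job := range jobs {
		if err := tpl.Execute(os.Stdout, job); err != nil {
			panic(err)
		}
	}
}
```

With the master-side binary this corresponds to invocations like `tunasynctl list --all -f '{{.Name}}: {{.Status}}'`.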


@@ -1,141 +0,0 @@
-# About Tunasync and cgroup
-Optionally, tunasync can be integrated with cgroup for better control and tracking of the processes started by mirror jobs. Limiting the memory usage of a mirror job also requires cgroup support.
-## How are cgroups utilized in tunasync?
-If cgroup support is enabled globally, all mirror jobs, except those running in docker containers, are run in separate cgroups. If `mem_limit` is specified, it is applied to the cgroup. For jobs running in docker containers, `mem_limit` is applied via the `docker run` command.
-## Tl;dr: What's the recommended configuration?
-### If you are using the v1 (legacy, hybrid) cgroup hierarchy:
-`tunasync-worker.service`:
-```
-[Unit]
-Description = TUNA mirrors sync worker
-After=network.target
-[Service]
-Type=simple
-User=tunasync
-PermissionsStartOnly=true
-ExecStartPre=/usr/bin/cgcreate -t tunasync -a tunasync -g memory:tunasync
-ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
-ExecReload=/bin/kill -SIGHUP $MAINPID
-ExecStopPost=/usr/bin/cgdelete memory:tunasync
-[Install]
-WantedBy=multi-user.target
-```
-`worker.conf`:
-``` toml
-[cgroup]
-enable = true
-group = "tunasync"
-```
-### If you are using the v2 (unified) cgroup hierarchy:
-`tunasync-worker.service`:
-```
-[Unit]
-Description = TUNA mirrors sync worker
-After=network.target
-[Service]
-Type=simple
-User=tunasync
-ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
-ExecReload=/bin/kill -SIGHUP $MAINPID
-Delegate=yes
-[Install]
-WantedBy=multi-user.target
-```
-`worker.conf`:
-``` toml
-[cgroup]
-enable = true
-```
-## Two versions of cgroups
-For various reasons, there are two versions of cgroups in the kernel, which are incompatible with each other. Most current Linux distributions adopt systemd as the init system, which relies on cgroups and is responsible for initializing them; as a result, the cgroup version in use is mainly decided by systemd. Since version 243, the "unified" cgroup hierarchy setup has become the default.
-Tunasync can automatically detect which version of cgroup is in use and enable the corresponding operating interface, but because systemd behaves slightly differently in the two cases, different tunasync configurations are recommended.
-## Two modes of group name discovery
-Two modes of group name discovery are provided: implicit mode and manual mode.
-### Manual mode
-In this mode, the administrator should 1. manually create an empty cgroup (for the cgroup v2 unified hierarchy) or empty cgroups of the same name in certain controller subsystems (for the cgroup v1 hybrid hierarchy); 2. change the ownership of the cgroups to the user running the tunasync worker; and 3. specify the path in the configuration. On start, tunasync automatically detects which controllers are enabled (for v1) or enables the needed controllers (for v2).
-Example 1:
-``` bash
-# suppose we have cgroup v1
-sudo mkdir -p /sys/fs/cgroup/cpu/test/tunasync
-sudo mkdir -p /sys/fs/cgroup/memory/test/tunasync
-sudo chown -R tunasync:tunasync /sys/fs/cgroup/cpu/test/tunasync
-sudo chown -R tunasync:tunasync /sys/fs/cgroup/memory/test/tunasync
-# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
-tunasync worker -c /path/to/worker.conf
-```
-In the above scenario, tunasync detects that the enabled subsystem controllers are cpu and memory. When running a mirror job named `foo`, sub-cgroups are created in both `/sys/fs/cgroup/cpu/test/tunasync/foo` and `/sys/fs/cgroup/memory/test/tunasync/foo`.
-Example 2 (not recommended):
-``` bash
-# suppose we have cgroup v2
-sudo mkdir -p /sys/fs/cgroup/test/tunasync
-sudo chown -R tunasync:tunasync /sys/fs/cgroup/test/tunasync
-# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
-tunasync worker -c /path/to/worker.conf
-```
-In the above scenario, tunasync directly uses the cgroup `/sys/fs/cgroup/test/tunasync`. In most cases, due to the design of cgroup v2, since tunasync is not running as root, it won't have permission to move the processes it starts into the correct cgroup: cgroup v2 requires that the operating process also have write permission on the common ancestor of the source and target groups when moving processes between them. This example is only a demonstration of the functionality, and you should avoid it.
-### Implicit mode
-In this mode, tunasync uses the cgroup it is currently running in and creates sub-groups for jobs in that group. Tunasync first creates a sub-group named `__worker` in that group and moves itself into the `__worker` sub-group, to keep processes out of non-leaf cgroups.
-Mostly, this mode is used together with the `Delegate=yes` option in the systemd service configuration of tunasync, which permits the process to self-manage the cgroup the service is running in. For security reasons, systemd won't grant write permission on the current cgroups to the service when using the v1 (legacy, hybrid) cgroup hierarchy with a non-root user, so this mode is more useful with the v2 cgroup hierarchy.
-## Configuration
-``` toml
-[cgroup]
-enable = true
-base_path = "/sys/fs/cgroup"
-group = "tunasync"
-subsystem = "memory"
-```
-The definition of the above options is:
-* `enable`: `Bool`, specifies whether cgroup support is enabled. When disabled, `memory_limit` for non-docker jobs is ignored, and the following options are also ignored.
-* `group`: `String`, specifies the cgroup tunasync will use. When not provided, or provided with an empty string, cgroup discovery works in "implicit mode", i.e. sub-cgroups are created in the cgroup tunasync is currently running in. Otherwise, cgroup discovery works in "manual mode", where tunasync creates sub-cgroups in the specified cgroup.
-* `base_path`: `String`, ignored. It originally specified the mount path of the cgroup filesystem; it is now required that the cgroup filesystem be mounted at its default path (`/sys/fs/cgroup`).
-* `subsystem`: `String`, ignored. It originally specified which cgroup v1 controller was enabled and is now meaningless since discovery is automatic.
-## References:
-* <https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html>
-* <https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html>
-* <https://systemd.io/CGROUP_DELEGATION/>
-* <https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Delegate=>
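
The removed document says tunasync detects the cgroup version automatically. A common way to do that is to check the filesystem magic of `/sys/fs/cgroup`; the sketch below shows that generic technique, not necessarily tunasync's exact code:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/sys/fs/cgroup", &st); err != nil {
		panic(err)
	}
	// On a unified hierarchy, /sys/fs/cgroup itself is a cgroup2 mount.
	if st.Type == unix.CGROUP2_SUPER_MAGIC {
		fmt.Println("unified (v2) cgroup hierarchy")
	} else {
		fmt.Println("legacy/hybrid (v1) cgroup hierarchy")
	}
}
```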


@@ -1,5 +1,4 @@
 # tunasync Getting Started Guide
 date: 2016-10-31 00:50:00
 [tunasync](https://github.com/tuna/tunasync) is the mirroring solution currently used by the [TUNA mirrors of Tsinghua University](https://mirrors.tuna.tsinghua.edu.cn).
@@ -8,38 +7,38 @@ date: 2016-10-31 00:50:00
 In this example:
-- only the [elvish](https://elv.sh) project is mirrored
+- only the [elvish](https://elvish.io) project is mirrored
 - HTTPS is disabled
 - cgroup support is disabled
 ## Getting tunasync
 ### Binary package
-Download `tunasync-linux-amd64-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
+Download `tunasync-linux-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
 ### Building from source
-```shell
-> make
+```
+$ make
 ```
 ## Configuration
-```shell
-> mkdir ~/tunasync_demo
-> mkdir /tmp/tunasync
+```
+$ mkdir ~/tunasync_demo
+$ mkdir /tmp/tunasync
 ```
-Edit `~/tunasync_demo/worker.conf`:
+`~/tunasync_demo/worker.conf`:
-```conf
+```
 [global]
 name = "test_worker"
 log_dir = "/tmp/tunasync/log/tunasync/{{.Name}}"
 mirror_dir = "/tmp/tunasync"
 concurrent = 10
-interval = 120
+interval = 1
 [manager]
 api_base = "http://localhost:12345"
@@ -61,13 +60,13 @@ ssl_key = ""
 [[mirrors]]
 name = "elvish"
 provider = "rsync"
-upstream = "rsync://rsync.elv.sh/elvish/"
+upstream = "rsync://rsync.elvish.io/elvish/"
 use_ipv6 = false
 ```
-Edit `~/tunasync_demo/manager.conf`:
+`~/tunasync_demo/manager.conf`:
-```conf
+```
 debug = false
 [server]
@@ -82,30 +81,28 @@ db_file = "/tmp/tunasync/manager.db"
 ca_cert = ""
 ```
-Besides bolt, the badger, leveldb and redis database backends are also supported. For badger and leveldb, only db_type needs to be changed. To use redis as the backend, change db_type to redis and set db_file below to the address of the redis server: `redis://user:password@host:port/db_number`
 ### Running
-```shell
-> tunasync manager --config ~/tunasync_demo/manager.conf
-> tunasync worker --config ~/tunasync_demo/worker.conf
+```
+$ tunasync manager --config ~/tunasync_demo/manager.conf
+$ tunasync worker --config ~/tunasync_demo/worker.conf
 ```
 In this example, the mirrored data lives under `/tmp/tunasync/`
 ### Control
 Check the sync status:
-```shell
-> tunasynctl list -p 12345 --all
+```
+$ tunasynctl list -p 12345 --all
 ```
 tunasynctl also supports a configuration file, which can be placed at `/etc/tunasync/ctl.conf` or `~/.config/tunasync/ctl.conf`; values in the latter override those in the former.
 The configuration file contains:
-```conf
+```
 manager_addr = "127.0.0.1"
 manager_port = 12345
 ca_cert = ""
@@ -121,13 +118,13 @@ Workers and the manager communicate over http(s); if both your worker and manager
 See:
-```shell
-> tunasync manager --help
-> tunasync worker --help
+```
+$ tunasync manager --help
+$ tunasync worker --help
 ```
 You can also take a look at the log directory.
 Some example worker configurations: [workers.conf](workers.conf)
 Operations you may find useful: [tips.md](tips.md)
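
For the redis backend mentioned above, the `db_file` value is a standard redis URL that clients can parse directly. An illustrative sketch using `github.com/go-redis/redis/v8` (a master-side dependency; the credentials and address below are made up):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// ParseURL accepts the same redis://user:password@host:port/db_number
	// form that the guide uses for db_file.
	opts, err := redis.ParseURL("redis://user:password@localhost:6379/0")
	if err != nil {
		panic(err)
	}
	client := redis.NewClient(opts)
	fmt.Println(client.Ping(context.Background()).Err())
}
```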


@@ -83,7 +83,7 @@ snapshot_path = "/path/to/snapshot/directory"
 [[mirrors]]
 name = "elvish"
 provider = "rsync"
-upstream = "rsync://rsync.elv.sh/elvish/"
+upstream = "rsync://rsync.elvish.io/elvish/"
 interval = 1440
 snapshot_path = "/data/publish/elvish"
 ```


@@ -1,16 +1,10 @@
-# /home/scripts in this example points to https://github.com/tuna/tunasync-scripts/
 [global]
 name = "mirror_worker"
 log_dir = "/srv/tunasync/log/tunasync/{{.Name}}"
 mirror_dir = "/srv/tunasync"
 concurrent = 10
-interval = 120
+interval = 1
-# ensure the exec user be add into `docker` group
-[docker]
-# in `command provider` can use docker_image and docker_volumes
-enable = true
 [manager]
 api_base = "http://localhost:12345"
@@ -28,637 +22,52 @@ listen_addr = "127.0.0.1"
 listen_port = 6000
 ssl_cert = ""
 ssl_key = ""
 [[mirrors]]
 name = "adobe-fonts"
 interval = 1440
 provider = "command"
 upstream = "https://github.com/adobe-fonts"
+#https://github.com/tuna/tunasync-scripts/blob/master/adobe-fonts.sh
 command = "/home/scripts/adobe-fonts.sh"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
 docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "AdoptOpenJDK"
-interval = 5760
-provider = "command"
-command = "/home/scripts/adoptopenjdk.py"
-upstream = "https://adoptopenjdk.jfrog.io/adoptopenjdk"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "alpine"
-provider = "rsync"
-upstream = "rsync://rsync.alpinelinux.org/alpine/"
-memory_limit = "256M"
 [[mirrors]]
 name = "anaconda"
 provider = "command"
 upstream = "https://repo.continuum.io/"
-command = "/home/scripts/anaconda.py --delete"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-interval = 720
+#https://github.com/tuna/tunasync-scripts/blob/master/anaconda.py
+command = "/home/scripts/anaconda.py"
+interval = 1440
 docker_image = "tunathu/tunasync-scripts:latest"
 [[mirrors]]
-name = "apache"
+name = "gnu"
 provider = "rsync"
-upstream = "rsync://rsync.apache.org/apache-dist/"
-use_ipv4 = true
+upstream = "rsync://mirrors.ocf.berkeley.edu/gnu/"
 rsync_options = [ "--delete-excluded" ]
 memory_limit = "256M"
-[[mirrors]]
-name = "armbian"
-provider = "two-stage-rsync"
-stage1_profile = "debian"
-upstream = "rsync://rsync.armbian.com/apt/"
-memory_limit = "256M"
-[[mirrors]]
-name = "armbian-releases"
-provider = "rsync"
-stage1_profile = "debian"
-upstream = "rsync://rsync.armbian.com/dl/"
-memory_limit = "256M"
-[[mirrors]]
-name = "bananian"
-provider = "command"
-upstream = "https://dl.bananian.org/"
-command = "/home/scripts/lftp.sh"
-interval = 1440
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "bioconductor"
-provider = "rsync"
-upstream = "master.bioconductor.org:./"
-rsync_options = [ "--rsh=ssh -i /root/id_rsa -o PasswordAuthentication=no -l sync" ]
-exclude_file = "/etc/excludes/bioconductor.txt"
-memory_limit = "256M"
-[[mirrors]]
-name = "blender"
-provider = "rsync"
-upstream = "rsync://mirrors.dotsrc.org/blender/"
-rsync_options = [ "--delete-excluded" ]
-exclude_file = "/etc/excludes/blender.txt"
-interval = 1440
-memory_limit = "256M"
-[[mirrors]]
-name = "chakra"
-provider = "rsync"
-upstream = "rsync://rsync.chakralinux.org/packages/"
-memory_limit = "256M"
-[[mirrors]]
-name = "chakra-releases"
-provider = "rsync"
-upstream = "rsync://rsync.chakralinux.org/releases/"
-memory_limit = "256M"
-[[mirrors]]
-name = "chef"
-interval = 1440
-provider = "command"
-upstream = "https://packages.chef.io/repos"
-command = "/home/scripts/chef.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "clickhouse"
-interval = 2880
-provider = "rsync"
-upstream = "rsync://repo.yandex.ru/yandexrepo/clickhouse/"
-exclude_file = "/etc/excludes/clickhouse.txt"
-memory_limit = "256M"
-[[mirrors]]
-name = "clojars"
-provider = "command"
-upstream = "s3://clojars-repo-production/"
-command = "/home/scripts/s3.sh"
-docker_image = "tunathu/ftpsync:latest"
-[mirrors.env]
-TUNASYNC_S3_ENDPOINT = "https://s3.dualstack.us-east-2.amazonaws.com"
-#TUNASYNC_S3_ENDPOINT = "https://s3.us-east-2.amazonaws.com"
-TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
-[[mirrors]]
-name = "CPAN"
-provider = "rsync"
-upstream = "rsync://cpan-rsync.perl.org/CPAN/"
-memory_limit = "256M"
-[[mirrors]]
-name = "CRAN"
-provider = "rsync"
-upstream = "rsync://cran.r-project.org/CRAN/"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-[[mirrors]]
-name = "CTAN"
-provider = "rsync"
-upstream = "rsync://mirrors.rit.edu/CTAN/"
-memory_limit = "256M"
-[[mirrors]]
-name = "dart-pub"
-provider = "command"
-upstream = "https://pub.dev/api"
-command = "/home/scripts/pub.sh"
-interval = 30
-docker_image = "tunathu/pub-mirror:latest"
-[mirrors.env]
-MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub"
-[[mirrors]]
-name = "debian"
-provider = "command"
-upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
-command = "/home/scripts/debian.sh sync:archive:debian"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-docker_image = "tunathu/ftpsync"
-docker_volumes = [
-	"/etc/misc/ftpsync-debian.conf:/ftpsync/etc/ftpsync-debian.conf:ro",
-	"/log/ftpsync:/home/log/tunasync/ftpsync",
-]
-[mirrors.env]
-FTPSYNC_LOG_DIR = "/home/log/tunasync/ftpsync"
-[[mirrors]]
-name = "docker-ce"
-provider = "command"
-upstream = "https://download.docker.com/"
-command = "timeout 3h /home/scripts/docker-ce.py --workers 10 --fast-skip"
-interval = 1440
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "ELK"
-interval = 1440
-provider = "command"
-upstream = "https://packages.elastic.co"
-command = "/home/scripts/ELK.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-# set environment varialbes
-[mirrors.env]
-WGET_OPTIONS = "-6"
-[[mirrors]]
-name = "elasticstack"
-interval = 1440
-provider = "command"
-upstream = "https://artifacts.elastic.co/"
-command = "/home/scripts/elastic.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "erlang-solutions"
-interval = 1440
-provider = "command"
-upstream = "https://packages.erlang-solutions.com"
-command = "/home/scripts/erlang.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "flutter"
-interval = 1440
-provider = "command"
-upstream = "https://storage.googleapis.com/flutter_infra/"
-command = "/home/scripts/flutter.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "github-release"
-provider = "command"
-upstream = "https://api.github.com/repos/"
-command = "/home/scripts/github-release.py --workers 5"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-interval = 720
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-GITHUB_TOKEN = "xxxxx"
-[[mirrors]]
-name = "gitlab-ce"
-interval = 1440
-provider = "command"
-upstream = "https://packages.gitlab.com/gitlab/gitlab-ce/"
-command = "/home/scripts/gitlab-ce.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "gitlab-ee"
-interval = 1440
-provider = "command"
-upstream = "https://packages.gitlab.com/gitlab/gitlab-ee/"
-command = "/home/scripts/gitlab-ce.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "gitlab-runner"
-interval = 1440
-provider = "command"
-upstream = "https://packages.gitlab.com/runner/gitlab-runner"
-command = "/home/scripts/gitlab-runner.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "grafana"
-interval = 1440
-provider = "command"
-upstream = "https://packages.grafana.com/oss"
-command = "/home/scripts/grafana.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "hackage"
-provider = "command"
-command = "/home/scripts/hackage.sh"
-upstream = "https://hackage.haskell.org/"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "homebrew-bottles"
-provider = "command"
-upstream = "https://homebrew.bintray.com"
-command = "/home/scripts/linuxbrew-bottles.sh"
-docker_image = "tunathu/homebrew-mirror"
-# set environment varialbes
-[mirrors.env]
-HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
-[[mirrors]]
-name = "influxdata"
-interval = 1440
-provider = "command"
-upstream = "https://repos.influxdata.com"
-command = "/home/scripts/influxdata.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "kali"
-provider = "two-stage-rsync"
-stage1_profile = "debian"
-upstream = "rsync://ftp.nluug.nl/kali/"
-rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
-memory_limit = "256M"
-[[mirrors]]
-name = "kali-images"
-provider = "rsync"
-upstream = "rsync://ftp.nluug.nl/kali-images/"
-rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
-memory_limit = "256M"
-[[mirrors]]
-name = "KaOS"
-provider = "rsync"
-upstream = "rsync://kaosx.tk/kaos/"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-[[mirrors]]
-name = "kernel"
-provider = "rsync"
-upstream = "rsync://rsync.kernel.org/pub/linux/kernel/"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-[[mirrors]]
-name = "kicad"
-provider = "command"
-upstream = "s3://kicad-downloads/"
-command = "/home/scripts/s3.sh"
-docker_image = "tunathu/ftpsync:latest"
-[mirrors.env]
-TUNASYNC_S3_ENDPOINT = "https://s3.cern.ch"
-TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
-[[mirrors]]
-name = "kodi"
-provider = "rsync"
-upstream = "rsync://mirror.yandex.ru/mirrors/xbmc/"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-use_ipv6 = true
-[[mirrors]]
-name = "kubernetes"
-interval = 2880
-provider = "command"
-upstream = "http://packages.cloud.google.com"
-command = "/home/scripts/kubernetes.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "linuxbrew-bottles"
-provider = "command"
-upstream = "https://linuxbrew.bintray.com"
-command = "/home/scripts/linuxbrew-bottles.sh"
-docker_image = "tunathu/homebrew-mirror"
-# set environment varialbes
-[mirrors.env]
-RUN_LINUXBREW = "true"
-HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
-[[mirrors]]
-name = "linuxmint"
-provider = "two-stage-rsync"
-stage1_profile = "debian"
-upstream = "rsync://mirrors.kernel.org/linuxmint-packages/"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-[[mirrors]]
-name = "lxc-images"
-provider = "command"
-upstream = "https://us.images.linuxcontainers.org/"
-command = "/home/scripts/lxc-images.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-interval = 720
-[[mirrors]]
-name = "lyx"
-provider = "command"
-upstream = "ftp://ftp.lyx.org/pub/lyx/"
-command = "/home/scripts/lftp.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-TUNASYNC_LFTP_OPTIONS = "--only-newer"
-[[mirrors]]
-name = "mongodb"
-interval = 1440
-provider = "command"
-upstream = "https://repo.mongodb.org"
-command = "/home/scripts/mongodb.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "msys2"
-provider = "command"
-upstream = "http://repo.msys2.org/"
-command = "/home/scripts/lftp.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "mysql"
-interval = 30
-provider = "command"
-upstream = "https://repo.mysql.com"
-command = "/home/scripts/mysql.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-# set environment varialbes
-[mirrors.env]
-USE_IPV6 = "1"
-[[mirrors]]
-name = "nix"
-interval = 1440
-provider = "command"
-upstream = "s3://nix-releases/nix/"
-command = "/home/scripts/nix.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-MIRROR_BASE_URL = 'https://mirrors.tuna.tsinghua.edu.cn/nix/'
-[[mirrors]]
-name = "nix-channels"
-interval = 300
-provider = "command"
-upstream = "https://nixos.org/channels"
-command = "timeout 20h /home/scripts/nix-channels.py"
-docker_image = "tunathu/nix-channels:latest"
-docker_options = [
-	"--cpus", "20",
-]
-[[mirrors]]
-name = "nodesource"
-provider = "command"
-upstream = "https://deb.nodesource.com/"
-command = "/home/scripts/nodesource.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "openresty"
-provider = "command"
-upstream = "https://openresty.org/package/"
-command = "/home/scripts/lftp.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-TUNASYNC_LFTP_OPTIONS = "--only-newer"
-[[mirrors]]
-name = "packagist"
-provider = "command"
-upstream = "http://packagist.org/"
-command = "/home/scripts/packagist.sh"
-interval = 1440
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "proxmox"
-interval = 1440
-provider = "command"
-upstream = "http://download.proxmox.com"
-command = "/home/scripts/proxmox.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
 [[mirrors]]
 name = "pypi"
 provider = "command"
 upstream = "https://pypi.python.org/"
+#https://github.com/tuna/tunasync-scripts/blob/master/pypi.sh
 command = "/home/scripts/pypi.sh"
-docker_image = "tunathu/bandersnatch:latest"
+docker_image = "tunathu/tunasync-scripts:latest"
 interval = 5
-[[mirrors]]
-name = "qt"
-provider = "rsync"
-upstream = "rsync://master.qt-project.org/qt-all/"
-exclude_file = "/etc/excludes/qt.txt"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-[[mirrors]]
-name = "raspberrypi"
-provider = "two-stage-rsync"
-stage1_profile = "debian"
-upstream = "rsync://apt-repo.raspberrypi.org/archive/debian/"
-memory_limit = "256M"
-[[mirrors]]
-name = "raspbian-images"
-interval = 5760
-provider = "command"
-upstream = "https://downloads.raspberrypi.org/"
-command = "/home/scripts/lftp.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-TUNASYNC_LFTP_OPTIONS = "-x ^icons/$ -c --only-missing -v --no-perms"
-[[mirrors]]
-name = "raspbian"
-provider = "two-stage-rsync"
-stage1_profile = "debian"
-upstream = "rsync://archive.raspbian.org/archive/"
-rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
-memory_limit = "256M"
-[[mirrors]]
-name = "redhat"
-provider = "rsync"
-upstream = "rsync://ftp.redhat.com/redhat/"
-rsync_options = [ "--delete-excluded" ]
-memory_limit = "256M"
-exclude_file = "/etc/excludes/redhat.txt"
-interval = 1440
-[mirrors.env]
-RSYNC_PROXY="127.0.0.1:8123"
-[[mirrors]]
-name = "remi"
-interval = 1440
-provider = "command"
-upstream = "rsync://rpms.remirepo.net"
-command = "/home/scripts/remi.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "repo-ck"
-provider = "command"
-upstream = "http://repo-ck.com"
-command = "/home/scripts/repo-ck.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "ros"
-provider = "rsync"
-upstream = "rsync://mirror.umd.edu/packages.ros.org/ros/"
-memory_limit = "256M"
-[[mirrors]]
-name = "ros2"
-interval = 1440
-provider = "command"
-upstream = "http://packages.ros.org/ros2"
-command = "/home/scripts/ros2.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "rubygems"
-provider = "command"
-upstream = "https://rubygems.org"
-command = "/home/scripts/rubygems.sh"
-docker_image = "tunathu/rubygems-mirror"
-interval = 60
 # set environment varialbes
 [mirrors.env]
 INIT = "0"
-[[mirrors]]
-name = "rudder"
-interval = 2880
-provider = "command"
-upstream = "https://repository.rudder.io"
-command = "/home/scripts/rudder.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
 [[mirrors]]
-name = "rustup"
-provider = "command"
-upstream = "https://rustup.rs/"
-command = "/home/scripts/rustup.sh"
-interval = 1440
-docker_image = "tunathu/rustup-mirror:latest"
-docker_volumes = [
-]
-docker_options = [
-]
-[mirrors.env]
-MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup"
-[[mirrors]]
-name = "saltstack"
-interval = 1440 # required on http://repo.saltstack.com/#mirror
-provider = "command"
-upstream = "s3://s3/"
-command = "/home/scripts/s3.sh"
-docker_image = "tunathu/ftpsync:latest"
-[mirrors.env]
-TUNASYNC_S3_ENDPOINT = "https://s3.repo.saltstack.com"
-TUNASYNC_AWS_OPTIONS = "--delete --exact-timestamps"
-[[mirrors]]
-name = "solus"
+name = "debian"
+interval = 720
 provider = "rsync"
-upstream = "rsync://mirrors.rit.edu/solus/"
-rsync_options = [ "--exclude", "/shannon", "--exclude", "/unstable" ]
+upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
 memory_limit = "256M"
-[[mirrors]]
-name = "stackage"
-provider = "command"
-command = "/home/scripts/stackage.py"
-upstream = "https://www.stackage.org/"
-docker_image = "tunathu/tunasync-scripts:latest"
-# set environment varialbes
-[mirrors.env]
-GIT_COMMITTER_NAME = "TUNA mirrors"
-GIT_COMMITTER_EMAIL = "mirrors@tuna.tsinghua.edu.cn"
-[[mirrors]]
-name = "steamos"
-interval = 1440
-provider = "command"
-upstream = "http://repo.steampowered.com"
-command = "/home/scripts/lftp.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-TUNASYNC_LFTP_OPTIONS = "--only-newer --exclude icons/ "
-[[mirrors]]
-name = "termux"
-interval = 1440
-provider = "command"
-upstream = "https://dl.bintray.com/termux/termux-packages-24/"
-command = "/home/scripts/termux.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
 [[mirrors]]
 name = "ubuntu"
 provider = "two-stage-rsync"
@@ -667,156 +76,4 @@ upstream = "rsync://archive.ubuntu.com/ubuntu/"
 rsync_options = [ "--delete-excluded" ]
 memory_limit = "256M"
-[[mirrors]]
-name = "ubuntu-ports"
-provider = "two-stage-rsync"
-stage1_profile = "debian"
-upstream = "rsync://ports.ubuntu.com/ubuntu-ports/"
-rsync_options = [ "--delete-excluded" ]
-exclude_file = "/etc/excludes/ubuntu-ports-exclude.txt"
-memory_limit = "256M"
-[[mirrors]]
-name = "virtualbox"
-interval = 1440
-provider = "command"
-upstream = "http://download.virtualbox.org/virtualbox"
-command = "/home/scripts/virtualbox.sh"
-size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "winehq"
-provider = "command"
-upstream = "ftp://ftp.winehq.org/pub/"
-command = "/home/scripts/lftp.sh"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-TUNASYNC_LFTP_OPTIONS = "-x wine-builds.old/ -x /\\..+"
-[[mirrors]]
-name = "zabbix"
-provider = "rsync"
-upstream = "rsync://repo.zabbix.com/mirror/"
-rsync_options = [ "--delete-excluded", "--chmod=o+r,Do+x,Fa-x" ]
-memory_limit = "256M"
-[[mirrors]]
-name = "AOSP"
-interval = 720
-provider = "command"
-command = "/home/tunasync-scripts/aosp.sh"
-upstream = "https://android.googlesource.com/mirror/manifest"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-REPO = "/usr/local/bin/aosp-repo"
-REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
-USE_BITMAP_INDEX = "1"
-[[mirrors]]
-name = "lineageOS"
-interval = 720
-provider = "command"
-command = "/home/tunasync-scripts/aosp.sh"
-upstream = "https://github.com/LineageOS/mirror"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-REPO = "/usr/local/bin/aosp-repo"
-REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
-USE_BITMAP_INDEX = "1"
-[[mirrors]]
-name = "chromiumos"
-interval = 720
-provider = "command"
-command = "/home/tunasync-scripts/cros.sh"
-upstream = "https://chromium.googlesource.com"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-fail_on_match = "fatal: "
-docker_image = "tunathu/tunasync-scripts:latest"
-[mirrors.env]
-USE_BITMAP_INDEX = "1"
-CONCURRENT_JOBS = "20"
-[[mirrors]]
-name = "crates.io-index.git"
-provider = "command"
-command = "/home/tunasync-scripts/git.sh"
-upstream = "https://github.com/rust-lang/crates.io-index.git"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
-[[mirrors]]
-name = "flutter-sdk.git"
-provider = "command"
-command = "/home/tunasync-scripts/git.sh"
-upstream = "git://github.com/flutter/flutter.git"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
-[[mirrors]]
-name = "gcc.git"
-provider = "command"
-command = "/home/tunasync-scripts/git.sh"
-upstream = "git://gcc.gnu.org/git/gcc.git"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
-[[mirrors]]
-name = "gentoo-portage.git"
-provider = "command"
-command = "/home/tunasync-scripts/git.sh"
-upstream = "git://github.com/gentoo-mirror/gentoo.git"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
-[[mirrors]]
-name = "git-repo"
-provider = "command"
-command = "/home/tunasync-scripts/git-repo.sh"
-upstream = "https://gerrit.googlesource.com/git-repo"
-size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
-fail_on_match = "fatal: "
-docker_image = "tunathu/tunasync-scripts:latest"
-[[mirrors]]
-name = "homebrew"
-provider = "command"
-command = "/home/tunasync-scripts/homebrew.sh"
-upstream = "https://github.com/Homebrew"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-[[mirrors]]
-name = "CocoaPods"
-provider = "command"
-command = "/home/tunasync-scripts/cocoapods.sh"
-upstream = "https://github.com/CocoaPods"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-[[mirrors]]
-name = "pybombs"
-interval = 720
-provider = "command"
-command = "/home/tunasync-scripts/pybombs.sh"
-upstream = "https://github.com/scateu/pybombs-mirror/"
-docker_image = "tunathu/tunasync-scripts:latest"
-docker_volumes = ["/home/pybombs-mirror:/opt/pybombs-mirror"]
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
-[mirrors.env]
-PYBOMBS_MIRROR_SCRIPT_PATH = "/opt/pybombs-mirror"
-MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/pybombs"
-[[mirrors]]
-name = "llvm"
-provider = "command"
-command = "/home/tunasync-scripts/llvm.sh"
-upstream = "https://git.llvm.org/git"
-docker_image = "tunathu/tunasync-scripts:latest"
-size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
 # vim: ft=toml
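
A config like the above decodes naturally into Go structs with `github.com/BurntSushi/toml`, which both sides of this diff depend on. An illustrative sketch covering a few of the keys shown; the struct and field set are assumptions, not tunasync's real config types:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// mirrorConfig models a subset of the keys used in the [[mirrors]] tables.
type mirrorConfig struct {
	Name     string `toml:"name"`
	Provider string `toml:"provider"`
	Upstream string `toml:"upstream"`
	Interval int    `toml:"interval"`
}

type workerConfig struct {
	Mirrors []mirrorConfig `toml:"mirrors"`
}

const doc = `
[[mirrors]]
name = "pypi"
provider = "command"
upstream = "https://pypi.python.org/"
interval = 5
`

func main() {
	var cfg workerConfig
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Mirrors[0])
}
```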

go.mod

@@ -1,89 +1,21 @@
 module github.com/tuna/tunasync
-go 1.23.0
-toolchain go1.23.5
+go 1.13
 require (
-	github.com/BurntSushi/toml v1.4.0
-	github.com/alicebob/miniredis v2.5.0+incompatible
-	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
+	github.com/BurntSushi/toml v0.3.1
+	github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239
 	github.com/boltdb/bolt v1.3.1
-	github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe
-	github.com/containerd/cgroups/v3 v3.0.5
-	github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6
-	github.com/dgraph-io/badger/v2 v2.2007.4
-	github.com/docker/go-units v0.5.0
-	github.com/gin-gonic/gin v1.10.0
-	github.com/go-redis/redis/v8 v8.11.5
-	github.com/imdario/mergo v0.3.16
-	github.com/moby/moby v28.0.1+incompatible
-	github.com/moby/sys/reexec v0.1.0
-	github.com/opencontainers/runtime-spec v1.2.1
-	github.com/pkg/errors v0.9.1
-	github.com/pkg/profile v1.7.0
+	github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27
+	github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035
+	github.com/gin-gonic/gin v1.5.0
+	github.com/imdario/mergo v0.3.9
+	github.com/mattn/goveralls v0.0.5 // indirect
+	github.com/pkg/profile v1.4.0
 	github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
 	github.com/smartystreets/goconvey v1.6.4
-	github.com/syndtr/goleveldb v1.0.0
-	github.com/urfave/cli v1.22.16
-	golang.org/x/sys v0.30.0
+	github.com/urfave/cli v1.22.3
+	golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
+	golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 // indirect
 	gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
 )
-replace github.com/boltdb/bolt v1.3.1 => go.etcd.io/bbolt v1.3.11
-require (
-	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
-	github.com/bytedance/sonic v1.12.9 // indirect
-	github.com/bytedance/sonic/loader v0.2.3 // indirect
-	github.com/cespare/xxhash v1.1.0 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/cilium/ebpf v0.17.3 // indirect
-	github.com/cloudwego/base64x v0.1.5 // indirect
-	github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
-	github.com/containerd/log v0.1.0 // indirect
-	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
-	github.com/dennwc/ioctl v1.0.0 // indirect
-	github.com/dgraph-io/ristretto v0.2.0 // indirect
-	github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect
-	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/felixge/fgprof v0.9.5 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
-	github.com/gin-contrib/sse v1.0.0 // indirect
-	github.com/go-playground/locales v0.14.1 // indirect
-	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/go-playground/validator/v10 v10.25.0 // indirect
-	github.com/goccy/go-json v0.10.5 // indirect
-	github.com/godbus/dbus/v5 v5.1.0 // indirect
-	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/gomodule/redigo v1.8.2 // indirect
-	github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
-	github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/jtolds/gls v4.20.0+incompatible // indirect
-	github.com/klauspost/compress v1.18.0 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
-	github.com/leodido/go-urn v1.4.0 // indirect
-	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/moby/sys/userns v0.1.0 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
-	github.com/rogpeppe/go-internal v1.13.1 // indirect
-	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/smartystreets/assertions v1.2.0 // indirect
-	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	github.com/ugorji/go/codec v1.2.12 // indirect
-	github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
-	golang.org/x/arch v0.14.0 // indirect
-	golang.org/x/crypto v0.35.0 // indirect
-	golang.org/x/net v0.35.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-)

go.sum

@@ -1,343 +1,104 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bytedance/sonic v1.12.7 h1:CQU8pxOy9HToxhndH0Kx/S1qU/CuS9GnKYrGioDcU1Q=
github.com/bytedance/sonic v1.12.7/go.mod h1:tnbal4mxOMju17EGfknm2XyYcpyCnIROYOEYuemj13I=
github.com/bytedance/sonic v1.12.9 h1:Od1BvK55NnewtGaJsTDeAOSnLVO2BTSLOe0+ooKokmQ=
github.com/bytedance/sonic v1.12.9/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.2 h1:jxAJuN9fOot/cyz5Q6dUuMJF5OqQ6+5GfA8FjjQ0R4o=
github.com/bytedance/sonic/loader v0.2.2/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0=
github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.1 h1:G8mzU81R2JA1nE5/8SRubzqvBMmAmri2VL8BIZPWvV0=
github.com/cilium/ebpf v0.17.1/go.mod h1:vay2FaYSmIlv3r8dNACd4mW/OCaZLJKJOo+IHBvCIO8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe h1:69JI97HlzP+PH5Mi1thcGlDoBr6PS2Oe+l3mNmAkbs4= github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27 h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA=
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6 h1:fV+JlCY0cCJh3l0jfE7iB3ZmrdfJSgfcjdrCQhPokGg= github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035 h1:4e+UEZaKPx0ZEiCMPUHMV51RGwbb1VJGCYqRFn/qmWM=
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA= github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA=
github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg= github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg=
github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0= github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0=
github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/gin-gonic/gin v1.5.0 h1:fi+bqFAx/oLK54somfCtEZs9HeH1LHVoEPUgARpTqyc=
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.25.0 h1:5Dh7cjvzR7BRZadnsVOzPhWsrwUr0nmsZJxEAnFLNO8=
github.com/go-playground/validator/v10 v10.25.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/mattn/goveralls v0.0.5 h1:spfq8AyZ0cCk57Za6/juJ5btQxeE1FaEGMdfcI+XO48=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/mattn/goveralls v0.0.5/go.mod h1:Xg2LHi51faXLyKXwsndxiW6uxEEQT9+3sjGzzwU4xy0=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/moby v27.4.1+incompatible h1:z6detzbcLRt7U+w4ovHV+8oYpJfpHKTmUbFWPG6cudA=
github.com/moby/moby v27.4.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/moby v28.0.1+incompatible h1:10ejBTwFhM3/9p6pSaKrLyXnx7QzzCmCYHAedOp67cQ=
github.com/moby/moby v28.0.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/sys/reexec v0.1.0 h1:RrBi8e0EBTLEgfruBOFcxtElzRGTEUkeIFaVXgU7wok=
github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
golang.org/x/arch v0.13.0 h1:KCkqVVV1kGg0X87TFysjCJ8MxtZEIU4Ja/yXGeoECdA=
golang.org/x/arch v0.13.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= golang.org/x/tools v0.0.0-20200113040837-eac381796e91/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 h1:6TB4+MaZlkcSsJDu+BS5yxSEuZIYhjWz+jhbSLEZylI=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= golang.org/x/tools v0.0.0-20200312194400-c312e98713c2/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=

internal/msg.go

@@ -1,8 +1,6 @@
 package internal
 
 import (
-	"bytes"
-	"encoding/json"
 	"fmt"
 	"time"
 )
@@ -26,11 +24,10 @@ type MirrorStatus struct {
 // A WorkerStatus is the information struct that describe
 // a worker, and sent from the manager to clients.
 type WorkerStatus struct {
-	ID           string    `json:"id"`
-	URL          string    `json:"url"`           // worker url
-	Token        string    `json:"token"`         // session token
-	LastOnline   time.Time `json:"last_online"`   // last seen
-	LastRegister time.Time `json:"last_register"` // last register time
+	ID         string    `json:"id"`
+	URL        string    `json:"url"`   // worker url
+	Token      string    `json:"token"` // session token
+	LastOnline time.Time `json:"last_online"` // last seen
 }
 
 type MirrorSchedules struct {
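For reference, the struct tags on both sides produce the same wire format, minus the removed field. A minimal sketch of what the master-side WorkerStatus marshals to, with the struct re-declared locally and the sample values invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local re-declaration of the master-side struct, for illustration only.
type WorkerStatus struct {
	ID           string    `json:"id"`
	URL          string    `json:"url"`
	Token        string    `json:"token"`
	LastOnline   time.Time `json:"last_online"`
	LastRegister time.Time `json:"last_register"`
}

func main() {
	w := WorkerStatus{ID: "w1", URL: "http://localhost:6000/cmd"}
	out, _ := json.Marshal(w)
	fmt.Println(string(out))
	// {"id":"w1","url":"http://localhost:6000/cmd","token":"",
	//  "last_online":"0001-01-01T00:00:00Z","last_register":"0001-01-01T00:00:00Z"}
}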
@@ -62,45 +59,21 @@
 )
 
 func (c CmdVerb) String() string {
-	mapping := map[CmdVerb]string{
-		CmdStart:   "start",
-		CmdStop:    "stop",
-		CmdDisable: "disable",
-		CmdRestart: "restart",
-		CmdPing:    "ping",
-		CmdReload:  "reload",
+	switch c {
+	case CmdStart:
+		return "start"
+	case CmdStop:
+		return "stop"
+	case CmdDisable:
+		return "disable"
+	case CmdRestart:
+		return "restart"
+	case CmdPing:
+		return "ping"
+	case CmdReload:
+		return "reload"
 	}
-	return mapping[c]
-}
-
-func NewCmdVerbFromString(s string) CmdVerb {
-	mapping := map[string]CmdVerb{
-		"start":   CmdStart,
-		"stop":    CmdStop,
-		"disable": CmdDisable,
-		"restart": CmdRestart,
-		"ping":    CmdPing,
-		"reload":  CmdReload,
-	}
-	return mapping[s]
-}
-
-// Marshal and Unmarshal for CmdVerb
-func (s CmdVerb) MarshalJSON() ([]byte, error) {
-	buffer := bytes.NewBufferString(`"`)
-	buffer.WriteString(s.String())
-	buffer.WriteString(`"`)
-	return buffer.Bytes(), nil
-}
-
-func (s *CmdVerb) UnmarshalJSON(b []byte) error {
-	var j string
-	err := json.Unmarshal(b, &j)
-	if err != nil {
-		return err
-	}
-	*s = NewCmdVerbFromString(j)
-	return nil
+	return "unknown"
 }
 
 // A WorkerCmd is the command message send from the
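Because master serializes CmdVerb as its string form, a command survives a JSON round trip between manager and worker. A self-contained sketch of the same marshal/unmarshal technique, with the enum trimmed to two verbs and re-declared locally for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

type CmdVerb uint8

const (
	CmdStart CmdVerb = iota
	CmdStop
)

func (c CmdVerb) String() string {
	return map[CmdVerb]string{CmdStart: "start", CmdStop: "stop"}[c]
}

// MarshalJSON emits the verb as a quoted string, e.g. "start".
func (c CmdVerb) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.String())
}

// UnmarshalJSON parses the quoted string back into the enum value.
func (c *CmdVerb) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	*c = map[string]CmdVerb{"start": CmdStart, "stop": CmdStop}[s]
	return nil
}

func main() {
	out, _ := json.Marshal(CmdStart)
	fmt.Println(string(out)) // "start"

	var v CmdVerb
	json.Unmarshal([]byte(`"stop"`), &v)
	fmt.Println(v) // stop
}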


@@ -71,7 +71,8 @@ func TestStatus(t *testing.T) {
 			Size:     "4GB",
 		}
-		var m2 WebMirrorStatus = BuildWebMirrorStatus(m)
+		var m2 WebMirrorStatus
+		m2 = BuildWebMirrorStatus(m)
 		// fmt.Printf("%#v", m2)
 		So(m2.Name, ShouldEqual, m.Name)
 		So(m2.Status, ShouldEqual, m.Status)

internal/util.go

@@ -7,9 +7,8 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"net/http"
-	"os"
 	"os/exec"
 	"regexp"
 	"time"
@@ -40,19 +39,19 @@ var rsyncExitValues = map[int]string{
 
 // GetTLSConfig generate tls.Config from CAFile
 func GetTLSConfig(CAFile string) (*tls.Config, error) {
-	caCert, err := os.ReadFile(CAFile)
+	caCert, err := ioutil.ReadFile(CAFile)
 	if err != nil {
 		return nil, err
 	}
 	caCertPool := x509.NewCertPool()
 	if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
-		return nil, errors.New("failed to add CA to pool")
+		return nil, errors.New("Failed to add CA to pool")
 	}
 
 	tlsConfig := &tls.Config{
 		RootCAs: caCertPool,
 	}
-	// tlsConfig.BuildNameToCertificate()
+	tlsConfig.BuildNameToCertificate()
 	return tlsConfig, nil
 }
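Master comments out the BuildNameToCertificate call because that method has been deprecated since Go 1.14; certificate selection now happens automatically. A usage sketch for GetTLSConfig, showing how the returned *tls.Config is typically wired into an http.Client; newHTTPSClient and the CA path are hypothetical, invented for illustration:

package internal // sketch: placed alongside GetTLSConfig for illustration

import (
	"log"
	"net/http"
	"time"
)

// newHTTPSClient builds a client that only trusts servers signed by
// the CA in caFile; GetTLSConfig is the function from the diff above.
func newHTTPSClient(caFile string) *http.Client {
	tlsConfig, err := GetTLSConfig(caFile)
	if err != nil {
		log.Fatal(err)
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
		Timeout:   5 * time.Second,
	}
}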
@@ -105,7 +104,7 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
 		return resp, errors.New("HTTP status code is not 200")
 	}
 	defer resp.Body.Close()
-	body, err := io.ReadAll(resp.Body)
+	body, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
 		return resp, err
 	}
@@ -115,10 +114,10 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
 // FindAllSubmatchInFile calls re.FindAllSubmatch to find matches in given file
 func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
 	if fileName == "/dev/null" {
-		err = errors.New("invalid log file")
+		err = errors.New("Invalid log file")
 		return
 	}
-	if content, err := os.ReadFile(fileName); err == nil {
+	if content, err := ioutil.ReadFile(fileName); err == nil {
 		matches = re.FindAllSubmatch(content, -1)
 		// fmt.Printf("FindAllSubmatchInFile: %q\n", matches)
 	}
@@ -128,7 +127,7 @@ func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
 // ExtractSizeFromLog uses a regexp to extract the size from log files
 func ExtractSizeFromLog(logFile string, re *regexp.Regexp) string {
 	matches, _ := FindAllSubmatchInFile(logFile, re)
-	if len(matches) == 0 {
+	if matches == nil || len(matches) == 0 {
 		return ""
 	}
 	// return the first capture group of the last occurrence
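The simplification on the master side relies on a Go guarantee: len of a nil slice is 0, so the explicit nil check is redundant. A one-line demonstration:

package main

import "fmt"

func main() {
	var matches [][][]byte // nil, as when no log lines matched
	fmt.Println(matches == nil, len(matches) == 0) // true true
}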

internal/util_test.go

@@ -1,6 +1,7 @@
 package internal
 
 import (
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"testing"
@@ -28,11 +29,11 @@ sent 7.55M bytes received 823.25M bytes 5.11M bytes/sec
 total size is 1.33T speedup is 1,604.11
 `
 	Convey("Log parser should work", t, func() {
-		tmpDir, err := os.MkdirTemp("", "tunasync")
+		tmpDir, err := ioutil.TempDir("", "tunasync")
 		So(err, ShouldBeNil)
 		defer os.RemoveAll(tmpDir)
 		logFile := filepath.Join(tmpDir, "rs.log")
 
-		err = os.WriteFile(logFile, []byte(realLogContent), 0755)
+		err = ioutil.WriteFile(logFile, []byte(realLogContent), 0755)
 		So(err, ShouldBeNil)
 
 		res := ExtractSizeFromRsyncLog(logFile)
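The os/ioutil churn in these hunks is the Go 1.16 migration: io/ioutil was deprecated and its helpers moved into os and io. A runnable sketch pairing the master-side calls with the v0.6.2 ones they replace (paths and contents invented for illustration):

package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	dir, _ := os.MkdirTemp("", "tunasync") // was ioutil.TempDir
	defer os.RemoveAll(dir)

	f, _ := os.CreateTemp(dir, "cfg") // was ioutil.TempFile
	f.Close()

	_ = os.WriteFile(f.Name(), []byte("x"), 0644) // was ioutil.WriteFile
	data, _ := os.ReadFile(f.Name())              // was ioutil.ReadFile

	rest, _ := io.ReadAll(strings.NewReader(string(data))) // was ioutil.ReadAll
	_ = rest
}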

internal/version.go

@@ -1,4 +1,4 @@
 package internal
 
 // Version of the program
-const Version string = "0.9.3"
+const Version string = "0.6.2"

manager/config.go

@@ -29,7 +29,6 @@ type FileConfig struct {
 	CACert string `toml:"ca_cert"`
 }
 
-// LoadConfig loads config from specified file
 func LoadConfig(cfgFile string, c *cli.Context) (*Config, error) {
 	cfg := new(Config)

manager/config_test.go

@@ -2,6 +2,7 @@ package manager
 
 import (
 	"fmt"
+	"io/ioutil"
 	"os"
 	"strings"
 	"testing"
@@ -36,11 +37,11 @@ func TestConfig(t *testing.T) {
 	Convey("load Config should work", t, func() {
 		Convey("create config file & cli context", func() {
-			tmpfile, err := os.CreateTemp("", "tunasync")
+			tmpfile, err := ioutil.TempFile("", "tunasync")
 			So(err, ShouldEqual, nil)
 			defer os.Remove(tmpfile.Name())
 
-			err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
+			err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
 			So(err, ShouldEqual, nil)
 			defer tmpfile.Close()

manager/db.go

@@ -4,13 +4,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
-	"time"
 
-	bolt "github.com/boltdb/bolt"
-	"github.com/dgraph-io/badger/v2"
-	"github.com/go-redis/redis/v8"
-	"github.com/pkg/errors"
-	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/boltdb/bolt"
 
 	. "github.com/tuna/tunasync/internal"
 )
@@ -21,7 +16,6 @@ type dbAdapter interface {
 	GetWorker(workerID string) (WorkerStatus, error)
 	DeleteWorker(workerID string) error
 	CreateWorker(w WorkerStatus) (WorkerStatus, error)
-	RefreshWorker(workerID string) (WorkerStatus, error)
 	UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
 	GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)
 	ListMirrorStatus(workerID string) ([]MirrorStatus, error)
@@ -30,14 +24,21 @@ type dbAdapter interface {
 	Close() error
 }
 
-// interface for a kv database
-type kvAdapter interface {
-	InitBucket(bucket string) error
-	Get(bucket string, key string) ([]byte, error)
-	GetAll(bucket string) (map[string][]byte, error)
-	Put(bucket string, key string, value []byte) error
-	Delete(bucket string, key string) error
-	Close() error
+func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
+	if dbType == "bolt" {
+		innerDB, err := bolt.Open(dbFile, 0600, nil)
+		if err != nil {
+			return nil, err
+		}
+		db := boltAdapter{
+			db:     innerDB,
+			dbFile: dbFile,
+		}
+		err = db.Init()
+		return &db, err
+	}
+
+	// unsupported db-type
+	return nil, fmt.Errorf("unsupported db-type: %s", dbType)
 }
 
 const (
@@ -45,222 +46,166 @@ const (
 	_statusBucketKey = "mirror_status"
 )
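The master-side refactor funnels every backend (bolt, redis, badger, leveldb) through the small kvAdapter interface above, so the dbAdapter logic is written once. A minimal in-memory implementation sketch, not part of the repo and purely illustrative, shows how little a new backend needs:

package main

// kvAdapter mirrors the interface in the diff above.
type kvAdapter interface {
	InitBucket(bucket string) error
	Get(bucket string, key string) ([]byte, error)
	GetAll(bucket string) (map[string][]byte, error)
	Put(bucket string, key string, value []byte) error
	Delete(bucket string, key string) error
	Close() error
}

// memKV is a hypothetical in-memory backend, for illustration only.
type memKV struct {
	buckets map[string]map[string][]byte
}

func newMemKV() *memKV {
	return &memKV{buckets: make(map[string]map[string][]byte)}
}

func (m *memKV) InitBucket(bucket string) error {
	if _, ok := m.buckets[bucket]; !ok {
		m.buckets[bucket] = make(map[string][]byte)
	}
	return nil
}

func (m *memKV) Get(bucket, key string) ([]byte, error) {
	return m.buckets[bucket][key], nil
}

func (m *memKV) GetAll(bucket string) (map[string][]byte, error) {
	return m.buckets[bucket], nil // shares the map; fine for a sketch
}

func (m *memKV) Put(bucket, key string, value []byte) error {
	m.buckets[bucket][key] = value
	return nil
}

func (m *memKV) Delete(bucket, key string) error {
	delete(m.buckets[bucket], key)
	return nil
}

func (m *memKV) Close() error { return nil }

var _ kvAdapter = (*memKV)(nil) // compile-time interface check

func main() {}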
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) { type boltAdapter struct {
if dbType == "bolt" { db *bolt.DB
innerDB, err := bolt.Open(dbFile, 0600, &bolt.Options{ dbFile string
Timeout: 5 * time.Second,
})
if err != nil {
return nil, err
}
db := boltAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "redis" {
opt, err := redis.ParseURL(dbFile)
if err != nil {
return nil, fmt.Errorf("bad redis url: %s", err)
}
innerDB := redis.NewClient(opt)
db := redisAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "badger" {
innerDB, err := badger.Open(badger.DefaultOptions(dbFile))
if err != nil {
return nil, err
}
db := badgerAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "leveldb" {
innerDB, err := leveldb.OpenFile(dbFile, nil)
if err != nil {
return nil, err
}
db := leveldbAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
}
// unsupported db-type
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
}

// master branch: kvDBAdapter, which delegates to a pluggable kv backend

// use the underlying kv database to store data
type kvDBAdapter struct {
	db kvAdapter
}

func (b *kvDBAdapter) Init() error {
	err := b.db.InitBucket(_workerBucketKey)
	if err != nil {
		return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
	}
	err = b.db.InitBucket(_statusBucketKey)
	if err != nil {
		return fmt.Errorf("create bucket %s error: %s", _statusBucketKey, err.Error())
	}
	return err
}

func (b *kvDBAdapter) ListWorkers() (ws []WorkerStatus, err error) {
	var workers map[string][]byte
	workers, err = b.db.GetAll(_workerBucketKey)

	var w WorkerStatus
	for _, v := range workers {
		jsonErr := json.Unmarshal(v, &w)
		if jsonErr != nil {
			err = errors.Wrap(err, jsonErr.Error())
			continue
		}
		ws = append(ws, w)
	}
	return
}

func (b *kvDBAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
	var v []byte
	v, _ = b.db.Get(_workerBucketKey, workerID)
	if v == nil {
		err = fmt.Errorf("invalid workerID %s", workerID)
	} else {
		err = json.Unmarshal(v, &w)
	}
	return
}

func (b *kvDBAdapter) DeleteWorker(workerID string) error {
	v, _ := b.db.Get(_workerBucketKey, workerID)
	if v == nil {
		return fmt.Errorf("invalid workerID %s", workerID)
	}
	return b.db.Delete(_workerBucketKey, workerID)
}

func (b *kvDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
	v, err := json.Marshal(w)
	if err == nil {
		err = b.db.Put(_workerBucketKey, w.ID, v)
	}
	return w, err
}

func (b *kvDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
	w, err = b.GetWorker(workerID)
	if err == nil {
		w.LastOnline = time.Now()
		w, err = b.CreateWorker(w)
	}
	return w, err
}

func (b *kvDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
	id := mirrorID + "/" + workerID
	v, err := json.Marshal(status)
	if err == nil {
		err = b.db.Put(_statusBucketKey, id, v)
	}
	return status, err
}

func (b *kvDBAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
	id := mirrorID + "/" + workerID
	var v []byte
	v, err = b.db.Get(_statusBucketKey, id)
	if v == nil {
		err = fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
	} else if err == nil {
		err = json.Unmarshal(v, &m)
	}
	return
}

func (b *kvDBAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
	var vals map[string][]byte
	vals, err = b.db.GetAll(_statusBucketKey)
	if err != nil {
		return
	}
	for k, v := range vals {
		if wID := strings.Split(k, "/")[1]; wID == workerID {
			var m MirrorStatus
			jsonErr := json.Unmarshal(v, &m)
			if jsonErr != nil {
				err = errors.Wrap(err, jsonErr.Error())
				continue
			}
			ms = append(ms, m)
		}
	}
	return
}

func (b *kvDBAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
	var vals map[string][]byte
	vals, err = b.db.GetAll(_statusBucketKey)
	if err != nil {
		return
	}
	for _, v := range vals {
		var m MirrorStatus
		jsonErr := json.Unmarshal(v, &m)
		if jsonErr != nil {
			err = errors.Wrap(err, jsonErr.Error())
			continue
		}
		ms = append(ms, m)
	}
	return
}

func (b *kvDBAdapter) FlushDisabledJobs() (err error) {
	var vals map[string][]byte
	vals, err = b.db.GetAll(_statusBucketKey)
	if err != nil {
		return
	}
	for k, v := range vals {
		var m MirrorStatus
		jsonErr := json.Unmarshal(v, &m)
		if jsonErr != nil {
			err = errors.Wrap(err, jsonErr.Error())
			continue
		}
		if m.Status == Disabled || len(m.Name) == 0 {
			deleteErr := b.db.Delete(_statusBucketKey, k)
			if deleteErr != nil {
				err = errors.Wrap(err, deleteErr.Error())
			}
		}
	}
	return
}

func (b *kvDBAdapter) Close() error {
	if b.db != nil {
		return b.db.Close()
	}
	return nil
}

// v0.6.2: boltAdapter, which implemented dbAdapter directly on boltdb transactions

func (b *boltAdapter) Init() (err error) {
	return b.db.Update(func(tx *bolt.Tx) error {
		_, err = tx.CreateBucketIfNotExists([]byte(_workerBucketKey))
		if err != nil {
			return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
		}
		_, err = tx.CreateBucketIfNotExists([]byte(_statusBucketKey))
		if err != nil {
			return fmt.Errorf("create bucket %s error: %s", _statusBucketKey, err.Error())
		}
		return nil
	})
}

func (b *boltAdapter) ListWorkers() (ws []WorkerStatus, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_workerBucketKey))
		c := bucket.Cursor()
		var w WorkerStatus
		for k, v := c.First(); k != nil; k, v = c.Next() {
			jsonErr := json.Unmarshal(v, &w)
			if jsonErr != nil {
				err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
				continue
			}
			ws = append(ws, w)
		}
		return err
	})
	return
}

func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_workerBucketKey))
		v := bucket.Get([]byte(workerID))
		if v == nil {
			return fmt.Errorf("invalid workerID %s", workerID)
		}
		err := json.Unmarshal(v, &w)
		return err
	})
	return
}

func (b *boltAdapter) DeleteWorker(workerID string) (err error) {
	err = b.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_workerBucketKey))
		v := bucket.Get([]byte(workerID))
		if v == nil {
			return fmt.Errorf("invalid workerID %s", workerID)
		}
		err := bucket.Delete([]byte(workerID))
		return err
	})
	return
}

func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
	err := b.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_workerBucketKey))
		v, err := json.Marshal(w)
		if err != nil {
			return err
		}
		err = bucket.Put([]byte(w.ID), v)
		return err
	})
	return w, err
}

func (b *boltAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
	id := mirrorID + "/" + workerID
	err := b.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_statusBucketKey))
		v, err := json.Marshal(status)
		err = bucket.Put([]byte(id), v)
		return err
	})
	return status, err
}

func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
	id := mirrorID + "/" + workerID
	err = b.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_statusBucketKey))
		v := bucket.Get([]byte(id))
		if v == nil {
			return fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
		}
		err := json.Unmarshal(v, &m)
		return err
	})
	return
}

func (b *boltAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_statusBucketKey))
		c := bucket.Cursor()
		var m MirrorStatus
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if wID := strings.Split(string(k), "/")[1]; wID == workerID {
				jsonErr := json.Unmarshal(v, &m)
				if jsonErr != nil {
					err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
					continue
				}
				ms = append(ms, m)
			}
		}
		return err
	})
	return
}

func (b *boltAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_statusBucketKey))
		c := bucket.Cursor()
		var m MirrorStatus
		for k, v := c.First(); k != nil; k, v = c.Next() {
			jsonErr := json.Unmarshal(v, &m)
			if jsonErr != nil {
				err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
				continue
			}
			ms = append(ms, m)
		}
		return err
	})
	return
}

func (b *boltAdapter) FlushDisabledJobs() (err error) {
	err = b.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(_statusBucketKey))
		c := bucket.Cursor()
		var m MirrorStatus
		for k, v := c.First(); k != nil; k, v = c.Next() {
			jsonErr := json.Unmarshal(v, &m)
			if jsonErr != nil {
				err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
				continue
			}
			if m.Status == Disabled || len(m.Name) == 0 {
				err = c.Delete()
			}
		}
		return err
	})
	return
}

func (b *boltAdapter) Close() error {
	if b.db != nil {
		return b.db.Close()
	}
	return nil
}
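The kv interface these methods call into is declared outside this hunk; reconstructed from the call sites above and the four backend adapters below, it presumably reads (a sketch, not the verbatim source):

type kvAdapter interface {
	InitBucket(bucket string) error
	Get(bucket string, key string) ([]byte, error)
	GetAll(bucket string) (map[string][]byte, error)
	Put(bucket string, key string, value []byte) error
	Delete(bucket string, key string) error
	Close() error
}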

View File

@ -1,67 +0,0 @@
package manager
import (
"github.com/dgraph-io/badger/v2"
)
// implement kv interface backed by badger
type badgerAdapter struct {
db *badger.DB
}
func (b *badgerAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *badgerAdapter) Get(bucket string, key string) (v []byte, err error) {
b.db.View(func(tx *badger.Txn) error {
var item *badger.Item
item, err = tx.Get([]byte(bucket + key))
if item != nil {
v, err = item.ValueCopy(nil)
}
return nil
})
return
}
func (b *badgerAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
b.db.View(func(tx *badger.Txn) error {
it := tx.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
prefix := []byte(bucket)
m = make(map[string][]byte)
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
k := string(item.Key())
actualKey := k[len(bucket):]
var v []byte
v, err = item.ValueCopy(nil)
m[actualKey] = v
}
return nil
})
return
}
func (b *badgerAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Update(func(tx *badger.Txn) error {
err := tx.Set([]byte(bucket+key), value)
return err
})
return err
}
func (b *badgerAdapter) Delete(bucket string, key string) error {
err := b.db.Update(func(tx *badger.Txn) error {
err := tx.Delete([]byte(bucket + key))
return err
})
return err
}
func (b *badgerAdapter) Close() error {
return b.db.Close()
}
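Note that this adapter (and the leveldb one further down) emulates buckets by plain concatenation, bucket + key, with no separator. With the fixed bucket names used by kvDBAdapter that is harmless, but in general distinct (bucket, key) pairs can collide; a minimal standalone illustration:

package main

import "fmt"

func main() {
	k1 := "status" + "arch/worker1" // bucket = "status", key = "arch/worker1"
	k2 := "statusarch" + "/worker1" // bucket = "statusarch", key = "/worker1"
	fmt.Println(k1 == k2)           // true: both pairs map to the same physical key
}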

View File

@ -1,66 +0,0 @@
package manager
import (
"fmt"
bolt "github.com/boltdb/bolt"
)
// implement kv interface backed by boltdb
type boltAdapter struct {
db *bolt.DB
}
func (b *boltAdapter) InitBucket(bucket string) (err error) {
return b.db.Update(func(tx *bolt.Tx) error {
_, err = tx.CreateBucketIfNotExists([]byte(bucket))
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
return nil
})
}
func (b *boltAdapter) Get(bucket string, key string) (v []byte, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
v = bucket.Get([]byte(key))
return nil
})
return
}
func (b *boltAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
c := bucket.Cursor()
m = make(map[string][]byte)
for k, v := c.First(); k != nil; k, v = c.Next() {
m[string(k)] = v
}
return nil
})
return
}
func (b *boltAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
err := bucket.Put([]byte(key), value)
return err
})
return err
}
func (b *boltAdapter) Delete(bucket string, key string) error {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
err := bucket.Delete([]byte(key))
return err
})
return err
}
func (b *boltAdapter) Close() error {
return b.db.Close()
}
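One caveat worth flagging in Get: boltdb documents that the byte slice returned by bucket.Get is only valid for the life of the transaction, yet here it is handed to the caller after View returns. A defensive variant would copy it out first (a sketch, not the shipped code):

func (b *boltAdapter) getCopied(bucket string, key string) (v []byte, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		if val := tx.Bucket([]byte(bucket)).Get([]byte(key)); val != nil {
			v = append([]byte(nil), val...) // copy out of the mmap'd page
		}
		return nil
	})
	return
}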

View File

@ -1,51 +0,0 @@
package manager
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
)
// implement kv interface backed by leveldb
type leveldbAdapter struct {
db *leveldb.DB
}
func (b *leveldbAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *leveldbAdapter) Get(bucket string, key string) (v []byte, err error) {
v, err = b.db.Get([]byte(bucket+key), nil)
return
}
func (b *leveldbAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
it := b.db.NewIterator(util.BytesPrefix([]byte(bucket)), nil)
defer it.Release()
m = make(map[string][]byte)
for it.Next() {
k := string(it.Key())
actualKey := k[len(bucket):]
// it.Value() changes on next iteration
val := it.Value()
v := make([]byte, len(val))
copy(v, val)
m[actualKey] = v
}
return
}
func (b *leveldbAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Put([]byte(bucket+key), value, nil)
return err
}
func (b *leveldbAdapter) Delete(bucket string, key string) error {
err := b.db.Delete([]byte(bucket+key), nil)
return err
}
func (b *leveldbAdapter) Close() error {
return b.db.Close()
}

View File

@ -1,54 +0,0 @@
package manager
import (
"context"
"github.com/go-redis/redis/v8"
)
// implement kv interface backed by redis
type redisAdapter struct {
db *redis.Client
}
var ctx = context.Background()
func (b *redisAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *redisAdapter) Get(bucket string, key string) (v []byte, err error) {
var val string
val, err = b.db.HGet(ctx, bucket, key).Result()
if err == nil {
v = []byte(val)
}
return
}
func (b *redisAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
var val map[string]string
val, err = b.db.HGetAll(ctx, bucket).Result()
if err == nil && val != nil {
m = make(map[string][]byte)
for k, v := range val {
m[k] = []byte(v)
}
}
return
}
func (b *redisAdapter) Put(bucket string, key string, value []byte) error {
_, err := b.db.HSet(ctx, bucket, key, string(value)).Result()
return err
}
func (b *redisAdapter) Delete(bucket string, key string) error {
_, err := b.db.HDel(ctx, bucket, key).Result()
return err
}
func (b *redisAdapter) Close() error {
return b.db.Close()
}
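For context on the resulting layout: each bucket becomes one redis hash, and each key a field of that hash. The sketch below exercises the same commands directly; the URL form matches what the test file later passes to makeDBAdapter ("redis://<addr>"), presumably parsed with redis.ParseURL in the branch above this excerpt.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	opt, err := redis.ParseURL("redis://127.0.0.1:6379/0")
	if err != nil {
		panic(err)
	}
	client := redis.NewClient(opt)
	defer client.Close()
	ctx := context.Background()

	// equivalent of Put("workers", "w1", ...): one field in the "workers" hash
	if err := client.HSet(ctx, "workers", "w1", `{"id":"w1"}`).Err(); err != nil {
		panic(err)
	}
	// equivalent of GetAll("workers"): HGetAll over the same hash
	all, _ := client.HGetAll(ctx, "workers").Result()
	fmt.Println(all["w1"])
}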

View File

@ -2,167 +2,19 @@ package manager
import ( import (
"encoding/json" "encoding/json"
"fmt" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"testing" "testing"
"time" "time"
"github.com/alicebob/miniredis"
. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
. "github.com/tuna/tunasync/internal" . "github.com/tuna/tunasync/internal"
) )
func SortMirrorStatus(status []MirrorStatus) { func TestBoltAdapter(t *testing.T) {
sort.Slice(status, func(l, r int) bool {
return status[l].Name < status[r].Name
})
}
func DBAdapterTest(db dbAdapter) {
var err error
testWorkerIDs := []string{"test_worker1", "test_worker2"}
Convey("create worker", func() {
for _, id := range testWorkerIDs {
w := WorkerStatus{
ID: id,
Token: "token_" + id,
LastOnline: time.Now(),
LastRegister: time.Now(),
}
_, err = db.CreateWorker(w)
So(err, ShouldBeNil)
}
Convey("get existent worker", func() {
_, err := db.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
})
Convey("list existent workers", func() {
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
Convey("get non-existent worker", func() {
_, err := db.GetWorker("invalid workerID")
So(err, ShouldNotBeNil)
})
Convey("delete existent worker", func() {
err := db.DeleteWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
_, err = db.GetWorker(testWorkerIDs[0])
So(err, ShouldNotBeNil)
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 1)
})
Convey("delete non-existent worker", func() {
err := db.DeleteWorker("invalid workerID")
So(err, ShouldNotBeNil)
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
})
Convey("update mirror status", func() {
status := []MirrorStatus{
{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now().Add(-time.Hour),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now().Add(-time.Minute),
LastStarted: time.Now().Add(-time.Second),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}
SortMirrorStatus(status)
for _, s := range status {
_, err := db.UpdateMirrorStatus(s.Worker, s.Name, s)
So(err, ShouldBeNil)
}
Convey("get mirror status", func() {
m, err := db.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status[0])
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(m)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list mirror status", func() {
ms, err := db.ListMirrorStatus(testWorkerIDs[0])
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list all mirror status", func() {
ms, err := db.ListAllMirrorStatus()
So(err, ShouldBeNil)
SortMirrorStatus(ms)
expectedJSON, err := json.Marshal(status)
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("flush disabled jobs", func() {
ms, err := db.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 3)
err = db.FlushDisabledJobs()
So(err, ShouldBeNil)
ms, err = db.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 2)
})
})
}
func TestDBAdapter(t *testing.T) {
Convey("boltAdapter should work", t, func() { Convey("boltAdapter should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -176,60 +28,138 @@ func TestDBAdapter(t *testing.T) {
So(err, ShouldBeNil) So(err, ShouldBeNil)
}() }()
DBAdapterTest(boltDB) testWorkerIDs := []string{"test_worker1", "test_worker2"}
}) Convey("create worker", func() {
for _, id := range testWorkerIDs {
w := WorkerStatus{
ID: id,
Token: "token_" + id,
LastOnline: time.Now(),
}
w, err = boltDB.CreateWorker(w)
So(err, ShouldBeNil)
}
Convey("redisAdapter should work", t, func() { Convey("get existent worker", func() {
mr, err := miniredis.Run() _, err := boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil) So(err, ShouldBeNil)
})
addr := fmt.Sprintf("redis://%s", mr.Addr()) Convey("list existent workers", func() {
redisDB, err := makeDBAdapter("redis", addr) ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
defer func() { Convey("get non-existent worker", func() {
// close redisDB _, err := boltDB.GetWorker("invalid workerID")
err := redisDB.Close() So(err, ShouldNotBeNil)
So(err, ShouldBeNil) })
mr.Close()
}()
DBAdapterTest(redisDB) Convey("delete existent worker", func() {
}) err := boltDB.DeleteWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
_, err = boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldNotBeNil)
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 1)
})
Convey("badgerAdapter should work", t, func() { Convey("delete non-existent worker", func() {
tmpDir, err := os.MkdirTemp("", "tunasync") err := boltDB.DeleteWorker("invalid workerID")
defer os.RemoveAll(tmpDir) So(err, ShouldNotBeNil)
So(err, ShouldBeNil) ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
})
dbType, dbFile := "badger", filepath.Join(tmpDir, "badger.db") Convey("update mirror status", func() {
badgerDB, err := makeDBAdapter(dbType, dbFile) status := []MirrorStatus{
So(err, ShouldBeNil) MirrorStatus{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
MirrorStatus{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now().Add(-time.Hour),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
MirrorStatus{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now().Add(-time.Minute),
LastStarted: time.Now().Add(-time.Second),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}
defer func() { for _, s := range status {
// close badgerDB _, err := boltDB.UpdateMirrorStatus(s.Worker, s.Name, s)
err := badgerDB.Close() So(err, ShouldBeNil)
So(err, ShouldBeNil)
}()
DBAdapterTest(badgerDB) }
})
Convey("leveldbAdapter should work", t, func() { Convey("get mirror status", func() {
tmpDir, err := os.MkdirTemp("", "tunasync") m, err := boltDB.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
defer os.RemoveAll(tmpDir) So(err, ShouldBeNil)
So(err, ShouldBeNil) expectedJSON, err := json.Marshal(status[0])
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(m)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
dbType, dbFile := "leveldb", filepath.Join(tmpDir, "leveldb.db") Convey("list mirror status", func() {
leveldbDB, err := makeDBAdapter(dbType, dbFile) ms, err := boltDB.ListMirrorStatus(testWorkerIDs[0])
So(err, ShouldBeNil) So(err, ShouldBeNil)
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
defer func() { Convey("list all mirror status", func() {
// close leveldbDB ms, err := boltDB.ListAllMirrorStatus()
err := leveldbDB.Close() So(err, ShouldBeNil)
So(err, ShouldBeNil) expectedJSON, err := json.Marshal(status)
}() So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("flush disabled jobs", func() {
ms, err := boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 3)
err = boltDB.FlushDisabledJobs()
So(err, ShouldBeNil)
ms, err = boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 2)
})
})
DBAdapterTest(leveldbDB)
}) })
} }
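Since DBAdapterTest depends only on the dbAdapter interface, adding a backend to this matrix is cheap. As an illustration (hypothetical code, not part of the diff), even a trivial in-memory kvAdapter would do:

// Would live in package manager; needs "strings" plus the imports above.
type mapAdapter struct{ m map[string][]byte }

func (a *mapAdapter) InitBucket(bucket string) error { return nil }
func (a *mapAdapter) Get(bucket string, key string) ([]byte, error) {
	return a.m[bucket+"/"+key], nil // nil signals "not found" to kvDBAdapter
}
func (a *mapAdapter) GetAll(bucket string) (map[string][]byte, error) {
	out := make(map[string][]byte)
	for k, v := range a.m {
		if strings.HasPrefix(k, bucket+"/") {
			out[strings.TrimPrefix(k, bucket+"/")] = v
		}
	}
	return out, nil
}
func (a *mapAdapter) Put(bucket string, key string, value []byte) error {
	a.m[bucket+"/"+key] = value
	return nil
}
func (a *mapAdapter) Delete(bucket string, key string) error {
	delete(a.m, bucket+"/"+key)
	return nil
}
func (a *mapAdapter) Close() error { return nil }

func TestMapAdapter(t *testing.T) {
	Convey("mapAdapter should work", t, func() {
		kv := kvDBAdapter{db: &mapAdapter{m: make(map[string][]byte)}}
		So(kv.Init(), ShouldBeNil)
		DBAdapterTest(&kv)
	})
}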

View File

@ -4,7 +4,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"sync"
"time" "time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@ -24,7 +23,6 @@ type Manager struct {
cfg *Config cfg *Config
engine *gin.Engine engine *gin.Engine
adapter dbAdapter adapter dbAdapter
rwmu sync.RWMutex
httpClient *http.Client httpClient *http.Client
} }
@ -129,11 +127,9 @@ func (s *Manager) Run() {
} }
} }
// listAllJobs respond with all jobs of specified workers // listAllJobs repond with all jobs of specified workers
func (s *Manager) listAllJobs(c *gin.Context) { func (s *Manager) listAllJobs(c *gin.Context) {
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListAllMirrorStatus() mirrorStatusList, err := s.adapter.ListAllMirrorStatus()
s.rwmu.RUnlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to list all mirror status: %s", err := fmt.Errorf("failed to list all mirror status: %s",
err.Error(), err.Error(),
@ -154,9 +150,7 @@ func (s *Manager) listAllJobs(c *gin.Context) {
// flushDisabledJobs deletes all jobs that marks as deleted // flushDisabledJobs deletes all jobs that marks as deleted
func (s *Manager) flushDisabledJobs(c *gin.Context) { func (s *Manager) flushDisabledJobs(c *gin.Context) {
s.rwmu.Lock()
err := s.adapter.FlushDisabledJobs() err := s.adapter.FlushDisabledJobs()
s.rwmu.Unlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to flush disabled jobs: %s", err := fmt.Errorf("failed to flush disabled jobs: %s",
err.Error(), err.Error(),
@ -171,9 +165,7 @@ func (s *Manager) flushDisabledJobs(c *gin.Context) {
// deleteWorker deletes one worker by id // deleteWorker deletes one worker by id
func (s *Manager) deleteWorker(c *gin.Context) { func (s *Manager) deleteWorker(c *gin.Context) {
workerID := c.Param("id") workerID := c.Param("id")
s.rwmu.Lock()
err := s.adapter.DeleteWorker(workerID) err := s.adapter.DeleteWorker(workerID)
s.rwmu.Unlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to delete worker: %s", err := fmt.Errorf("failed to delete worker: %s",
err.Error(), err.Error(),
@ -186,12 +178,10 @@ func (s *Manager) deleteWorker(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"}) c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"})
} }
// listWorkers respond with information of all the workers // listWrokers respond with informations of all the workers
func (s *Manager) listWorkers(c *gin.Context) { func (s *Manager) listWorkers(c *gin.Context) {
var workerInfos []WorkerStatus var workerInfos []WorkerStatus
s.rwmu.RLock()
workers, err := s.adapter.ListWorkers() workers, err := s.adapter.ListWorkers()
s.rwmu.RUnlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to list workers: %s", err := fmt.Errorf("failed to list workers: %s",
err.Error(), err.Error(),
@ -203,11 +193,8 @@ func (s *Manager) listWorkers(c *gin.Context) {
for _, w := range workers { for _, w := range workers {
workerInfos = append(workerInfos, workerInfos = append(workerInfos,
WorkerStatus{ WorkerStatus{
ID: w.ID, ID: w.ID,
URL: w.URL, LastOnline: w.LastOnline,
Token: "REDACTED",
LastOnline: w.LastOnline,
LastRegister: w.LastRegister,
}) })
} }
c.JSON(http.StatusOK, workerInfos) c.JSON(http.StatusOK, workerInfos)
@ -218,7 +205,6 @@ func (s *Manager) registerWorker(c *gin.Context) {
var _worker WorkerStatus var _worker WorkerStatus
c.BindJSON(&_worker) c.BindJSON(&_worker)
_worker.LastOnline = time.Now() _worker.LastOnline = time.Now()
_worker.LastRegister = time.Now()
newWorker, err := s.adapter.CreateWorker(_worker) newWorker, err := s.adapter.CreateWorker(_worker)
if err != nil { if err != nil {
err := fmt.Errorf("failed to register worker: %s", err := fmt.Errorf("failed to register worker: %s",
@ -237,9 +223,7 @@ func (s *Manager) registerWorker(c *gin.Context) {
// listJobsOfWorker respond with all the jobs of the specified worker // listJobsOfWorker respond with all the jobs of the specified worker
func (s *Manager) listJobsOfWorker(c *gin.Context) { func (s *Manager) listJobsOfWorker(c *gin.Context) {
workerID := c.Param("id") workerID := c.Param("id")
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID) mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID)
s.rwmu.RUnlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to list jobs of worker %s: %s", err := fmt.Errorf("failed to list jobs of worker %s: %s",
workerID, err.Error(), workerID, err.Error(),
@ -267,16 +251,13 @@ func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
if len(mirrorName) == 0 { if len(mirrorName) == 0 {
s.returnErrJSON( s.returnErrJSON(
c, http.StatusBadRequest, c, http.StatusBadRequest,
errors.New("mirror Name should not be empty"), errors.New("Mirror Name should not be empty"),
) )
} }
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName) curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil { if err != nil {
logger.Errorf("failed to get job %s of worker %s: %s", fmt.Errorf("failed to get job %s of worker %s: %s",
mirrorName, workerID, err.Error(), mirrorName, workerID, err.Error(),
) )
continue continue
@ -288,9 +269,7 @@ func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
} }
curStatus.Scheduled = schedule.NextSchedule curStatus.Scheduled = schedule.NextSchedule
s.rwmu.Lock()
_, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus) _, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus)
s.rwmu.Unlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s", err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(), mirrorName, workerID, err.Error(),
@ -312,14 +291,11 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
if len(mirrorName) == 0 { if len(mirrorName) == 0 {
s.returnErrJSON( s.returnErrJSON(
c, http.StatusBadRequest, c, http.StatusBadRequest,
errors.New("mirror Name should not be empty"), errors.New("Mirror Name should not be empty"),
) )
} }
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName) curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
curTime := time.Now() curTime := time.Now()
@ -355,9 +331,7 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status) logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
} }
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status) newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s", err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(), mirrorName, workerID, err.Error(),
@ -379,10 +353,7 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
c.BindJSON(&msg) c.BindJSON(&msg)
mirrorName := msg.Name mirrorName := msg.Name
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
status, err := s.adapter.GetMirrorStatus(workerID, mirrorName) status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil { if err != nil {
logger.Errorf( logger.Errorf(
"Failed to get status of mirror %s @<%s>: %s", "Failed to get status of mirror %s @<%s>: %s",
@ -399,9 +370,7 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size) logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status) newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil { if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s", err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(), mirrorName, workerID, err.Error(),
@ -424,9 +393,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
return return
} }
s.rwmu.RLock()
w, err := s.adapter.GetWorker(workerID) w, err := s.adapter.GetWorker(workerID)
s.rwmu.RUnlock()
if err != nil { if err != nil {
err := fmt.Errorf("worker %s is not registered yet", workerID) err := fmt.Errorf("worker %s is not registered yet", workerID)
s.returnErrJSON(c, http.StatusBadRequest, err) s.returnErrJSON(c, http.StatusBadRequest, err)
@ -443,9 +410,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
// update job status, even if the job did not disable successfully, // update job status, even if the job did not disable successfully,
// this status should be set as disabled // this status should be set as disabled
s.rwmu.RLock()
curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID) curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID)
s.rwmu.RUnlock()
changed := false changed := false
switch clientCmd.Cmd { switch clientCmd.Cmd {
case CmdDisable: case CmdDisable:
@ -456,9 +421,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
changed = true changed = true
} }
if changed { if changed {
s.rwmu.Lock()
s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat) s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
s.rwmu.Unlock()
} }
logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID) logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)

View File

@ -3,12 +3,10 @@ package manager
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"math/rand" "math/rand"
"net/http" "net/http"
"strings" "strings"
"sync"
"sync/atomic"
"testing" "testing"
"time" "time"
@ -36,7 +34,7 @@ func TestHTTPServer(t *testing.T) {
So(s, ShouldNotBeNil) So(s, ShouldNotBeNil)
s.setDBAdapter(&mockDBAdapter{ s.setDBAdapter(&mockDBAdapter{
workerStore: map[string]WorkerStatus{ workerStore: map[string]WorkerStatus{
_magicBadWorkerID: { _magicBadWorkerID: WorkerStatus{
ID: _magicBadWorkerID, ID: _magicBadWorkerID,
}}, }},
statusStore: make(map[string]MirrorStatus), statusStore: make(map[string]MirrorStatus),
@ -48,7 +46,7 @@ func TestHTTPServer(t *testing.T) {
So(resp.StatusCode, ShouldEqual, http.StatusOK) So(resp.StatusCode, ShouldEqual, http.StatusOK)
So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8") So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
defer resp.Body.Close() defer resp.Body.Close()
body, err := io.ReadAll(resp.Body) body, err := ioutil.ReadAll(resp.Body)
So(err, ShouldBeNil) So(err, ShouldBeNil)
var p map[string]string var p map[string]string
err = json.Unmarshal(body, &p) err = json.Unmarshal(body, &p)
@ -66,34 +64,6 @@ func TestHTTPServer(t *testing.T) {
So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail")) So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail"))
}) })
Convey("when register multiple workers", func(ctx C) {
N := 10
var cnt uint32
for i := 0; i < N; i++ {
go func(id int) {
w := WorkerStatus{
ID: fmt.Sprintf("worker%d", id),
}
resp, err := PostJSON(baseURL+"/workers", w, nil)
ctx.So(err, ShouldBeNil)
ctx.So(resp.StatusCode, ShouldEqual, http.StatusOK)
atomic.AddUint32(&cnt, 1)
}(i)
}
time.Sleep(2 * time.Second)
So(cnt, ShouldEqual, N)
Convey("list all workers", func(ctx C) {
resp, err := http.Get(baseURL + "/workers")
So(err, ShouldBeNil)
defer resp.Body.Close()
var actualResponseObj []WorkerStatus
err = json.NewDecoder(resp.Body).Decode(&actualResponseObj)
So(err, ShouldBeNil)
So(len(actualResponseObj), ShouldEqual, N+1)
})
})
Convey("when register a worker", func(ctx C) { Convey("when register a worker", func(ctx C) {
w := WorkerStatus{ w := WorkerStatus{
ID: "test_worker1", ID: "test_worker1",
@ -180,9 +150,9 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream) So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size) So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster) So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeLessThan, 1*time.Second) So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet
So(time.Since(m.LastEnded), ShouldBeLessThan, 1*time.Second) So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
}) })
@ -208,11 +178,11 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream) So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size) So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster) So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeLessThan, 3*time.Second) So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second) So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second)
So(time.Since(m.LastStarted), ShouldBeLessThan, 2*time.Second) So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Since(m.LastEnded), ShouldBeLessThan, 3*time.Second) So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastEnded), ShouldBeGreaterThan, 1*time.Second) So(time.Now().Sub(m.LastEnded), ShouldBeGreaterThan, 1*time.Second)
}) })
@ -228,9 +198,9 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream) So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size) So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster) So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second) So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second) So(time.Now().Sub(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second)
So(time.Since(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second) So(time.Now().Sub(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second)
}) })
@ -259,17 +229,17 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream) So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, "5GB") So(m.Size, ShouldEqual, "5GB")
So(m.IsMaster, ShouldEqual, status.IsMaster) So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeLessThan, 3*time.Second) So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Since(m.LastStarted), ShouldBeLessThan, 2*time.Second) So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Since(m.LastEnded), ShouldBeLessThan, 3*time.Second) So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
}) })
}) })
Convey("Update schedule of valid mirrors", func(ctx C) { Convey("Update schedule of valid mirrors", func(ctx C) {
msg := MirrorSchedules{ msg := MirrorSchedules{
Schedules: []MirrorSchedule{ []MirrorSchedule{
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)}, MirrorSchedule{"arch-sync1", time.Now().Add(time.Minute * 10)},
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)}, MirrorSchedule{"arch-sync2", time.Now().Add(time.Minute * 7)},
}, },
} }
@ -313,9 +283,9 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream) So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size) So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster) So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Since(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second) So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second)
So(time.Since(m.LastStarted), ShouldBeGreaterThan, 3*time.Second) So(time.Now().Sub(m.LastStarted), ShouldBeGreaterThan, 3*time.Second)
So(time.Since(m.LastEnded), ShouldBeLessThan, 1*time.Second) So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
}) })
}) })
@ -345,9 +315,9 @@ func TestHTTPServer(t *testing.T) {
Convey("update schedule of an non-existent worker", func(ctx C) { Convey("update schedule of an non-existent worker", func(ctx C) {
invalidWorker := "test_worker2" invalidWorker := "test_worker2"
sch := MirrorSchedules{ sch := MirrorSchedules{
Schedules: []MirrorSchedule{ []MirrorSchedule{
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)}, MirrorSchedule{"arch-sync1", time.Now().Add(time.Minute * 10)},
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)}, MirrorSchedule{"arch-sync2", time.Now().Add(time.Minute * 7)},
}, },
} }
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules", resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules",
@ -425,8 +395,6 @@ func TestHTTPServer(t *testing.T) {
type mockDBAdapter struct { type mockDBAdapter struct {
workerStore map[string]WorkerStatus workerStore map[string]WorkerStatus
statusStore map[string]MirrorStatus statusStore map[string]MirrorStatus
workerLock sync.RWMutex
statusLock sync.RWMutex
} }
func (b *mockDBAdapter) Init() error { func (b *mockDBAdapter) Init() error {
@ -434,22 +402,17 @@ func (b *mockDBAdapter) Init() error {
} }
func (b *mockDBAdapter) ListWorkers() ([]WorkerStatus, error) { func (b *mockDBAdapter) ListWorkers() ([]WorkerStatus, error) {
b.workerLock.RLock()
workers := make([]WorkerStatus, len(b.workerStore)) workers := make([]WorkerStatus, len(b.workerStore))
idx := 0 idx := 0
for _, w := range b.workerStore { for _, w := range b.workerStore {
workers[idx] = w workers[idx] = w
idx++ idx++
} }
b.workerLock.RUnlock()
return workers, nil return workers, nil
} }
func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) { func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
b.workerLock.RLock()
defer b.workerLock.RUnlock()
w, ok := b.workerStore[workerID] w, ok := b.workerStore[workerID]
if !ok { if !ok {
return WorkerStatus{}, fmt.Errorf("invalid workerId") return WorkerStatus{}, fmt.Errorf("invalid workerId")
} }
@ -457,9 +420,7 @@ func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
} }
func (b *mockDBAdapter) DeleteWorker(workerID string) error { func (b *mockDBAdapter) DeleteWorker(workerID string) error {
b.workerLock.Lock()
delete(b.workerStore, workerID) delete(b.workerStore, workerID)
b.workerLock.Unlock()
return nil return nil
} }
@ -468,26 +429,13 @@ func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
// if ok { // if ok {
// return workerStatus{}, fmt.Errorf("duplicate worker name") // return workerStatus{}, fmt.Errorf("duplicate worker name")
// } // }
b.workerLock.Lock()
b.workerStore[w.ID] = w b.workerStore[w.ID] = w
b.workerLock.Unlock()
return w, nil return w, nil
} }
func (b *mockDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
w, err = b.GetWorker(workerID)
if err == nil {
w.LastOnline = time.Now()
w, err = b.CreateWorker(w)
}
return w, err
}
func (b *mockDBAdapter) GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error) { func (b *mockDBAdapter) GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error) {
id := mirrorID + "/" + workerID id := mirrorID + "/" + workerID
b.statusLock.RLock()
status, ok := b.statusStore[id] status, ok := b.statusStore[id]
b.statusLock.RUnlock()
if !ok { if !ok {
return MirrorStatus{}, fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID) return MirrorStatus{}, fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
} }
@ -501,9 +449,7 @@ func (b *mockDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status Mir
// } // }
id := mirrorID + "/" + workerID id := mirrorID + "/" + workerID
b.statusLock.Lock()
b.statusStore[id] = status b.statusStore[id] = status
b.statusLock.Unlock()
return status, nil return status, nil
} }
@ -513,23 +459,19 @@ func (b *mockDBAdapter) ListMirrorStatus(workerID string) ([]MirrorStatus, error
if workerID == _magicBadWorkerID { if workerID == _magicBadWorkerID {
return []MirrorStatus{}, fmt.Errorf("database fail") return []MirrorStatus{}, fmt.Errorf("database fail")
} }
b.statusLock.RLock()
for k, v := range b.statusStore { for k, v := range b.statusStore {
if wID := strings.Split(k, "/")[1]; wID == workerID { if wID := strings.Split(k, "/")[1]; wID == workerID {
mirrorStatusList = append(mirrorStatusList, v) mirrorStatusList = append(mirrorStatusList, v)
} }
} }
b.statusLock.RUnlock()
return mirrorStatusList, nil return mirrorStatusList, nil
} }
func (b *mockDBAdapter) ListAllMirrorStatus() ([]MirrorStatus, error) { func (b *mockDBAdapter) ListAllMirrorStatus() ([]MirrorStatus, error) {
var mirrorStatusList []MirrorStatus var mirrorStatusList []MirrorStatus
b.statusLock.RLock()
for _, v := range b.statusStore { for _, v := range b.statusStore {
mirrorStatusList = append(mirrorStatusList, v) mirrorStatusList = append(mirrorStatusList, v)
} }
b.statusLock.RUnlock()
return mirrorStatusList, nil return mirrorStatusList, nil
} }
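A side note on the removed concurrency test above: GoConvey assertions made from spawned goroutines have to go through the C context parameter (ctx.So), because the package-level So is bound to the goroutine that owns the Convey scope; the removed test follows that rule inside each go func. In miniature:

Convey("assert from a goroutine", t, func(ctx C) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		ctx.So(1+1, ShouldEqual, 2) // package-level So is not safe here
	}()
	<-done
})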

View File

@ -1,4 +1,3 @@
//go:build ignore
// +build ignore

package main

View File

@ -1,4 +1,3 @@
//go:build ignore
// +build ignore

package main

View File

@ -1,4 +1,3 @@
//go:build ignore
// +build ignore

package main

View File

@ -1,4 +1,3 @@
//go:build ignore
// +build ignore

package main
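Background for these one-line changes: Go 1.17 introduced the //go:build syntax, and gofmt inserts it above the legacy // +build line; the two must express the same constraint, and the legacy line is kept only while pre-1.17 toolchains still matter. The two spellings correspond like so:

//go:build linux && amd64
// +build linux,amd64

package main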

View File

@ -16,13 +16,11 @@ type baseProvider struct {
name string name string
interval time.Duration interval time.Duration
retry int retry int
timeout time.Duration
isMaster bool isMaster bool
cmd *cmdJob cmd *cmdJob
logFileFd *os.File logFileFd *os.File
isRunning atomic.Value isRunning atomic.Value
successExitCodes []int
cgroup *cgroupHook cgroup *cgroupHook
zfs *zfsHook zfs *zfsHook
@ -58,10 +56,6 @@ func (p *baseProvider) Retry() int {
return p.retry return p.retry
} }
func (p *baseProvider) Timeout() time.Duration {
return p.timeout
}
func (p *baseProvider) IsMaster() bool { func (p *baseProvider) IsMaster() bool {
return p.isMaster return p.isMaster
} }
@ -148,7 +142,7 @@ func (p *baseProvider) closeLogFile() (err error) {
return return
} }
func (p *baseProvider) Run(started chan empty) error { func (p *baseProvider) Run() error {
panic("Not Implemented") panic("Not Implemented")
} }
@ -175,7 +169,6 @@ func (p *baseProvider) Terminate() error {
defer p.Unlock() defer p.Unlock()
logger.Debugf("terminating provider: %s", p.Name()) logger.Debugf("terminating provider: %s", p.Name())
if !p.IsRunning() { if !p.IsRunning() {
logger.Warningf("Terminate() called while IsRunning is false: %s", p.Name())
return nil return nil
} }
@ -187,18 +180,3 @@ func (p *baseProvider) Terminate() error {
func (p *baseProvider) DataSize() string { func (p *baseProvider) DataSize() string {
return "" return ""
} }
func (p *baseProvider) SetSuccessExitCodes(codes []int) {
if codes == nil {
p.successExitCodes = []int{}
} else {
p.successExitCodes = codes
}
}
func (p *baseProvider) GetSuccessExitCodes() []int {
if p.successExitCodes == nil {
return []int{}
}
return p.successExitCodes
}
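Timeout and successExitCodes are the two knobs this diff adds to baseProvider. How the codes are meant to be consumed is outside this hunk; a plausible helper (hypothetical, for illustration only) would look like:

package main

import (
	"errors"
	"os/exec"
)

// isSuccessful treats a non-zero exit as success when it appears in the
// provider's configured successExitCodes, illustrating the intent of
// SetSuccessExitCodes/GetSuccessExitCodes above.
func isSuccessful(err error, successCodes []int) bool {
	if err == nil {
		return true
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		for _, c := range successCodes {
			if c == exitErr.ExitCode() {
				return true
			}
		}
	}
	return false
}

func main() {
	err := exec.Command("sh", "-c", "exit 24").Run()
	println(isSuccessful(err, []int{24})) // true: e.g. rsync's "partial transfer" code
}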

View File

@ -1,6 +1,3 @@
//go:build linux
// +build linux

package worker

import (

View File

@ -1,31 +0,0 @@
//go:build !linux
// +build !linux
package worker
type btrfsSnapshotHook struct {
}
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
return &btrfsSnapshotHook{}
}
func (h *btrfsSnapshotHook) postExec() error {
return nil
}
func (h *btrfsSnapshotHook) postFail() error {
return nil
}
func (h *btrfsSnapshotHook) postSuccess() error {
return nil
}
func (h *btrfsSnapshotHook) preExec() error {
return nil
}
func (h *btrfsSnapshotHook) preJob() error {
return nil
}

View File

@ -1,297 +1,64 @@
package worker package worker
import ( import (
"bufio"
"errors" "errors"
"fmt" "fmt"
"io"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"strconv"
"syscall" "syscall"
"time" "time"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
cgroups "github.com/containerd/cgroups/v3" "github.com/codeskyblue/go-sh"
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
"github.com/moby/sys/reexec"
contspecs "github.com/opencontainers/runtime-spec/specs-go"
) )
type cgroupHook struct { type cgroupHook struct {
emptyHook emptyHook
cgCfg cgroupConfig basePath string
memLimit MemBytes baseGroup string
cgMgrV1 cgv1.Cgroup created bool
cgMgrV2 *cgv2.Manager subsystem string
memLimit string
} }
type execCmd string func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook {
if basePath == "" {
const ( basePath = "/sys/fs/cgroup"
cmdCont execCmd = "cont"
cmdAbrt execCmd = "abrt"
)
func init() {
reexec.Register("tunasync-exec", waitExec)
}
func waitExec() {
binary, err := exec.LookPath(os.Args[1])
if err != nil {
panic(err)
} }
if baseGroup == "" {
pipe := os.NewFile(3, "pipe") baseGroup = "tunasync"
if pipe != nil {
if _, err := pipe.Stat(); err == nil {
cmdBytes, err := io.ReadAll(pipe)
if err != nil {
panic(err)
}
if err := pipe.Close(); err != nil {
}
cmd := execCmd(string(cmdBytes))
switch cmd {
case cmdAbrt:
fallthrough
default:
panic("Exited on request")
case cmdCont:
}
}
} }
if subsystem == "" {
args := os.Args[1:] subsystem = "cpu"
env := os.Environ()
if err := syscall.Exec(binary, args, env); err != nil {
panic(err)
} }
panic("Exec failed.")
}
func initCgroup(cfg *cgroupConfig) error {
logger.Debugf("Initializing cgroup")
baseGroup := cfg.Group
//subsystem := cfg.Subsystem
// If baseGroup is empty, it implies using the cgroup of the current process
// otherwise, it refers to a absolute group path
if baseGroup != "" {
baseGroup = filepath.Join("/", baseGroup)
}
cfg.isUnified = cgroups.Mode() == cgroups.Unified
if cfg.isUnified {
logger.Debugf("Cgroup V2 detected")
g := baseGroup
if g == "" {
logger.Debugf("Detecting my cgroup path")
var err error
if g, err = cgv2.NestedGroupPath(""); err != nil {
return err
}
}
logger.Infof("Using cgroup path: %s", g)
var err error
if cfg.cgMgrV2, err = cgv2.Load(g); err != nil {
return err
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil)
if err != nil {
return err
}
for {
logger.Debugf("Reading pids")
procs, err := cfg.cgMgrV2.Procs(false)
if err != nil {
logger.Errorf("Cannot read pids in that group")
return err
}
if len(procs) == 0 {
break
}
for _, p := range procs {
if err := wkrMgr.AddProc(p); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
return err
}
}
}
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV2.NewChild("__test", nil)
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
}
if err := testMgr.Delete(); err != nil {
return err
}
procs, err := cfg.cgMgrV2.Procs(false)
if err != nil {
logger.Errorf("Cannot read pids in that group")
return err
}
if len(procs) != 0 {
return fmt.Errorf("There are remaining processes in cgroup %s", baseGroup)
}
}
} else {
logger.Debugf("Cgroup V1 detected")
var pather cgv1.Path
if baseGroup != "" {
pather = cgv1.StaticPath(baseGroup)
} else {
pather = (func(p cgv1.Path) cgv1.Path {
return func(subsys cgv1.Name) (string, error) {
path, err := p(subsys)
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
}
logger.Infof("Loading cgroup")
var err error
if cfg.cgMgrV1, err = cgv1.Load(pather, func(cfg *cgv1.InitConfig) error {
cfg.InitCheck = cgv1.AllowAny
return nil
}); err != nil {
return err
}
logger.Debugf("Available subsystems:")
for _, subsys := range cfg.cgMgrV1.Subsystems() {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Debugf("%s: %s", subsys.Name(), p)
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{})
if err != nil {
return err
}
for _, subsys := range cfg.cgMgrV1.Subsystems() {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
for {
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
return err
}
if len(procs) == 0 {
break
}
for _, proc := range procs {
if err := wkrMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
return err
}
}
}
}
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{})
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
}
if err := testMgr.Delete(); err != nil {
return err
}
for _, subsys := range cfg.cgMgrV1.Subsystems() {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
return err
}
if len(procs) != 0 {
p, err := pather(subsys.Name())
if err != nil {
return err
}
return fmt.Errorf("There are remaining processes in cgroup %s of subsystem %s", p, subsys.Name())
}
}
}
}
return nil
}
func newCgroupHook(p mirrorProvider, cfg cgroupConfig, memLimit MemBytes) *cgroupHook {
return &cgroupHook{ return &cgroupHook{
emptyHook: emptyHook{ emptyHook: emptyHook{
provider: p, provider: p,
}, },
cgCfg: cfg, basePath: basePath,
memLimit: memLimit, baseGroup: baseGroup,
subsystem: subsystem,
} }
} }
func (c *cgroupHook) preExec() error { func (c *cgroupHook) preExec() error {
if c.cgCfg.isUnified { c.created = true
logger.Debugf("Creating v2 cgroup for task %s", c.provider.Name()) if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
var resSet *cgv2.Resources return err
if c.memLimit != 0 { }
resSet = &cgv2.Resources{ if c.subsystem != "memory" {
Memory: &cgv2.Memory{ return nil
Max: func(i int64) *int64 { return &i }(c.memLimit.Value()), }
}, if c.memLimit != "" {
} gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
} return sh.Command(
subMgr, err := c.cgCfg.cgMgrV2.NewChild(c.provider.Name(), resSet) "cgset", "-r",
if err != nil { fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit),
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error()) gname,
return err ).Run()
}
c.cgMgrV2 = subMgr
} else {
logger.Debugf("Creating v1 cgroup for task %s", c.provider.Name())
var resSet contspecs.LinuxResources
if c.memLimit != 0 {
resSet = contspecs.LinuxResources{
Memory: &contspecs.LinuxMemory{
Limit: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
}
}
subMgr, err := c.cgCfg.cgMgrV1.New(c.provider.Name(), &resSet)
if err != nil {
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV1 = subMgr
} }
return nil return nil
} }
@ -302,59 +69,36 @@ func (c *cgroupHook) postExec() error {
logger.Errorf("Error killing tasks: %s", err.Error()) logger.Errorf("Error killing tasks: %s", err.Error())
} }
if c.cgCfg.isUnified { c.created = false
logger.Debugf("Deleting v2 cgroup for task %s", c.provider.Name()) return sh.Command("cgdelete", c.Cgroup()).Run()
if err := c.cgMgrV2.Delete(); err != nil { }
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
return err func (c *cgroupHook) Cgroup() string {
} name := c.provider.Name()
c.cgMgrV2 = nil return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name)
} else {
logger.Debugf("Deleting v1 cgroup for task %s", c.provider.Name())
if err := c.cgMgrV1.Delete(); err != nil {
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV1 = nil
}
return nil
} }
func (c *cgroupHook) killAll() error { func (c *cgroupHook) killAll() error {
if c.cgCfg.isUnified { if !c.created {
if c.cgMgrV2 == nil { return nil
return nil
}
} else {
if c.cgMgrV1 == nil {
return nil
}
} }
name := c.provider.Name()
readTaskList := func() ([]int, error) { readTaskList := func() ([]int, error) {
taskList := []int{} taskList := []int{}
if c.cgCfg.isUnified { taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks"))
procs, err := c.cgMgrV2.Procs(false) if err != nil {
return taskList, err
}
defer taskFile.Close()
scanner := bufio.NewScanner(taskFile)
for scanner.Scan() {
pid, err := strconv.Atoi(scanner.Text())
if err != nil { if err != nil {
return []int{}, err return taskList, err
}
for _, proc := range procs {
taskList = append(taskList, int(proc))
}
} else {
taskSet := make(map[int]struct{})
for _, subsys := range c.cgMgrV1.Subsystems() {
procs, err := c.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
return []int{}, err
}
for _, proc := range procs {
taskSet[proc.Pid] = struct{}{}
}
}
for proc := range taskSet {
taskList = append(taskList, proc)
} }
taskList = append(taskList, pid)
} }
return taskList, nil return taskList, nil
} }
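The rewritten hook drives cgroups through the containerd packages instead of shelling out to cgcreate/cgset/cgdelete as the v0.6.2 column does. Stripped of the v1 branch and error plumbing, the v2 path amounts to roughly this (a sketch; mountpoint, group name, and limit are illustrative):

package main

import (
	"fmt"
	"os"

	cgroups "github.com/containerd/cgroups/v3"
	cgv2 "github.com/containerd/cgroups/v3/cgroup2"
)

func main() {
	if cgroups.Mode() != cgroups.Unified {
		fmt.Println("cgroup v1 host: the hook takes the cgv1 branch instead")
		return
	}
	limit := int64(512 << 20) // 512 MiB, cf. the memory.max assertion in the test below
	mgr, err := cgv2.NewManager("/sys/fs/cgroup", "/demo", &cgv2.Resources{
		Memory: &cgv2.Memory{Max: &limit},
	})
	if err != nil {
		panic(err)
	}
	// move the current process in, as the hook does with the provider's task
	if err := mgr.AddProc(uint64(os.Getpid())); err != nil {
		panic(err)
	}
}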

View File

@ -1,124 +1,40 @@
package worker package worker
import ( import (
"errors" "io/ioutil"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"syscall"
"testing" "testing"
"time" "time"
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
units "github.com/docker/go-units"
"github.com/moby/sys/reexec"
. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
) )
func init() {
_, testReexec := os.LookupEnv("TESTREEXEC")
if !testReexec {
reexec.Init()
}
}
func TestReexec(t *testing.T) {
testCase, testReexec := os.LookupEnv("TESTREEXEC")
if !testReexec {
return
}
for len(os.Args) > 1 {
thisArg := os.Args[1]
os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
if thisArg == "--" {
break
}
}
switch testCase {
case "1":
Convey("Reexec should panic when command not found", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, exec.ErrNotFound)
})
case "2":
Convey("Reexec should run when fd 3 is not open", t, func(ctx C) {
So((func() error {
pipe := os.NewFile(3, "pipe")
if pipe == nil {
return errors.New("pipe is nil")
} else {
_, err := pipe.Stat()
return err
}
})(), ShouldNotBeNil)
So(func() {
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "3":
Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, "Exited on request")
})
case "4":
Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "5":
Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldNotPanic)
})
}
}
func TestCgroup(t *testing.T) { func TestCgroup(t *testing.T) {
var cgcf *cgroupConfig Convey("Cgroup Should Work", t, func(ctx C) {
Convey("init cgroup", t, func(ctx C) { tmpDir, err := ioutil.TempDir("", "tunasync")
_, useCurrentCgroup := os.LookupEnv("USECURCGROUP") defer os.RemoveAll(tmpDir)
cgcf = &cgroupConfig{BasePath: "/sys/fs/cgroup", Group: "tunasync", Subsystem: "cpu"}
if useCurrentCgroup {
cgcf.Group = ""
}
err := initCgroup(cgcf)
So(err, ShouldBeNil) So(err, ShouldBeNil)
if cgcf.isUnified { cmdScript := filepath.Join(tmpDir, "cmd.sh")
So(cgcf.cgMgrV2, ShouldNotBeNil) daemonScript := filepath.Join(tmpDir, "daemon.sh")
} else { tmpFile := filepath.Join(tmpDir, "log_file")
So(cgcf.cgMgrV1, ShouldNotBeNil) bgPidfile := filepath.Join(tmpDir, "bg.pid")
c := cmdConfig{
name: "tuna-cgroup",
upstreamURL: "http://mirrors.tuna.moe/",
command: cmdScript + " " + daemonScript,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"BG_PIDFILE": bgPidfile,
},
} }
cmdScriptContent := `#!/bin/bash
Convey("Cgroup Should Work", func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
daemonScript := filepath.Join(tmpDir, "daemon.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
bgPidfile := filepath.Join(tmpDir, "bg.pid")
c := cmdConfig{
name: "tuna-cgroup",
upstreamURL: "http://mirrors.tuna.moe/",
command: cmdScript + " " + daemonScript,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"BG_PIDFILE": bgPidfile,
},
}
cmdScriptContent := `#!/bin/bash
redirect-std() { redirect-std() {
[[ -t 0 ]] && exec </dev/null [[ -t 0 ]] && exec </dev/null
[[ -t 1 ]] && exec >/dev/null [[ -t 1 ]] && exec >/dev/null
@ -144,180 +60,91 @@ echo $$
daemonize $@ daemonize $@
sleep 5 sleep 5
` `
daemonScriptContent := `#!/bin/bash daemonScriptContent := `#!/bin/bash
echo $$ > $BG_PIDFILE echo $$ > $BG_PIDFILE
sleep 30 sleep 30
` `
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755) err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = ioutil.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "")
provider.AddHook(cg)
err = cg.preExec()
if err != nil {
logger.Errorf("Failed to create cgroup")
return
}
So(err, ShouldBeNil)
go func() {
err = provider.Run()
ctx.So(err, ShouldNotBeNil)
}()
time.Sleep(1 * time.Second)
// Daemon should be started
daemonPidBytes, err := ioutil.ReadFile(bgPidfile)
So(err, ShouldBeNil)
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
logger.Debug("daemon pid: %s", daemonPid)
procDir := filepath.Join("/proc", daemonPid)
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
err = provider.Terminate()
So(err, ShouldBeNil)
// Daemon won't be killed
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
// Daemon can be killed by cgroup killer
cg.postExec()
_, err = os.Stat(procDir)
So(os.IsNotExist(err), ShouldBeTrue)
})
Convey("Rsync Memory Should Be Limited", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna-cgroup",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M")
provider.AddHook(cg)
err = cg.preExec()
if err != nil {
logger.Errorf("Failed to create cgroup")
return
}
if cg.subsystem == "memory" {
memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
So(err, ShouldBeNil) So(err, ShouldBeNil)
err = os.WriteFile(daemonScript, []byte(daemonScriptContent), 0755) So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
So(err, ShouldBeNil) }
cg.postExec()
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 0)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
go func() {
err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()
time.Sleep(1 * time.Second)
// Deamon should be started
daemonPidBytes, err := os.ReadFile(bgPidfile)
So(err, ShouldBeNil)
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
logger.Debug("daemon pid: %s", daemonPid)
procDir := filepath.Join("/proc", daemonPid)
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
err = provider.Terminate()
So(err, ShouldBeNil)
// Deamon won't be killed
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
// Deamon can be killed by cgroup killer
cg.postExec()
_, err = os.Stat(procDir)
So(os.IsNotExist(err), ShouldBeTrue)
})
Convey("Rsync Memory Should Be Limited", func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna-cgroup",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 512*units.MiB)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
if cgcf.isUnified {
cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name())
if useCurrentCgroup {
group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name()))
So(err, ShouldBeNil)
cgpath = filepath.Join(cgcf.BasePath, group)
}
memoLimit, err := os.ReadFile(filepath.Join(cgpath, "memory.max"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
} else {
for _, subsys := range cg.cgMgrV1.Subsystems() {
if subsys.Name() == cgv1.Memory {
cgpath := filepath.Join(cgcf.Group, provider.Name())
if useCurrentCgroup {
p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory)
So(err, ShouldBeNil)
cgpath = p
}
memoLimit, err := os.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
}
}
cg.postExec()
So(cg.cgMgrV1, ShouldBeNil)
})
Reset(func() {
if cgcf.isUnified {
if cgcf.Group == "" {
wkrg, err := cgv2.NestedGroupPath("")
So(err, ShouldBeNil)
wkrMgr, _ := cgv2.Load(wkrg)
allCtrls, err := wkrMgr.Controllers()
So(err, ShouldBeNil)
err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable)
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV2
for {
logger.Debugf("Restoring pids")
procs, err := wkrMgr.Procs(false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, p := range procs {
if err := origMgr.AddProc(p); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
} else {
if cgcf.Group == "" {
pather := (func(p cgv1.Path) cgv1.Path {
return func(subsys cgv1.Name) (string, error) {
path, err := p(subsys)
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
wkrMgr, err := cgv1.Load(pather, func(cfg *cgv1.InitConfig) error {
cfg.InitCheck = cgv1.AllowAny
return nil
})
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV1
for _, subsys := range wkrMgr.Subsystems() {
for {
procs, err := wkrMgr.Processes(subsys.Name(), false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, proc := range procs {
if err := origMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
}
})
	})
}
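The limit this test reads lives in different files depending on the cgroup layout: cgroup v2 exposes memory.max under the unified hierarchy, while cgroup v1 exposes memory.limit_in_bytes under the memory controller mount. A hedged standalone sketch of a reader that tries both (readMemoryLimit is an illustrative helper, not part of tunasync; paths assume the default /sys/fs/cgroup mount):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// readMemoryLimit returns the configured memory limit of a cgroup,
// trying the v2 unified file first, then the v1 memory controller.
func readMemoryLimit(base, group, name string) (string, error) {
	v2 := filepath.Join(base, group, name, "memory.max")
	if b, err := os.ReadFile(v2); err == nil {
		return strings.TrimSpace(string(b)), nil
	}
	v1 := filepath.Join(base, "memory", group, name, "memory.limit_in_bytes")
	b, err := os.ReadFile(v1)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b)), nil
}

func main() {
	limit, err := readMemoryLimit("/sys/fs/cgroup", "tunasync", "tuna-cgroup")
	if err != nil {
		fmt.Println("no limit found:", err)
		return
	}
	fmt.Println("memory limit:", limit) // "536870912" for a 512M limit
}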

View File

@ -16,7 +16,6 @@ type cmdConfig struct {
	workingDir, logDir, logFile string
	interval                    time.Duration
	retry                       int
	timeout                     time.Duration
	env                         map[string]string
	failOnMatch                 string
	sizePattern                 string
@ -42,7 +41,6 @@ func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
			ctx:      NewContext(),
			interval: c.interval,
			retry:    c.retry,
			timeout:  c.timeout,
		},
		cmdConfig: c,
	}
@ -88,13 +86,12 @@ func (p *cmdProvider) DataSize() string {
	return p.dataSize
}

func (p *cmdProvider) Run(started chan empty) error {
func (p *cmdProvider) Run() error {
	p.dataSize = ""
	defer p.closeLogFile()
	if err := p.Start(); err != nil {
		return err
	}
	started <- empty{}
	if err := p.Wait(); err != nil {
		return err
	}
@ -142,6 +139,5 @@ func (p *cmdProvider) Start() error {
		return err
	}
	p.isRunning.Store(true)
	logger.Debugf("set isRunning to true: %s", p.Name())
	return nil
}

View File

@ -1,6 +1,6 @@
package worker

// put global variables and types here
import (
	"gopkg.in/op/go-logging.v1"

View File

@ -6,9 +6,6 @@ import (
	"path/filepath"

	"github.com/BurntSushi/toml"
	cgv1 "github.com/containerd/cgroups/v3/cgroup1"
	cgv2 "github.com/containerd/cgroups/v3/cgroup2"
	units "github.com/docker/go-units"
	"github.com/imdario/mergo"
)

@ -56,16 +53,9 @@ type globalConfig struct {
	Concurrent int `toml:"concurrent"`
	Interval   int `toml:"interval"`
	Retry      int `toml:"retry"`
	Timeout    int `toml:"timeout"`

	// appended to the options generated by rsync_provider, but before mirror-specific options
	RsyncOptions []string `toml:"rsync_options"`

	ExecOnSuccess []string `toml:"exec_on_success"`
	ExecOnFailure []string `toml:"exec_on_failure"`

	// merged with mirror-specific options. make sure you know what you are doing!
	SuccessExitCodes []int `toml:"dangerous_global_success_exit_codes"`
}

type managerConfig struct {
@ -96,9 +86,6 @@ type cgroupConfig struct {
	BasePath  string `toml:"base_path"`
	Group     string `toml:"group"`
	Subsystem string `toml:"subsystem"`
	isUnified bool
	cgMgrV1   cgv1.Cgroup
	cgMgrV2   *cgv2.Manager
}

type dockerConfig struct {
@ -125,39 +112,12 @@ type includedMirrorConfig struct {
	Mirrors []mirrorConfig `toml:"mirrors"`
}

type MemBytes int64

// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
	val, err := units.RAMInBytes(value)
	*m = MemBytes(val)
	return err
}

// Type returns the type
func (m *MemBytes) Type() string {
	return "bytes"
}

// Value returns the value in int64
func (m *MemBytes) Value() int64 {
	return int64(*m)
}

// UnmarshalText is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalText(s []byte) error {
	val, err := units.RAMInBytes(string(s))
	*m = MemBytes(val)
	return err
}

type mirrorConfig struct {
	Name     string       `toml:"name"`
	Provider providerEnum `toml:"provider"`
	Upstream string       `toml:"upstream"`
	Interval int          `toml:"interval"`
	Retry    int          `toml:"retry"`
	Timeout  int          `toml:"timeout"`

	MirrorDir    string `toml:"mirror_dir"`
	MirrorSubDir string `toml:"mirror_subdir"`
	LogDir       string `toml:"log_dir"`
@ -168,29 +128,23 @@ type mirrorConfig struct {
	ExecOnSuccess []string `toml:"exec_on_success"`
	ExecOnFailure []string `toml:"exec_on_failure"`

	// These two options are appended to the global options
	ExecOnSuccessExtra []string `toml:"exec_on_success_extra"`
	ExecOnFailureExtra []string `toml:"exec_on_failure_extra"`

	// will be merged with global option
	SuccessExitCodes []int `toml:"success_exit_codes"`

	Command           string   `toml:"command"`
	FailOnMatch       string   `toml:"fail_on_match"`
	SizePattern       string   `toml:"size_pattern"`
	UseIPv6           bool     `toml:"use_ipv6"`
	UseIPv4           bool     `toml:"use_ipv4"`
	ExcludeFile       string   `toml:"exclude_file"`
	Username          string   `toml:"username"`
	Password          string   `toml:"password"`
	RsyncNoTimeo      bool     `toml:"rsync_no_timeout"`
	RsyncTimeout      int      `toml:"rsync_timeout"`
	RsyncOptions      []string `toml:"rsync_options"`
	RsyncOverride     []string `toml:"rsync_override"`
	RsyncOverrideOnly bool     `toml:"rsync_override_only"` // only use provided overridden options if true
	Stage1Profile     string   `toml:"stage1_profile"`

	MemoryLimit MemBytes `toml:"memory_limit"`
	MemoryLimit string   `toml:"memory_limit"`

	DockerImage   string   `toml:"docker_image"`
	DockerVolumes []string `toml:"docker_volumes"`
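For reference, units.RAMInBytes (which MemBytes wraps above) parses size suffixes as binary multiples, so the values asserted elsewhere in this diff line up: "128M" and "128MiB" both come out as 128*1024*1024. A minimal sketch (the main wrapper and sample values are illustrative):

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"512M", "256MiB", "1g"} {
		n, err := units.RAMInBytes(s) // binary multiples: "512M" -> 536870912
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", n)
	}
}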

View File

@ -53,43 +53,34 @@ func diffMirrorConfig(oldList, newList []mirrorConfig) []mirrorCfgTrans {
	sort.Sort(sortableMirrorList(oList))
	sort.Sort(sortableMirrorList(nList))

	if len(oList) != 0 && len(nList) != 0 {
		// insert a tail node to both lists
		// as the maximum node
		lastOld, lastNew := oList[len(oList)-1], nList[len(nList)-1]
		maxName := lastOld.Name
		if lastNew.Name > lastOld.Name {
			maxName = lastNew.Name
		}
		Nil := mirrorConfig{Name: "~" + maxName}
		if Nil.Name <= maxName {
			panic("Nil.Name should be larger than maxName")
		}
		oList, nList = append(oList, Nil), append(nList, Nil)

		// iterate over both lists to find the difference
		for i, j := 0, 0; i < len(oList) && j < len(nList); {
			o, n := oList[i], nList[j]
			if n.Name < o.Name {
				operations = append(operations, mirrorCfgTrans{diffAdd, n})
				j++
			} else if o.Name < n.Name {
				operations = append(operations, mirrorCfgTrans{diffDelete, o})
				i++
			} else {
				if !reflect.DeepEqual(o, n) {
					operations = append(operations, mirrorCfgTrans{diffModify, n})
				}
				i++
				j++
			}
		}
	} else {
		for i := 0; i < len(oList); i++ {
			operations = append(operations, mirrorCfgTrans{diffDelete, oList[i]})
		}
		for i := 0; i < len(nList); i++ {
			operations = append(operations, mirrorCfgTrans{diffAdd, nList[i]})
		}
	}
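The trick in the hunk above: a sentinel name guaranteed to sort after every real mirror is appended to both sorted lists, so the two-pointer walk can drain either side without extra bounds checks. A standalone sketch of the same idea over plain name lists (diffNames and the string ops are illustrative stand-ins for mirrorCfgTrans):

package main

import "fmt"

// diffNames sketches the sentinel-tail walk; inputs must be sorted.
func diffNames(oldL, newL []string) []string {
	ops := []string{}
	if len(oldL) == 0 || len(newL) == 0 {
		for _, o := range oldL {
			ops = append(ops, "delete "+o)
		}
		for _, n := range newL {
			ops = append(ops, "add "+n)
		}
		return ops
	}
	// "~" (0x7E) sorts after ASCII letters and digits, so the sentinel
	// compares greater than every real name in either list.
	maxName := oldL[len(oldL)-1]
	if newL[len(newL)-1] > maxName {
		maxName = newL[len(newL)-1]
	}
	sentinel := "~" + maxName
	oldL, newL = append(oldL, sentinel), append(newL, sentinel)
	for i, j := 0, 0; i < len(oldL) && j < len(newL); {
		o, n := oldL[i], newL[j]
		switch {
		case n < o:
			ops = append(ops, "add "+n)
			j++
		case o < n:
			ops = append(ops, "delete "+o)
			i++
		default:
			// equal names: a real implementation deep-compares the
			// configs here and emits a modify op when they differ
			i++
			j++
		}
	}
	return ops
}

func main() {
	fmt.Println(diffNames(
		[]string{"archlinux", "debian", "fedora"},
		[]string{"archlinuxcn", "debian"},
	))
	// Output: [delete archlinux add archlinuxcn delete fedora]
}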

View File

@ -10,12 +10,12 @@ import (
func TestConfigDiff(t *testing.T) {
	Convey("When old and new configs are equal", t, func() {
		oldList := []mirrorConfig{
			{Name: "debian"},
			{Name: "debian-security"},
			{Name: "fedora"},
			{Name: "archlinux"},
			{Name: "AOSP"},
			{Name: "ubuntu"},
		}
		newList := make([]mirrorConfig, len(oldList))
		copy(newList, oldList)
@ -23,49 +23,21 @@ func TestConfigDiff(t *testing.T) {
		difference := diffMirrorConfig(oldList, newList)
		So(len(difference), ShouldEqual, 0)
	})
Convey("When old config is empty", t, func() {
newList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-security"},
{Name: "fedora"},
{Name: "archlinux"},
{Name: "AOSP"},
{Name: "ubuntu"},
}
oldList := make([]mirrorConfig, 0)
difference := diffMirrorConfig(oldList, newList)
So(len(difference), ShouldEqual, len(newList))
})
Convey("When new config is empty", t, func() {
oldList := []mirrorConfig{
{Name: "debian"},
{Name: "debian-security"},
{Name: "fedora"},
{Name: "archlinux"},
{Name: "AOSP"},
{Name: "ubuntu"},
}
newList := make([]mirrorConfig, 0)
difference := diffMirrorConfig(oldList, newList)
So(len(difference), ShouldEqual, len(oldList))
})
Convey("When giving two config lists with different names", t, func() { Convey("When giving two config lists with different names", t, func() {
oldList := []mirrorConfig{ oldList := []mirrorConfig{
{Name: "debian"}, mirrorConfig{Name: "debian"},
{Name: "debian-security"}, mirrorConfig{Name: "debian-security"},
{Name: "fedora"}, mirrorConfig{Name: "fedora"},
{Name: "archlinux"}, mirrorConfig{Name: "archlinux"},
{Name: "AOSP", Env: map[string]string{"REPO": "/usr/bin/repo"}}, mirrorConfig{Name: "AOSP", Env: map[string]string{"REPO": "/usr/bin/repo"}},
{Name: "ubuntu"}, mirrorConfig{Name: "ubuntu"},
} }
newList := []mirrorConfig{ newList := []mirrorConfig{
{Name: "debian"}, mirrorConfig{Name: "debian"},
{Name: "debian-cd"}, mirrorConfig{Name: "debian-cd"},
{Name: "archlinuxcn"}, mirrorConfig{Name: "archlinuxcn"},
{Name: "AOSP", Env: map[string]string{"REPO": "/usr/local/bin/aosp-repo"}}, mirrorConfig{Name: "AOSP", Env: map[string]string{"REPO": "/usr/local/bin/aosp-repo"}},
{Name: "ubuntu-ports"}, mirrorConfig{Name: "ubuntu-ports"},
} }
difference := diffMirrorConfig(oldList, newList) difference := diffMirrorConfig(oldList, newList)

View File

@ -2,12 +2,10 @@ package worker
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"time"
units "github.com/docker/go-units"
. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
) )
@ -21,7 +19,6 @@ mirror_dir = "/data/mirrors"
concurrent = 10 concurrent = 10
interval = 240 interval = 240
retry = 3 retry = 3
timeout = 86400
[manager] [manager]
api_base = "https://127.0.0.1:5000" api_base = "https://127.0.0.1:5000"
@ -40,7 +37,6 @@ provider = "command"
upstream = "https://aosp.google.com/" upstream = "https://aosp.google.com/"
interval = 720 interval = 720
retry = 2 retry = 2
timeout = 3600
mirror_dir = "/data/git/AOSP" mirror_dir = "/data/git/AOSP"
exec_on_success = [ exec_on_success = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'" "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@ -54,15 +50,12 @@ provider = "two-stage-rsync"
stage1_profile = "debian" stage1_profile = "debian"
upstream = "rsync://ftp.debian.org/debian/" upstream = "rsync://ftp.debian.org/debian/"
use_ipv6 = true use_ipv6 = true
memory_limit = "256MiB"
[[mirrors]] [[mirrors]]
name = "fedora" name = "fedora"
provider = "rsync" provider = "rsync"
upstream = "rsync://ftp.fedoraproject.org/fedora/" upstream = "rsync://ftp.fedoraproject.org/fedora/"
use_ipv6 = true use_ipv6 = true
memory_limit = "128M"
exclude_file = "/etc/tunasync.d/fedora-exclude.txt" exclude_file = "/etc/tunasync.d/fedora-exclude.txt"
exec_on_failure = [ exec_on_failure = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'" "bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@ -76,11 +69,11 @@ exec_on_failure = [
}) })
Convey("Everything should work on valid config file", t, func() { Convey("Everything should work on valid config file", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync") tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name()) defer os.Remove(tmpfile.Name())
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil) So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
@ -92,7 +85,7 @@ exec_on_failure = [
curCfgBlob := cfgBlob + incSection curCfgBlob := cfgBlob + incSection
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644) err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer tmpfile.Close() defer tmpfile.Close()
@ -116,9 +109,9 @@ provider = "two-stage-rsync"
stage1_profile = "debian" stage1_profile = "debian"
use_ipv6 = true use_ipv6 = true
` `
err = os.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644) err = ioutil.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
err = os.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644) err = ioutil.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name()) cfg, err := LoadConfig(tmpfile.Name())
@ -126,7 +119,6 @@ use_ipv6 = true
So(cfg.Global.Name, ShouldEqual, "test_worker") So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240) So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3) So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.Timeout, ShouldEqual, 86400)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors") So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000") So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
@ -138,27 +130,23 @@ use_ipv6 = true
So(m.Provider, ShouldEqual, provCommand) So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720) So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2) So(m.Retry, ShouldEqual, 2)
So(m.Timeout, ShouldEqual, 3600)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo") So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1] m = cfg.Mirrors[1]
So(m.Name, ShouldEqual, "debian") So(m.Name, ShouldEqual, "debian")
So(m.MirrorDir, ShouldEqual, "") So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync) So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 256*units.MiB)
m = cfg.Mirrors[2] m = cfg.Mirrors[2]
So(m.Name, ShouldEqual, "fedora") So(m.Name, ShouldEqual, "fedora")
So(m.MirrorDir, ShouldEqual, "") So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provRsync) So(m.Provider, ShouldEqual, provRsync)
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt") So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
So(m.MemoryLimit.Value(), ShouldEqual, 128*units.MiB)
m = cfg.Mirrors[3] m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-cd") So(m.Name, ShouldEqual, "debian-cd")
So(m.MirrorDir, ShouldEqual, "") So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync) So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 0)
m = cfg.Mirrors[4] m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "debian-security") So(m.Name, ShouldEqual, "debian-security")
@ -170,11 +158,11 @@ use_ipv6 = true
}) })
Convey("Everything should work on nested config file", t, func() { Convey("Everything should work on nested config file", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync") tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name()) defer os.Remove(tmpfile.Name())
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil) So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
@ -186,7 +174,7 @@ use_ipv6 = true
curCfgBlob := cfgBlob + incSection curCfgBlob := cfgBlob + incSection
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644) err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer tmpfile.Close() defer tmpfile.Close()
@ -212,7 +200,7 @@ use_ipv6 = true
provider = "rsync" provider = "rsync"
upstream = "rsync://test.host3/debian-cd/" upstream = "rsync://test.host3/debian-cd/"
` `
err = os.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644) err = ioutil.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name()) cfg, err := LoadConfig(tmpfile.Name())
@ -266,11 +254,11 @@ use_ipv6 = true
So(len(cfg.Mirrors), ShouldEqual, 6) So(len(cfg.Mirrors), ShouldEqual, 6)
}) })
Convey("Providers can be inited from a valid config file", t, func() { Convey("Providers can be inited from a valid config file", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync") tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name()) defer os.Remove(tmpfile.Name())
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644) err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer tmpfile.Close() defer tmpfile.Close()
@ -317,7 +305,7 @@ use_ipv6 = true
}) })
Convey("MirrorSubdir should work", t, func() { Convey("MirrorSubdir should work", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync") tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name()) defer os.Remove(tmpfile.Name())
@ -328,7 +316,6 @@ log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors" mirror_dir = "/data/mirrors"
concurrent = 10 concurrent = 10
interval = 240 interval = 240
timeout = 86400
retry = 3 retry = 3
[manager] [manager]
@ -363,7 +350,7 @@ use_ipv6 = true
provider = "rsync" provider = "rsync"
upstream = "rsync://test.host3/debian-cd/" upstream = "rsync://test.host3/debian-cd/"
` `
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644) err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
defer tmpfile.Close() defer tmpfile.Close()
@ -401,180 +388,5 @@ use_ipv6 = true
rp, ok := p.(*rsyncProvider) rp, ok := p.(*rsyncProvider)
So(ok, ShouldBeTrue) So(ok, ShouldBeTrue)
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd") So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd")
So(p.Timeout(), ShouldEqual, 86400*time.Second)
})
Convey("rsync_override_only should work", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "foo"
provider = "rsync"
upstream = "rsync://foo.bar/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/foo"
rsync_override = ["--bar", "baz"]
rsync_override_only = true
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p, ok := providers["foo"].(*rsyncProvider)
So(ok, ShouldBeTrue)
So(p.options, ShouldResemble, []string{"--bar", "baz"})
})
Convey("rsync global options should work", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
rsync_options = ["--global"]
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "foo"
provider = "rsync"
upstream = "rsync://foo.bar/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/foo"
rsync_override = ["--override"]
rsync_options = ["--local"]
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p, ok := providers["foo"].(*rsyncProvider)
So(ok, ShouldBeTrue)
So(p.options, ShouldResemble, []string{
"--override", // from mirror.rsync_override
"--timeout=120", // generated by newRsyncProvider
"--global", // from global.rsync_options
"--local", // from mirror.rsync_options
})
})
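The comment trail in the assertion above pins down the concatenation order for rsync options. A standalone sketch of that order (buildOptions and its signature are illustrative names, not the repo's actual API):

package main

import "fmt"

// buildOptions concatenates rsync option sources in the order the
// test asserts.
func buildOptions(override, generated, global, local []string) []string {
	opts := []string{}
	opts = append(opts, override...)  // mirror.rsync_override
	opts = append(opts, generated...) // e.g. "--timeout=120" from newRsyncProvider
	opts = append(opts, global...)    // global.rsync_options
	opts = append(opts, local...)     // mirror.rsync_options
	return opts
}

func main() {
	fmt.Println(buildOptions(
		[]string{"--override"},
		[]string{"--timeout=120"},
		[]string{"--global"},
		[]string{"--local"},
	)) // [--override --timeout=120 --global --local]
}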
Convey("success_exit_codes should work globally and per mirror", t, func() {
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
dangerous_global_success_exit_codes = [10, 20]
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "foo"
provider = "rsync"
upstream = "rsync://foo.bar/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/foo"
success_exit_codes = [30, 40]
`
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p, ok := providers["foo"].(*rsyncProvider)
So(ok, ShouldBeTrue)
So(p.successExitCodes, ShouldResemble, []int{10, 20, 30, 40})
	})
}

View File

@ -3,27 +3,19 @@ package worker
import (
	"fmt"
	"os"
	"time"

	"github.com/codeskyblue/go-sh"
)

type dockerHook struct {
	emptyHook
	image       string
	volumes     []string
	options     []string
	memoryLimit MemBytes
}

func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
	volumes := []string{}
	volumes = append(volumes, gCfg.Volumes...)
	volumes = append(volumes, mCfg.DockerVolumes...)
	if len(mCfg.ExcludeFile) > 0 {
		arg := fmt.Sprintf("%s:%s:ro", mCfg.ExcludeFile, mCfg.ExcludeFile)
		volumes = append(volumes, arg)
	}

	options := []string{}
	options = append(options, gCfg.Options...)
@ -33,10 +25,9 @@ func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dock
		emptyHook: emptyHook{
			provider: p,
		},
		image:       mCfg.DockerImage,
		volumes:     volumes,
		options:     options,
		memoryLimit: mCfg.MemoryLimit,
	}
}
@ -69,27 +60,6 @@ func (d *dockerHook) postExec() error {
// sh.Command( // sh.Command(
// "docker", "rm", "-f", d.Name(), // "docker", "rm", "-f", d.Name(),
// ).Run() // ).Run()
name := d.Name()
retry := 10
for ; retry > 0; retry-- {
out, err := sh.Command(
"docker", "ps", "-a",
"--filter", "name=^"+name+"$",
"--format", "{{.Status}}",
).Output()
if err != nil {
logger.Errorf("docker ps failed: %v", err)
break
}
if len(out) == 0 {
break
}
logger.Debugf("container %s still exists: '%s'", name, string(out))
time.Sleep(1 * time.Second)
}
if retry == 0 {
logger.Warningf("container %s not removed automatically, next sync may fail", name)
}
d.provider.ExitContext() d.provider.ExitContext()
return nil return nil
} }

View File

@ -2,14 +2,13 @@ package worker
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
units "github.com/docker/go-units"
"github.com/codeskyblue/go-sh" "github.com/codeskyblue/go-sh"
. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
) )
@ -40,7 +39,7 @@ func getDockerByName(name string) (string, error) {
func TestDocker(t *testing.T) { func TestDocker(t *testing.T) {
Convey("Docker Should Work", t, func(ctx C) { Convey("Docker Should Work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh") cmdScript := filepath.Join(tmpDir, "cmd.sh")
@ -64,7 +63,7 @@ func TestDocker(t *testing.T) {
echo ${TEST_CONTENT} echo ${TEST_CONTENT}
sleep 20 sleep 20
` `
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755) err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
provider, err := newCmdProvider(c) provider, err := newCmdProvider(c)
@ -78,7 +77,6 @@ sleep 20
volumes: []string{ volumes: []string{
fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"), fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
}, },
memoryLimit: 512 * units.MiB,
} }
provider.AddHook(d) provider.AddHook(d)
So(provider.Docker(), ShouldNotBeNil) So(provider.Docker(), ShouldNotBeNil)
@ -89,34 +87,29 @@ sleep 20
cmdRun("docker", []string{"images"}) cmdRun("docker", []string{"images"})
exitedErr := make(chan error, 1) exitedErr := make(chan error, 1)
go func() { go func() {
err = provider.Run(make(chan empty, 1)) err = provider.Run()
logger.Debugf("provider.Run() exited") logger.Debugf("provider.Run() exited")
if err != nil { if err != nil {
logger.Errorf("provider.Run() failed: %v", err) logger.Errorf("provider.Run() failed: %v", err)
} }
exitedErr <- err exitedErr <- err
}() }()
cmdRun("ps", []string{"aux"})
// Wait for docker running // Wait for docker running
for wait := 0; wait < 8; wait++ { time.Sleep(8 * time.Second)
names, err := getDockerByName(d.Name())
So(err, ShouldBeNil) cmdRun("ps", []string{"aux"})
if names != "" {
break
}
time.Sleep(1 * time.Second)
}
// cmdRun("ps", []string{"aux"})
// assert container running // assert container running
names, err := getDockerByName(d.Name()) names, err := getDockerByName(d.Name())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(names, ShouldEqual, d.Name()+"\n") // So(names, ShouldEqual, d.Name()+"\n")
err = provider.Terminate() err = provider.Terminate()
So(err, ShouldBeNil) // So(err, ShouldBeNil)
// cmdRun("ps", []string{"aux"}) cmdRun("ps", []string{"aux"})
<-exitedErr <-exitedErr
// container should be terminated and removed // container should be terminated and removed
@ -125,7 +118,7 @@ sleep 20
So(names, ShouldEqual, "") So(names, ShouldEqual, "")
// check log content // check log content
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput+"\n") So(string(loggedContent), ShouldEqual, expectedOutput+"\n")

View File

@ -1,6 +1,7 @@
package worker

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
@ -12,7 +13,7 @@ import (
func TestExecPost(t *testing.T) { func TestExecPost(t *testing.T) {
Convey("ExecPost should work", t, func(ctx C) { Convey("ExecPost should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh") scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -45,7 +46,7 @@ echo $TUNASYNC_UPSTREAM_URL
echo $TUNASYNC_LOG_FILE echo $TUNASYNC_LOG_FILE
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
go job.Run(managerChan, semaphore) go job.Run(managerChan, semaphore)
@ -63,7 +64,7 @@ echo $TUNASYNC_LOG_FILE
expectedOutput := "success\n" expectedOutput := "success\n"
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status")) outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(outputContent), ShouldEqual, expectedOutput) So(string(outputContent), ShouldEqual, expectedOutput)
}) })
@ -84,7 +85,7 @@ echo $TUNASYNC_LOG_FILE
exit 1 exit 1
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
go job.Run(managerChan, semaphore) go job.Run(managerChan, semaphore)
@ -104,7 +105,7 @@ exit 1
expectedOutput := "failure\n" expectedOutput := "failure\n"
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status")) outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(outputContent), ShouldEqual, expectedOutput) So(string(outputContent), ShouldEqual, expectedOutput)
}) })

View File

@ -87,12 +87,10 @@ func (m *mirrorJob) SetProvider(provider mirrorProvider) error {
// runMirrorJob is the goroutine where syncing job runs in
// arguments:
//
//	provider: mirror provider object
//	ctrlChan: receives messages from the manager
//	managerChan: push messages to the manager, this channel should have a larger buffer
//	semaphore: make sure the concurrent running syncing job won't explode
//
// TODO: message struct for managerChan
func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) error {
	jobsDone.Add(1)
@ -157,43 +155,24 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
	var syncErr error
	syncDone := make(chan error, 1)
	started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
	go func() {
		err := provider.Run(started)
		syncDone <- err
	}()

	// Wait until provider started or error happened
	select {
	case err := <-syncDone:
		logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
		syncDone <- err // it will be read again later
	case <-started:
		logger.Debug("provider started")
	}
	// Now terminating the provider is feasible
	var termErr error
	timeout := provider.Timeout()
	if timeout <= 0 {
		timeout = 100000 * time.Hour // never time out
	}
	select {
	case syncErr = <-syncDone:
		logger.Debug("syncing done")
	case <-time.After(timeout):
		logger.Notice("provider timeout")
		termErr = provider.Terminate()
		syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
	case <-kill:
		logger.Debug("received kill")
		stopASAP = true
		termErr = provider.Terminate()
		syncErr = errors.New("killed by manager")
	}
	if termErr != nil {
		logger.Errorf("failed to terminate provider %s: %s", m.Name(), termErr.Error())
		return termErr
	}

	go func() {
		err := provider.Run()
		syncDone <- err
	}()
	select {
	case syncErr = <-syncDone:
		logger.Debug("syncing done")
	case <-kill:
		logger.Debug("received kill")
		stopASAP = true
		err := provider.Terminate()
		if err != nil {
			logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
			return err
		}
		syncErr = errors.New("killed by manager")
	}

	// post-exec hooks
	herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
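One detail worth calling out in the hunk above: syncDone is buffered with capacity 1, so a startup error can be pushed back onto the channel without blocking, and the second select then receives the same error as the sync result. A minimal standalone demonstration of that re-queue pattern (names and the error value are illustrative):

package main

import (
	"errors"
	"fmt"
)

func main() {
	done := make(chan error, 1) // buffered: re-queueing cannot block
	done <- errors.New("failed to start")

	// first consumer peeks the error and puts it back
	err := <-done
	done <- err // re-queue so a later receiver sees it too

	// second consumer (the "syncing done" path) still gets it
	fmt.Println(<-done)
}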

View File

@ -2,6 +2,7 @@ package worker
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -16,7 +17,7 @@ func TestMirrorJob(t *testing.T) {
InitLogger(true, true, false) InitLogger(true, true, false)
Convey("MirrorJob should work", t, func(ctx C) { Convey("MirrorJob should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh") scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -30,7 +31,6 @@ func TestMirrorJob(t *testing.T) {
logDir: tmpDir, logDir: tmpDir,
logFile: tmpFile, logFile: tmpFile,
interval: 1 * time.Second, interval: 1 * time.Second,
timeout: 7 * time.Second,
} }
provider, err := newCmdProvider(c) provider, err := newCmdProvider(c)
@ -41,7 +41,6 @@ func TestMirrorJob(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir) So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile) So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval) So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("For a normal mirror job", func(ctx C) { Convey("For a normal mirror job", func(ctx C) {
scriptContent := `#!/bin/bash scriptContent := `#!/bin/bash
@ -57,9 +56,9 @@ func TestMirrorJob(t *testing.T) {
provider.upstreamURL, provider.upstreamURL,
provider.LogFile(), provider.LogFile(),
) )
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile) readedScriptContent, err := ioutil.ReadFile(scriptFile)
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent)) So(readedScriptContent, ShouldResemble, []byte(scriptContent))
@ -85,7 +84,7 @@ func TestMirrorJob(t *testing.T) {
So(msg.status, ShouldEqual, Syncing) So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan msg = <-managerChan
So(msg.status, ShouldEqual, Success) So(msg.status, ShouldEqual, Success)
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobStart job.ctrlChan <- jobStart
@ -122,11 +121,11 @@ sleep 3
echo $TUNASYNC_WORKING_DIR echo $TUNASYNC_WORKING_DIR
echo '------' echo '------'
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
hookScriptFile := filepath.Join(tmpDir, "hook.sh") hookScriptFile := filepath.Join(tmpDir, "hook.sh")
err = os.WriteFile(hookScriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
h, err := newExecPostHook(provider, execOnFailure, hookScriptFile) h, err := newExecPostHook(provider, execOnFailure, hookScriptFile)
@ -187,7 +186,7 @@ echo $TUNASYNC_WORKING_DIR
sleep 5 sleep 5
echo $TUNASYNC_WORKING_DIR echo $TUNASYNC_WORKING_DIR
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10) managerChan := make(chan jobMessage, 10)
@ -212,7 +211,7 @@ echo $TUNASYNC_WORKING_DIR
So(msg.status, ShouldEqual, Failed) So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir()) expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable job.ctrlChan <- jobDisable
@ -235,7 +234,7 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(), provider.WorkingDir(), provider.WorkingDir(),
) )
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable job.ctrlChan <- jobDisable
@ -269,7 +268,7 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(), provider.WorkingDir(), provider.WorkingDir(),
) )
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable job.ctrlChan <- jobDisable
@ -325,7 +324,7 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(), provider.WorkingDir(), provider.WorkingDir(),
) )
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
@ -334,66 +333,6 @@ echo $TUNASYNC_WORKING_DIR
}) })
}) })
Convey("When a job timed out", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
sleep 10
echo $TUNASYNC_WORKING_DIR
`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("It should be automatically terminated", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("It should be retried", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
for i := 0; i < defaultMaxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldContainSubstring, "timeout after")
// re-schedule after last try
So(msg.schedule, ShouldEqual, i == defaultMaxRetry-1)
}
job.ctrlChan <- jobDisable
<-job.disabled
})
})
}) })
} }
@ -403,7 +342,7 @@ func TestConcurrentMirrorJobs(t *testing.T) {
InitLogger(true, true, false) InitLogger(true, true, false)
Convey("Concurrent MirrorJobs should work", t, func(ctx C) { Convey("Concurrent MirrorJobs should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)

View File

@ -2,6 +2,7 @@ package worker
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -38,7 +39,7 @@ func (l *logLimiter) preExec() error {
	}
	logDir := p.LogDir()
	files, err := os.ReadDir(logDir)
	files, err := ioutil.ReadDir(logDir)
	if err != nil {
		if os.IsNotExist(err) {
			os.MkdirAll(logDir, 0755)
@ -49,8 +50,7 @@ func (l *logLimiter) preExec() error {
	matchedFiles := []os.FileInfo{}
	for _, f := range files {
		if strings.HasPrefix(f.Name(), p.Name()) {
			info, _ := f.Info()
			matchedFiles = append(matchedFiles, info)
			matchedFiles = append(matchedFiles, f)
		}
	}

View File

@ -2,6 +2,7 @@ package worker
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -13,8 +14,8 @@ import (
func TestLogLimiter(t *testing.T) { func TestLogLimiter(t *testing.T) {
Convey("LogLimiter should work", t, func(ctx C) { Convey("LogLimiter should work", t, func(ctx C) {
tmpDir, _ := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
tmpLogDir, err := os.MkdirTemp("", "tunasync-log") tmpLogDir, err := ioutil.TempDir("", "tunasync-log")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
defer os.RemoveAll(tmpLogDir) defer os.RemoveAll(tmpLogDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -57,7 +58,7 @@ echo $TUNASYNC_UPSTREAM_URL
echo $TUNASYNC_LOG_FILE echo $TUNASYNC_LOG_FILE
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
go job.Run(managerChan, semaphore) go job.Run(managerChan, semaphore)
@ -85,7 +86,7 @@ echo $TUNASYNC_LOG_FILE
logFile, logFile,
) )
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest")) loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
}) })
@ -103,7 +104,7 @@ echo $TUNASYNC_LOG_FILE
sleep 5 sleep 5
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
go job.Run(managerChan, semaphore) go job.Run(managerChan, semaphore)
@ -133,10 +134,10 @@ sleep 5
logFile, logFile,
) )
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest")) loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
loggedContent, err = os.ReadFile(logFile + ".fail") loggedContent, err = ioutil.ReadFile(logFile + ".fail")
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
}) })

View File

@ -24,9 +24,9 @@ type mirrorProvider interface {
	Type() providerEnum

	// Start then Wait
	Run(started chan empty) error
	// run mirror job in background
	Run() error
	// Start the job
	Start() error
	// Wait job to finish
	Wait() error
@ -46,7 +46,6 @@ type mirrorProvider interface {
	Interval() time.Duration
	Retry() int
	Timeout() time.Duration

	WorkingDir() string
	LogDir() string
@ -60,10 +59,6 @@ type mirrorProvider interface {
	ExitContext() *Context
	// return context
	Context() *Context

	// set in newMirrorProvider, used by cmdJob.Wait
	SetSuccessExitCodes(codes []int)
	GetSuccessExitCodes() []int
}
// newProvider creates a mirrorProvider instance // newProvider creates a mirrorProvider instance
@ -96,9 +91,6 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
if mirror.Retry == 0 { if mirror.Retry == 0 {
mirror.Retry = cfg.Global.Retry mirror.Retry = cfg.Global.Retry
} }
if mirror.Timeout == 0 {
mirror.Timeout = cfg.Global.Timeout
}
logDir = formatLogDir(logDir, mirror) logDir = formatLogDir(logDir, mirror)
// IsMaster // IsMaster
@ -126,7 +118,6 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
logFile: filepath.Join(logDir, "latest.log"), logFile: filepath.Join(logDir, "latest.log"),
interval: time.Duration(mirror.Interval) * time.Minute, interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry, retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
env: mirror.Env, env: mirror.Env,
} }
p, err := newCmdProvider(pc) p, err := newCmdProvider(pc)
@ -144,11 +135,7 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
password: mirror.Password, password: mirror.Password,
excludeFile: mirror.ExcludeFile, excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions, extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
globalOptions: cfg.Global.RsyncOptions,
overriddenOptions: mirror.RsyncOverride, overriddenOptions: mirror.RsyncOverride,
useOverrideOnly: mirror.RsyncOverrideOnly,
rsyncEnv: mirror.Env, rsyncEnv: mirror.Env,
workingDir: mirrorDir, workingDir: mirrorDir,
logDir: logDir, logDir: logDir,
@ -157,7 +144,6 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
useIPv4: mirror.UseIPv4, useIPv4: mirror.UseIPv4,
interval: time.Duration(mirror.Interval) * time.Minute, interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry, retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
} }
p, err := newRsyncProvider(rc) p, err := newRsyncProvider(rc)
if err != nil { if err != nil {
@ -167,25 +153,21 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
		provider = p
	case provTwoStageRsync:
		rc := twoStageRsyncConfig{
			name:              mirror.Name,
			stage1Profile:     mirror.Stage1Profile,
			upstreamURL:       mirror.Upstream,
			rsyncCmd:          mirror.Command,
			username:          mirror.Username,
			password:          mirror.Password,
			excludeFile:       mirror.ExcludeFile,
			extraOptions:      mirror.RsyncOptions,
			rsyncNeverTimeout: mirror.RsyncNoTimeo,
			rsyncTimeoutValue: mirror.RsyncTimeout,
			rsyncEnv:          mirror.Env,
			workingDir:        mirrorDir,
			logDir:            logDir,
			logFile:           filepath.Join(logDir, "latest.log"),
			useIPv6:           mirror.UseIPv6,
			useIPv4:           mirror.UseIPv4,
			interval:          time.Duration(mirror.Interval) * time.Minute,
			retry:             mirror.Retry,
			timeout:           time.Duration(mirror.Timeout) * time.Second,
		}
p, err := newTwoStageRsyncProvider(rc) p, err := newTwoStageRsyncProvider(rc)
if err != nil { if err != nil {
@ -218,7 +200,8 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
// Add Cgroup Hook // Add Cgroup Hook
provider.AddHook( provider.AddHook(
newCgroupHook( newCgroupHook(
provider, cfg.Cgroup, mirror.MemoryLimit, provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group,
cfg.Cgroup.Subsystem, mirror.MemoryLimit,
), ),
) )
} }
@ -253,17 +236,5 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
} }
addHookFromCmdList(mirror.ExecOnFailureExtra, execOnFailure) addHookFromCmdList(mirror.ExecOnFailureExtra, execOnFailure)
successExitCodes := []int{}
if cfg.Global.SuccessExitCodes != nil {
successExitCodes = append(successExitCodes, cfg.Global.SuccessExitCodes...)
}
if mirror.SuccessExitCodes != nil {
successExitCodes = append(successExitCodes, mirror.SuccessExitCodes...)
}
if len(successExitCodes) > 0 {
logger.Infof("Non-zero success exit codes set for mirror %s: %v", mirror.Name, successExitCodes)
provider.SetSuccessExitCodes(successExitCodes)
}
	return provider
}
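The tail of newMirrorProvider above concatenates the global and per-mirror lists, globals first, which is exactly the order the config test asserts ([10, 20, 30, 40]). As a standalone sketch (mergeSuccessExitCodes is an illustrative name, not the repo's API):

package main

import "fmt"

// mergeSuccessExitCodes concatenates global codes before per-mirror
// codes, mirroring the append order in newMirrorProvider above.
func mergeSuccessExitCodes(global, mirror []int) []int {
	codes := []int{}
	codes = append(codes, global...)
	codes = append(codes, mirror...)
	return codes
}

func main() {
	fmt.Println(mergeSuccessExitCodes([]int{10, 20}, []int{30, 40})) // [10 20 30 40]
}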

View File

@ -2,6 +2,7 @@ package worker
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -13,7 +14,7 @@ import (
func TestRsyncProvider(t *testing.T) { func TestRsyncProvider(t *testing.T) {
Convey("Rsync Provider should work", t, func() { Convey("Rsync Provider should work", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync") scriptFile := filepath.Join(tmpDir, "myrsync")
@ -27,7 +28,6 @@ func TestRsyncProvider(t *testing.T) {
logDir: tmpDir, logDir: tmpDir,
logFile: tmpFile, logFile: tmpFile,
useIPv6: true, useIPv6: true,
timeout: 100 * time.Second,
interval: 600 * time.Second, interval: 600 * time.Second,
} }
@ -40,7 +40,6 @@ func TestRsyncProvider(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir) So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile) So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval) So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("When entering a context (auto exit)", func() { Convey("When entering a context (auto exit)", func() {
func() { func() {
@ -79,7 +78,7 @@ echo "Total file size: 1.33T bytes"
echo "Done" echo "Done"
exit 0 exit 0
` `
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755) err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil) So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir()) targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@ -90,16 +89,16 @@ exit 0
"Done\n", "Done\n",
targetDir, targetDir,
fmt.Sprintf( fmt.Sprintf(
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+ "-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+ "--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 -6 %s %s", "--timeout=120 -6 %s %s",
provider.upstreamURL, provider.WorkingDir(), provider.upstreamURL, provider.WorkingDir(),
), ),
) )
err = provider.Run(make(chan empty, 1)) err = provider.Run()
So(err, ShouldBeNil) So(err, ShouldBeNil)
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput) So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent)) // fmt.Println(string(loggedContent))
@ -108,7 +107,7 @@ exit 0
}) })
Convey("If the rsync program fails", t, func() { Convey("If the rsync program fails", t, func() {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file") tmpFile := filepath.Join(tmpDir, "log_file")
@ -128,9 +127,9 @@ exit 0
provider, err := newRsyncProvider(c) provider, err := newRsyncProvider(c)
So(err, ShouldBeNil) So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1)) err = provider.Run()
So(err, ShouldNotBeNil) So(err, ShouldNotBeNil)
loggedContent, err := os.ReadFile(provider.LogFile()) loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error") So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error")
}) })
@@ -139,7 +138,7 @@ exit 0
 func TestRsyncProviderWithAuthentication(t *testing.T) {
     Convey("Rsync Provider with password should work", t, func() {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         defer os.RemoveAll(tmpDir)
         So(err, ShouldBeNil)
         scriptFile := filepath.Join(tmpDir, "myrsync")
@@ -147,19 +146,18 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
         proxyAddr := "127.0.0.1:1233"

         c := rsyncConfig{
             name:              "tuna",
             upstreamURL:       "rsync://rsync.tuna.moe/tuna/",
             rsyncCmd:          scriptFile,
             username:          "tunasync",
             password:          "tunasyncpassword",
             workingDir:        tmpDir,
             extraOptions:      []string{"--delete-excluded"},
-            rsyncTimeoutValue: 30,
             rsyncEnv:          map[string]string{"RSYNC_PROXY": proxyAddr},
             logDir:            tmpDir,
             logFile:           tmpFile,
             useIPv4:           true,
             interval:          600 * time.Second,
         }

         provider, err := newRsyncProvider(c)
@@ -179,7 +177,7 @@ sleep 1
 echo "Done"
 exit 0
 `
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

         targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@@ -189,17 +187,17 @@ exit 0
             "Done\n",
             targetDir,
             fmt.Sprintf(
-                "%s %s %s -aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
+                "%s %s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
                     "--delete --delete-after --delay-updates --safe-links "+
-                    "--timeout=30 -4 --delete-excluded %s %s",
+                    "--timeout=120 -4 --delete-excluded %s %s",
                 provider.username, provider.password, proxyAddr,
                 provider.upstreamURL, provider.WorkingDir(),
             ),
         )

-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldBeNil)
-        loggedContent, err := os.ReadFile(provider.LogFile())
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldEqual, expectedOutput)
         // fmt.Println(string(loggedContent))
@@ -210,7 +208,7 @@ exit 0
 func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
     Convey("Rsync Provider with overridden options should work", t, func() {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         defer os.RemoveAll(tmpDir)
         So(err, ShouldBeNil)
         scriptFile := filepath.Join(tmpDir, "myrsync")
@@ -221,7 +219,6 @@ func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
             upstreamURL:       "rsync://rsync.tuna.moe/tuna/",
             rsyncCmd:          scriptFile,
             workingDir:        tmpDir,
-            rsyncNeverTimeout: true,
             overriddenOptions: []string{"-aHvh", "--no-o", "--no-g", "--stats"},
             extraOptions:      []string{"--delete-excluded"},
             logDir:            tmpDir,
@@ -247,7 +244,7 @@ sleep 1
 echo "Done"
 exit 0
 `
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

         targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@@ -260,9 +257,9 @@ exit 0
             provider.WorkingDir(),
         )

-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldBeNil)
-        loggedContent, err := os.ReadFile(provider.LogFile())
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldEqual, expectedOutput)
         // fmt.Println(string(loggedContent))
@@ -271,81 +268,9 @@ exit 0
     })
 }

-func TestRsyncProviderWithDocker(t *testing.T) {
-    Convey("Rsync in Docker should work", t, func() {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
-        defer os.RemoveAll(tmpDir)
-        So(err, ShouldBeNil)
-        scriptFile := filepath.Join(tmpDir, "myrsync")
-        excludeFile := filepath.Join(tmpDir, "exclude.txt")
-
-        g := &Config{
-            Global: globalConfig{
-                Retry: 2,
-            },
-            Docker: dockerConfig{
-                Enable: true,
-                Volumes: []string{
-                    scriptFile + ":/bin/myrsync",
-                    "/etc/gai.conf:/etc/gai.conf:ro",
-                },
-            },
-        }
-        c := mirrorConfig{
-            Name:        "tuna",
-            Provider:    provRsync,
-            Upstream:    "rsync://rsync.tuna.moe/tuna/",
-            Command:     "/bin/myrsync",
-            ExcludeFile: excludeFile,
-            DockerImage: "alpine:3.8",
-            LogDir:      tmpDir,
-            MirrorDir:   tmpDir,
-            UseIPv6:     true,
-            Timeout:     100,
-            Interval:    600,
-        }
-
-        provider := newMirrorProvider(c, g)
-
-        So(provider.Type(), ShouldEqual, provRsync)
-        So(provider.Name(), ShouldEqual, c.Name)
-        So(provider.WorkingDir(), ShouldEqual, c.MirrorDir)
-        So(provider.LogDir(), ShouldEqual, c.LogDir)
-
-        cmdScriptContent := `#!/bin/sh
-#echo "$@"
-while [[ $# -gt 0 ]]; do
-    if [[ "$1" = "--exclude-from" ]]; then
-        cat "$2"
-        shift
-    fi
-    shift
-done
-`
-        err = os.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
-        So(err, ShouldBeNil)
-        err = os.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
-        So(err, ShouldBeNil)
-
-        for _, hook := range provider.Hooks() {
-            err = hook.preExec()
-            So(err, ShouldBeNil)
-        }
-        err = provider.Run(make(chan empty, 1))
-        So(err, ShouldBeNil)
-        for _, hook := range provider.Hooks() {
-            err = hook.postExec()
-            So(err, ShouldBeNil)
-        }
-        loggedContent, err := os.ReadFile(provider.LogFile())
-        So(err, ShouldBeNil)
-        So(string(loggedContent), ShouldEqual, "__some_pattern")
-    })
-}
-
 func TestCmdProvider(t *testing.T) {
     Convey("Command Provider should work", t, func(ctx C) {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         defer os.RemoveAll(tmpDir)
         So(err, ShouldBeNil)
         scriptFile := filepath.Join(tmpDir, "cmd.sh")
@@ -390,29 +315,29 @@ echo $AOSP_REPO_BIN
             provider.LogFile(),
             "/usr/local/bin/repo",
         )
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)
-        readedScriptContent, err := os.ReadFile(scriptFile)
+        readedScriptContent, err := ioutil.ReadFile(scriptFile)
         So(err, ShouldBeNil)
         So(readedScriptContent, ShouldResemble, []byte(scriptContent))

-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldBeNil)
-        loggedContent, err := os.ReadFile(provider.LogFile())
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldEqual, expectedOutput)
     })

     Convey("If a command fails", func() {
         scriptContent := `exit 1`
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)
-        readedScriptContent, err := os.ReadFile(scriptFile)
+        readedScriptContent, err := ioutil.ReadFile(scriptFile)
         So(err, ShouldBeNil)
         So(readedScriptContent, ShouldResemble, []byte(scriptContent))

-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldNotBeNil)
     })

@@ -421,17 +346,14 @@ echo $AOSP_REPO_BIN
         scriptContent := `#!/bin/bash
sleep 10
`
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

-        started := make(chan empty, 1)
         go func() {
-            err := provider.Run(started)
+            err = provider.Run()
             ctx.So(err, ShouldNotBeNil)
         }()
-        <-started
-        So(provider.IsRunning(), ShouldBeTrue)
         time.Sleep(1 * time.Second)
         err = provider.Terminate()
         So(err, ShouldBeNil)
@@ -439,7 +361,7 @@ sleep 10
         })
     })

     Convey("Command Provider without log file should work", t, func(ctx C) {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         defer os.RemoveAll(tmpDir)
         So(err, ShouldBeNil)
@@ -467,13 +389,13 @@ sleep 10
         Convey("Run the command", func() {
-            err = provider.Run(make(chan empty, 1))
+            err = provider.Run()
             So(err, ShouldBeNil)
         })
     })

     Convey("Command Provider with RegExprs should work", t, func(ctx C) {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         defer os.RemoveAll(tmpDir)
         So(err, ShouldBeNil)
         tmpFile := filepath.Join(tmpDir, "log_file")
@@ -495,7 +417,7 @@ sleep 10
         provider, err := newCmdProvider(c)
         So(err, ShouldBeNil)
-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldNotBeNil)
         So(provider.DataSize(), ShouldBeEmpty)
     })
@@ -505,7 +427,7 @@ sleep 10
         provider, err := newCmdProvider(c)
         So(err, ShouldBeNil)
-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldBeNil)
     })
@@ -515,7 +437,7 @@ sleep 10
         provider, err := newCmdProvider(c)
         So(err, ShouldBeNil)
-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldNotBeNil)
     })
@@ -524,7 +446,7 @@ sleep 10
         provider, err := newCmdProvider(c)
         So(err, ShouldBeNil)
-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldBeNil)
         So(provider.DataSize(), ShouldNotBeEmpty)
         _, err = strconv.ParseFloat(provider.DataSize(), 32)
@@ -536,7 +458,7 @@ sleep 10
         provider, err := newCmdProvider(c)
         So(err, ShouldBeNil)
-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldBeNil)
         So(provider.DataSize(), ShouldBeEmpty)
     })
@@ -547,88 +469,34 @@ sleep 10
         provider, err := newCmdProvider(c)
         So(err, ShouldBeNil)
-        err = provider.Run(make(chan empty, 1))
+        err = provider.Run()
         So(err, ShouldNotBeNil)
         So(provider.DataSize(), ShouldBeEmpty)
     })
 })
Convey("Command Provider with successExitCodes should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "tuna-cmd",
upstreamURL: "http://mirrors.tuna.moe/",
command: "bash " + scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
}
provider, err := newCmdProvider(c)
provider.SetSuccessExitCodes([]int{199, 200})
So(err, ShouldBeNil)
So(provider.Type(), ShouldEqual, provCommand)
So(provider.Name(), ShouldEqual, c.name)
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.GetSuccessExitCodes(), ShouldResemble, []int{199, 200})
Convey("Command exits with configured successExitCodes", func() {
scriptContent := `exit 199`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
Convey("Command exits with unknown exit code", func() {
scriptContent := `exit 201`
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
})
} }
func TestTwoStageRsyncProvider(t *testing.T) { func TestTwoStageRsyncProvider(t *testing.T) {
Convey("TwoStageRsync Provider should work", t, func(ctx C) { Convey("TwoStageRsync Provider should work", t, func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync") tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil) So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync") scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file") tmpFile := filepath.Join(tmpDir, "log_file")
c := twoStageRsyncConfig{ c := twoStageRsyncConfig{
name: "tuna-two-stage-rsync", name: "tuna-two-stage-rsync",
upstreamURL: "rsync://mirrors.tuna.moe/", upstreamURL: "rsync://mirrors.tuna.moe/",
stage1Profile: "debian", stage1Profile: "debian",
rsyncCmd: scriptFile, rsyncCmd: scriptFile,
workingDir: tmpDir, workingDir: tmpDir,
logDir: tmpDir, logDir: tmpDir,
logFile: tmpFile, logFile: tmpFile,
useIPv6: true, useIPv6: true,
excludeFile: tmpFile, excludeFile: tmpFile,
rsyncTimeoutValue: 30, extraOptions: []string{"--delete-excluded", "--cache"},
extraOptions: []string{"--delete-excluded", "--cache"}, username: "hello",
username: "hello", password: "world",
password: "world",
} }
provider, err := newTwoStageRsyncProvider(c) provider, err := newTwoStageRsyncProvider(c)
@@ -649,10 +517,10 @@ sleep 1
 echo "Done"
 exit 0
 `
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

-        err = provider.Run(make(chan empty, 2))
+        err = provider.Run()
         So(err, ShouldBeNil)

         targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@@ -665,21 +533,21 @@ exit 0
             "Done\n",
             targetDir,
             fmt.Sprintf(
-                "-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
-                    "--include=*.diff/ --include=by-hash/ --exclude=*.diff/Index --exclude=Contents* --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --exclude=i18n/* --exclude=dep11/* --exclude=installer-*/current --exclude=ls-lR* --timeout=30 -6 "+
+                "-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
+                    "--timeout=120 --exclude dists/ -6 "+
                     "--exclude-from %s %s %s",
                 provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
             ),
             targetDir,
             fmt.Sprintf(
-                "-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
+                "-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
                     "--delete --delete-after --delay-updates --safe-links "+
-                    "--delete-excluded --cache --timeout=30 -6 --exclude-from %s %s %s",
+                    "--timeout=120 --delete-excluded --cache -6 --exclude-from %s %s %s",
                 provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
             ),
         )

-        loggedContent, err := os.ReadFile(provider.LogFile())
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldEqual, expectedOutput)
         // fmt.Println(string(loggedContent))
@@ -691,29 +559,26 @@ echo $@
 sleep 10
 exit 0
 `
-        err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
+        err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

-        started := make(chan empty, 2)
         go func() {
-            err := provider.Run(started)
+            err = provider.Run()
             ctx.So(err, ShouldNotBeNil)
         }()
-        <-started
-        So(provider.IsRunning(), ShouldBeTrue)
         time.Sleep(1 * time.Second)
         err = provider.Terminate()
         So(err, ShouldBeNil)

         expectedOutput := fmt.Sprintf(
-            "-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
-                "--include=*.diff/ --include=by-hash/ --exclude=*.diff/Index --exclude=Contents* --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --exclude=i18n/* --exclude=dep11/* --exclude=installer-*/current --exclude=ls-lR* --timeout=30 -6 "+
+            "-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
+                "--timeout=120 --exclude dists/ -6 "+
                 "--exclude-from %s %s %s\n",
             provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
         )

-        loggedContent, err := os.ReadFile(provider.LogFile())
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldStartWith, expectedOutput)
         // fmt.Println(string(loggedContent))
@@ -721,7 +586,7 @@ exit 0
     })

     Convey("If the rsync program fails", t, func(ctx C) {
-        tmpDir, err := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         defer os.RemoveAll(tmpDir)
         So(err, ShouldBeNil)
         tmpFile := filepath.Join(tmpDir, "log_file")
@@ -741,9 +606,9 @@ exit 0
         provider, err := newTwoStageRsyncProvider(c)
         So(err, ShouldBeNil)

-        err = provider.Run(make(chan empty, 2))
+        err = provider.Run()
         So(err, ShouldNotBeNil)
-        loggedContent, err := os.ReadFile(provider.LogFile())
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldContainSubstring, "Error in socket I/O")

View File

@@ -2,7 +2,6 @@ package worker
 import (
     "errors"
-    "fmt"
     "strings"
     "time"
@@ -14,17 +13,12 @@ type rsyncConfig struct {
     rsyncCmd                                     string
     upstreamURL, username, password, excludeFile string
     extraOptions                                 []string
-    globalOptions                                []string
     overriddenOptions                            []string
-    useOverrideOnly                              bool
-    rsyncNeverTimeout                            bool
-    rsyncTimeoutValue                            int
     rsyncEnv                                     map[string]string
     workingDir, logDir, logFile                  string
     useIPv6, useIPv4                             bool
     interval                                     time.Duration
     retry                                        int
-    timeout                                      time.Duration
 }

 // An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -49,7 +43,6 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
             ctx:      NewContext(),
             interval: c.interval,
             retry:    c.retry,
-            timeout:  c.timeout,
         },
         rsyncConfig: c,
     }
@@ -69,44 +62,25 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
     options := []string{
         "-aHvh", "--no-o", "--no-g", "--stats",
-        "--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
+        "--exclude", ".~tmp~/",
         "--delete", "--delete-after", "--delay-updates",
-        "--safe-links",
+        "--safe-links", "--timeout=120",
     }
     if c.overriddenOptions != nil {
         options = c.overriddenOptions
     }

-    if c.useOverrideOnly {
-        if c.overriddenOptions == nil {
-            return nil, errors.New("rsync_override_only is set but no rsync_override provided")
-        }
-        // use overridden options only
-    } else {
-        if !c.rsyncNeverTimeout {
-            timeo := 120
-            if c.rsyncTimeoutValue > 0 {
-                timeo = c.rsyncTimeoutValue
-            }
-            options = append(options, fmt.Sprintf("--timeout=%d", timeo))
-        }
-        if c.useIPv6 {
-            options = append(options, "-6")
-        } else if c.useIPv4 {
-            options = append(options, "-4")
-        }
-        if c.excludeFile != "" {
-            options = append(options, "--exclude-from", c.excludeFile)
-        }
-        if c.globalOptions != nil {
-            options = append(options, c.globalOptions...)
-        }
-        if c.extraOptions != nil {
-            options = append(options, c.extraOptions...)
-        }
+    if c.useIPv6 {
+        options = append(options, "-6")
+    } else if c.useIPv4 {
+        options = append(options, "-4")
+    }
+
+    if c.excludeFile != "" {
+        options = append(options, "--exclude-from", c.excludeFile)
+    }
+    if c.extraOptions != nil {
+        options = append(options, c.extraOptions...)
     }

     provider.options = options
@@ -129,13 +103,12 @@ func (p *rsyncProvider) DataSize() string {
     return p.dataSize
 }

-func (p *rsyncProvider) Run(started chan empty) error {
+func (p *rsyncProvider) Run() error {
     p.dataSize = ""
     defer p.closeLogFile()
     if err := p.Start(); err != nil {
         return err
     }
-    started <- empty{}
     if err := p.Wait(); err != nil {
         code, msg := internal.TranslateRsyncErrorCode(err)
         if code != 0 {
@@ -171,6 +144,5 @@ func (p *rsyncProvider) Start() error {
         return err
     }
     p.isRunning.Store(true)
-    logger.Debugf("set isRunning to true: %s", p.Name())
     return nil
 }

View File

@@ -5,15 +5,12 @@ import (
     "fmt"
     "os"
     "os/exec"
-    "slices"
     "strings"
     "sync"
     "syscall"
     "time"

     "github.com/codeskyblue/go-sh"
-    cgv1 "github.com/containerd/cgroups/v3/cgroup1"
-    "github.com/moby/sys/reexec"
     "golang.org/x/sys/unix"
 )
@@ -59,10 +56,6 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
             kv := fmt.Sprintf("%s=%s", k, v)
             args = append(args, "-e", kv)
         }
-        // set memlimit
-        if d.memoryLimit != 0 {
-            args = append(args, "-m", fmt.Sprint(d.memoryLimit.Value()))
-        }
         // apply options
         args = append(args, d.options...)
         // apply image and command
@@ -73,7 +66,10 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
         cmd = exec.Command(c, args...)
     } else if provider.Cgroup() != nil {
-        cmd = reexec.Command(append([]string{"tunasync-exec"}, cmdAndArgs...)...)
+        c := "cgexec"
+        args := []string{"-g", provider.Cgroup().Cgroup()}
+        args = append(args, cmdAndArgs...)
+        cmd = exec.Command(c, args...)
     } else {
         if len(cmdAndArgs) == 1 {
@@ -108,59 +104,9 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
 }

 func (c *cmdJob) Start() error {
-    cg := c.provider.Cgroup()
-    var (
-        pipeR *os.File
-        pipeW *os.File
-    )
-    if cg != nil {
-        logger.Debugf("Preparing cgroup sync pipes for job %s", c.provider.Name())
-        var err error
-        pipeR, pipeW, err = os.Pipe()
-        if err != nil {
-            return err
-        }
-        c.cmd.ExtraFiles = []*os.File{pipeR}
-        defer pipeR.Close()
-        defer pipeW.Close()
-    }
-
     logger.Debugf("Command start: %v", c.cmd.Args)
     c.finished = make(chan empty, 1)

-    if err := c.cmd.Start(); err != nil {
-        return err
-    }
-    if cg != nil {
-        if err := pipeR.Close(); err != nil {
-            return err
-        }
-        if c.cmd == nil || c.cmd.Process == nil {
-            return errProcessNotStarted
-        }
-        pid := c.cmd.Process.Pid
-        if cg.cgCfg.isUnified {
-            if err := cg.cgMgrV2.AddProc(uint64(pid)); err != nil {
-                if errors.Is(err, syscall.ESRCH) {
-                    logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring", pid)
-                } else {
-                    return err
-                }
-            }
-        } else {
-            if err := cg.cgMgrV1.Add(cgv1.Process{Pid: pid}); err != nil {
-                if errors.Is(err, syscall.ESRCH) {
-                    logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring", pid)
-                } else {
-                    return err
-                }
-            }
-        }
-        if _, err := pipeW.WriteString(string(cmdCont)); err != nil {
-            return err
-        }
-    }
-    return nil
+    return c.cmd.Start()
 }

 func (c *cmdJob) Wait() error {
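The block deleted from Start() is the parent half of a handshake: master passes the read end of a pipe to the re-exec'd "tunasync-exec" helper as ExtraFiles[0] (fd 3), writes the child's pid into the cgroup, and only then sends the continuation byte (cmdCont). The helper's side is not part of this diff; what follows is a hypothetical sketch of what such a helper presumably does, not tunasync's actual implementation:

    package main

    import (
        "os"
        "os/exec"
        "syscall"
    )

    // waitAndExec blocks on fd 3 (ExtraFiles[0]) until the parent has placed
    // this process into the cgroup, then execs the real command in place.
    func waitAndExec(args []string) error {
        pipe := os.NewFile(3, "sync-pipe") // ExtraFiles[0] shows up as fd 3
        buf := make([]byte, 1)
        if _, err := pipe.Read(buf); err != nil { // blocks until cmdCont arrives
            return err
        }
        path, err := exec.LookPath(args[0])
        if err != nil {
            return err
        }
        return syscall.Exec(path, args, os.Environ()) // replaces this process
    }

    func main() {
        if err := waitAndExec(os.Args[1:]); err != nil {
            os.Exit(1)
        }
    }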
@@ -172,18 +118,9 @@ func (c *cmdJob) Wait() error {
         return c.retErr
     default:
         err := c.cmd.Wait()
-        c.retErr = err
         close(c.finished)
-        if err != nil {
-            code := err.(*exec.ExitError).ExitCode()
-            allowedCodes := c.provider.GetSuccessExitCodes()
-            if slices.Contains(allowedCodes, code) {
-                // process exited with non-success status
-                logger.Infof("Command %s exited with code %d: treated as success (allowed: %v)", c.cmd.Args, code, allowedCodes)
-            } else {
-                c.retErr = err
-            }
-        }
-        return c.retErr
+        return err
     }
 }
@@ -212,10 +149,10 @@ func (c *cmdJob) Terminate() error {
     select {
     case <-time.After(2 * time.Second):
         unix.Kill(c.cmd.Process.Pid, syscall.SIGKILL)
-        logger.Warningf("SIGTERM failed to kill the job in 2s. SIGKILL sent")
+        return errors.New("SIGTERM failed to kill the job")
     case <-c.finished:
+        return nil
     }
-    return nil
 }
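Both versions of Terminate() implement the same escalation: SIGTERM first, a two-second grace period, then SIGKILL; they differ only in whether the escalation is reported as an error (v0.6.2) or merely logged (master). A generic sketch of the pattern, with invented names:

    package procutil

    import (
        "os"
        "syscall"
        "time"
    )

    // terminate asks politely with SIGTERM, waits out a grace period, then
    // falls back to SIGKILL. done should be closed when the process exits.
    func terminate(p *os.Process, done <-chan struct{}, grace time.Duration) error {
        if err := p.Signal(syscall.SIGTERM); err != nil {
            return err
        }
        select {
        case <-done:
            return nil // exited within the grace period
        case <-time.After(grace):
            return p.Kill() // escalate to SIGKILL
        }
    }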
// Copied from go-sh // Copied from go-sh

View File

@@ -15,14 +15,11 @@ type twoStageRsyncConfig struct {
     stage1Profile                                string
     upstreamURL, username, password, excludeFile string
     extraOptions                                 []string
-    rsyncNeverTimeout                            bool
-    rsyncTimeoutValue                            int
     rsyncEnv                                     map[string]string
     workingDir, logDir, logFile                  string
-    useIPv6, useIPv4                             bool
+    useIPv6                                      bool
     interval                                     time.Duration
     retry                                        int
-    timeout                                      time.Duration
 }

 // An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -34,25 +31,11 @@ type twoStageRsyncProvider struct {
     dataSize string
 }

-// ref: https://salsa.debian.org/mirror-team/archvsync/-/blob/master/bin/ftpsync#L431
 var rsyncStage1Profiles = map[string]([]string){
-    "debian": []string{
-        "--include=*.diff/",
-        "--include=by-hash/",
-        "--exclude=*.diff/Index",
-        "--exclude=Contents*",
-        "--exclude=Packages*",
-        "--exclude=Sources*",
-        "--exclude=Release*",
-        "--exclude=InRelease",
-        "--exclude=i18n/*",
-        "--exclude=dep11/*",
-        "--exclude=installer-*/current",
-        "--exclude=ls-lR*",
-    },
+    "debian": []string{"dists/"},
     "debian-oldstyle": []string{
-        "--exclude=Packages*", "--exclude=Sources*", "--exclude=Release*",
-        "--exclude=InRelease", "--exclude=i18n/*", "--exclude=ls-lR*", "--exclude=dep11/*",
+        "Packages*", "Sources*", "Release*",
+        "InRelease", "i18n/*", "ls-lR*", "dep11/*",
     },
 }
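The two profile shapes are consumed differently by Options() in the next hunk: v0.6.2 keeps bare patterns and expands each into an --exclude pair, while master stores fully formed flags so a profile can also carry --include rules (the ftpsync-derived list above). A sketch of the v0.6.2-style expansion, with an invented function name:

    package rsyncopts

    // expandStage1Profile turns bare patterns such as "dists/" into
    // "--exclude <pattern>" argument pairs, the way v0.6.2's Options() does;
    // master instead stores the complete flags in the profile itself.
    func expandStage1Profile(patterns []string) []string {
        opts := make([]string, 0, 2*len(patterns))
        for _, p := range patterns {
            opts = append(opts, "--exclude", p)
        }
        return opts
    }

For example, expandStage1Profile([]string{"dists/"}) yields []string{"--exclude", "dists/"}, which matches the `--exclude dists/` seen in the stage-1 command lines of the provider tests above.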
@@ -71,19 +54,18 @@ func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, er
             ctx:      NewContext(),
             interval: c.interval,
             retry:    c.retry,
-            timeout:  c.timeout,
         },
         twoStageRsyncConfig: c,
         stage1Options: []string{
             "-aHvh", "--no-o", "--no-g", "--stats",
-            "--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
-            "--safe-links",
+            "--exclude", ".~tmp~/",
+            "--safe-links", "--timeout=120",
         },
         stage2Options: []string{
             "-aHvh", "--no-o", "--no-g", "--stats",
-            "--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
+            "--exclude", ".~tmp~/",
             "--delete", "--delete-after", "--delay-updates",
-            "--safe-links",
+            "--safe-links", "--timeout=120",
         },
     }
@@ -123,11 +105,13 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
     var options []string
     if stage == 1 {
         options = append(options, p.stage1Options...)
-        stage1Profile, ok := rsyncStage1Profiles[p.stage1Profile]
+        stage1Excludes, ok := rsyncStage1Profiles[p.stage1Profile]
         if !ok {
             return nil, errors.New("Invalid Stage 1 Profile")
         }
-        options = append(options, stage1Profile...)
+        for _, exc := range stage1Excludes {
+            options = append(options, "--exclude", exc)
+        }
     } else if stage == 2 {
         options = append(options, p.stage2Options...)
@@ -138,18 +122,8 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
         return []string{}, fmt.Errorf("Invalid stage: %d", stage)
     }

-    if !p.rsyncNeverTimeout {
-        timeo := 120
-        if p.rsyncTimeoutValue > 0 {
-            timeo = p.rsyncTimeoutValue
-        }
-        options = append(options, fmt.Sprintf("--timeout=%d", timeo))
-    }
-
     if p.useIPv6 {
         options = append(options, "-6")
-    } else if p.useIPv4 {
-        options = append(options, "-4")
     }

     if p.excludeFile != "" {
@@ -159,7 +133,7 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
     return options, nil
 }

-func (p *twoStageRsyncProvider) Run(started chan empty) error {
+func (p *twoStageRsyncProvider) Run() error {
     p.Lock()
     defer p.Unlock()
@@ -189,7 +163,6 @@ func (p *twoStageRsyncProvider) Run(started chan empty) error {
         }

         p.isRunning.Store(true)
         logger.Debugf("set isRunning to true: %s", p.Name())
-        started <- empty{}
         p.Unlock()

         err = p.Wait()

View File

@@ -54,12 +54,6 @@ func NewTUNASyncWorker(cfg *Config) *Worker {
         w.httpClient = httpClient
     }

-    if cfg.Cgroup.Enable {
-        if err := initCgroup(&cfg.Cgroup); err != nil {
-            logger.Errorf("Error initializing Cgroup: %s", err.Error())
-            return nil
-        }
-    }
     w.initJobs()
     w.makeHTTPServer()
     return w
@@ -67,7 +61,7 @@ func NewTUNASyncWorker(cfg *Config) *Worker {
 // Run runs worker forever
 func (w *Worker) Run() {
-    w.registerWorker()
+    w.registorWorker()
     go w.runHTTPServer()
     w.runSchedule()
 }
@@ -317,7 +311,7 @@ func (w *Worker) runSchedule() {
     schedInfo := w.schedule.GetJobs()
     w.updateSchedInfo(schedInfo)

-    tick := time.NewTicker(5 * time.Second).C
+    tick := time.Tick(5 * time.Second)
     for {
         select {
         case jobMsg := <-w.managerChan:
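A nuance behind this one-liner: a time.Tick channel can never be stopped, so its ticker lives as long as the process; time.NewTicker(...).C as written here is equivalent, because runSchedule never returns, but the fully scoped form is the safer habit when the loop can exit. A sketch with invented names:

    package sched

    import "time"

    // runEvery invokes work once per interval until stop is closed, and
    // releases the ticker on the way out; a time.Tick channel offers no
    // way to do that, so its ticker would run for the rest of the process.
    func runEvery(interval time.Duration, stop <-chan struct{}, work func()) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                work()
            case <-stop:
                return
            }
        }
    }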
@@ -399,7 +393,7 @@ func (w *Worker) URL() string {
     return fmt.Sprintf("%s://%s:%d/", proto, w.cfg.Server.Hostname, w.cfg.Server.Port)
 }

-func (w *Worker) registerWorker() {
+func (w *Worker) registorWorker() {
     msg := WorkerStatus{
         ID:  w.Name(),
         URL: w.URL(),
@@ -408,17 +402,8 @@ func (w *Worker) registerWorker() {
     for _, root := range w.cfg.Manager.APIBaseList() {
         url := fmt.Sprintf("%s/workers", root)
         logger.Debugf("register on manager url: %s", url)
-        for retry := 10; retry > 0; {
-            if _, err := PostJSON(url, msg, w.httpClient); err != nil {
-                logger.Errorf("Failed to register worker")
-                retry--
-                if retry > 0 {
-                    time.Sleep(1 * time.Second)
-                    logger.Noticef("Retrying... (%d)", retry)
-                }
-            } else {
-                break
-            }
+        if _, err := PostJSON(url, msg, w.httpClient); err != nil {
+            logger.Errorf("Failed to register worker")
         }
     }
 }

View File

@@ -25,7 +25,6 @@ func makeMockManagerServer(recvData chan interface{}) *gin.Engine {
         var _worker WorkerStatus
         c.BindJSON(&_worker)
         _worker.LastOnline = time.Now()
-        _worker.LastRegister = time.Now()
         recvData <- _worker
         c.JSON(http.StatusOK, _worker)
     })
@@ -147,7 +146,7 @@ func TestWorker(t *testing.T) {
     })

     Convey("with one job", func(ctx C) {
         workerCfg.Mirrors = []mirrorConfig{
-            {
+            mirrorConfig{
                 Name:     "job-ls",
                 Provider: provCommand,
                 Command:  "ls",
@@ -194,17 +193,17 @@ func TestWorker(t *testing.T) {
     })

     Convey("with several jobs", func(ctx C) {
         workerCfg.Mirrors = []mirrorConfig{
-            {
+            mirrorConfig{
                 Name:     "job-ls-1",
                 Provider: provCommand,
                 Command:  "ls",
             },
-            {
+            mirrorConfig{
                 Name:     "job-fail",
                 Provider: provCommand,
                 Command:  "non-existent-command-xxxx",
             },
-            {
+            mirrorConfig{
                 Name:     "job-ls-2",
                 Provider: provCommand,
                 Command:  "ls",

View File

@@ -1,6 +1,7 @@
 package worker

 import (
+    "io/ioutil"
     "os"
     "path/filepath"
     "testing"
@@ -12,7 +13,7 @@ import (
 func TestZFSHook(t *testing.T) {
     Convey("ZFS Hook should work", t, func(ctx C) {
-        tmpDir, _ := os.MkdirTemp("", "tunasync")
+        tmpDir, err := ioutil.TempDir("", "tunasync")
         tmpFile := filepath.Join(tmpDir, "log_file")
         c := cmdConfig{