mirror of https://github.com/tuna/tunasync.git
synced 2025-06-13 21:12:43 +00:00

Compare commits: v0.1-alpha...master (296 commits)
.github/workflows/release.yml (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
name: release
on:
  push:
    tags:
      - 'v*'
  workflow_dispatch:

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v4
    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version: '^1.23'
      id: go
    - name: Build
      run: |
        TAG=$(git describe --tags)
        for i in linux-amd64 linux-arm64 linux-riscv64 linux-loong64; do
          make ARCH=$i all
          tar -cz --numeric-owner --owner root --group root -f tunasync-${TAG}-$i-bin.tar.gz -C build-$i tunasync tunasynctl
        done
    - name: Create Release
      uses: softprops/action-gh-release@v2
      with:
        token: ${{ secrets.GITHUB_TOKEN }}
        tag_name: ${{ github.ref_name }}
        name: Release ${{ github.ref_name }}
        prerelease: false
        files: |
          tunasync-*.tar.gz
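Given the `tags: 'v*'` trigger above, a release build is normally kicked off by pushing a version tag; a minimal sketch (the version number is only an example, and the `gh` command assumes the GitHub CLI is installed):

```shell
# Sketch: cut a release by pushing a tag matching 'v*'.
git tag -a v0.9.0 -m "tunasync v0.9.0"
git push origin v0.9.0
# The workflow can also be started manually (workflow_dispatch):
gh workflow run release.yml
```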
.github/workflows/tunasync.yml (vendored, new file, 247 lines)
@@ -0,0 +1,247 @@
name: tunasync

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  workflow_dispatch:

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:

    - name: Check out code into the Go module directory
      uses: actions/checkout@v4

    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version: '^1.23'
      id: go

    - name: Get dependencies
      run: |
        go get -v -t -d ./cmd/tunasync
        go get -v -t -d ./cmd/tunasynctl

    - name: Build
      run: |
        make tunasync
        make tunasynctl

    - name: Keep artifacts
      uses: actions/upload-artifact@v4
      with:
        name: tunasync-bin
        path: build-linux-amd64/

  test:
    name: Test
    runs-on: ubuntu-latest
    services:
      registry:
        image: registry:2
        ports:
          - 5000:5000
    steps:

    - name: Setup test dependencies
      run: |
        sudo apt-get update
        sudo apt-get install -y cgroup-tools
        docker pull alpine:3.8

    - name: Check out code into the Go module directory
      uses: actions/checkout@v4

    - name: Set up Go
      uses: actions/setup-go@v5
      with:
        go-version: '^1.22'
      id: go

    - name: Run Unit tests.
      run: |
        go install github.com/wadey/gocovmerge@latest
        sudo systemd-run --service-type=oneshot --uid="$(id --user)" --pipe --wait \
          --property=Delegate=yes --setenv=USECURCGROUP=1 \
          --setenv=TERM=xterm-256color --same-dir \
          make test

    - name: Run Additional Unit tests.
      run: |
        make build-test-worker
        sudo mkdir /sys/fs/cgroup/tunasync
        sudo ./worker.test -test.v=true -test.coverprofile profile2.gcov -test.run TestCgroup
        sudo rmdir /sys/fs/cgroup/tunasync
        touch /tmp/dummy_exec
        chmod +x /tmp/dummy_exec
        run_test_reexec (){
          case="$1"
          shift
          argv0="$1"
          shift
          (TESTREEXEC="$case" TERM=xterm-256color exec -a "$argv0" ./worker.test -test.v=true -test.coverprofile "profile5_$case.gcov" -test.run TestReexec -- "$@")
        }
        run_test_reexec 1 tunasync-exec __dummy__
        run_test_reexec 2 tunasync-exec /tmp/dummy_exec
        run_test_reexec 3 tunasync-exec /tmp/dummy_exec 3< <(echo -n "abrt")
        run_test_reexec 4 tunasync-exec /tmp/dummy_exec 3< <(echo -n "cont")
        run_test_reexec 5 tunasync-exec2

    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3
      with:
        driver-opts: network=host

    - name: Cache Docker layers
      uses: actions/cache@v4
      if: github.event_name == 'push'
      with:
        path: /tmp/.buildx-cache
        key: ${{ runner.os }}-buildx-${{ github.sha }}
        restore-keys: |
          ${{ runner.os }}-buildx-

    - name: Cache Docker layers
      uses: actions/cache@v4
      if: github.event_name == 'pull_request'
      with:
        path: /tmp/.buildx-cache
        key: ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-${{ github.sha }}
        restore-keys: |
          ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-
          ${{ runner.os }}-buildx-

    - name: Cache Docker layers
      if: github.event_name != 'push' && github.event_name != 'pull_request'
      run: |
        echo "I do not know how to setup cache"
        exit -1

    - name: Prepare cache directory
      run: |
        mkdir -p /tmp/.buildx-cache

    - name: Build Docker image for uml rootfs
      uses: docker/build-push-action@v6
      with:
        context: .umlrootfs
        file: .umlrootfs/Dockerfile
        push: true
        tags: localhost:5000/umlrootfs
        cache-from: type=local,src=/tmp/.buildx-cache
        cache-to: type=local,dest=/tmp/.buildx-cache

    - name: Fetch and install uml package
      run: |
        sudo apt-get update
        sudo apt-get install -y debian-archive-keyring
        sudo ln -sf /usr/share/keyrings/debian-archive-keyring.gpg /etc/apt/trusted.gpg.d/
        echo "deb http://deb.debian.org/debian bullseye main" | sudo tee /etc/apt/sources.list.d/bullseye.list
        sudo apt-get update
        apt-get download user-mode-linux/bullseye
        sudo rm /etc/apt/sources.list.d/bullseye.list
        sudo apt-get update
        sudo mv user-mode-linux_*.deb /tmp/uml.deb
        sudo apt-get install --no-install-recommends -y /tmp/uml.deb
        sudo rm /tmp/uml.deb
        sudo apt-get install --no-install-recommends -y rsh-redone-client

    - name: Prepare uml environment
      run: |
        docker container create --name umlrootfs localhost:5000/umlrootfs
        sudo mkdir -p umlrootfs
        docker container export umlrootfs | sudo tar -xv -C umlrootfs
        docker container rm umlrootfs
        sudo cp -a --target-directory=umlrootfs/lib/ /usr/lib/uml/modules
        /bin/echo -e "127.0.0.1 localhost\n254.255.255.1 host" | sudo tee umlrootfs/etc/hosts
        sudo ip tuntap add dev umltap mode tap
        sudo ip addr add 254.255.255.1/24 dev umltap
        sudo ip link set umltap up

    - name: Start Uml
      run: |
        start_uml () {
          sudo bash -c 'linux root=/dev/root rootflags=/ rw rootfstype=hostfs mem=2G eth0=tuntap,umltap hostfs="$PWD/umlrootfs" con1=pts systemd.unified_cgroup_hierarchy=0 & pid=$!; echo "UMLINUX_PID=$pid" >> '"$GITHUB_ENV"
        }
        ( start_uml )
        started=0
        for i in $(seq 1 60); do
          if ping -c 1 -w 1 254.255.255.2; then
            started=1
            break
          fi
        done
        if [ "$started" != "1" ]; then
          echo "Failed to wait Umlinux online"
          exit 1
        fi

    - name: Prepare Uml Environment
      run: |
        CUSER="$(id --user --name)"
        CUID="$(id --user)"
        CGID="$(id --group)"
        sudo chroot umlrootfs bash --noprofile --norc -eo pipefail << EOF
        groupadd --gid "${CGID?}" "${CUSER?}"
        useradd --create-home --home-dir "/home/${CUSER}" --gid "${CGID?}" \
          --uid "${CUID?}" --shell "\$(which bash)" "${CUSER?}"
        EOF
        ln ./worker.test "umlrootfs/home/${CUSER}/worker.test"

    - name: Run Tests in Cgroupv1
      run: |
        CUSER="$(id --user --name)"
        sudo rsh 254.255.255.2 bash --noprofile --norc -eo pipefail << EOF
        exec 2>&1
        cd "/home/${CUSER}"
        lssubsys -am
        cgcreate -a "$CUSER" -t "$CUSER" -g cpu:tunasync
        cgcreate -a "$CUSER" -t "$CUSER" -g memory:tunasync
        TERM=xterm-256color ./worker.test -test.v=true -test.coverprofile \
          profile3.gcov -test.run TestCgroup
        cgexec -g "*:/" bash -c "echo 0 > /sys/fs/cgroup/systemd/tasks; exec sudo -u $CUSER env USECURCGROUP=1 TERM=xterm-256color cgexec -g cpu,memory:tunasync ./worker.test -test.v=true -test.coverprofile profile4.gcov -test.run TestCgroup"
        EOF

    - name: Stop Uml
      run: |
        sudo rsh 254.255.255.2 systemctl poweroff
        sleep 10
        if [ -e "/proc/$UMLINUX_PID" ]; then
          sleep 10
          if [ -e "/proc/$UMLINUX_PID" ]; then
            sudo kill -TERM "$UMLINUX_PID" || true
            sleep 1
          fi
        fi
        if [ -e "/proc/$UMLINUX_PID" ]; then
          sleep 10
          if [ -e "/proc/$UMLINUX_PID" ]; then
            sudo kill -KILL "$UMLINUX_PID" || true
            sleep 1
          fi
        fi

    - name: Combine coverage files
      run: |
        CUSER="$(id --user --name)"
        "${HOME}/go/bin/gocovmerge" profile.gcov profile2.gcov \
          "umlrootfs/home/${CUSER}/profile3.gcov" \
          "umlrootfs/home/${CUSER}/profile4.gcov" \
          profile5_*.gcov > merged.gcov
        # remove cmdline tools from coverage statistics
        grep -v "cmd/.*\.go" merged.gcov > profile-all.gcov

    - name: Convert coverage to lcov
      uses: jandelgado/gcov2lcov-action@v1
      with:
        infile: profile-all.gcov
        outfile: coverage.lcov

    - name: Coveralls
      uses: coverallsapp/github-action@v2
      with:
        github-token: ${{ secrets.github_token }}
        path-to-lcov: coverage.lcov
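The `Run Unit tests.` step above runs `make test` inside a transient systemd scope so the worker tests receive a delegated cgroup. The same invocation can be reproduced on a local systemd-based machine; a sketch, assuming Go, `cgroup-tools`, and sudo are available and the repository is checked out:

```shell
# Sketch: run the test suite with a delegated cgroup, mirroring the CI step.
go install github.com/wadey/gocovmerge@latest
sudo systemd-run --service-type=oneshot --uid="$(id --user)" --pipe --wait \
  --property=Delegate=yes --setenv=USECURCGROUP=1 \
  --setenv=TERM=xterm-256color --same-dir \
  make test
```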
.gitignore (vendored, modified)
@@ -1 +1,4 @@
/build
/build-*
worker.test
profile*
(deleted file, 30 lines)
@@ -1,30 +0,0 @@
#!/bin/bash
function die() {
    echo $*
    exit 1
}

export GOPATH=`pwd`:$GOPATH

make travis

# Initialize profile.cov
echo "mode: count" > profile.cov

# Initialize error tracking
ERROR=""

# Test each package and append coverage profile info to profile.cov
for pkg in `cat .testpackages.txt`
do
    go test -v -covermode=count -coverprofile=profile_tmp.cov $pkg || ERROR="Error testing $pkg"

    [ -f profile_tmp.cov ] && {
        tail -n +2 profile_tmp.cov >> profile.cov || die "Unable to append coverage for $pkg"
    }
done

if [ ! -z "$ERROR" ]
then
    die "Encountered error, last error was: $ERROR"
fi
.travis.yml (deleted file, 35 lines)
@@ -1,35 +0,0 @@
language: go
go:
  - 1.6

before_install:
  - sudo apt-get install cgroup-bin
  - go get github.com/smartystreets/goconvey
  - go get golang.org/x/tools/cmd/cover
  - go get -v github.com/mattn/goveralls

os:
  - linux

before_script:
  - sudo cgcreate -t travis -a travis -g memory:tunasync

script:
  - ./.testandcover.bash

after_success:
  - goveralls -coverprofile=profile.cov -service=travis-ci

before_deploy: "echo 'ready to deploy?'"

deploy:
  provider: releases
  file:
    - "build/tunasync-linux-bin.tar.gz"
  api_key:
    secure: "F9kaVaR1mxEh2+EL9Nm8GZmbVY98pXCJA0LGDNrq1C2vU61AUNOeX6yI1mMklHNZPLBqoFDvGN1M5HnJ+xWCFH+KnJgLD2GVIAcAxFNpcNWQe8XKE5heklNsIQNQfuh/rJKM6YzeDB9G5RN4Y76iL4WIAXhNnMm48W6jLnWhf70="
  skip_cleanup: true
  overwrite: true
  on:
    tags: true
    all_branches: true
.umlrootfs/Dockerfile (new file, 8 lines)
@@ -0,0 +1,8 @@
FROM debian:bullseye
RUN apt-get update && apt-get install -y systemd rsh-redone-server ifupdown sudo kmod cgroup-tools systemd-sysv
RUN echo "host" > /root/.rhosts && \
    chmod 600 /root/.rhosts && \
    /bin/echo -e "auto eth0\niface eth0 inet static\naddress 254.255.255.2/24" > /etc/network/interfaces.d/eth0 && \
    sed -i '/pam_securetty/d' /etc/pam.d/rlogin && \
    cp /usr/share/systemd/tmp.mount /etc/systemd/system && \
    systemctl enable tmp.mount
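In the workflow above this image is built with Buildx and pushed to the job-local registry service; for debugging it can also be built directly on a workstation. A sketch (the tag only mirrors the name used by the CI job):

```shell
# Sketch: build the UML root filesystem image from the .umlrootfs context.
docker build -t localhost:5000/umlrootfs .umlrootfs
```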
.vscode/settings.json (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
{
    "cSpell.words": [
        "Btrfs",
        "Debugf",
        "Infof",
        "Noticef",
        "Warningf",
        "cgroup",
        "mergo",
        "tmpl",
        "zpool"
    ]
}
Makefile (modified, 33 lines changed)
@@ -1,21 +1,28 @@
LDFLAGS="-X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`"
ARCH ?= linux-amd64
ARCH_LIST = $(subst -, ,$(ARCH))
GOOS = $(word 1, $(ARCH_LIST))
GOARCH = $(word 2, $(ARCH_LIST))
BUILDBIN = tunasync tunasynctl

all: get tunasync tunasynctl
all: $(BUILDBIN)

travis: get tunasync tunasynctl travis-package
build-$(ARCH):
	mkdir -p $@

get:
	go get ./cmd/tunasync
	go get ./cmd/tunasynctl
$(BUILDBIN): % : build-$(ARCH) build-$(ARCH)/%

build:
	mkdir -p build
$(BUILDBIN:%=build-$(ARCH)/%) : build-$(ARCH)/% : cmd/%
	GOOS=$(GOOS) GOARCH=$(GOARCH) go get ./$<
	GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -o $@ -ldflags ${LDFLAGS} github.com/tuna/tunasync/$<

tunasync: build
	go build -o build/tunasync -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasync
test:
	go test -v -covermode=count -coverprofile=profile.gcov ./...

tunasynctl: build
	go build -o build/tunasynctl -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasynctl
build-test-worker:
	CGO_ENABLED=0 go test -c -covermode=count github.com/tuna/tunasync/worker

travis-package: tunasync tunasynctl
	tar zcf build/tunasync-linux-bin.tar.gz -C build tunasync tunasynctl
clean:
	rm -rf build-$(ARCH)

.PHONY: all test $(BUILDBIN) build-test-worker clean
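With the `ARCH`-parameterized rules above, each target platform gets its own output directory; a short usage sketch:

```shell
# Sketch: native and cross builds; binaries land in build-<os>-<arch>/.
make all                     # -> build-linux-amd64/tunasync, build-linux-amd64/tunasynctl
make ARCH=linux-arm64 all    # -> build-linux-arm64/tunasync, build-linux-arm64/tunasynctl
make ARCH=linux-arm64 clean  # removes build-linux-arm64/
```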
README.md (modified, 102 lines changed)
@@ -1,8 +1,7 @@
tunasync
========
# tunasync

[](https://travis-ci.org/tuna/tunasync)
[](https://coveralls.io/github/tuna/tunasync?branch=dev)

[](https://coveralls.io/github/tuna/tunasync?branch=master)
[](http://commitizen.github.io/cz-cli/)

@@ -12,14 +11,14 @@ tunasync

## Download

Pre-built binary for Linux x86_64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
Pre-built binary for Linux x86_64 and ARM64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).

## Design

```
```text
# Architecture

- Manager: Centural instance on status and job management
- Manager: Central instance for status and job management
- Worker: Runs mirror jobs

+------------+ +---+ +---+
@@ -40,84 +39,25 @@ Pre-built binary for Linux x86_64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
# Job Run Process

PreSyncing Syncing Success
+-----------+ +-----------+ +-------------+ +--------------+
| pre-job +--+->| job run +--->| post-exec +-+-->| post-success |
+-----------+ ^ +-----------+ +-------------+ | +--------------+
| |
| +-----------------+ | Failed
+------+ post-fail |<---------+
+-----------------+
```

## Generate Self-Signed Certificate

Fisrt, create root CA

```
openssl genrsa -out rootCA.key 2048
openssl req -x509 -new -nodes -key rootCA.key -days 365 -out rootCA.crt
```

Create host key

```
openssl genrsa -out host.key 2048
```

Now create CSR, before that, write a `req.cnf`

```
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req

[req_distinguished_name]
countryName = Country Name (2 letter code)
countryName_default = CN
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = BJ
localityName = Locality Name (eg, city)
localityName_default = Beijing
organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_default = TUNA
commonName = Common Name (server FQDN or domain name)
commonName_default = <server_FQDN>
commonName_max = 64

[v3_req]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = <server_FQDN_1>
DNS.2 = <server_FQDN_2>
```

Substitute `<server_FQDN>` with your server's FQDN, then run

```
openssl req -new -key host.key -out host.csr -config req.cnf
```

Finally generate and sign host cert with root CA

```
openssl x509 -req -in host.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out host.crt -days 365 -extensions v3_req -extfile req.cnf
PreSyncing Syncing Success
+-----------+ +----------+ +-----------+ +-------------+ +--------------+
| pre-job +--+->| pre-exec +--->| job run +--->| post-exec +-+-->| post-success |
+-----------+ ^ +----------+ +-----------+ +-------------+ | +--------------+
| |
| +-----------------+ | Failed
+----------------+ post-fail |<---------------+
+-----------------+
```

## Building

Setup GOPATH like [this](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
Go version: 1.22

Then:

```
go get -d github.com/tuna/tunasync/cmd/tunasync
cd $GOPATH/src/github.com/tuna/tunasync
make
```shell
# for native arch
> make all
# for other arch
> make ARCH=linux-arm64 all
```

If you have multiple `GOPATH`s, replace the `$GOPATH` with your first one.
Binaries are in `build-$ARCH/`, e.g., `build-linux-amd64/`.
@@ -9,9 +9,10 @@ import (
	"time"

	"github.com/gin-gonic/gin"
	"github.com/moby/sys/reexec"
	"github.com/pkg/profile"
	"github.com/urfave/cli"
	"gopkg.in/op/go-logging.v1"
	"gopkg.in/urfave/cli.v1"

	tunasync "github.com/tuna/tunasync/internal"
	"github.com/tuna/tunasync/manager"
@@ -39,7 +40,7 @@ func startManager(c *cli.Context) error {

	m := manager.GetTUNASyncManager(cfg)
	if m == nil {
		logger.Errorf("Error intializing TUNA sync worker.")
		logger.Errorf("Error intializing TUNA sync manager.")
		os.Exit(1)
	}

@@ -60,7 +61,7 @@ func startWorker(c *cli.Context) error {
		os.Exit(1)
	}

	w := worker.GetTUNASyncWorker(cfg)
	w := worker.NewTUNASyncWorker(cfg)
	if w == nil {
		logger.Errorf("Error intializing TUNA sync worker.")
		os.Exit(1)
@@ -109,6 +110,10 @@ func startWorker(c *cli.Context) error {

func main() {

	if reexec.Init() {
		return
	}

	cli.VersionPrinter = func(c *cli.Context) {
		var builddate string
		if buildstamp == "" {
@@ -134,7 +139,7 @@ func main() {
	app.Name = "tunasync"
	app.Usage = "tunasync mirror job management tool"
	app.EnableBashCompletion = true
	app.Version = "0.1"
	app.Version = tunasync.Version
	app.Commands = []cli.Command{
		{
			Name: "manager",
@@ -3,16 +3,17 @@ package main
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"io"
	"net/http"
	"os"
	"strconv"
	"strings"
	"text/template"
	"time"

	"github.com/BurntSushi/toml"
	"github.com/urfave/cli"
	"gopkg.in/op/go-logging.v1"
	"gopkg.in/urfave/cli.v1"

	tunasync "github.com/tuna/tunasync/internal"
)
@@ -32,7 +33,7 @@ const (
	userCfgFile = "$HOME/.config/tunasync/ctl.conf" // user-specific conf
)

var logger = logging.MustGetLogger("tunasynctl-cmd")
var logger = logging.MustGetLogger("tunasynctl")

var baseURL string
var client *http.Client
@@ -41,7 +42,7 @@ func initializeWrapper(handler cli.ActionFunc) cli.ActionFunc {
	return func(c *cli.Context) error {
		err := initialize(c)
		if err != nil {
			return cli.NewExitError("", 1)
			return cli.NewExitError(err.Error(), 1)
		}
		return handler(c)
	}
@@ -55,8 +56,9 @@ type config struct {

func loadConfig(cfgFile string, cfg *config) error {
	if cfgFile != "" {
		logger.Infof("Loading config: %s", cfgFile)
		if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
			logger.Errorf(err.Error())
			// logger.Errorf(err.Error())
			return err
		}
	}
@@ -66,7 +68,7 @@ func loadConfig(cfgFile string, cfg *config) error {

func initialize(c *cli.Context) error {
	// init logger
	tunasync.InitLogger(c.Bool("verbose"), c.Bool("verbose"), false)
	tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), false)

	cfg := new(config)

@@ -76,14 +78,23 @@ func initialize(c *cli.Context) error {

	// find config file and load config
	if _, err := os.Stat(systemCfgFile); err == nil {
		loadConfig(systemCfgFile, cfg)
		err = loadConfig(systemCfgFile, cfg)
		if err != nil {
			return err
		}
	}
	fmt.Println(os.ExpandEnv(userCfgFile))
	logger.Debug("user config file: %s", os.ExpandEnv(userCfgFile))
	if _, err := os.Stat(os.ExpandEnv(userCfgFile)); err == nil {
		loadConfig(os.ExpandEnv(userCfgFile), cfg)
		err = loadConfig(os.ExpandEnv(userCfgFile), cfg)
		if err != nil {
			return err
		}
	}
	if c.String("config") != "" {
		loadConfig(c.String("config"), cfg)
		err := loadConfig(c.String("config"), cfg)
		if err != nil {
			return err
		}
	}

	// override config using the command-line arguments
@@ -99,8 +110,11 @@ func initialize(c *cli.Context) error {
	}

	// parse base url of the manager server
	baseURL = fmt.Sprintf("https://%s:%d",
		cfg.ManagerAddr, cfg.ManagerPort)
	if cfg.CACert != "" {
		baseURL = fmt.Sprintf("https://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
	} else {
		baseURL = fmt.Sprintf("http://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
	}

	logger.Infof("Use manager address: %s", baseURL)

@@ -108,8 +122,8 @@ func initialize(c *cli.Context) error {
	var err error
	client, err = tunasync.CreateHTTPClient(cfg.CACert)
	if err != nil {
		err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
		logger.Error(err.Error())
		err = fmt.Errorf("error initializing HTTP client: %s", err.Error())
		// logger.Error(err.Error())
		return err

	}
@@ -132,14 +146,14 @@ func listWorkers(c *cli.Context) error {
			err.Error()),
			1)
	}
	fmt.Print(string(b))
	fmt.Println(string(b))
	return nil
}

func listJobs(c *cli.Context) error {
	// FIXME: there should be an API on manager server side that return MirrorStatus list to tunasynctl
	var jobs []tunasync.MirrorStatus
	var genericJobs interface{}
	if c.Bool("all") {
		var jobs []tunasync.WebMirrorStatus
		_, err := tunasync.GetJSON(baseURL+listJobsPath, &jobs, client)
		if err != nil {
			return cli.NewExitError(
@@ -147,8 +161,33 @@ func listJobs(c *cli.Context) error {
				"of all jobs from manager server: %s", err.Error()),
				1)
		}

		if statusStr := c.String("status"); statusStr != "" {
			filteredJobs := make([]tunasync.WebMirrorStatus, 0, len(jobs))
			var statuses []tunasync.SyncStatus
			for _, s := range strings.Split(statusStr, ",") {
				var status tunasync.SyncStatus
				err = status.UnmarshalJSON([]byte("\"" + strings.TrimSpace(s) + "\""))
				if err != nil {
					return cli.NewExitError(
						fmt.Sprintf("Error parsing status: %s", err.Error()),
						1)
				}
				statuses = append(statuses, status)
			}
			for _, job := range jobs {
				for _, s := range statuses {
					if job.Status == s {
						filteredJobs = append(filteredJobs, job)
						break
					}
				}
			}
			genericJobs = filteredJobs
		} else {
			genericJobs = jobs
		}
	} else {
		var jobs []tunasync.MirrorStatus
		args := c.Args()
		if len(args) == 0 {
			return cli.NewExitError(
@@ -162,24 +201,162 @@ func listJobs(c *cli.Context) error {
			_, err := tunasync.GetJSON(fmt.Sprintf("%s/workers/%s/jobs",
				baseURL, workerID), &workerJobs, client)
			if err != nil {
				logger.Errorf("Filed to correctly get jobs"+
				logger.Infof("Failed to correctly get jobs"+
					" for worker %s: %s", workerID, err.Error())
			}
			ans <- workerJobs
		}(workerID)
		}
		for range args {
			jobs = append(jobs, <-ans...)
			job := <-ans
			if job == nil {
				return cli.NewExitError(
					fmt.Sprintf("Failed to correctly get information "+
						"of jobs from at least one manager"),
					1)
			}
			jobs = append(jobs, job...)
		}
		genericJobs = jobs
	}

	b, err := json.MarshalIndent(jobs, "", " ")
	if format := c.String("format"); format != "" {
		tpl := template.New("")
		_, err := tpl.Parse(format)
		if err != nil {
			return cli.NewExitError(
				fmt.Sprintf("Error parsing format template: %s", err.Error()),
				1)
		}
		switch jobs := genericJobs.(type) {
		case []tunasync.WebMirrorStatus:
			for _, job := range jobs {
				err = tpl.Execute(os.Stdout, job)
				if err != nil {
					return cli.NewExitError(
						fmt.Sprintf("Error printing out information: %s", err.Error()),
						1)
				}
				fmt.Println()
			}
		case []tunasync.MirrorStatus:
			for _, job := range jobs {
				err = tpl.Execute(os.Stdout, job)
				if err != nil {
					return cli.NewExitError(
						fmt.Sprintf("Error printing out information: %s", err.Error()),
						1)
				}
				fmt.Println()
			}
		}
	} else {
		b, err := json.MarshalIndent(genericJobs, "", " ")
		if err != nil {
			return cli.NewExitError(
				fmt.Sprintf("Error printing out information: %s", err.Error()),
				1)
		}
		fmt.Println(string(b))
	}

	return nil
}
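The `--status` and `--format` flags handled above can be combined on the command line. A usage sketch, assuming a reachable manager and that the status struct exposes `Name` and `Status` fields to the Go template (the status values are examples):

```shell
# Sketch: filter all jobs by status and print one field per line via a Go template.
tunasynctl list --all --status failed,syncing --format '{{.Name}}: {{.Status}}'
# Without --format, the (possibly filtered) list is printed as indented JSON.
tunasynctl list --all --status failed
```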
func updateMirrorSize(c *cli.Context) error {
	args := c.Args()
	if len(args) != 2 {
		return cli.NewExitError("Usage: tunasynctl set-size -w <worker-id> <mirror> <size>", 1)
	}
	workerID := c.String("worker")
	mirrorID := args.Get(0)
	mirrorSize := args.Get(1)

	msg := struct {
		Name string `json:"name"`
		Size string `json:"size"`
	}{
		Name: mirrorID,
		Size: mirrorSize,
	}

	url := fmt.Sprintf(
		"%s/workers/%s/jobs/%s/size", baseURL, workerID, mirrorID,
	)

	resp, err := tunasync.PostJSON(url, msg, client)
	if err != nil {
		return cli.NewExitError(
			fmt.Sprintf("Error printing out informations: %s", err.Error()),
			fmt.Sprintf("Failed to send request to manager: %s",
				err.Error()),
			1)
	}
	fmt.Printf(string(b))
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return cli.NewExitError(
			fmt.Sprintf("Manager failed to update mirror size: %s", body), 1,
		)
	}

	var status tunasync.MirrorStatus
	json.Unmarshal(body, &status)
	if status.Size != mirrorSize {
		return cli.NewExitError(
			fmt.Sprintf(
				"Mirror size error, expecting %s, manager returned %s",
				mirrorSize, status.Size,
			), 1,
		)
	}

	fmt.Printf("Successfully updated mirror size to %s\n", mirrorSize)
	return nil
}

func removeWorker(c *cli.Context) error {
	args := c.Args()
	if len(args) != 0 {
		return cli.NewExitError("Usage: tunasynctl -w <worker-id>", 1)
	}
	workerID := c.String("worker")
	if len(workerID) == 0 {
		return cli.NewExitError("Please specify the <worker-id>", 1)
	}
	url := fmt.Sprintf("%s/workers/%s", baseURL, workerID)

	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		logger.Panicf("Invalid HTTP Request: %s", err.Error())
	}
	resp, err := client.Do(req)

	if err != nil {
		return cli.NewExitError(
			fmt.Sprintf("Failed to send request to manager: %s", err.Error()), 1)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return cli.NewExitError(
				fmt.Sprintf("Failed to parse response: %s", err.Error()),
				1)
		}

		return cli.NewExitError(fmt.Sprintf("Failed to correctly send"+
			" command: HTTP status code is not 200: %s", body),
			1)
	}

	res := map[string]string{}
	_ = json.NewDecoder(resp.Body).Decode(&res)
	if res["message"] == "deleted" {
		fmt.Println("Successfully removed the worker")
	} else {
		return cli.NewExitError("Failed to remove the worker", 1)
	}
	return nil
}

@@ -199,7 +376,7 @@ func flushDisabledJobs(c *cli.Context) error {
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, err := ioutil.ReadAll(resp.Body)
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return cli.NewExitError(
				fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -211,7 +388,7 @@ func flushDisabledJobs(c *cli.Context) error {
			1)
	}

	logger.Info("Successfully flushed disabled jobs")
	fmt.Println("Successfully flushed disabled jobs")
	return nil
}

@@ -232,11 +409,16 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
				"argument WORKER", 1)
		}

		options := map[string]bool{}
		if c.Bool("force") {
			options["force"] = true
		}
		cmd := tunasync.ClientCmd{
			Cmd:      cmd,
			MirrorID: mirrorID,
			WorkerID: c.String("worker"),
			Args:     argsList,
			Options:  options,
		}
		resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client)
		if err != nil {
@@ -248,7 +430,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
		defer resp.Body.Close()

		if resp.StatusCode != http.StatusOK {
			body, err := ioutil.ReadAll(resp.Body)
			body, err := io.ReadAll(resp.Body)
			if err != nil {
				return cli.NewExitError(
					fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -259,7 +441,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
				" command: HTTP status code is not 200: %s", body),
				1)
		}
		logger.Info("Succesfully send command")
		fmt.Println("Successfully send the command")

		return nil
	}
@@ -267,6 +449,11 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {

func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
	return func(c *cli.Context) error {

		if c.String("worker") == "" {
			return cli.NewExitError("Please specify the worker with -w <worker-id>", 1)
		}

		cmd := tunasync.ClientCmd{
			Cmd:      cmd,
			WorkerID: c.String("worker"),
@@ -281,7 +468,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
		defer resp.Body.Close()

		if resp.StatusCode != http.StatusOK {
			body, err := ioutil.ReadAll(resp.Body)
			body, err := io.ReadAll(resp.Body)
			if err != nil {
				return cli.NewExitError(
					fmt.Sprintf("Failed to parse response: %s", err.Error()),
@@ -292,7 +479,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
				" command: HTTP status code is not 200: %s", body),
				1)
		}
		logger.Info("Succesfully send command")
		fmt.Println("Successfully send the command")

		return nil
	}
@@ -322,7 +509,7 @@ func main() {

	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Version = "0.1"
	app.Version = tunasync.Version
	app.Name = "tunasynctl"
	app.Usage = "control client for tunasync manager"

@@ -349,6 +536,10 @@ func main() {
			Name:  "verbose, v",
			Usage: "Enable verbosely logging",
		},
		cli.BoolFlag{
			Name:  "debug",
			Usage: "Enable debugging logging",
		},
	}
	cmdFlags := []cli.Flag{
		cli.StringFlag{
@@ -357,6 +548,11 @@ func main() {
		},
	}

	forceStartFlag := cli.BoolFlag{
		Name:  "force, f",
		Usage: "Override the concurrent limit",
	}

	app.Commands = []cli.Command{
		{
			Name: "list",
@@ -367,6 +563,14 @@ func main() {
					Name:  "all, a",
					Usage: "List all jobs of all workers",
				},
				cli.StringFlag{
					Name:  "status, s",
					Usage: "Filter output based on status provided",
				},
				cli.StringFlag{
					Name:  "format, f",
					Usage: "Pretty-print containers using a Go template",
				},
			}...),
			Action: initializeWrapper(listJobs),
		},
@@ -382,10 +586,34 @@ func main() {
			Flags:  commonFlags,
			Action: initializeWrapper(listWorkers),
		},
		{
			Name:  "rm-worker",
			Usage: "Remove a worker",
			Flags: append(
				commonFlags,
				cli.StringFlag{
					Name:  "worker, w",
					Usage: "worker-id of the worker to be removed",
				},
			),
			Action: initializeWrapper(removeWorker),
		},
		{
			Name:  "set-size",
			Usage: "Set mirror size",
			Flags: append(
				commonFlags,
				cli.StringFlag{
					Name:  "worker, w",
					Usage: "specify worker-id of the mirror job",
				},
			),
			Action: initializeWrapper(updateMirrorSize),
		},
		{
			Name:   "start",
			Usage:  "Start a job",
			Flags:  append(commonFlags, cmdFlags...),
			Flags:  append(append(commonFlags, cmdFlags...), forceStartFlag),
			Action: initializeWrapper(cmdJob(tunasync.CmdStart)),
		},
		{
docs/cgroup.md (new file, 141 lines)
@@ -0,0 +1,141 @@
# About Tunasync and cgroup

Optionally, tunasync can be integrated with cgroup for better control and tracking of the processes started by mirror jobs. Limiting the memory usage of a mirror job also requires cgroup support.

## How cgroups are utilized in tunasync

If cgroup is enabled globally, all mirror jobs, except those running in docker containers, are run in separate cgroups. If `mem_limit` is specified, it is applied to the cgroup. For jobs running in docker containers, `mem_limit` is applied via the `docker run` command.

## Tl;dr: What's the recommended configuration?

### If you are using the v1 (legacy, hybrid) cgroup hierarchy:

`tunasync-worker.service`:

```
[Unit]
Description = TUNA mirrors sync worker
After=network.target

[Service]
Type=simple
User=tunasync
PermissionsStartOnly=true
ExecStartPre=/usr/bin/cgcreate -t tunasync -a tunasync -g memory:tunasync
ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
ExecReload=/bin/kill -SIGHUP $MAINPID
ExecStopPost=/usr/bin/cgdelete memory:tunasync

[Install]
WantedBy=multi-user.target
```

`worker.conf`:

``` toml
[cgroup]
enable = true
group = "tunasync"
```

### If you are using the v2 (unified) cgroup hierarchy:

`tunasync-worker.service`:

```
[Unit]
Description = TUNA mirrors sync worker
After=network.target

[Service]
Type=simple
User=tunasync
ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
ExecReload=/bin/kill -SIGHUP $MAINPID
Delegate=yes

[Install]
WantedBy=multi-user.target
```

`worker.conf`:

``` toml
[cgroup]
enable = true
```

## Two versions of cgroups

For various reasons, there are two versions of cgroups in the kernel, and they are incompatible with each other. Most current Linux distributions adopt systemd as the init system, which relies on cgroup and is responsible for initializing it; as a result, the cgroup version in use is mainly decided by systemd. Since version 243, the "unified" cgroup hierarchy setup has become the default.

Tunasync can automatically detect which version of cgroup is in use and enable the corresponding operating interface, but because systemd behaves slightly differently in the two cases, different tunasync configurations are recommended.

## Two modes of group name discovery

Two modes of group name discovery are provided: implicit mode and manual mode.

### Manual Mode

In this mode, the administrator should 1. manually create an empty cgroup (for the cgroup v2 unified hierarchy) or empty cgroups with the same name in certain controller subsystems (for the cgroup v1 hybrid hierarchy); 2. change the ownership of the cgroups to the user running the tunasync worker; and 3. specify the path in the configuration. On start, tunasync automatically detects which controllers are enabled (for v1) or enables the needed controllers (for v2).

Example 1:

``` bash
# suppose we have cgroup v1
sudo mkdir -p /sys/fs/cgroup/cpu/test/tunasync
sudo mkdir -p /sys/fs/cgroup/memory/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/cpu/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/memory/test/tunasync

# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
tunasync worker -c /path/to/worker.conf
```

In the above scenario, tunasync detects that the enabled subsystem controllers are cpu and memory. When running a mirror job named `foo`, sub-cgroups are created in both `/sys/fs/cgroup/cpu/test/tunasync/foo` and `/sys/fs/cgroup/memory/test/tunasync/foo`.

Example 2 (not recommended):

``` bash
# suppose we have cgroup v2
sudo mkdir -p /sys/fs/cgroup/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/test/tunasync

# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
tunasync worker -c /path/to/worker.conf
```

In the above scenario, tunasync directly uses the cgroup `/sys/fs/cgroup/test/tunasync`. In most cases, due to the design of cgroup v2, tunasync (not running as root) won't have permission to move the processes it starts into the correct cgroup: cgroup2 requires the operating process to also have write permission on the common ancestor of the source and target groups when moving processes between groups. So this example only demonstrates the functionality, and you should avoid it.

### Implicit mode

In this mode, tunasync uses the cgroup it is currently running in and creates sub-groups for jobs inside that group. Tunasync first creates a sub-group named `__worker` in that group and moves itself into the `__worker` sub-group, to avoid having processes in non-leaf cgroups.

This mode is mostly used together with the `Delegate=yes` option in the systemd service configuration of tunasync, which permits the running process to self-manage the cgroup the service is running in. Due to security considerations, systemd won't grant write permission on the currently running cgroups to the service when using the v1 (legacy, hybrid) hierarchy with a non-root user, so this mode is more useful with the v2 cgroup hierarchy.

## Configuration

``` toml
[cgroup]
enable = true
base_path = "/sys/fs/cgroup"
group = "tunasync"
subsystem = "memory"
```

The definitions of the above options are:

* `enable`: `Bool`, specifies whether cgroup is enabled. When cgroup is disabled, `memory_limit` for non-docker jobs is ignored, and the following options are also ignored.
* `group`: `String`, specifies the cgroup tunasync will use. When not provided, or provided with an empty string, cgroup discovery works in "implicit mode", i.e. sub-cgroups are created in the currently running cgroup. Otherwise, cgroup discovery works in "manual mode", where tunasync creates sub-cgroups in the specified cgroup.
* `base_path`: `String`, ignored. It originally specified the mount path of the cgroup filesystem; for everything to work, the cgroup filesystem is now required to be mounted at its default path (`/sys/fs/cgroup`).
* `subsystem`: `String`, ignored. It originally specified which cgroup v1 controller is enabled and is now meaningless since discovery is automatic.

## References:

* [https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html]()
* [https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html]()
* [https://systemd.io/CGROUP_DELEGATION/]()
* [https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Delegate=]()
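To decide which of the two recommended configurations applies, one can check how `/sys/fs/cgroup` is mounted on the worker host; a sketch (the unit name follows the examples above):

```shell
# Sketch: detect the cgroup hierarchy in use.
stat -fc %T /sys/fs/cgroup   # "cgroup2fs" -> v2 (unified); "tmpfs" -> v1 (legacy/hybrid)
# On v2, confirm systemd actually delegates the cgroup to the service:
systemctl show tunasync-worker.service -p Delegate
```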
@@ -1,4 +1,5 @@
# tunasync Getting Started Guide

date: 2016-10-31 00:50:00

[tunasync](https://github.com/tuna/tunasync) is the mirroring solution currently used by the [TUNA mirror site of Tsinghua University](https://mirrors.tuna.tsinghua.edu.cn).
@@ -7,42 +8,42 @@ date: 2016-10-31 00:50:00

In this example:

- Only the [elvish](https://elvish.io) project is mirrored
- HTTPS is disabled
- cgroup support is disabled
- Only the [elvish](https://elv.sh) project is mirrored
- HTTPS is disabled
- cgroup support is disabled

## Getting tunasync

### Binary package

Download `tunasync-linux-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
Download `tunasync-linux-amd64-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).

### Building from source

```
$ make
```shell
> make
```

## Configuration

```
$ mkdir ~/tunasync_demo
$ mkdir /tmp/tunasync
```shell
> mkdir ~/tunasync_demo
> mkdir /tmp/tunasync
```

`~/tunasync_demo/worker.conf`:
Edit `~/tunasync_demo/worker.conf`:

```
```conf
[global]
name = "test_worker"
log_dir = "/tmp/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/tmp/tunasync"
concurrent = 10
interval = 1
interval = 120

[manager]
api_base = "http://localhost:12345"
token = "some_token"
token = ""
ca_cert = ""

[cgroup]
@@ -60,13 +61,13 @@ ssl_key = ""
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elvish.io/elvish/"
upstream = "rsync://rsync.elv.sh/elvish/"
use_ipv6 = false
```

`~/tunasync_demo/manager.conf`:
Edit `~/tunasync_demo/manager.conf`:

```
```conf
debug = false

[server]
@@ -81,22 +82,52 @@ db_file = "/tmp/tunasync/manager.db"
ca_cert = ""
```

Besides bolt, the badger, leveldb and redis database backends are also supported. For badger and leveldb, only db_type needs to be changed. To use redis as the backend, set db_type to redis and point db_file to the address of the redis server: `redis://user:password@host:port/db_number`.

### Running

```
$ tunasync manager --config ~/tunasync_demo/manager.conf
$ tunasync worker --config ~/tunasync_demo/worker.conf
```shell
> tunasync manager --config ~/tunasync_demo/manager.conf
> tunasync worker --config ~/tunasync_demo/worker.conf
```

In this example, the mirrored data lives under `/tmp/tunasync/`
In this example, the mirrored data lives under `/tmp/tunasync/`.

### Control

Check the sync status:

```shell
> tunasynctl list -p 12345 --all
```

tunasynctl also supports a configuration file. It can be placed at `/etc/tunasync/ctl.conf` or `~/.config/tunasync/ctl.conf`; values in the latter override those in the former.

The configuration file looks like:

```conf
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```

### Security

The worker and the manager communicate over HTTP(S). If both run on the same machine, HTTPS is unnecessary: leave the manager's `ssl_key` and `ssl_cert` empty, leave the worker's `ca_cert` empty, and let `api_base` start with `http://`.

For encrypted communication, the manager needs `ssl_key` and `ssl_cert`, the worker needs `ca_cert`, and `api_base` should start with `https://`.

## Going further

See:

```
$ tunasync manager --help
$ tunasync worker --help
```shell
> tunasync manager --help
> tunasync worker --help
```

Also take a look at the log directory.

Some example worker configurations: [workers.conf](workers.conf).

Operations you may find useful: [tips.md](tips.md).
docs/zh_CN/tips.md (new file, 93 lines)
@@ -0,0 +1,93 @@
## Removing a mirror from a worker

First make sure a tunasynctl config file is in place: `~/.config/tunasync/ctl.conf`

```toml
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```

Then:

```shell
$ tunasynctl disable -w <worker_id> <mirror_name>
$ tunasynctl flush
```


## Hot-reloading `worker.conf`

```shell
$ tunasynctl reload -w <worker_id>
```

e.g. to remove the `elvish` mirror of `test_worker`:

1. Delete the directory that stores the mirrored data

2. Delete the corresponding `mirror` section from `worker.conf`

3. Then run:

```shell
$ tunasynctl reload -w test_worker
$ tunasynctl disable -w test_worker elvish
$ tunasynctl flush
```

4. (Optional) Finally, delete the logs in the log directory


## Removing a worker

```shell
$ tunasynctl rm-worker -w <worker_id>
```

e.g.

```shell
$ tunasynctl rm-worker -w test_worker
```


## Updating the size of a mirror

```shell
$ tunasynctl set-size -w <worker_id> <mirror_name> <size>
```

The trailing <size> argument is set by the operator, or generated by some scheduled script.

Since `du -s` is rather time-consuming, the mirror size can instead be read directly from the rsync log file (see the sketch below).
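A sketch of deriving the size from an rsync log and feeding it to `set-size`; the log path, mirror name, and the exact rsync stats line are assumptions that must be adapted to the local setup:

```shell
# Sketch: read rsync's "Total file size" stats line and push it to the manager
# (paths and names below are hypothetical).
LOG=/tmp/tunasync/log/tunasync/elvish/latest
SIZE_BYTES=$(grep -oP 'Total file size: [0-9,]+' "$LOG" | tr -d , | awk '{print $4}')
tunasynctl set-size -w test_worker elvish "${SIZE_BYTES}"
```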
## Btrfs filesystem snapshots

If the mirror files are stored on a partition using the Btrfs filesystem, the snapshot feature provided by Btrfs can be enabled. For each mirror, tunasync updates its snapshot after every successful sync.

Add the following configuration to `worker.conf` to enable Btrfs snapshots:

```toml
[btrfs_snapshot]
enable = true
snapshot_path = "/path/to/snapshot/directory"
```

Here `snapshot_path` is the directory where the snapshots live. If this directory is what gets published, the sync process becomes atomic from the mirror users' point of view, so users never receive files that are still in an intermediate (not fully synced) state.

A snapshot path can also be specified per mirror in `[[mirrors]]`, e.g.:

```toml
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elv.sh/elvish/"
interval = 1440
snapshot_path = "/data/publish/elvish"
```

**Tip:**

If the user running tunasync does not have root privileges, make sure that user has write and execute permissions on both the mirror directory and the snapshot directory, and mount the Btrfs partition with the [`user_subvol_rm_allowed` option](https://btrfs.wiki.kernel.org/index.php/Manpage/btrfs(5)#MOUNT_OPTIONS).
docs/zh_CN/workers.conf (new file, 822 lines; listing truncated)
@@ -0,0 +1,822 @@
# /home/scripts in this example points to https://github.com/tuna/tunasync-scripts/

[global]
name = "mirror_worker"
log_dir = "/srv/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/srv/tunasync"
concurrent = 10
interval = 120

# ensure the exec user be add into `docker` group
[docker]
# in `command provider` can use docker_image and docker_volumes
enable = true

[manager]
api_base = "http://localhost:12345"
token = "some_token"
ca_cert = ""

[cgroup]
enable = false
base_path = "/sys/fs/cgroup"
group = "tunasync"

[server]
hostname = "localhost"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = ""
ssl_key = ""

[[mirrors]]
name = "adobe-fonts"
interval = 1440
provider = "command"
upstream = "https://github.com/adobe-fonts"
command = "/home/scripts/adobe-fonts.sh"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"

[[mirrors]]
name = "AdoptOpenJDK"
interval = 5760
provider = "command"
command = "/home/scripts/adoptopenjdk.py"
upstream = "https://adoptopenjdk.jfrog.io/adoptopenjdk"
docker_image = "tunathu/tunasync-scripts:latest"

[[mirrors]]
name = "alpine"
provider = "rsync"
upstream = "rsync://rsync.alpinelinux.org/alpine/"
memory_limit = "256M"

[[mirrors]]
name = "anaconda"
provider = "command"
upstream = "https://repo.continuum.io/"
command = "/home/scripts/anaconda.py --delete"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"

[[mirrors]]
name = "apache"
provider = "rsync"
upstream = "rsync://rsync.apache.org/apache-dist/"
use_ipv4 = true
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"

[[mirrors]]
name = "armbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/apt/"
memory_limit = "256M"

[[mirrors]]
name = "armbian-releases"
provider = "rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/dl/"
memory_limit = "256M"

[[mirrors]]
name = "bananian"
provider = "command"
upstream = "https://dl.bananian.org/"
command = "/home/scripts/lftp.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"

[[mirrors]]
name = "bioconductor"
provider = "rsync"
upstream = "master.bioconductor.org:./"
rsync_options = [ "--rsh=ssh -i /root/id_rsa -o PasswordAuthentication=no -l sync" ]
exclude_file = "/etc/excludes/bioconductor.txt"
memory_limit = "256M"

[[mirrors]]
name = "blender"
provider = "rsync"
upstream = "rsync://mirrors.dotsrc.org/blender/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/blender.txt"
interval = 1440
memory_limit = "256M"

[[mirrors]]
name = "chakra"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/packages/"
memory_limit = "256M"

[[mirrors]]
name = "chakra-releases"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/releases/"
memory_limit = "256M"

[[mirrors]]
name = "chef"
interval = 1440
provider = "command"
upstream = "https://packages.chef.io/repos"
command = "/home/scripts/chef.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"

[[mirrors]]
name = "clickhouse"
interval = 2880
provider = "rsync"
upstream = "rsync://repo.yandex.ru/yandexrepo/clickhouse/"
exclude_file = "/etc/excludes/clickhouse.txt"
memory_limit = "256M"

[[mirrors]]
name = "clojars"
provider = "command"
upstream = "s3://clojars-repo-production/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"

	[mirrors.env]
	TUNASYNC_S3_ENDPOINT = "https://s3.dualstack.us-east-2.amazonaws.com"
	#TUNASYNC_S3_ENDPOINT = "https://s3.us-east-2.amazonaws.com"
	TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"

[[mirrors]]
name = "CPAN"
provider = "rsync"
upstream = "rsync://cpan-rsync.perl.org/CPAN/"
memory_limit = "256M"

[[mirrors]]
name = "CRAN"
provider = "rsync"
upstream = "rsync://cran.r-project.org/CRAN/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"

[[mirrors]]
name = "CTAN"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/CTAN/"
memory_limit = "256M"

[[mirrors]]
name = "dart-pub"
provider = "command"
upstream = "https://pub.dev/api"
command = "/home/scripts/pub.sh"
interval = 30
docker_image = "tunathu/pub-mirror:latest"

	[mirrors.env]
	MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub"

[[mirrors]]
name = "debian"
provider = "command"
upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
command = "/home/scripts/debian.sh sync:archive:debian"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/ftpsync"
docker_volumes = [
	"/etc/misc/ftpsync-debian.conf:/ftpsync/etc/ftpsync-debian.conf:ro",
	"/log/ftpsync:/home/log/tunasync/ftpsync",
]
	[mirrors.env]
	FTPSYNC_LOG_DIR = "/home/log/tunasync/ftpsync"

[[mirrors]]
name = "docker-ce"
provider = "command"
upstream = "https://download.docker.com/"
command = "timeout 3h /home/scripts/docker-ce.py --workers 10 --fast-skip"
|
||||
interval = 1440
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "ELK"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://packages.elastic.co"
|
||||
command = "/home/scripts/ELK.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
WGET_OPTIONS = "-6"
|
||||
|
||||
[[mirrors]]
|
||||
name = "elasticstack"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://artifacts.elastic.co/"
|
||||
command = "/home/scripts/elastic.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "erlang-solutions"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://packages.erlang-solutions.com"
|
||||
command = "/home/scripts/erlang.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "flutter"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://storage.googleapis.com/flutter_infra/"
|
||||
command = "/home/scripts/flutter.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "github-release"
|
||||
provider = "command"
|
||||
upstream = "https://api.github.com/repos/"
|
||||
command = "/home/scripts/github-release.py --workers 5"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
interval = 720
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
[mirrors.env]
|
||||
GITHUB_TOKEN = "xxxxx"
|
||||
|
||||
[[mirrors]]
|
||||
name = "gitlab-ce"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://packages.gitlab.com/gitlab/gitlab-ce/"
|
||||
command = "/home/scripts/gitlab-ce.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "gitlab-ee"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://packages.gitlab.com/gitlab/gitlab-ee/"
|
||||
command = "/home/scripts/gitlab-ce.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "gitlab-runner"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://packages.gitlab.com/runner/gitlab-runner"
|
||||
command = "/home/scripts/gitlab-runner.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "grafana"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://packages.grafana.com/oss"
|
||||
command = "/home/scripts/grafana.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "hackage"
|
||||
provider = "command"
|
||||
command = "/home/scripts/hackage.sh"
|
||||
upstream = "https://hackage.haskell.org/"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "homebrew-bottles"
|
||||
provider = "command"
|
||||
upstream = "https://homebrew.bintray.com"
|
||||
command = "/home/scripts/linuxbrew-bottles.sh"
|
||||
docker_image = "tunathu/homebrew-mirror"
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
|
||||
|
||||
[[mirrors]]
|
||||
name = "influxdata"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://repos.influxdata.com"
|
||||
command = "/home/scripts/influxdata.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "kali"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://ftp.nluug.nl/kali/"
|
||||
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "kali-images"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://ftp.nluug.nl/kali-images/"
|
||||
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "KaOS"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://kaosx.tk/kaos/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "kernel"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://rsync.kernel.org/pub/linux/kernel/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "kicad"
|
||||
provider = "command"
|
||||
upstream = "s3://kicad-downloads/"
|
||||
command = "/home/scripts/s3.sh"
|
||||
docker_image = "tunathu/ftpsync:latest"
|
||||
[mirrors.env]
|
||||
TUNASYNC_S3_ENDPOINT = "https://s3.cern.ch"
|
||||
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
|
||||
|
||||
[[mirrors]]
|
||||
name = "kodi"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://mirror.yandex.ru/mirrors/xbmc/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
use_ipv6 = true
|
||||
|
||||
[[mirrors]]
|
||||
name = "kubernetes"
|
||||
interval = 2880
|
||||
provider = "command"
|
||||
upstream = "http://packages.cloud.google.com"
|
||||
command = "/home/scripts/kubernetes.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "linuxbrew-bottles"
|
||||
provider = "command"
|
||||
upstream = "https://linuxbrew.bintray.com"
|
||||
command = "/home/scripts/linuxbrew-bottles.sh"
|
||||
docker_image = "tunathu/homebrew-mirror"
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
RUN_LINUXBREW = "true"
|
||||
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
|
||||
|
||||
[[mirrors]]
|
||||
name = "linuxmint"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://mirrors.kernel.org/linuxmint-packages/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "lxc-images"
|
||||
provider = "command"
|
||||
upstream = "https://us.images.linuxcontainers.org/"
|
||||
command = "/home/scripts/lxc-images.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
interval = 720
|
||||
|
||||
[[mirrors]]
|
||||
name = "lyx"
|
||||
provider = "command"
|
||||
upstream = "ftp://ftp.lyx.org/pub/lyx/"
|
||||
command = "/home/scripts/lftp.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
[mirrors.env]
|
||||
TUNASYNC_LFTP_OPTIONS = "--only-newer"
|
||||
|
||||
[[mirrors]]
|
||||
name = "mongodb"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://repo.mongodb.org"
|
||||
command = "/home/scripts/mongodb.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "msys2"
|
||||
provider = "command"
|
||||
upstream = "http://repo.msys2.org/"
|
||||
command = "/home/scripts/lftp.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "mysql"
|
||||
interval = 30
|
||||
provider = "command"
|
||||
upstream = "https://repo.mysql.com"
|
||||
command = "/home/scripts/mysql.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
USE_IPV6 = "1"
|
||||
|
||||
[[mirrors]]
|
||||
name = "nix"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "s3://nix-releases/nix/"
|
||||
command = "/home/scripts/nix.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
[mirrors.env]
|
||||
MIRROR_BASE_URL = 'https://mirrors.tuna.tsinghua.edu.cn/nix/'
|
||||
|
||||
[[mirrors]]
|
||||
name = "nix-channels"
|
||||
interval = 300
|
||||
provider = "command"
|
||||
upstream = "https://nixos.org/channels"
|
||||
command = "timeout 20h /home/scripts/nix-channels.py"
|
||||
docker_image = "tunathu/nix-channels:latest"
|
||||
docker_options = [
|
||||
"--cpus", "20",
|
||||
]
|
||||
|
||||
[[mirrors]]
|
||||
name = "nodesource"
|
||||
provider = "command"
|
||||
upstream = "https://deb.nodesource.com/"
|
||||
command = "/home/scripts/nodesource.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "openresty"
|
||||
provider = "command"
|
||||
upstream = "https://openresty.org/package/"
|
||||
command = "/home/scripts/lftp.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[mirrors.env]
|
||||
TUNASYNC_LFTP_OPTIONS = "--only-newer"
|
||||
|
||||
[[mirrors]]
|
||||
name = "packagist"
|
||||
provider = "command"
|
||||
upstream = "http://packagist.org/"
|
||||
command = "/home/scripts/packagist.sh"
|
||||
interval = 1440
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "proxmox"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "http://download.proxmox.com"
|
||||
command = "/home/scripts/proxmox.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "pypi"
|
||||
provider = "command"
|
||||
upstream = "https://pypi.python.org/"
|
||||
command = "/home/scripts/pypi.sh"
|
||||
docker_image = "tunathu/bandersnatch:latest"
|
||||
interval = 5
|
||||
|
||||
[[mirrors]]
|
||||
name = "qt"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://master.qt-project.org/qt-all/"
|
||||
exclude_file = "/etc/excludes/qt.txt"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "raspberrypi"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://apt-repo.raspberrypi.org/archive/debian/"
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "raspbian-images"
|
||||
interval = 5760
|
||||
provider = "command"
|
||||
upstream = "https://downloads.raspberrypi.org/"
|
||||
command = "/home/scripts/lftp.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[mirrors.env]
|
||||
TUNASYNC_LFTP_OPTIONS = "-x ^icons/$ -c --only-missing -v --no-perms"
|
||||
|
||||
[[mirrors]]
|
||||
name = "raspbian"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://archive.raspbian.org/archive/"
|
||||
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
|
||||
memory_limit = "256M"
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "redhat"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://ftp.redhat.com/redhat/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
exclude_file = "/etc/excludes/redhat.txt"
|
||||
interval = 1440
|
||||
|
||||
[mirrors.env]
|
||||
RSYNC_PROXY="127.0.0.1:8123"
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "remi"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "rsync://rpms.remirepo.net"
|
||||
command = "/home/scripts/remi.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "repo-ck"
|
||||
provider = "command"
|
||||
upstream = "http://repo-ck.com"
|
||||
command = "/home/scripts/repo-ck.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "ros"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://mirror.umd.edu/packages.ros.org/ros/"
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "ros2"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "http://packages.ros.org/ros2"
|
||||
command = "/home/scripts/ros2.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "rubygems"
|
||||
provider = "command"
|
||||
upstream = "https://rubygems.org"
|
||||
command = "/home/scripts/rubygems.sh"
|
||||
docker_image = "tunathu/rubygems-mirror"
|
||||
interval = 60
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
INIT = "0"
|
||||
|
||||
[[mirrors]]
|
||||
name = "rudder"
|
||||
interval = 2880
|
||||
provider = "command"
|
||||
upstream = "https://repository.rudder.io"
|
||||
command = "/home/scripts/rudder.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "rustup"
|
||||
provider = "command"
|
||||
upstream = "https://rustup.rs/"
|
||||
command = "/home/scripts/rustup.sh"
|
||||
interval = 1440
|
||||
docker_image = "tunathu/rustup-mirror:latest"
|
||||
docker_volumes = [
|
||||
]
|
||||
docker_options = [
|
||||
]
|
||||
[mirrors.env]
|
||||
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup"
|
||||
|
||||
[[mirrors]]
|
||||
name = "saltstack"
|
||||
interval = 1440 # required on http://repo.saltstack.com/#mirror
|
||||
provider = "command"
|
||||
upstream = "s3://s3/"
|
||||
command = "/home/scripts/s3.sh"
|
||||
docker_image = "tunathu/ftpsync:latest"
|
||||
|
||||
[mirrors.env]
|
||||
TUNASYNC_S3_ENDPOINT = "https://s3.repo.saltstack.com"
|
||||
TUNASYNC_AWS_OPTIONS = "--delete --exact-timestamps"
|
||||
|
||||
[[mirrors]]
|
||||
name = "solus"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://mirrors.rit.edu/solus/"
|
||||
rsync_options = [ "--exclude", "/shannon", "--exclude", "/unstable" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "stackage"
|
||||
provider = "command"
|
||||
command = "/home/scripts/stackage.py"
|
||||
upstream = "https://www.stackage.org/"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
# set environment variables
|
||||
[mirrors.env]
|
||||
GIT_COMMITTER_NAME = "TUNA mirrors"
|
||||
GIT_COMMITTER_EMAIL = "mirrors@tuna.tsinghua.edu.cn"
|
||||
|
||||
|
||||
[[mirrors]]
|
||||
name = "steamos"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "http://repo.steampowered.com"
|
||||
command = "/home/scripts/lftp.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[mirrors.env]
|
||||
TUNASYNC_LFTP_OPTIONS = "--only-newer --exclude icons/ "
|
||||
|
||||
[[mirrors]]
|
||||
name = "termux"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "https://dl.bintray.com/termux/termux-packages-24/"
|
||||
command = "/home/scripts/termux.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "ubuntu"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://archive.ubuntu.com/ubuntu/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "ubuntu-ports"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://ports.ubuntu.com/ubuntu-ports/"
|
||||
rsync_options = [ "--delete-excluded" ]
|
||||
exclude_file = "/etc/excludes/ubuntu-ports-exclude.txt"
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "virtualbox"
|
||||
interval = 1440
|
||||
provider = "command"
|
||||
upstream = "http://download.virtualbox.org/virtualbox"
|
||||
command = "/home/scripts/virtualbox.sh"
|
||||
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "winehq"
|
||||
provider = "command"
|
||||
upstream = "ftp://ftp.winehq.org/pub/"
|
||||
command = "/home/scripts/lftp.sh"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[mirrors.env]
|
||||
TUNASYNC_LFTP_OPTIONS = "-x wine-builds.old/ -x /\\..+"
|
||||
|
||||
[[mirrors]]
|
||||
name = "zabbix"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://repo.zabbix.com/mirror/"
|
||||
rsync_options = [ "--delete-excluded", "--chmod=o+r,Do+x,Fa-x" ]
|
||||
memory_limit = "256M"
|
||||
|
||||
[[mirrors]]
|
||||
name = "AOSP"
|
||||
interval = 720
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/aosp.sh"
|
||||
upstream = "https://android.googlesource.com/mirror/manifest"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
[mirrors.env]
|
||||
REPO = "/usr/local/bin/aosp-repo"
|
||||
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
|
||||
USE_BITMAP_INDEX = "1"
|
||||
|
||||
[[mirrors]]
|
||||
name = "lineageOS"
|
||||
interval = 720
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/aosp.sh"
|
||||
upstream = "https://github.com/LineageOS/mirror"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
[mirrors.env]
|
||||
REPO = "/usr/local/bin/aosp-repo"
|
||||
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
|
||||
USE_BITMAP_INDEX = "1"
|
||||
|
||||
[[mirrors]]
|
||||
name = "chromiumos"
|
||||
interval = 720
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/cros.sh"
|
||||
upstream = "https://chromium.googlesource.com"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
fail_on_match = "fatal: "
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
[mirrors.env]
|
||||
USE_BITMAP_INDEX = "1"
|
||||
CONCURRENT_JOBS = "20"
|
||||
|
||||
[[mirrors]]
|
||||
name = "crates.io-index.git"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/git.sh"
|
||||
upstream = "https://github.com/rust-lang/crates.io-index.git"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
|
||||
|
||||
[[mirrors]]
|
||||
name = "flutter-sdk.git"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/git.sh"
|
||||
upstream = "git://github.com/flutter/flutter.git"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
|
||||
|
||||
[[mirrors]]
|
||||
name = "gcc.git"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/git.sh"
|
||||
upstream = "git://gcc.gnu.org/git/gcc.git"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
|
||||
|
||||
[[mirrors]]
|
||||
name = "gentoo-portage.git"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/git.sh"
|
||||
upstream = "git://github.com/gentoo-mirror/gentoo.git"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
|
||||
|
||||
[[mirrors]]
|
||||
name = "git-repo"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/git-repo.sh"
|
||||
upstream = "https://gerrit.googlesource.com/git-repo"
|
||||
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
|
||||
fail_on_match = "fatal: "
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
|
||||
[[mirrors]]
|
||||
name = "homebrew"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/homebrew.sh"
|
||||
upstream = "https://github.com/Homebrew"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
|
||||
[[mirrors]]
|
||||
name = "CocoaPods"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/cocoapods.sh"
|
||||
upstream = "https://github.com/CocoaPods"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
|
||||
[[mirrors]]
|
||||
name = "pybombs"
|
||||
interval = 720
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/pybombs.sh"
|
||||
upstream = "https://github.com/scateu/pybombs-mirror/"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
docker_volumes = ["/home/pybombs-mirror:/opt/pybombs-mirror"]
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
[mirrors.env]
|
||||
PYBOMBS_MIRROR_SCRIPT_PATH = "/opt/pybombs-mirror"
|
||||
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/pybombs"
|
||||
|
||||
[[mirrors]]
|
||||
name = "llvm"
|
||||
provider = "command"
|
||||
command = "/home/tunasync-scripts/llvm.sh"
|
||||
upstream = "https://git.llvm.org/git"
|
||||
docker_image = "tunathu/tunasync-scripts:latest"
|
||||
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
|
||||
|
||||
# vim: ft=toml
go.mod (new file, 89 additions)
@@ -0,0 +1,89 @@
|
||||
module github.com/tuna/tunasync
|
||||
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.5
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.4.0
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
||||
github.com/boltdb/bolt v1.3.1
|
||||
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe
|
||||
github.com/containerd/cgroups/v3 v3.0.5
|
||||
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6
|
||||
github.com/dgraph-io/badger/v2 v2.2007.4
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/gin-gonic/gin v1.10.0
|
||||
github.com/go-redis/redis/v8 v8.11.5
|
||||
github.com/imdario/mergo v0.3.16
|
||||
github.com/moby/moby v28.0.1+incompatible
|
||||
github.com/moby/sys/reexec v0.1.0
|
||||
github.com/opencontainers/runtime-spec v1.2.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
|
||||
github.com/smartystreets/goconvey v1.6.4
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/urfave/cli v1.22.16
|
||||
golang.org/x/sys v0.30.0
|
||||
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
|
||||
)
|
||||
|
||||
replace github.com/boltdb/bolt v1.3.1 => go.etcd.io/bbolt v1.3.11
|
||||
|
||||
require (
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
|
||||
github.com/bytedance/sonic v1.12.9 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.3 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cilium/ebpf v0.17.3 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/dennwc/ioctl v1.0.0 // indirect
|
||||
github.com/dgraph-io/ristretto v0.2.0 // indirect
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
|
||||
github.com/gin-contrib/sse v1.0.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.25.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.0 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v1.8.2 // indirect
|
||||
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/smartystreets/assertions v1.2.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
|
||||
golang.org/x/arch v0.14.0 // indirect
|
||||
golang.org/x/crypto v0.35.0 // indirect
|
||||
golang.org/x/net v0.35.0 // indirect
|
||||
golang.org/x/text v0.22.0 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
go.sum (new file, 343 additions)
@@ -0,0 +1,343 @@
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/bytedance/sonic v1.12.7 h1:CQU8pxOy9HToxhndH0Kx/S1qU/CuS9GnKYrGioDcU1Q=
|
||||
github.com/bytedance/sonic v1.12.7/go.mod h1:tnbal4mxOMju17EGfknm2XyYcpyCnIROYOEYuemj13I=
|
||||
github.com/bytedance/sonic v1.12.9 h1:Od1BvK55NnewtGaJsTDeAOSnLVO2BTSLOe0+ooKokmQ=
|
||||
github.com/bytedance/sonic v1.12.9/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.2 h1:jxAJuN9fOot/cyz5Q6dUuMJF5OqQ6+5GfA8FjjQ0R4o=
|
||||
github.com/bytedance/sonic/loader v0.2.2/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0=
|
||||
github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.17.1 h1:G8mzU81R2JA1nE5/8SRubzqvBMmAmri2VL8BIZPWvV0=
|
||||
github.com/cilium/ebpf v0.17.1/go.mod h1:vay2FaYSmIlv3r8dNACd4mW/OCaZLJKJOo+IHBvCIO8=
|
||||
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
|
||||
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
|
||||
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe h1:69JI97HlzP+PH5Mi1thcGlDoBr6PS2Oe+l3mNmAkbs4=
|
||||
github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
|
||||
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
|
||||
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6 h1:fV+JlCY0cCJh3l0jfE7iB3ZmrdfJSgfcjdrCQhPokGg=
|
||||
github.com/dennwc/btrfs v0.0.0-20241002142654-12ae127e0bf6/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA=
|
||||
github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg=
|
||||
github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0=
|
||||
github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o=
|
||||
github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
|
||||
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
|
||||
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
|
||||
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
|
||||
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
|
||||
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
|
||||
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
|
||||
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
|
||||
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/go-playground/validator/v10 v10.25.0 h1:5Dh7cjvzR7BRZadnsVOzPhWsrwUr0nmsZJxEAnFLNO8=
|
||||
github.com/go-playground/validator/v10 v10.25.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
|
||||
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
|
||||
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
|
||||
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
|
||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
|
||||
github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
|
||||
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
|
||||
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
|
||||
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
|
||||
github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
|
||||
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
|
||||
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
|
||||
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
|
||||
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/moby/moby v27.4.1+incompatible h1:z6detzbcLRt7U+w4ovHV+8oYpJfpHKTmUbFWPG6cudA=
|
||||
github.com/moby/moby v27.4.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
|
||||
github.com/moby/moby v28.0.1+incompatible h1:10ejBTwFhM3/9p6pSaKrLyXnx7QzzCmCYHAedOp67cQ=
|
||||
github.com/moby/moby v28.0.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
|
||||
github.com/moby/sys/reexec v0.1.0 h1:RrBi8e0EBTLEgfruBOFcxtElzRGTEUkeIFaVXgU7wok=
|
||||
github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
|
||||
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
|
||||
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
||||
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
|
||||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
|
||||
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
golang.org/x/arch v0.13.0 h1:KCkqVVV1kGg0X87TFysjCJ8MxtZEIU4Ja/yXGeoECdA=
|
||||
golang.org/x/arch v0.13.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
|
||||
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
|
||||
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
|
||||
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
|
||||
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
|
||||
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
|
||||
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
|
||||
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
@ -24,9 +24,12 @@ func InitLogger(verbose, debug, withSystemd bool) {
    if debug {
        logging.SetLevel(logging.DEBUG, "tunasync")
        logging.SetLevel(logging.DEBUG, "tunasynctl")
    } else if verbose {
        logging.SetLevel(logging.INFO, "tunasync")
        logging.SetLevel(logging.INFO, "tunasynctl")
    } else {
        logging.SetLevel(logging.NOTICE, "tunasync")
        logging.SetLevel(logging.NOTICE, "tunasynctl")
    }
}

109 internal/msg.go
@ -1,30 +1,45 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A StatusUpdateMsg represents a msg when
|
||||
// A MirrorStatus represents a msg when
|
||||
// a worker has done syncing
|
||||
type MirrorStatus struct {
|
||||
Name string `json:"name"`
|
||||
Worker string `json:"worker"`
|
||||
IsMaster bool `json:"is_master"`
|
||||
Status SyncStatus `json:"status"`
|
||||
LastUpdate time.Time `json:"last_update"`
|
||||
Upstream string `json:"upstream"`
|
||||
Size string `json:"size"`
|
||||
ErrorMsg string `json:"error_msg"`
|
||||
Name string `json:"name"`
|
||||
Worker string `json:"worker"`
|
||||
IsMaster bool `json:"is_master"`
|
||||
Status SyncStatus `json:"status"`
|
||||
LastUpdate time.Time `json:"last_update"`
|
||||
LastStarted time.Time `json:"last_started"`
|
||||
LastEnded time.Time `json:"last_ended"`
|
||||
Scheduled time.Time `json:"next_schedule"`
|
||||
Upstream string `json:"upstream"`
|
||||
Size string `json:"size"`
|
||||
ErrorMsg string `json:"error_msg"`
|
||||
}
|
||||
|
||||
// A WorkerStatus is the information struct that describe
|
||||
// a worker, and sent from the manager to clients.
|
||||
type WorkerStatus struct {
|
||||
ID string `json:"id"`
|
||||
URL string `json:"url"` // worker url
|
||||
Token string `json:"token"` // session token
|
||||
LastOnline time.Time `json:"last_online"` // last seen
|
||||
ID string `json:"id"`
|
||||
URL string `json:"url"` // worker url
|
||||
Token string `json:"token"` // session token
|
||||
LastOnline time.Time `json:"last_online"` // last seen
|
||||
LastRegister time.Time `json:"last_register"` // last register time
|
||||
}
|
||||
|
||||
type MirrorSchedules struct {
|
||||
Schedules []MirrorSchedule `json:"schedules"`
|
||||
}
|
||||
|
||||
type MirrorSchedule struct {
|
||||
MirrorName string `json:"name"`
|
||||
NextSchedule time.Time `json:"next_schedule"`
|
||||
}
|
||||
|
||||
// A CmdVerb is an action to a job or worker
|
||||
@ -47,29 +62,54 @@ const (
|
||||
)
|
||||
|
||||
func (c CmdVerb) String() string {
|
||||
switch c {
|
||||
case CmdStart:
|
||||
return "start"
|
||||
case CmdStop:
|
||||
return "stop"
|
||||
case CmdDisable:
|
||||
return "disable"
|
||||
case CmdRestart:
|
||||
return "restart"
|
||||
case CmdPing:
|
||||
return "ping"
|
||||
case CmdReload:
|
||||
return "reload"
|
||||
mapping := map[CmdVerb]string{
|
||||
CmdStart: "start",
|
||||
CmdStop: "stop",
|
||||
CmdDisable: "disable",
|
||||
CmdRestart: "restart",
|
||||
CmdPing: "ping",
|
||||
CmdReload: "reload",
|
||||
}
|
||||
return "unknown"
|
||||
return mapping[c]
|
||||
}
|
||||
|
||||
func NewCmdVerbFromString(s string) CmdVerb {
|
||||
mapping := map[string]CmdVerb{
|
||||
"start": CmdStart,
|
||||
"stop": CmdStop,
|
||||
"disable": CmdDisable,
|
||||
"restart": CmdRestart,
|
||||
"ping": CmdPing,
|
||||
"reload": CmdReload,
|
||||
}
|
||||
return mapping[s]
|
||||
}
|
||||
|
||||
// Marshal and Unmarshal for CmdVerb
|
||||
func (s CmdVerb) MarshalJSON() ([]byte, error) {
|
||||
buffer := bytes.NewBufferString(`"`)
|
||||
buffer.WriteString(s.String())
|
||||
buffer.WriteString(`"`)
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
func (s *CmdVerb) UnmarshalJSON(b []byte) error {
|
||||
var j string
|
||||
err := json.Unmarshal(b, &j)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*s = NewCmdVerbFromString(j)
|
||||
return nil
|
||||
}
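Taken together, String, NewCmdVerbFromString and the JSON hooks above mean a CmdVerb travels over the wire as its lowercase name rather than as an integer. A minimal round-trip sketch, written as a test in the same package (illustrative only, not part of this change set):

package internal

import (
    "encoding/json"
    "testing"
)

// TestCmdVerbRoundTrip is a hypothetical test showing the JSON form of CmdVerb.
func TestCmdVerbRoundTrip(t *testing.T) {
    b, err := json.Marshal(CmdRestart)
    if err != nil || string(b) != `"restart"` {
        t.Fatalf("unexpected marshal result: %s, %v", b, err)
    }
    var v CmdVerb
    if err := json.Unmarshal(b, &v); err != nil || v != CmdRestart {
        t.Fatalf("unexpected unmarshal result: %v, %v", v, err)
    }
}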
|
||||
|
||||
// A WorkerCmd is the command message send from the
|
||||
// manager to a worker
|
||||
type WorkerCmd struct {
|
||||
Cmd CmdVerb `json:"cmd"`
|
||||
MirrorID string `json:"mirror_id"`
|
||||
Args []string `json:"args"`
|
||||
Cmd CmdVerb `json:"cmd"`
|
||||
MirrorID string `json:"mirror_id"`
|
||||
Args []string `json:"args"`
|
||||
Options map[string]bool `json:"options"`
|
||||
}
|
||||
|
||||
func (c WorkerCmd) String() string {
|
||||
@ -82,8 +122,9 @@ func (c WorkerCmd) String() string {
|
||||
// A ClientCmd is the command message send from client
|
||||
// to the manager
|
||||
type ClientCmd struct {
|
||||
Cmd CmdVerb `json:"cmd"`
|
||||
MirrorID string `json:"mirror_id"`
|
||||
WorkerID string `json:"worker_id"`
|
||||
Args []string `json:"args"`
|
||||
Cmd CmdVerb `json:"cmd"`
|
||||
MirrorID string `json:"mirror_id"`
|
||||
WorkerID string `json:"worker_id"`
|
||||
Args []string `json:"args"`
|
||||
Options map[string]bool `json:"options"`
|
||||
}
|
||||
|
72 internal/status_web.go Normal file
@ -0,0 +1,72 @@
package internal

import (
    "encoding/json"
    "strconv"
    "time"
)

type textTime struct {
    time.Time
}

func (t textTime) MarshalJSON() ([]byte, error) {
    return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
    s := string(b)
    t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
    *t = textTime{t2}
    return err
}

type stampTime struct {
    time.Time
}

func (t stampTime) MarshalJSON() ([]byte, error) {
    return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
    ts, err := strconv.Atoi(string(b))
    if err != nil {
        return err
    }
    *t = stampTime{time.Unix(int64(ts), 0)}
    return err
}

// WebMirrorStatus is the mirror status to be shown in the web page
type WebMirrorStatus struct {
    Name          string     `json:"name"`
    IsMaster      bool       `json:"is_master"`
    Status        SyncStatus `json:"status"`
    LastUpdate    textTime   `json:"last_update"`
    LastUpdateTs  stampTime  `json:"last_update_ts"`
    LastStarted   textTime   `json:"last_started"`
    LastStartedTs stampTime  `json:"last_started_ts"`
    LastEnded     textTime   `json:"last_ended"`
    LastEndedTs   stampTime  `json:"last_ended_ts"`
    Scheduled     textTime   `json:"next_schedule"`
    ScheduledTs   stampTime  `json:"next_schedule_ts"`
    Upstream      string     `json:"upstream"`
    Size          string     `json:"size"` // approximate size
}

func BuildWebMirrorStatus(m MirrorStatus) WebMirrorStatus {
    return WebMirrorStatus{
        Name:          m.Name,
        IsMaster:      m.IsMaster,
        Status:        m.Status,
        LastUpdate:    textTime{m.LastUpdate},
        LastUpdateTs:  stampTime{m.LastUpdate},
        LastStarted:   textTime{m.LastStarted},
        LastStartedTs: stampTime{m.LastStarted},
        LastEnded:     textTime{m.LastEnded},
        LastEndedTs:   stampTime{m.LastEnded},
        Scheduled:     textTime{m.Scheduled},
        ScheduledTs:   stampTime{m.Scheduled},
        Upstream:      m.Upstream,
        Size:          m.Size,
    }
}
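With textTime and stampTime, every timestamp is emitted twice: once human-readable and once as a Unix epoch, so consumers can compare or sort without re-parsing dates. A single serialized entry looks roughly like this (abridged; values are illustrative, and the status string follows SyncStatus's own JSON encoding):

{
  "name": "tunalinux",
  "is_master": true,
  "status": "success",
  "last_update": "2016-04-16 23:08:10 +0900",
  "last_update_ts": 1460815690,
  "last_started": "2016-04-16 23:08:10 +0900",
  "last_started_ts": 1460815690,
  "upstream": "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
  "size": "5GB"
}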
97 internal/status_web_test.go Normal file
@ -0,0 +1,97 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestStatus(t *testing.T) {
|
||||
Convey("status json ser-de should work", t, func() {
|
||||
tz := "Asia/Tokyo"
|
||||
loc, err := time.LoadLocation(tz)
|
||||
So(err, ShouldBeNil)
|
||||
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
|
||||
m := WebMirrorStatus{
|
||||
Name: "tunalinux",
|
||||
Status: Success,
|
||||
LastUpdate: textTime{t},
|
||||
LastUpdateTs: stampTime{t},
|
||||
LastStarted: textTime{t},
|
||||
LastStartedTs: stampTime{t},
|
||||
LastEnded: textTime{t},
|
||||
LastEndedTs: stampTime{t},
|
||||
Scheduled: textTime{t},
|
||||
ScheduledTs: stampTime{t},
|
||||
Size: "5GB",
|
||||
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
|
||||
}
|
||||
|
||||
b, err := json.Marshal(m)
|
||||
So(err, ShouldBeNil)
|
||||
//fmt.Println(string(b))
|
||||
var m2 WebMirrorStatus
|
||||
err = json.Unmarshal(b, &m2)
|
||||
So(err, ShouldBeNil)
|
||||
// fmt.Printf("%#v", m2)
|
||||
So(m2.Name, ShouldEqual, m.Name)
|
||||
So(m2.Status, ShouldEqual, m.Status)
|
||||
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
|
||||
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
|
||||
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
|
||||
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
|
||||
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
|
||||
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
|
||||
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
|
||||
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
|
||||
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
|
||||
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
|
||||
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
|
||||
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
|
||||
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
|
||||
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
|
||||
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
|
||||
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
|
||||
So(m2.Size, ShouldEqual, m.Size)
|
||||
So(m2.Upstream, ShouldEqual, m.Upstream)
|
||||
})
|
||||
Convey("BuildWebMirrorStatus should work", t, func() {
|
||||
m := MirrorStatus{
|
||||
Name: "arch-sync3",
|
||||
Worker: "testWorker",
|
||||
IsMaster: true,
|
||||
Status: Failed,
|
||||
LastUpdate: time.Now().Add(-time.Minute * 30),
|
||||
LastStarted: time.Now().Add(-time.Minute * 1),
|
||||
LastEnded: time.Now(),
|
||||
Scheduled: time.Now().Add(time.Minute * 5),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
}
|
||||
|
||||
var m2 WebMirrorStatus = BuildWebMirrorStatus(m)
|
||||
// fmt.Printf("%#v", m2)
|
||||
So(m2.Name, ShouldEqual, m.Name)
|
||||
So(m2.Status, ShouldEqual, m.Status)
|
||||
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
|
||||
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
|
||||
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
|
||||
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
|
||||
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
|
||||
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
|
||||
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
|
||||
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
|
||||
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
|
||||
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
|
||||
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
|
||||
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
|
||||
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
|
||||
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
|
||||
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
|
||||
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
|
||||
So(m2.Size, ShouldEqual, m.Size)
|
||||
So(m2.Upstream, ShouldEqual, m.Upstream)
|
||||
})
|
||||
}
|
@ -6,26 +6,53 @@ import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
var rsyncExitValues = map[int]string{
|
||||
0: "Success",
|
||||
1: "Syntax or usage error",
|
||||
2: "Protocol incompatibility",
|
||||
3: "Errors selecting input/output files, dirs",
|
||||
4: "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them; or an option was specified that is supported by the client and not by the server.",
|
||||
5: "Error starting client-server protocol",
|
||||
6: "Daemon unable to append to log-file",
|
||||
10: "Error in socket I/O",
|
||||
11: "Error in file I/O",
|
||||
12: "Error in rsync protocol data stream",
|
||||
13: "Errors with program diagnostics",
|
||||
14: "Error in IPC code",
|
||||
20: "Received SIGUSR1 or SIGINT",
|
||||
21: "Some error returned by waitpid()",
|
||||
22: "Error allocating core memory buffers",
|
||||
23: "Partial transfer due to error",
|
||||
24: "Partial transfer due to vanished source files",
|
||||
25: "The --max-delete limit stopped deletions",
|
||||
30: "Timeout in data send/receive",
|
||||
35: "Timeout waiting for daemon connection",
|
||||
}
|
||||
|
||||
// GetTLSConfig generate tls.Config from CAFile
|
||||
func GetTLSConfig(CAFile string) (*tls.Config, error) {
|
||||
caCert, err := ioutil.ReadFile(CAFile)
|
||||
caCert, err := os.ReadFile(CAFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
caCertPool := x509.NewCertPool()
|
||||
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
|
||||
return nil, errors.New("Failed to add CA to pool")
|
||||
return nil, errors.New("failed to add CA to pool")
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
}
|
||||
tlsConfig.BuildNameToCertificate()
|
||||
// tlsConfig.BuildNameToCertificate()
|
||||
return tlsConfig, nil
|
||||
}
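The *tls.Config returned here is what lets tunasynctl and workers talk to a manager signed by a private CA. A hedged sketch of how it might be wired into an HTTP client (the helper below is hypothetical; it assumes it sits next to GetTLSConfig, whose file already imports net/http and time):

// newHTTPSClient is an illustrative helper, not part of the original change.
func newHTTPSClient(caFile string) (*http.Client, error) {
    tlsCfg, err := GetTLSConfig(caFile)
    if err != nil {
        return nil, err
    }
    return &http.Client{
        Transport: &http.Transport{TLSClientConfig: tlsCfg},
        Timeout:   30 * time.Second,
    }, nil
}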
|
||||
|
||||
@ -78,9 +105,52 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
|
||||
return resp, errors.New("HTTP status code is not 200")
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
return resp, json.Unmarshal(body, obj)
|
||||
}
|
||||
|
||||
// FindAllSubmatchInFile calls re.FindAllSubmatch to find matches in given file
|
||||
func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
|
||||
if fileName == "/dev/null" {
|
||||
err = errors.New("invalid log file")
|
||||
return
|
||||
}
|
||||
if content, err := os.ReadFile(fileName); err == nil {
|
||||
matches = re.FindAllSubmatch(content, -1)
|
||||
// fmt.Printf("FindAllSubmatchInFile: %q\n", matches)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExtractSizeFromLog uses a regexp to extract the size from log files
|
||||
func ExtractSizeFromLog(logFile string, re *regexp.Regexp) string {
|
||||
matches, _ := FindAllSubmatchInFile(logFile, re)
|
||||
if len(matches) == 0 {
|
||||
return ""
|
||||
}
|
||||
// return the first capture group of the last occurrence
|
||||
return string(matches[len(matches)-1][1])
|
||||
}
|
||||
|
||||
// ExtractSizeFromRsyncLog extracts the size from rsync logs
|
||||
func ExtractSizeFromRsyncLog(logFile string) string {
|
||||
// (?m) flag enables multi-line mode
|
||||
re := regexp.MustCompile(`(?m)^Total file size: ([0-9\.]+[KMGTP]?) bytes`)
|
||||
return ExtractSizeFromLog(logFile, re)
|
||||
}
|
||||
|
||||
// TranslateRsyncErrorCode translates the exit code of rsync to a message
|
||||
func TranslateRsyncErrorCode(cmdErr error) (exitCode int, msg string) {
|
||||
|
||||
if exiterr, ok := cmdErr.(*exec.ExitError); ok {
|
||||
exitCode = exiterr.ExitCode()
|
||||
strerr, valid := rsyncExitValues[exitCode]
|
||||
if valid {
|
||||
msg = fmt.Sprintf("rsync error: %s", strerr)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
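TranslateRsyncErrorCode only returns a message for exit codes present in rsyncExitValues above; any other code comes back with an empty msg. A small illustrative wrapper (the command and paths are made up):

// runAndExplain is a hypothetical example of reporting rsync failures.
func runAndExplain() {
    cmd := exec.Command("rsync", "-a", "rsync://example.org/pub/", "/tmp/dst")
    if err := cmd.Run(); err != nil {
        if code, msg := TranslateRsyncErrorCode(err); msg != "" {
            fmt.Printf("rsync exited with %d: %s\n", code, msg)
        }
    }
}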
|
||||
|
41 internal/util_test.go Normal file
@ -0,0 +1,41 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestExtractSizeFromRsyncLog(t *testing.T) {
|
||||
realLogContent := `
|
||||
Number of files: 998,470 (reg: 925,484, dir: 58,892, link: 14,094)
|
||||
Number of created files: 1,049 (reg: 1,049)
|
||||
Number of deleted files: 1,277 (reg: 1,277)
|
||||
Number of regular files transferred: 5,694
|
||||
Total file size: 1.33T bytes
|
||||
Total transferred file size: 2.86G bytes
|
||||
Literal data: 780.62M bytes
|
||||
Matched data: 2.08G bytes
|
||||
File list size: 37.55M
|
||||
File list generation time: 7.845 seconds
|
||||
File list transfer time: 0.000 seconds
|
||||
Total bytes sent: 7.55M
|
||||
Total bytes received: 823.25M
|
||||
|
||||
sent 7.55M bytes received 823.25M bytes 5.11M bytes/sec
|
||||
total size is 1.33T speedup is 1,604.11
|
||||
`
|
||||
Convey("Log parser should work", t, func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
So(err, ShouldBeNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
logFile := filepath.Join(tmpDir, "rs.log")
|
||||
err = os.WriteFile(logFile, []byte(realLogContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
res := ExtractSizeFromRsyncLog(logFile)
|
||||
So(res, ShouldEqual, "1.33T")
|
||||
})
|
||||
}
|
4 internal/version.go Normal file
@ -0,0 +1,4 @@
package internal

// Version of the program
const Version string = "0.9.3"
@ -2,7 +2,7 @@ package manager
import (
    "github.com/BurntSushi/toml"
    "gopkg.in/urfave/cli.v1"
    "github.com/urfave/cli"
)

// A Config is the top-level toml-serializaible config struct
@ -29,6 +29,7 @@ type FileConfig struct {
    CACert string `toml:"ca_cert"`
}

// LoadConfig loads config from specified file
func LoadConfig(cfgFile string, c *cli.Context) (*Config, error) {

    cfg := new(Config)
@ -2,14 +2,13 @@ package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
@ -37,11 +36,11 @@ func TestConfig(t *testing.T) {
|
||||
|
||||
Convey("load Config should work", t, func() {
|
||||
Convey("create config file & cli context", func() {
|
||||
tmpfile, err := ioutil.TempFile("", "tunasync")
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
|
319 manager/db.go
@ -4,8 +4,13 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
bolt "github.com/boltdb/bolt"
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
@ -14,7 +19,9 @@ type dbAdapter interface {
|
||||
Init() error
|
||||
ListWorkers() ([]WorkerStatus, error)
|
||||
GetWorker(workerID string) (WorkerStatus, error)
|
||||
DeleteWorker(workerID string) error
|
||||
CreateWorker(w WorkerStatus) (WorkerStatus, error)
|
||||
RefreshWorker(workerID string) (WorkerStatus, error)
|
||||
UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
|
||||
GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)
|
||||
ListMirrorStatus(workerID string) ([]MirrorStatus, error)
|
||||
@ -23,21 +30,14 @@ type dbAdapter interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
|
||||
if dbType == "bolt" {
|
||||
innerDB, err := bolt.Open(dbFile, 0600, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db := boltAdapter{
|
||||
db: innerDB,
|
||||
dbFile: dbFile,
|
||||
}
|
||||
err = db.Init()
|
||||
return &db, err
|
||||
}
|
||||
// unsupported db-type
|
||||
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
|
||||
// interface for a kv database
|
||||
type kvAdapter interface {
|
||||
InitBucket(bucket string) error
|
||||
Get(bucket string, key string) ([]byte, error)
|
||||
GetAll(bucket string) (map[string][]byte, error)
|
||||
Put(bucket string, key string, value []byte) error
|
||||
Delete(bucket string, key string) error
|
||||
Close() error
|
||||
}
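Every storage backend only has to satisfy this small bucket/key interface, so adding a new store is mostly mechanical. For illustration, a hypothetical in-memory adapter (enough for unit tests, not part of the actual code base) could look like:

// memAdapter is an illustration-only kvAdapter backed by nested maps.
type memAdapter struct {
    data map[string]map[string][]byte
}

func (m *memAdapter) InitBucket(bucket string) error {
    if m.data == nil {
        m.data = make(map[string]map[string][]byte)
    }
    if m.data[bucket] == nil {
        m.data[bucket] = make(map[string][]byte)
    }
    return nil
}

func (m *memAdapter) Get(bucket string, key string) ([]byte, error) {
    return m.data[bucket][key], nil
}

func (m *memAdapter) GetAll(bucket string) (map[string][]byte, error) {
    return m.data[bucket], nil
}

func (m *memAdapter) Put(bucket string, key string, value []byte) error {
    if err := m.InitBucket(bucket); err != nil {
        return err
    }
    m.data[bucket][key] = value
    return nil
}

func (m *memAdapter) Delete(bucket string, key string) error {
    delete(m.data[bucket], key)
    return nil
}

func (m *memAdapter) Close() error { return nil }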
|
||||
|
||||
const (
|
||||
@ -45,153 +45,222 @@ const (
|
||||
_statusBucketKey = "mirror_status"
|
||||
)
|
||||
|
||||
type boltAdapter struct {
|
||||
db *bolt.DB
|
||||
dbFile string
|
||||
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
|
||||
if dbType == "bolt" {
|
||||
innerDB, err := bolt.Open(dbFile, 0600, &bolt.Options{
|
||||
Timeout: 5 * time.Second,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db := boltAdapter{
|
||||
db: innerDB,
|
||||
}
|
||||
kv := kvDBAdapter{
|
||||
db: &db,
|
||||
}
|
||||
err = kv.Init()
|
||||
return &kv, err
|
||||
} else if dbType == "redis" {
|
||||
opt, err := redis.ParseURL(dbFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad redis url: %s", err)
|
||||
}
|
||||
innerDB := redis.NewClient(opt)
|
||||
db := redisAdapter{
|
||||
db: innerDB,
|
||||
}
|
||||
kv := kvDBAdapter{
|
||||
db: &db,
|
||||
}
|
||||
err = kv.Init()
|
||||
return &kv, err
|
||||
} else if dbType == "badger" {
|
||||
innerDB, err := badger.Open(badger.DefaultOptions(dbFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db := badgerAdapter{
|
||||
db: innerDB,
|
||||
}
|
||||
kv := kvDBAdapter{
|
||||
db: &db,
|
||||
}
|
||||
err = kv.Init()
|
||||
return &kv, err
|
||||
} else if dbType == "leveldb" {
|
||||
innerDB, err := leveldb.OpenFile(dbFile, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db := leveldbAdapter{
|
||||
db: innerDB,
|
||||
}
|
||||
kv := kvDBAdapter{
|
||||
db: &db,
|
||||
}
|
||||
err = kv.Init()
|
||||
return &kv, err
|
||||
}
|
||||
// unsupported db-type
|
||||
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
|
||||
}
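The factory keeps the rest of the manager independent of the chosen backend: bolt, badger and leveldb take a local file or directory path, while redis takes a URL accepted by redis.ParseURL. A sketch of a call site (path and URL are illustrative):

// openStore is a hypothetical call site for makeDBAdapter.
func openStore() (dbAdapter, error) {
    return makeDBAdapter("leveldb", "/var/lib/tunasync/manager.db")
    // or: return makeDBAdapter("redis", "redis://127.0.0.1:6379/0")
}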
|
||||
|
||||
func (b *boltAdapter) Init() (err error) {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
_, err = tx.CreateBucketIfNotExists([]byte(_workerBucketKey))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
|
||||
}
|
||||
_, err = tx.CreateBucketIfNotExists([]byte(_statusBucketKey))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _statusBucketKey, err.Error())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
// use the underlying kv database to store data
|
||||
type kvDBAdapter struct {
|
||||
db kvAdapter
|
||||
}
|
||||
|
||||
func (b *boltAdapter) ListWorkers() (ws []WorkerStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_workerBucketKey))
|
||||
c := bucket.Cursor()
|
||||
var w WorkerStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
jsonErr := json.Unmarshal(v, &w)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
continue
|
||||
}
|
||||
ws = append(ws, w)
|
||||
func (b *kvDBAdapter) Init() error {
|
||||
err := b.db.InitBucket(_workerBucketKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
|
||||
}
|
||||
err = b.db.InitBucket(_statusBucketKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *kvDBAdapter) ListWorkers() (ws []WorkerStatus, err error) {
|
||||
var workers map[string][]byte
|
||||
workers, err = b.db.GetAll(_workerBucketKey)
|
||||
|
||||
var w WorkerStatus
|
||||
for _, v := range workers {
|
||||
jsonErr := json.Unmarshal(v, &w)
|
||||
if jsonErr != nil {
|
||||
err = errors.Wrap(err, jsonErr.Error())
|
||||
continue
|
||||
}
|
||||
return err
|
||||
})
|
||||
ws = append(ws, w)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_workerBucketKey))
|
||||
v := bucket.Get([]byte(workerID))
|
||||
if v == nil {
|
||||
return fmt.Errorf("invalid workerID %s", workerID)
|
||||
}
|
||||
err := json.Unmarshal(v, &w)
|
||||
return err
|
||||
})
|
||||
func (b *kvDBAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
|
||||
var v []byte
|
||||
v, _ = b.db.Get(_workerBucketKey, workerID)
|
||||
if v == nil {
|
||||
err = fmt.Errorf("invalid workerID %s", workerID)
|
||||
} else {
|
||||
err = json.Unmarshal(v, &w)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_workerBucketKey))
|
||||
v, err := json.Marshal(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = bucket.Put([]byte(w.ID), v)
|
||||
return err
|
||||
})
|
||||
func (b *kvDBAdapter) DeleteWorker(workerID string) error {
|
||||
v, _ := b.db.Get(_workerBucketKey, workerID)
|
||||
if v == nil {
|
||||
return fmt.Errorf("invalid workerID %s", workerID)
|
||||
}
|
||||
return b.db.Delete(_workerBucketKey, workerID)
|
||||
}
|
||||
|
||||
func (b *kvDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
|
||||
v, err := json.Marshal(w)
|
||||
if err == nil {
|
||||
err = b.db.Put(_workerBucketKey, w.ID, v)
|
||||
}
|
||||
return w, err
|
||||
}
|
||||
|
||||
func (b *boltAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
|
||||
func (b *kvDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
|
||||
w, err = b.GetWorker(workerID)
|
||||
if err == nil {
|
||||
w.LastOnline = time.Now()
|
||||
w, err = b.CreateWorker(w)
|
||||
}
|
||||
return w, err
|
||||
}
|
||||
|
||||
func (b *kvDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
|
||||
id := mirrorID + "/" + workerID
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
v, err := json.Marshal(status)
|
||||
err = bucket.Put([]byte(id), v)
|
||||
return err
|
||||
})
|
||||
v, err := json.Marshal(status)
|
||||
if err == nil {
|
||||
err = b.db.Put(_statusBucketKey, id, v)
|
||||
}
|
||||
return status, err
|
||||
}
|
||||
|
||||
func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
|
||||
func (b *kvDBAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
|
||||
id := mirrorID + "/" + workerID
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
v := bucket.Get([]byte(id))
|
||||
if v == nil {
|
||||
return fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
|
||||
}
|
||||
err := json.Unmarshal(v, &m)
|
||||
return err
|
||||
})
|
||||
var v []byte
|
||||
v, err = b.db.Get(_statusBucketKey, id)
|
||||
if v == nil {
|
||||
err = fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
|
||||
} else if err == nil {
|
||||
err = json.Unmarshal(v, &m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
c := bucket.Cursor()
|
||||
var m MirrorStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if wID := strings.Split(string(k), "/")[1]; wID == workerID {
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
continue
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
func (b *kvDBAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
|
||||
var vals map[string][]byte
|
||||
vals, err = b.db.GetAll(_statusBucketKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
c := bucket.Cursor()
|
||||
var m MirrorStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
for k, v := range vals {
|
||||
if wID := strings.Split(k, "/")[1]; wID == workerID {
|
||||
var m MirrorStatus
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
err = errors.Wrap(err, jsonErr.Error())
|
||||
continue
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) FlushDisabledJobs() (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(_statusBucketKey))
|
||||
c := bucket.Cursor()
|
||||
func (b *kvDBAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
|
||||
var vals map[string][]byte
|
||||
vals, err = b.db.GetAll(_statusBucketKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, v := range vals {
|
||||
var m MirrorStatus
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
|
||||
continue
|
||||
}
|
||||
if m.Status == Disabled {
|
||||
err = c.Delete()
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = errors.Wrap(err, jsonErr.Error())
|
||||
continue
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *kvDBAdapter) FlushDisabledJobs() (err error) {
|
||||
var vals map[string][]byte
|
||||
vals, err = b.db.GetAll(_statusBucketKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range vals {
|
||||
var m MirrorStatus
|
||||
jsonErr := json.Unmarshal(v, &m)
|
||||
if jsonErr != nil {
|
||||
err = errors.Wrap(err, jsonErr.Error())
|
||||
continue
|
||||
}
|
||||
if m.Status == Disabled || len(m.Name) == 0 {
|
||||
deleteErr := b.db.Delete(_statusBucketKey, k)
|
||||
if deleteErr != nil {
|
||||
err = errors.Wrap(err, deleteErr.Error())
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Close() error {
|
||||
func (b *kvDBAdapter) Close() error {
|
||||
if b.db != nil {
|
||||
return b.db.Close()
|
||||
}
|
||||
|
67 manager/db_badger.go Normal file
@ -0,0 +1,67 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
)
|
||||
|
||||
// implement kv interface backed by badger
|
||||
type badgerAdapter struct {
|
||||
db *badger.DB
|
||||
}
|
||||
|
||||
func (b *badgerAdapter) InitBucket(bucket string) (err error) {
|
||||
// no-op
|
||||
return
|
||||
}
|
||||
|
||||
func (b *badgerAdapter) Get(bucket string, key string) (v []byte, err error) {
|
||||
b.db.View(func(tx *badger.Txn) error {
|
||||
var item *badger.Item
|
||||
item, err = tx.Get([]byte(bucket + key))
|
||||
if item != nil {
|
||||
v, err = item.ValueCopy(nil)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *badgerAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
|
||||
b.db.View(func(tx *badger.Txn) error {
|
||||
it := tx.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
prefix := []byte(bucket)
|
||||
m = make(map[string][]byte)
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
k := string(item.Key())
|
||||
actualKey := k[len(bucket):]
|
||||
|
||||
var v []byte
|
||||
v, err = item.ValueCopy(nil)
|
||||
m[actualKey] = v
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *badgerAdapter) Put(bucket string, key string, value []byte) error {
|
||||
err := b.db.Update(func(tx *badger.Txn) error {
|
||||
err := tx.Set([]byte(bucket+key), value)
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *badgerAdapter) Delete(bucket string, key string) error {
|
||||
err := b.db.Update(func(tx *badger.Txn) error {
|
||||
err := tx.Delete([]byte(bucket + key))
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *badgerAdapter) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
66 manager/db_bolt.go Normal file
@ -0,0 +1,66 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
bolt "github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// implement kv interface backed by boltdb
|
||||
type boltAdapter struct {
|
||||
db *bolt.DB
|
||||
}
|
||||
|
||||
func (b *boltAdapter) InitBucket(bucket string) (err error) {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
_, err = tx.CreateBucketIfNotExists([]byte(bucket))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Get(bucket string, key string) (v []byte, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(bucket))
|
||||
v = bucket.Get([]byte(key))
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(bucket))
|
||||
c := bucket.Cursor()
|
||||
m = make(map[string][]byte)
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
m[string(k)] = v
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Put(bucket string, key string, value []byte) error {
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(bucket))
|
||||
err := bucket.Put([]byte(key), value)
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Delete(bucket string, key string) error {
|
||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte(bucket))
|
||||
err := bucket.Delete([]byte(key))
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltAdapter) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
51 manager/db_leveldb.go Normal file
@ -0,0 +1,51 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// implement kv interface backed by leveldb
|
||||
type leveldbAdapter struct {
|
||||
db *leveldb.DB
|
||||
}
|
||||
|
||||
func (b *leveldbAdapter) InitBucket(bucket string) (err error) {
|
||||
// no-op
|
||||
return
|
||||
}
|
||||
|
||||
func (b *leveldbAdapter) Get(bucket string, key string) (v []byte, err error) {
|
||||
v, err = b.db.Get([]byte(bucket+key), nil)
|
||||
return
|
||||
}
|
||||
|
||||
func (b *leveldbAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
|
||||
it := b.db.NewIterator(util.BytesPrefix([]byte(bucket)), nil)
|
||||
defer it.Release()
|
||||
m = make(map[string][]byte)
|
||||
for it.Next() {
|
||||
k := string(it.Key())
|
||||
actualKey := k[len(bucket):]
|
||||
// it.Value() changes on next iteration
|
||||
val := it.Value()
|
||||
v := make([]byte, len(val))
|
||||
copy(v, val)
|
||||
m[actualKey] = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *leveldbAdapter) Put(bucket string, key string, value []byte) error {
|
||||
err := b.db.Put([]byte(bucket+key), []byte(value), nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *leveldbAdapter) Delete(bucket string, key string) error {
|
||||
err := b.db.Delete([]byte(bucket+key), nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *leveldbAdapter) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
54 manager/db_redis.go Normal file
@ -0,0 +1,54 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
// implement kv interface backed by redis
|
||||
type redisAdapter struct {
|
||||
db *redis.Client
|
||||
}
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func (b *redisAdapter) InitBucket(bucket string) (err error) {
|
||||
// no-op
|
||||
return
|
||||
}
|
||||
|
||||
func (b *redisAdapter) Get(bucket string, key string) (v []byte, err error) {
|
||||
var val string
|
||||
val, err = b.db.HGet(ctx, bucket, key).Result()
|
||||
if err == nil {
|
||||
v = []byte(val)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *redisAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
|
||||
var val map[string]string
|
||||
val, err = b.db.HGetAll(ctx, bucket).Result()
|
||||
if err == nil && val != nil {
|
||||
m = make(map[string][]byte)
|
||||
for k, v := range val {
|
||||
m[k] = []byte(v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *redisAdapter) Put(bucket string, key string, value []byte) error {
|
||||
_, err := b.db.HSet(ctx, bucket, key, string(value)).Result()
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *redisAdapter) Delete(bucket string, key string) error {
|
||||
_, err := b.db.HDel(ctx, bucket, key).Result()
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *redisAdapter) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
@ -2,19 +2,167 @@ package manager
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/alicebob/miniredis"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
. "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
func TestBoltAdapter(t *testing.T) {
|
||||
func SortMirrorStatus(status []MirrorStatus) {
|
||||
sort.Slice(status, func(l, r int) bool {
|
||||
return status[l].Name < status[r].Name
|
||||
})
|
||||
}
|
||||
|
||||
func DBAdapterTest(db dbAdapter) {
|
||||
var err error
|
||||
testWorkerIDs := []string{"test_worker1", "test_worker2"}
|
||||
Convey("create worker", func() {
|
||||
for _, id := range testWorkerIDs {
|
||||
w := WorkerStatus{
|
||||
ID: id,
|
||||
Token: "token_" + id,
|
||||
LastOnline: time.Now(),
|
||||
LastRegister: time.Now(),
|
||||
}
|
||||
_, err = db.CreateWorker(w)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
Convey("get existent worker", func() {
|
||||
_, err := db.GetWorker(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("list existent workers", func() {
|
||||
ws, err := db.ListWorkers()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ws), ShouldEqual, 2)
|
||||
})
|
||||
|
||||
Convey("get non-existent worker", func() {
|
||||
_, err := db.GetWorker("invalid workerID")
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("delete existent worker", func() {
|
||||
err := db.DeleteWorker(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
_, err = db.GetWorker(testWorkerIDs[0])
|
||||
So(err, ShouldNotBeNil)
|
||||
ws, err := db.ListWorkers()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ws), ShouldEqual, 1)
|
||||
})
|
||||
|
||||
Convey("delete non-existent worker", func() {
|
||||
err := db.DeleteWorker("invalid workerID")
|
||||
So(err, ShouldNotBeNil)
|
||||
ws, err := db.ListWorkers()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ws), ShouldEqual, 2)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("update mirror status", func() {
|
||||
status := []MirrorStatus{
|
||||
{
|
||||
Name: "arch-sync1",
|
||||
Worker: testWorkerIDs[0],
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
LastStarted: time.Now().Add(-time.Minute),
|
||||
LastEnded: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "3GB",
|
||||
},
|
||||
{
|
||||
Name: "arch-sync2",
|
||||
Worker: testWorkerIDs[1],
|
||||
IsMaster: true,
|
||||
Status: Disabled,
|
||||
LastUpdate: time.Now().Add(-time.Hour),
|
||||
LastStarted: time.Now().Add(-time.Minute),
|
||||
LastEnded: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
},
|
||||
{
|
||||
Name: "arch-sync3",
|
||||
Worker: testWorkerIDs[1],
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now().Add(-time.Minute),
|
||||
LastStarted: time.Now().Add(-time.Second),
|
||||
LastEnded: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
},
|
||||
}
|
||||
SortMirrorStatus(status)
|
||||
|
||||
for _, s := range status {
|
||||
_, err := db.UpdateMirrorStatus(s.Worker, s.Name, s)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
}
|
||||
|
||||
Convey("get mirror status", func() {
|
||||
m, err := db.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal(status[0])
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(m)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
|
||||
Convey("list mirror status", func() {
|
||||
ms, err := db.ListMirrorStatus(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(ms)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
|
||||
Convey("list all mirror status", func() {
|
||||
ms, err := db.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
SortMirrorStatus(ms)
|
||||
|
||||
expectedJSON, err := json.Marshal(status)
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(ms)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
|
||||
Convey("flush disabled jobs", func() {
|
||||
ms, err := db.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ms), ShouldEqual, 3)
|
||||
err = db.FlushDisabledJobs()
|
||||
So(err, ShouldBeNil)
|
||||
ms, err = db.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ms), ShouldEqual, 2)
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func TestDBAdapter(t *testing.T) {
|
||||
Convey("boltAdapter should work", t, func() {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
@ -28,114 +176,60 @@ func TestBoltAdapter(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
testWorkerIDs := []string{"test_worker1", "test_worker2"}
|
||||
Convey("create worker", func() {
|
||||
for _, id := range testWorkerIDs {
|
||||
w := WorkerStatus{
|
||||
ID: id,
|
||||
Token: "token_" + id,
|
||||
LastOnline: time.Now(),
|
||||
}
|
||||
w, err = boltDB.CreateWorker(w)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
DBAdapterTest(boltDB)
|
||||
})
|
||||
|
||||
Convey("get exists worker", func() {
|
||||
_, err := boltDB.GetWorker(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
Convey("redisAdapter should work", t, func() {
|
||||
mr, err := miniredis.Run()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("list exist worker", func() {
|
||||
ws, err := boltDB.ListWorkers()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ws), ShouldEqual, 2)
|
||||
})
|
||||
addr := fmt.Sprintf("redis://%s", mr.Addr())
|
||||
redisDB, err := makeDBAdapter("redis", addr)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("get inexist worker", func() {
|
||||
_, err := boltDB.GetWorker("invalid workerID")
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
defer func() {
|
||||
// close redisDB
|
||||
err := redisDB.Close()
|
||||
So(err, ShouldBeNil)
|
||||
mr.Close()
|
||||
}()
|
||||
|
||||
Convey("update mirror status", func() {
|
||||
status := []MirrorStatus{
|
||||
MirrorStatus{
|
||||
Name: "arch-sync1",
|
||||
Worker: testWorkerIDs[0],
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "3GB",
|
||||
},
|
||||
MirrorStatus{
|
||||
Name: "arch-sync2",
|
||||
Worker: testWorkerIDs[1],
|
||||
IsMaster: true,
|
||||
Status: Disabled,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
},
|
||||
MirrorStatus{
|
||||
Name: "arch-sync3",
|
||||
Worker: testWorkerIDs[1],
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
},
|
||||
}
|
||||
DBAdapterTest(redisDB)
|
||||
})
|
||||
|
||||
for _, s := range status {
|
||||
_, err := boltDB.UpdateMirrorStatus(s.Worker, s.Name, s)
|
||||
So(err, ShouldBeNil)
|
||||
Convey("badgerAdapter should work", t, func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
}
|
||||
dbType, dbFile := "badger", filepath.Join(tmpDir, "badger.db")
|
||||
badgerDB, err := makeDBAdapter(dbType, dbFile)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("get mirror status", func() {
|
||||
m, err := boltDB.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal(status[0])
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(m)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
defer func() {
|
||||
// close badgerDB
|
||||
err := badgerDB.Close()
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
Convey("list mirror status", func() {
|
||||
ms, err := boltDB.ListMirrorStatus(testWorkerIDs[0])
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(ms)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
DBAdapterTest(badgerDB)
|
||||
})
|
||||
|
||||
Convey("list all mirror status", func() {
|
||||
ms, err := boltDB.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
expectedJSON, err := json.Marshal(status)
|
||||
So(err, ShouldBeNil)
|
||||
actualJSON, err := json.Marshal(ms)
|
||||
So(err, ShouldBeNil)
|
||||
So(string(actualJSON), ShouldEqual, string(expectedJSON))
|
||||
})
|
||||
Convey("leveldbAdapter should work", t, func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("flush disabled jobs", func() {
|
||||
ms, err := boltDB.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ms), ShouldEqual, 3)
|
||||
err = boltDB.FlushDisabledJobs()
|
||||
So(err, ShouldBeNil)
|
||||
ms, err = boltDB.ListAllMirrorStatus()
|
||||
So(err, ShouldBeNil)
|
||||
So(len(ms), ShouldEqual, 2)
|
||||
})
|
||||
dbType, dbFile := "leveldb", filepath.Join(tmpDir, "leveldb.db")
|
||||
leveldbDB, err := makeDBAdapter(dbType, dbFile)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
})
|
||||
defer func() {
|
||||
// close leveldbDB
|
||||
err := leveldbDB.Close()
|
||||
So(err, ShouldBeNil)
|
||||
}()
|
||||
|
||||
DBAdapterTest(leveldbDB)
|
||||
})
|
||||
}
|
||||
|
@ -1,8 +1,10 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@ -22,6 +24,7 @@ type Manager struct {
|
||||
cfg *Config
|
||||
engine *gin.Engine
|
||||
adapter dbAdapter
|
||||
rwmu sync.RWMutex
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
@ -83,10 +86,14 @@ func GetTUNASyncManager(cfg *Config) *Manager {
|
||||
// workerID should be valid in this route group
|
||||
workerValidateGroup := s.engine.Group("/workers", s.workerIDValidator)
|
||||
{
|
||||
// delete specified worker
|
||||
workerValidateGroup.DELETE(":id", s.deleteWorker)
|
||||
// get job list
|
||||
workerValidateGroup.GET(":id/jobs", s.listJobsOfWorker)
|
||||
// post job status
|
||||
workerValidateGroup.POST(":id/jobs/:job", s.updateJobOfWorker)
|
||||
workerValidateGroup.POST(":id/jobs/:job/size", s.updateMirrorSize)
|
||||
workerValidateGroup.POST(":id/schedules", s.updateSchedulesOfWorker)
|
||||
}
|
||||
|
||||
// for tunasynctl to post commands
|
||||
@ -122,9 +129,11 @@ func (s *Manager) Run() {
|
||||
}
|
||||
}
|
||||
|
||||
// listAllJobs repond with all jobs of specified workers
|
||||
// listAllJobs respond with all jobs of specified workers
|
||||
func (s *Manager) listAllJobs(c *gin.Context) {
|
||||
s.rwmu.RLock()
|
||||
mirrorStatusList, err := s.adapter.ListAllMirrorStatus()
|
||||
s.rwmu.RUnlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to list all mirror status: %s",
|
||||
err.Error(),
|
||||
@ -133,11 +142,11 @@ func (s *Manager) listAllJobs(c *gin.Context) {
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
webMirStatusList := []webMirrorStatus{}
|
||||
webMirStatusList := []WebMirrorStatus{}
|
||||
for _, m := range mirrorStatusList {
|
||||
webMirStatusList = append(
|
||||
webMirStatusList,
|
||||
convertMirrorStatus(m),
|
||||
BuildWebMirrorStatus(m),
|
||||
)
|
||||
}
|
||||
c.JSON(http.StatusOK, webMirStatusList)
|
||||
@ -145,7 +154,9 @@ func (s *Manager) listAllJobs(c *gin.Context) {
|
||||
|
||||
// flushDisabledJobs deletes all jobs that marks as deleted
|
||||
func (s *Manager) flushDisabledJobs(c *gin.Context) {
|
||||
s.rwmu.Lock()
|
||||
err := s.adapter.FlushDisabledJobs()
|
||||
s.rwmu.Unlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to flush disabled jobs: %s",
|
||||
err.Error(),
|
||||
@ -157,10 +168,30 @@ func (s *Manager) flushDisabledJobs(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{_infoKey: "flushed"})
|
||||
}
|
||||
|
||||
// listWrokers respond with informations of all the workers
|
||||
// deleteWorker deletes one worker by id
|
||||
func (s *Manager) deleteWorker(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
s.rwmu.Lock()
|
||||
err := s.adapter.DeleteWorker(workerID)
|
||||
s.rwmu.Unlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to delete worker: %s",
|
||||
err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
logger.Noticef("Worker <%s> deleted", workerID)
|
||||
c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"})
|
||||
}
|
||||
|
||||
// listWorkers responds with information of all the workers
|
||||
func (s *Manager) listWorkers(c *gin.Context) {
|
||||
var workerInfos []WorkerStatus
|
||||
s.rwmu.RLock()
|
||||
workers, err := s.adapter.ListWorkers()
|
||||
s.rwmu.RUnlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to list workers: %s",
|
||||
err.Error(),
|
||||
@ -172,8 +203,11 @@ func (s *Manager) listWorkers(c *gin.Context) {
|
||||
for _, w := range workers {
|
||||
workerInfos = append(workerInfos,
|
||||
WorkerStatus{
|
||||
ID: w.ID,
|
||||
LastOnline: w.LastOnline,
|
||||
ID: w.ID,
|
||||
URL: w.URL,
|
||||
Token: "REDACTED",
|
||||
LastOnline: w.LastOnline,
|
||||
LastRegister: w.LastRegister,
|
||||
})
|
||||
}
|
||||
c.JSON(http.StatusOK, workerInfos)
|
||||
@ -184,6 +218,7 @@ func (s *Manager) registerWorker(c *gin.Context) {
|
||||
var _worker WorkerStatus
|
||||
c.BindJSON(&_worker)
|
||||
_worker.LastOnline = time.Now()
|
||||
_worker.LastRegister = time.Now()
|
||||
newWorker, err := s.adapter.CreateWorker(_worker)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to register worker: %s",
|
||||
@ -202,7 +237,9 @@ func (s *Manager) registerWorker(c *gin.Context) {
|
||||
// listJobsOfWorker responds with all the jobs of the specified worker
|
||||
func (s *Manager) listJobsOfWorker(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
s.rwmu.RLock()
|
||||
mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID)
|
||||
s.rwmu.RUnlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to list jobs of worker %s: %s",
|
||||
workerID, err.Error(),
|
||||
@ -220,38 +257,151 @@ func (s *Manager) returnErrJSON(c *gin.Context, code int, err error) {
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
var schedules MirrorSchedules
|
||||
c.BindJSON(&schedules)
|
||||
|
||||
for _, schedule := range schedules.Schedules {
|
||||
mirrorName := schedule.MirrorName
|
||||
if len(mirrorName) == 0 {
|
||||
s.returnErrJSON(
|
||||
c, http.StatusBadRequest,
|
||||
errors.New("mirror Name should not be empty"),
|
||||
)
|
||||
}
|
||||
|
||||
s.rwmu.RLock()
|
||||
s.adapter.RefreshWorker(workerID)
|
||||
curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
|
||||
s.rwmu.RUnlock()
|
||||
if err != nil {
|
||||
logger.Errorf("failed to get job %s of worker %s: %s",
|
||||
mirrorName, workerID, err.Error(),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if curStatus.Scheduled == schedule.NextSchedule {
|
||||
// no changes, skip update
|
||||
continue
|
||||
}
|
||||
|
||||
curStatus.Scheduled = schedule.NextSchedule
|
||||
s.rwmu.Lock()
|
||||
_, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus)
|
||||
s.rwmu.Unlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to update job %s of worker %s: %s",
|
||||
mirrorName, workerID, err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
type empty struct{}
|
||||
c.JSON(http.StatusOK, empty{})
|
||||
}
|
||||
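A hedged sketch of the request body this handler expects, built with the MirrorSchedules / MirrorSchedule types exactly as the tests further down construct them; "worker1" and "debian" are placeholder names and the JSON field names depend on struct tags not shown in this hunk. PostJSON is the helper used elsewhere in this diff.

	sched := MirrorSchedules{
		Schedules: []MirrorSchedule{
			{MirrorName: "debian", NextSchedule: time.Now().Add(30 * time.Minute)},
		},
	}
	resp, err := PostJSON(baseURL+"/workers/worker1/schedules", sched, nil)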
|
||||
func (s *Manager) updateJobOfWorker(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
var status MirrorStatus
|
||||
c.BindJSON(&status)
|
||||
mirrorName := status.Name
|
||||
if len(mirrorName) == 0 {
|
||||
s.returnErrJSON(
|
||||
c, http.StatusBadRequest,
|
||||
errors.New("mirror Name should not be empty"),
|
||||
)
|
||||
}
|
||||
|
||||
s.rwmu.RLock()
|
||||
s.adapter.RefreshWorker(workerID)
|
||||
curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
|
||||
s.rwmu.RUnlock()
|
||||
|
||||
curTime := time.Now()
|
||||
|
||||
if status.Status == PreSyncing && curStatus.Status != PreSyncing {
|
||||
status.LastStarted = curTime
|
||||
} else {
|
||||
status.LastStarted = curStatus.LastStarted
|
||||
}
|
||||
// Only successful syncing needs last_update
|
||||
if status.Status == Success {
|
||||
status.LastUpdate = time.Now()
|
||||
status.LastUpdate = curTime
|
||||
} else {
|
||||
status.LastUpdate = curStatus.LastUpdate
|
||||
}
|
||||
if status.Status == Success || status.Status == Failed {
|
||||
status.LastEnded = curTime
|
||||
} else {
|
||||
status.LastEnded = curStatus.LastEnded
|
||||
}
|
||||
|
||||
// Only message with meaningful size updates the mirror size
|
||||
if len(curStatus.Size) > 0 && curStatus.Size != "unknown" {
|
||||
if len(status.Size) == 0 || status.Size == "unknown" {
|
||||
status.Size = curStatus.Size
|
||||
}
|
||||
}
|
||||
|
||||
// for logging
|
||||
switch status.Status {
|
||||
case Success:
|
||||
logger.Noticef("Job [%s] @<%s> success", status.Name, status.Worker)
|
||||
case Failed:
|
||||
logger.Warningf("Job [%s] @<%s> failed", status.Name, status.Worker)
|
||||
case Syncing:
|
||||
logger.Noticef("Job [%s] @<%s> starts syncing", status.Name, status.Worker)
|
||||
case Disabled:
|
||||
logger.Noticef("Job [%s] @<%s> disabled", status.Name, status.Worker)
|
||||
case Paused:
|
||||
logger.Noticef("Job [%s] @<%s> paused", status.Name, status.Worker)
|
||||
default:
|
||||
logger.Infof("Job [%s] @<%s> status: %s", status.Name, status.Worker, status.Status)
|
||||
logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
|
||||
}
|
||||
|
||||
s.rwmu.Lock()
|
||||
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
|
||||
s.rwmu.Unlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to update job %s of worker %s: %s",
|
||||
mirrorName, workerID, err.Error(),
|
||||
)
|
||||
c.Error(err)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, newStatus)
|
||||
}
|
||||
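To make the timestamp rules above concrete, a hedged sketch of the sequence of posts a worker typically makes and which fields the handler rewrites; the sequence and the placeholder names ("debian", "worker1", jobURL for the /workers/:id/jobs/:job route) are assumptions, the field behaviour is as coded above:

	status := MirrorStatus{Name: "debian", Worker: "worker1", Status: PreSyncing}
	PostJSON(jobURL, status, nil) // entering PreSyncing from another state: LastStarted = now
	status.Status = Syncing
	PostJSON(jobURL, status, nil) // LastStarted/LastUpdate/LastEnded keep their stored values
	status.Status = Success
	PostJSON(jobURL, status, nil) // LastUpdate = now and LastEnded = now; Failed would set only LastEnded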
|
||||
func (s *Manager) updateMirrorSize(c *gin.Context) {
|
||||
workerID := c.Param("id")
|
||||
type SizeMsg struct {
|
||||
Name string `json:"name"`
|
||||
Size string `json:"size"`
|
||||
}
|
||||
var msg SizeMsg
|
||||
c.BindJSON(&msg)
|
||||
|
||||
mirrorName := msg.Name
|
||||
s.rwmu.RLock()
|
||||
s.adapter.RefreshWorker(workerID)
|
||||
status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
|
||||
s.rwmu.RUnlock()
|
||||
if err != nil {
|
||||
logger.Errorf(
|
||||
"Failed to get status of mirror %s @<%s>: %s",
|
||||
mirrorName, workerID, err.Error(),
|
||||
)
|
||||
s.returnErrJSON(c, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Only message with meaningful size updates the mirror size
|
||||
if len(msg.Size) > 0 && msg.Size != "unknown" {
|
||||
status.Size = msg.Size
|
||||
}
|
||||
|
||||
logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
|
||||
|
||||
s.rwmu.Lock()
|
||||
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
|
||||
s.rwmu.Unlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to update job %s of worker %s: %s",
|
||||
mirrorName, workerID, err.Error(),
|
||||
@ -274,7 +424,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
s.rwmu.RLock()
|
||||
w, err := s.adapter.GetWorker(workerID)
|
||||
s.rwmu.RUnlock()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("worker %s is not registered yet", workerID)
|
||||
s.returnErrJSON(c, http.StatusBadRequest, err)
|
||||
@ -286,11 +438,14 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
|
||||
Cmd: clientCmd.Cmd,
|
||||
MirrorID: clientCmd.MirrorID,
|
||||
Args: clientCmd.Args,
|
||||
Options: clientCmd.Options,
|
||||
}
|
||||
|
||||
// update job status, even if the job did not disable successfully,
|
||||
// this status should be set as disabled
|
||||
s.rwmu.RLock()
|
||||
curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID)
|
||||
s.rwmu.RUnlock()
|
||||
changed := false
|
||||
switch clientCmd.Cmd {
|
||||
case CmdDisable:
|
||||
@ -301,7 +456,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
|
||||
changed = true
|
||||
}
|
||||
if changed {
|
||||
s.rwmu.Lock()
|
||||
s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
|
||||
s.rwmu.Unlock()
|
||||
}
|
||||
|
||||
logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)
|
||||
|
@ -3,10 +3,12 @@ package manager
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -21,29 +23,32 @@ const (
|
||||
)
|
||||
|
||||
func TestHTTPServer(t *testing.T) {
|
||||
var listenPort = 5000
|
||||
Convey("HTTP server should work", t, func(ctx C) {
|
||||
listenPort++
|
||||
port := listenPort
|
||||
addr := "127.0.0.1"
|
||||
baseURL := fmt.Sprintf("http://%s:%d", addr, port)
|
||||
InitLogger(true, true, false)
|
||||
s := GetTUNASyncManager(&Config{Debug: false})
|
||||
s := GetTUNASyncManager(&Config{Debug: true})
|
||||
s.cfg.Server.Addr = addr
|
||||
s.cfg.Server.Port = port
|
||||
So(s, ShouldNotBeNil)
|
||||
s.setDBAdapter(&mockDBAdapter{
|
||||
workerStore: map[string]WorkerStatus{
|
||||
_magicBadWorkerID: WorkerStatus{
|
||||
_magicBadWorkerID: {
|
||||
ID: _magicBadWorkerID,
|
||||
}},
|
||||
statusStore: make(map[string]MirrorStatus),
|
||||
})
|
||||
port := rand.Intn(10000) + 20000
|
||||
baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
|
||||
go func() {
|
||||
s.engine.Run(fmt.Sprintf("127.0.0.1:%d", port))
|
||||
}()
|
||||
time.Sleep(50 * time.Microsecond)
|
||||
go s.Run()
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
resp, err := http.Get(baseURL + "/ping")
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
So(err, ShouldBeNil)
|
||||
var p map[string]string
|
||||
err = json.Unmarshal(body, &p)
|
||||
@ -61,6 +66,34 @@ func TestHTTPServer(t *testing.T) {
|
||||
So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail"))
|
||||
})
|
||||
|
||||
Convey("when register multiple workers", func(ctx C) {
|
||||
N := 10
|
||||
var cnt uint32
|
||||
for i := 0; i < N; i++ {
|
||||
go func(id int) {
|
||||
w := WorkerStatus{
|
||||
ID: fmt.Sprintf("worker%d", id),
|
||||
}
|
||||
resp, err := PostJSON(baseURL+"/workers", w, nil)
|
||||
ctx.So(err, ShouldBeNil)
|
||||
ctx.So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
atomic.AddUint32(&cnt, 1)
|
||||
}(i)
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
So(cnt, ShouldEqual, N)
|
||||
|
||||
Convey("list all workers", func(ctx C) {
|
||||
resp, err := http.Get(baseURL + "/workers")
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
var actualResponseObj []WorkerStatus
|
||||
err = json.NewDecoder(resp.Body).Decode(&actualResponseObj)
|
||||
So(err, ShouldBeNil)
|
||||
So(len(actualResponseObj), ShouldEqual, N+1)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("when register a worker", func(ctx C) {
|
||||
w := WorkerStatus{
|
||||
ID: "test_worker1",
|
||||
@ -79,7 +112,34 @@ func TestHTTPServer(t *testing.T) {
|
||||
So(len(actualResponseObj), ShouldEqual, 2)
|
||||
})
|
||||
|
||||
Convey("flush disabled jobs", func(ctx C) {
|
||||
Convey("delete an existent worker", func(ctx C) {
|
||||
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, w.ID), nil)
|
||||
So(err, ShouldBeNil)
|
||||
clt := &http.Client{}
|
||||
resp, err := clt.Do(req)
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
res := map[string]string{}
|
||||
err = json.NewDecoder(resp.Body).Decode(&res)
|
||||
So(err, ShouldBeNil)
|
||||
So(res[_infoKey], ShouldEqual, "deleted")
|
||||
})
|
||||
|
||||
Convey("delete non-existent worker", func(ctx C) {
|
||||
invalidWorker := "test_worker233"
|
||||
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, invalidWorker), nil)
|
||||
So(err, ShouldBeNil)
|
||||
clt := &http.Client{}
|
||||
resp, err := clt.Do(req)
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
res := map[string]string{}
|
||||
err = json.NewDecoder(resp.Body).Decode(&res)
|
||||
So(err, ShouldBeNil)
|
||||
So(res[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
|
||||
})
|
||||
|
||||
Convey("flush disabled jobs", func(ctx C) {
|
||||
req, err := http.NewRequest("DELETE", baseURL+"/jobs/disabled", nil)
|
||||
So(err, ShouldBeNil)
|
||||
clt := &http.Client{}
|
||||
@ -99,11 +159,11 @@ func TestHTTPServer(t *testing.T) {
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "3GB",
|
||||
Size: "unknown",
|
||||
}
|
||||
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
|
||||
defer resp.Body.Close()
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("list mirror status of an existed worker", func(ctx C) {
|
||||
@ -120,12 +180,44 @@ func TestHTTPServer(t *testing.T) {
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, status.Size)
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
|
||||
So(time.Since(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
|
||||
So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet
|
||||
So(time.Since(m.LastEnded), ShouldBeLessThan, 1*time.Second)
|
||||
|
||||
})
|
||||
|
||||
// start syncing
|
||||
status.Status = PreSyncing
|
||||
time.Sleep(1 * time.Second)
|
||||
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("update mirror status to PreSync - starting sync", func(ctx C) {
|
||||
var ms []MirrorStatus
|
||||
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
|
||||
m := ms[0]
|
||||
So(m.Name, ShouldEqual, status.Name)
|
||||
So(m.Worker, ShouldEqual, status.Worker)
|
||||
So(m.Status, ShouldEqual, status.Status)
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, status.Size)
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Since(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
|
||||
So(time.Since(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second)
|
||||
So(time.Since(m.LastStarted), ShouldBeLessThan, 2*time.Second)
|
||||
So(time.Since(m.LastEnded), ShouldBeLessThan, 3*time.Second)
|
||||
So(time.Since(m.LastEnded), ShouldBeGreaterThan, 1*time.Second)
|
||||
|
||||
})
|
||||
|
||||
Convey("list all job status of all workers", func(ctx C) {
|
||||
var ms []webMirrorStatus
|
||||
var ms []WebMirrorStatus
|
||||
resp, err := GetJSON(baseURL+"/jobs", &ms, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
@ -136,21 +228,109 @@ func TestHTTPServer(t *testing.T) {
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, status.Size)
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
|
||||
So(time.Since(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second)
|
||||
So(time.Since(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second)
|
||||
So(time.Since(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second)
|
||||
|
||||
})
|
||||
|
||||
Convey("Update size of a valid mirror", func(ctx C) {
|
||||
msg := struct {
|
||||
Name string `json:"name"`
|
||||
Size string `json:"size"`
|
||||
}{status.Name, "5GB"}
|
||||
|
||||
url := fmt.Sprintf("%s/workers/%s/jobs/%s/size", baseURL, status.Worker, status.Name)
|
||||
resp, err := PostJSON(url, msg, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("Get new size of a mirror", func(ctx C) {
|
||||
var ms []MirrorStatus
|
||||
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
|
||||
m := ms[0]
|
||||
So(m.Name, ShouldEqual, status.Name)
|
||||
So(m.Worker, ShouldEqual, status.Worker)
|
||||
So(m.Status, ShouldEqual, status.Status)
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, "5GB")
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Since(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
|
||||
So(time.Since(m.LastStarted), ShouldBeLessThan, 2*time.Second)
|
||||
So(time.Since(m.LastEnded), ShouldBeLessThan, 3*time.Second)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Update schedule of valid mirrors", func(ctx C) {
|
||||
msg := MirrorSchedules{
|
||||
Schedules: []MirrorSchedule{
|
||||
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
|
||||
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
|
||||
},
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/workers/%s/schedules", baseURL, status.Worker)
|
||||
resp, err := PostJSON(url, msg, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
})
|
||||
|
||||
Convey("Update size of an invalid mirror", func(ctx C) {
|
||||
msg := struct {
|
||||
Name string `json:"name"`
|
||||
Size string `json:"size"`
|
||||
}{"Invalid mirror", "5GB"}
|
||||
|
||||
url := fmt.Sprintf("%s/workers/%s/jobs/%s/size", baseURL, status.Worker, status.Name)
|
||||
resp, err := PostJSON(url, msg, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusInternalServerError)
|
||||
})
|
||||
|
||||
// what if status changed to failed
|
||||
status.Status = Failed
|
||||
time.Sleep(3 * time.Second)
|
||||
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("What if syncing job failed", func(ctx C) {
|
||||
var ms []MirrorStatus
|
||||
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
|
||||
m := ms[0]
|
||||
So(m.Name, ShouldEqual, status.Name)
|
||||
So(m.Worker, ShouldEqual, status.Worker)
|
||||
So(m.Status, ShouldEqual, status.Status)
|
||||
So(m.Upstream, ShouldEqual, status.Upstream)
|
||||
So(m.Size, ShouldEqual, status.Size)
|
||||
So(m.IsMaster, ShouldEqual, status.IsMaster)
|
||||
So(time.Since(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second)
|
||||
So(time.Since(m.LastStarted), ShouldBeGreaterThan, 3*time.Second)
|
||||
So(time.Since(m.LastEnded), ShouldBeLessThan, 1*time.Second)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("update mirror status of an inexisted worker", func(ctx C) {
|
||||
invalidWorker := "test_worker2"
|
||||
status := MirrorStatus{
|
||||
Name: "arch-sync2",
|
||||
Worker: invalidWorker,
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
Name: "arch-sync2",
|
||||
Worker: invalidWorker,
|
||||
IsMaster: true,
|
||||
Status: Success,
|
||||
LastUpdate: time.Now(),
|
||||
LastStarted: time.Now(),
|
||||
LastEnded: time.Now(),
|
||||
Upstream: "mirrors.tuna.tsinghua.edu.cn",
|
||||
Size: "4GB",
|
||||
}
|
||||
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s",
|
||||
baseURL, status.Worker, status.Name), status, nil)
|
||||
@ -162,6 +342,24 @@ func TestHTTPServer(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
|
||||
})
|
||||
Convey("update schedule of an non-existent worker", func(ctx C) {
|
||||
invalidWorker := "test_worker2"
|
||||
sch := MirrorSchedules{
|
||||
Schedules: []MirrorSchedule{
|
||||
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
|
||||
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
|
||||
},
|
||||
}
|
||||
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules",
|
||||
baseURL, invalidWorker), sch, nil)
|
||||
So(err, ShouldBeNil)
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
|
||||
defer resp.Body.Close()
|
||||
var msg map[string]string
|
||||
err = json.NewDecoder(resp.Body).Decode(&msg)
|
||||
So(err, ShouldBeNil)
|
||||
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
|
||||
})
|
||||
Convey("handle client command", func(ctx C) {
|
||||
cmdChan := make(chan WorkerCmd, 1)
|
||||
workerServer := makeMockWorkerServer(cmdChan)
|
||||
@ -180,11 +378,11 @@ func TestHTTPServer(t *testing.T) {
|
||||
// run the mock worker server
|
||||
workerServer.Run(bindAddress)
|
||||
}()
|
||||
time.Sleep(50 * time.Microsecond)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// verify the worker mock server is running
|
||||
workerResp, err := http.Get(workerBaseURL + "/ping")
|
||||
defer workerResp.Body.Close()
|
||||
So(err, ShouldBeNil)
|
||||
defer workerResp.Body.Close()
|
||||
So(workerResp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
|
||||
Convey("when client send wrong cmd", func(ctx C) {
|
||||
@ -194,8 +392,8 @@ func TestHTTPServer(t *testing.T) {
|
||||
WorkerID: "not_exist_worker",
|
||||
}
|
||||
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
|
||||
defer resp.Body.Close()
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
|
||||
})
|
||||
|
||||
@ -207,9 +405,8 @@ func TestHTTPServer(t *testing.T) {
|
||||
}
|
||||
|
||||
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
|
||||
defer resp.Body.Close()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
defer resp.Body.Close()
|
||||
So(resp.StatusCode, ShouldEqual, http.StatusOK)
|
||||
time.Sleep(50 * time.Microsecond)
|
||||
select {
|
||||
@ -228,6 +425,8 @@ func TestHTTPServer(t *testing.T) {
|
||||
type mockDBAdapter struct {
|
||||
workerStore map[string]WorkerStatus
|
||||
statusStore map[string]MirrorStatus
|
||||
workerLock sync.RWMutex
|
||||
statusLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) Init() error {
|
||||
@ -235,35 +434,60 @@ func (b *mockDBAdapter) Init() error {
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) ListWorkers() ([]WorkerStatus, error) {
|
||||
b.workerLock.RLock()
|
||||
workers := make([]WorkerStatus, len(b.workerStore))
|
||||
idx := 0
|
||||
for _, w := range b.workerStore {
|
||||
workers[idx] = w
|
||||
idx++
|
||||
}
|
||||
b.workerLock.RUnlock()
|
||||
return workers, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
|
||||
b.workerLock.RLock()
|
||||
defer b.workerLock.RUnlock()
|
||||
w, ok := b.workerStore[workerID]
|
||||
|
||||
if !ok {
|
||||
return WorkerStatus{}, fmt.Errorf("invalid workerId")
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) DeleteWorker(workerID string) error {
|
||||
b.workerLock.Lock()
|
||||
delete(b.workerStore, workerID)
|
||||
b.workerLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
|
||||
// _, ok := b.workerStore[w.ID]
|
||||
// if ok {
|
||||
// return workerStatus{}, fmt.Errorf("duplicate worker name")
|
||||
// }
|
||||
b.workerLock.Lock()
|
||||
b.workerStore[w.ID] = w
|
||||
b.workerLock.Unlock()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
|
||||
w, err = b.GetWorker(workerID)
|
||||
if err == nil {
|
||||
w.LastOnline = time.Now()
|
||||
w, err = b.CreateWorker(w)
|
||||
}
|
||||
return w, err
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error) {
|
||||
id := mirrorID + "/" + workerID
|
||||
b.statusLock.RLock()
|
||||
status, ok := b.statusStore[id]
|
||||
b.statusLock.RUnlock()
|
||||
if !ok {
|
||||
return MirrorStatus{}, fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
|
||||
}
|
||||
@ -277,7 +501,9 @@ func (b *mockDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status Mir
|
||||
// }
|
||||
|
||||
id := mirrorID + "/" + workerID
|
||||
b.statusLock.Lock()
|
||||
b.statusStore[id] = status
|
||||
b.statusLock.Unlock()
|
||||
return status, nil
|
||||
}
|
||||
|
||||
@ -287,19 +513,23 @@ func (b *mockDBAdapter) ListMirrorStatus(workerID string) ([]MirrorStatus, error
|
||||
if workerID == _magicBadWorkerID {
|
||||
return []MirrorStatus{}, fmt.Errorf("database fail")
|
||||
}
|
||||
b.statusLock.RLock()
|
||||
for k, v := range b.statusStore {
|
||||
if wID := strings.Split(k, "/")[1]; wID == workerID {
|
||||
mirrorStatusList = append(mirrorStatusList, v)
|
||||
}
|
||||
}
|
||||
b.statusLock.RUnlock()
|
||||
return mirrorStatusList, nil
|
||||
}
|
||||
|
||||
func (b *mockDBAdapter) ListAllMirrorStatus() ([]MirrorStatus, error) {
|
||||
var mirrorStatusList []MirrorStatus
|
||||
b.statusLock.RLock()
|
||||
for _, v := range b.statusStore {
|
||||
mirrorStatusList = append(mirrorStatusList, v)
|
||||
}
|
||||
b.statusLock.RUnlock()
|
||||
return mirrorStatusList, nil
|
||||
}
|
||||
|
||||
|
@ -1,62 +0,0 @@
package manager

import (
	"encoding/json"
	"strconv"
	"time"

	. "github.com/tuna/tunasync/internal"
)

type textTime struct {
	time.Time
}

func (t textTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
	s := string(b)
	t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
	*t = textTime{t2}
	return err
}

type stampTime struct {
	time.Time
}

func (t stampTime) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
	ts, err := strconv.Atoi(string(b))
	if err != nil {
		return err
	}
	*t = stampTime{time.Unix(int64(ts), 0)}
	return err
}

// webMirrorStatus is the mirror status to be shown in the web page
type webMirrorStatus struct {
	Name         string     `json:"name"`
	IsMaster     bool       `json:"is_master"`
	Status       SyncStatus `json:"status"`
	LastUpdate   textTime   `json:"last_update"`
	LastUpdateTs stampTime  `json:"last_update_ts"`
	Upstream     string     `json:"upstream"`
	Size         string     `json:"size"` // approximate size
}

func convertMirrorStatus(m MirrorStatus) webMirrorStatus {
	return webMirrorStatus{
		Name:         m.Name,
		IsMaster:     m.IsMaster,
		Status:       m.Status,
		LastUpdate:   textTime{m.LastUpdate},
		LastUpdateTs: stampTime{m.LastUpdate},
		Upstream:     m.Upstream,
		Size:         m.Size,
	}
}
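A hedged usage sketch of the two wrapper types being removed above; the format string and the Unix-seconds encoding are exactly those in the MarshalJSON methods, while the sample instant is the one used in the deleted test below:

	loc, _ := time.LoadLocation("Asia/Tokyo")
	ts := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
	b1, _ := json.Marshal(textTime{ts})  // "2016-04-16 23:08:10 +0900"
	b2, _ := json.Marshal(stampTime{ts}) // the Unix timestamp of the same instant, as a bare number
	_, _ = b1, b2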
@ -1,44 +0,0 @@
package manager

import (
	"encoding/json"
	"testing"
	"time"

	tunasync "github.com/tuna/tunasync/internal"

	. "github.com/smartystreets/goconvey/convey"
)

func TestStatus(t *testing.T) {
	Convey("status json ser-de should work", t, func() {
		tz := "Asia/Tokyo"
		loc, err := time.LoadLocation(tz)
		So(err, ShouldBeNil)
		t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
		m := webMirrorStatus{
			Name:         "tunalinux",
			Status:       tunasync.Success,
			LastUpdate:   textTime{t},
			LastUpdateTs: stampTime{t},
			Size:         "5GB",
			Upstream:     "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
		}

		b, err := json.Marshal(m)
		So(err, ShouldBeNil)
		//fmt.Println(string(b))
		var m2 webMirrorStatus
		err = json.Unmarshal(b, &m2)
		So(err, ShouldBeNil)
		// fmt.Printf("%#v", m2)
		So(m2.Name, ShouldEqual, m.Name)
		So(m2.Status, ShouldEqual, m.Status)
		So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
		So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
		So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
		So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
		So(m2.Size, ShouldEqual, m.Size)
		So(m2.Upstream, ShouldEqual, m.Upstream)
	})
}
@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

package main

@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

package main

@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

package main

@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

package main
@ -15,15 +15,20 @@ type baseProvider struct {
|
||||
ctx *Context
|
||||
name string
|
||||
interval time.Duration
|
||||
retry int
|
||||
timeout time.Duration
|
||||
isMaster bool
|
||||
|
||||
cmd *cmdJob
|
||||
isRunning atomic.Value
|
||||
|
||||
logFile *os.File
|
||||
cmd *cmdJob
|
||||
logFileFd *os.File
|
||||
isRunning atomic.Value
|
||||
successExitCodes []int
|
||||
|
||||
cgroup *cgroupHook
|
||||
hooks []jobHook
|
||||
zfs *zfsHook
|
||||
docker *dockerHook
|
||||
|
||||
hooks []jobHook
|
||||
}
|
||||
|
||||
func (p *baseProvider) Name() string {
|
||||
@ -49,6 +54,14 @@ func (p *baseProvider) Interval() time.Duration {
|
||||
return p.interval
|
||||
}
|
||||
|
||||
func (p *baseProvider) Retry() int {
|
||||
return p.retry
|
||||
}
|
||||
|
||||
func (p *baseProvider) Timeout() time.Duration {
|
||||
return p.timeout
|
||||
}
|
||||
|
||||
func (p *baseProvider) IsMaster() bool {
|
||||
return p.isMaster
|
||||
}
|
||||
@ -77,12 +90,17 @@ func (p *baseProvider) LogFile() string {
|
||||
return s
|
||||
}
|
||||
}
|
||||
panic("log dir is impossible to be unavailable")
|
||||
panic("log file is impossible to be unavailable")
|
||||
}
|
||||
|
||||
func (p *baseProvider) AddHook(hook jobHook) {
|
||||
if cg, ok := hook.(*cgroupHook); ok {
|
||||
p.cgroup = cg
|
||||
switch v := hook.(type) {
|
||||
case *cgroupHook:
|
||||
p.cgroup = v
|
||||
case *zfsHook:
|
||||
p.zfs = v
|
||||
case *dockerHook:
|
||||
p.docker = v
|
||||
}
|
||||
p.hooks = append(p.hooks, hook)
|
||||
}
|
||||
@ -95,24 +113,42 @@ func (p *baseProvider) Cgroup() *cgroupHook {
|
||||
return p.cgroup
|
||||
}
|
||||
|
||||
func (p *baseProvider) prepareLogFile() error {
|
||||
func (p *baseProvider) ZFS() *zfsHook {
|
||||
return p.zfs
|
||||
}
|
||||
|
||||
func (p *baseProvider) Docker() *dockerHook {
|
||||
return p.docker
|
||||
}
|
||||
|
||||
func (p *baseProvider) prepareLogFile(append bool) error {
|
||||
if p.LogFile() == "/dev/null" {
|
||||
p.cmd.SetLogFile(nil)
|
||||
return nil
|
||||
}
|
||||
if p.logFile == nil {
|
||||
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
|
||||
return err
|
||||
}
|
||||
p.logFile = logFile
|
||||
appendMode := 0
|
||||
if append {
|
||||
appendMode = os.O_APPEND
|
||||
}
|
||||
p.cmd.SetLogFile(p.logFile)
|
||||
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE|appendMode, 0644)
|
||||
if err != nil {
|
||||
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
|
||||
return err
|
||||
}
|
||||
p.logFileFd = logFile
|
||||
p.cmd.SetLogFile(logFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *baseProvider) Run() error {
|
||||
func (p *baseProvider) closeLogFile() (err error) {
|
||||
if p.logFileFd != nil {
|
||||
err = p.logFileFd.Close()
|
||||
p.logFileFd = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *baseProvider) Run(started chan empty) error {
|
||||
panic("Not Implemented")
|
||||
}
|
||||
|
||||
@ -127,32 +163,42 @@ func (p *baseProvider) IsRunning() bool {
|
||||
|
||||
func (p *baseProvider) Wait() error {
|
||||
defer func() {
|
||||
p.Lock()
|
||||
logger.Debugf("set isRunning to false: %s", p.Name())
|
||||
p.isRunning.Store(false)
|
||||
if p.logFile != nil {
|
||||
p.logFile.Close()
|
||||
p.logFile = nil
|
||||
}
|
||||
p.Unlock()
|
||||
}()
|
||||
logger.Debugf("calling Wait: %s", p.Name())
|
||||
return p.cmd.Wait()
|
||||
}
|
||||
|
||||
func (p *baseProvider) Terminate() error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
logger.Debugf("terminating provider: %s", p.Name())
|
||||
if !p.IsRunning() {
|
||||
logger.Warningf("Terminate() called while IsRunning is false: %s", p.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
p.Lock()
|
||||
if p.logFile != nil {
|
||||
p.logFile.Close()
|
||||
p.logFile = nil
|
||||
}
|
||||
p.Unlock()
|
||||
|
||||
err := p.cmd.Terminate()
|
||||
p.isRunning.Store(false)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *baseProvider) DataSize() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *baseProvider) SetSuccessExitCodes(codes []int) {
|
||||
if codes == nil {
|
||||
p.successExitCodes = []int{}
|
||||
} else {
|
||||
p.successExitCodes = codes
|
||||
}
|
||||
}
|
||||
|
||||
func (p *baseProvider) GetSuccessExitCodes() []int {
|
||||
if p.successExitCodes == nil {
|
||||
return []int{}
|
||||
}
|
||||
return p.successExitCodes
|
||||
}
|
||||
|
worker/btrfs_snapshot_hook.go (new file, 93 lines)
@ -0,0 +1,93 @@
//go:build linux
// +build linux

package worker

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/dennwc/btrfs"
)

type btrfsSnapshotHook struct {
	provider           mirrorProvider
	mirrorSnapshotPath string
}

// the user who runs the jobs (typically `tunasync`) should be granted the permission to run btrfs commands
// TODO: check if the filesystem is Btrfs
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
	mirrorSnapshotPath := mirror.SnapshotPath
	if mirrorSnapshotPath == "" {
		mirrorSnapshotPath = filepath.Join(snapshotPath, provider.Name())
	}
	return &btrfsSnapshotHook{
		provider:           provider,
		mirrorSnapshotPath: mirrorSnapshotPath,
	}
}

// check if path `snapshotPath/providerName` exists
// Case 1: Not exists => create a new subvolume
// Case 2: Exists as a subvolume => nothing to do
// Case 3: Exists as a directory => error detected
func (h *btrfsSnapshotHook) preJob() error {
	path := h.provider.WorkingDir()
	if _, err := os.Stat(path); os.IsNotExist(err) {
		// create subvolume
		err := btrfs.CreateSubVolume(path)
		if err != nil {
			logger.Errorf("failed to create Btrfs subvolume %s: %s", path, err.Error())
			return err
		}
		logger.Noticef("created new Btrfs subvolume %s", path)
	} else {
		if is, err := btrfs.IsSubVolume(path); err != nil {
			return err
		} else if !is {
			return fmt.Errorf("path %s exists but isn't a Btrfs subvolume", path)
		}
	}
	return nil
}

func (h *btrfsSnapshotHook) preExec() error {
	return nil
}

func (h *btrfsSnapshotHook) postExec() error {
	return nil
}

// delete old snapshot if exists, then create a new snapshot
func (h *btrfsSnapshotHook) postSuccess() error {
	if _, err := os.Stat(h.mirrorSnapshotPath); !os.IsNotExist(err) {
		isSubVol, err := btrfs.IsSubVolume(h.mirrorSnapshotPath)
		if err != nil {
			return err
		} else if !isSubVol {
			return fmt.Errorf("path %s exists and isn't a Btrfs snapshot", h.mirrorSnapshotPath)
		}
		// is old snapshot => delete it
		if err := btrfs.DeleteSubVolume(h.mirrorSnapshotPath); err != nil {
			logger.Errorf("failed to delete old Btrfs snapshot %s", h.mirrorSnapshotPath)
			return err
		}
		logger.Noticef("deleted old snapshot %s", h.mirrorSnapshotPath)
	}
	// create a new writable snapshot
	// (the snapshot is writable so that it can be deleted easily)
	if err := btrfs.SnapshotSubVolume(h.provider.WorkingDir(), h.mirrorSnapshotPath, false); err != nil {
		logger.Errorf("failed to create new Btrfs snapshot %s", h.mirrorSnapshotPath)
		return err
	}
	logger.Noticef("created new Btrfs snapshot %s", h.mirrorSnapshotPath)
	return nil
}

// keep the old snapshot => nothing to do
func (h *btrfsSnapshotHook) postFail() error {
	return nil
}
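A hedged sketch of how this hook might be wired into a provider, reusing AddHook from base_provider.go above; the snapshot path and the mirrorConfig literal are placeholders, and it is assumed here that btrfsSnapshotHook satisfies the jobHook interface that AddHook expects.

	hook := newBtrfsSnapshotHook(provider, "/srv/snapshots", mirrorConfig{SnapshotPath: ""})
	provider.AddHook(hook)
	// preJob ensures the working dir is a Btrfs subvolume before the sync starts;
	// postSuccess replaces the previous snapshot with a fresh one after a successful sync.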
worker/btrfs_snapshot_hook_nolinux.go (new file, 31 lines)
@ -0,0 +1,31 @@
//go:build !linux
// +build !linux

package worker

type btrfsSnapshotHook struct {
}

func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
	return &btrfsSnapshotHook{}
}

func (h *btrfsSnapshotHook) postExec() error {
	return nil
}

func (h *btrfsSnapshotHook) postFail() error {
	return nil
}

func (h *btrfsSnapshotHook) postSuccess() error {
	return nil
}

func (h *btrfsSnapshotHook) preExec() error {
	return nil
}

func (h *btrfsSnapshotHook) preJob() error {
	return nil
}
worker/cgroup.go (367 lines)
@ -1,65 +1,297 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/codeskyblue/go-sh"
|
||||
cgroups "github.com/containerd/cgroups/v3"
|
||||
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
|
||||
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
|
||||
"github.com/moby/sys/reexec"
|
||||
contspecs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
var cgSubsystem = "cpu"
|
||||
|
||||
type cgroupHook struct {
|
||||
emptyHook
|
||||
provider mirrorProvider
|
||||
basePath string
|
||||
baseGroup string
|
||||
created bool
|
||||
cgCfg cgroupConfig
|
||||
memLimit MemBytes
|
||||
cgMgrV1 cgv1.Cgroup
|
||||
cgMgrV2 *cgv2.Manager
|
||||
}
|
||||
|
||||
func initCgroup(basePath string) {
|
||||
if _, err := os.Stat(filepath.Join(basePath, "memory")); err == nil {
|
||||
cgSubsystem = "memory"
|
||||
return
|
||||
}
|
||||
logger.Warning("Memory subsystem of cgroup not enabled, fallback to cpu")
|
||||
type execCmd string
|
||||
|
||||
const (
|
||||
cmdCont execCmd = "cont"
|
||||
cmdAbrt execCmd = "abrt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
reexec.Register("tunasync-exec", waitExec)
|
||||
}
|
||||
|
||||
func newCgroupHook(p mirrorProvider, basePath, baseGroup string) *cgroupHook {
|
||||
if basePath == "" {
|
||||
basePath = "/sys/fs/cgroup"
|
||||
func waitExec() {
|
||||
binary, err := exec.LookPath(os.Args[1])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if baseGroup == "" {
|
||||
baseGroup = "tunasync"
|
||||
|
||||
pipe := os.NewFile(3, "pipe")
|
||||
if pipe != nil {
|
||||
if _, err := pipe.Stat(); err == nil {
|
||||
cmdBytes, err := io.ReadAll(pipe)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := pipe.Close(); err != nil {
|
||||
}
|
||||
cmd := execCmd(string(cmdBytes))
|
||||
switch cmd {
|
||||
case cmdAbrt:
|
||||
fallthrough
|
||||
default:
|
||||
panic("Exited on request")
|
||||
case cmdCont:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
args := os.Args[1:]
|
||||
env := os.Environ()
|
||||
if err := syscall.Exec(binary, args, env); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
panic("Exec failed.")
|
||||
}
|
||||
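For context, a hedged sketch of the other side of this protocol: how a caller could launch the registered "tunasync-exec" helper and feed the cont/abrt command over fd 3. The provider code that actually does this is not part of this hunk, so the wiring below is an assumption built only on reexec.Command (from github.com/moby/sys/reexec), os.Pipe and exec.Cmd.ExtraFiles.

	cmd := reexec.Command("tunasync-exec", "rsync", "-a", "src/", "dst/") // argv[0] selects waitExec
	r, w, _ := os.Pipe()
	cmd.ExtraFiles = []*os.File{r} // the child sees this as fd 3, the "pipe" read in waitExec
	_ = cmd.Start()
	w.Write([]byte(cmdCont)) // cmdAbrt would make the child panic with "Exited on request"
	w.Close()
	_ = cmd.Wait()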
|
||||
func initCgroup(cfg *cgroupConfig) error {
|
||||
|
||||
logger.Debugf("Initializing cgroup")
|
||||
baseGroup := cfg.Group
|
||||
//subsystem := cfg.Subsystem
|
||||
|
||||
// If baseGroup is empty, it implies using the cgroup of the current process
|
||||
// otherwise, it refers to a absolute group path
|
||||
if baseGroup != "" {
|
||||
baseGroup = filepath.Join("/", baseGroup)
|
||||
}
|
||||
|
||||
cfg.isUnified = cgroups.Mode() == cgroups.Unified
|
||||
|
||||
if cfg.isUnified {
|
||||
logger.Debugf("Cgroup V2 detected")
|
||||
g := baseGroup
|
||||
if g == "" {
|
||||
logger.Debugf("Detecting my cgroup path")
|
||||
var err error
|
||||
if g, err = cgv2.NestedGroupPath(""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
logger.Infof("Using cgroup path: %s", g)
|
||||
|
||||
var err error
|
||||
if cfg.cgMgrV2, err = cgv2.Load(g); err != nil {
|
||||
return err
|
||||
}
|
||||
if baseGroup == "" {
|
||||
logger.Debugf("Creating a sub group and move all processes into it")
|
||||
wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
logger.Debugf("Reading pids")
|
||||
procs, err := cfg.cgMgrV2.Procs(false)
|
||||
if err != nil {
|
||||
logger.Errorf("Cannot read pids in that group")
|
||||
return err
|
||||
}
|
||||
if len(procs) == 0 {
|
||||
break
|
||||
}
|
||||
for _, p := range procs {
|
||||
if err := wkrMgr.AddProc(p); err != nil {
|
||||
if errors.Is(err, syscall.ESRCH) {
|
||||
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("Trying to create a sub group in that group")
|
||||
testMgr, err := cfg.cgMgrV2.NewChild("__test", nil)
|
||||
if err != nil {
|
||||
logger.Errorf("Cannot create a sub group in the cgroup")
|
||||
return err
|
||||
}
|
||||
if err := testMgr.Delete(); err != nil {
|
||||
return err
|
||||
}
|
||||
procs, err := cfg.cgMgrV2.Procs(false)
|
||||
if err != nil {
|
||||
logger.Errorf("Cannot read pids in that group")
|
||||
return err
|
||||
}
|
||||
if len(procs) != 0 {
|
||||
return fmt.Errorf("There are remaining processes in cgroup %s", baseGroup)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("Cgroup V1 detected")
|
||||
var pather cgv1.Path
|
||||
if baseGroup != "" {
|
||||
pather = cgv1.StaticPath(baseGroup)
|
||||
} else {
|
||||
pather = (func(p cgv1.Path) cgv1.Path {
|
||||
return func(subsys cgv1.Name) (string, error) {
|
||||
path, err := p(subsys)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if path == "/" {
|
||||
return "", cgv1.ErrControllerNotActive
|
||||
}
|
||||
return path, err
|
||||
}
|
||||
})(cgv1.NestedPath(""))
|
||||
}
|
||||
logger.Infof("Loading cgroup")
|
||||
var err error
|
||||
if cfg.cgMgrV1, err = cgv1.Load(pather, func(cfg *cgv1.InitConfig) error {
|
||||
cfg.InitCheck = cgv1.AllowAny
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Debugf("Available subsystems:")
|
||||
for _, subsys := range cfg.cgMgrV1.Subsystems() {
|
||||
p, err := pather(subsys.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Debugf("%s: %s", subsys.Name(), p)
|
||||
}
|
||||
if baseGroup == "" {
|
||||
logger.Debugf("Creating a sub group and move all processes into it")
|
||||
wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, subsys := range cfg.cgMgrV1.Subsystems() {
|
||||
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
|
||||
for {
|
||||
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
|
||||
if err != nil {
|
||||
p, err := pather(subsys.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
|
||||
return err
|
||||
}
|
||||
if len(procs) == 0 {
|
||||
break
|
||||
}
|
||||
for _, proc := range procs {
|
||||
if err := wkrMgr.Add(proc); err != nil {
|
||||
if errors.Is(err, syscall.ESRCH) {
|
||||
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.Debugf("Trying to create a sub group in that group")
|
||||
testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{})
|
||||
if err != nil {
|
||||
logger.Errorf("Cannot create a sub group in the cgroup")
|
||||
return err
|
||||
}
|
||||
if err := testMgr.Delete(); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, subsys := range cfg.cgMgrV1.Subsystems() {
|
||||
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
|
||||
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
|
||||
if err != nil {
|
||||
p, err := pather(subsys.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
|
||||
return err
|
||||
}
|
||||
if len(procs) != 0 {
|
||||
p, err := pather(subsys.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("There are remaining processes in cgroup %s of subsystem %s", p, subsys.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCgroupHook(p mirrorProvider, cfg cgroupConfig, memLimit MemBytes) *cgroupHook {
|
||||
return &cgroupHook{
|
||||
provider: p,
|
||||
basePath: basePath,
|
||||
baseGroup: baseGroup,
|
||||
emptyHook: emptyHook{
|
||||
provider: p,
|
||||
},
|
||||
cgCfg: cfg,
|
||||
memLimit: memLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cgroupHook) preExec() error {
|
||||
c.created = true
|
||||
if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
if cgSubsystem != "memory" {
|
||||
return nil
|
||||
}
|
||||
if c.provider.Type() == provRsync || c.provider.Type() == provTwoStageRsync {
|
||||
gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
|
||||
return sh.Command(
|
||||
"cgset", "-r", "memory.limit_in_bytes=128M", gname,
|
||||
).Run()
|
||||
if c.cgCfg.isUnified {
|
||||
logger.Debugf("Creating v2 cgroup for task %s", c.provider.Name())
|
||||
var resSet *cgv2.Resources
|
||||
if c.memLimit != 0 {
|
||||
resSet = &cgv2.Resources{
|
||||
Memory: &cgv2.Memory{
|
||||
Max: func(i int64) *int64 { return &i }(c.memLimit.Value()),
|
||||
},
|
||||
}
|
||||
}
|
||||
subMgr, err := c.cgCfg.cgMgrV2.NewChild(c.provider.Name(), resSet)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
c.cgMgrV2 = subMgr
|
||||
} else {
|
||||
logger.Debugf("Creating v1 cgroup for task %s", c.provider.Name())
|
||||
var resSet contspecs.LinuxResources
|
||||
if c.memLimit != 0 {
|
||||
resSet = contspecs.LinuxResources{
|
||||
Memory: &contspecs.LinuxMemory{
|
||||
Limit: func(i int64) *int64 { return &i }(c.memLimit.Value()),
|
||||
},
|
||||
}
|
||||
}
|
||||
subMgr, err := c.cgCfg.cgMgrV1.New(c.provider.Name(), &resSet)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
c.cgMgrV1 = subMgr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
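A hedged sketch of the hook's lifecycle as the surrounding code and the tests further down use it; the 512 MiB value and the provider/cgCfg names are placeholders (units is github.com/docker/go-units, imported in cgroup_test.go below).

	cg := newCgroupHook(provider, cgCfg, 512*units.MiB) // limit becomes memory.max (v2) or memory.limit_in_bytes (v1)
	provider.AddHook(cg)
	if err := cg.preExec(); err != nil {
		// the per-task cgroup could not be created
	}
	// ... run the mirror job inside the new group ...
	_ = cg.postExec() // kills any leftover tasks and deletes the per-task group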
@ -70,36 +302,59 @@ func (c *cgroupHook) postExec() error {
|
||||
logger.Errorf("Error killing tasks: %s", err.Error())
|
||||
}
|
||||
|
||||
c.created = false
|
||||
return sh.Command("cgdelete", c.Cgroup()).Run()
|
||||
}
|
||||
|
||||
func (c *cgroupHook) Cgroup() string {
|
||||
name := c.provider.Name()
|
||||
return fmt.Sprintf("%s:%s/%s", cgSubsystem, c.baseGroup, name)
|
||||
if c.cgCfg.isUnified {
|
||||
logger.Debugf("Deleting v2 cgroup for task %s", c.provider.Name())
|
||||
if err := c.cgMgrV2.Delete(); err != nil {
|
||||
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
c.cgMgrV2 = nil
|
||||
} else {
|
||||
logger.Debugf("Deleting v1 cgroup for task %s", c.provider.Name())
|
||||
if err := c.cgMgrV1.Delete(); err != nil {
|
||||
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
c.cgMgrV1 = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *cgroupHook) killAll() error {
|
||||
if !c.created {
|
||||
return nil
|
||||
if c.cgCfg.isUnified {
|
||||
if c.cgMgrV2 == nil {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if c.cgMgrV1 == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
name := c.provider.Name()
|
||||
|
||||
readTaskList := func() ([]int, error) {
|
||||
taskList := []int{}
|
||||
taskFile, err := os.Open(filepath.Join(c.basePath, cgSubsystem, c.baseGroup, name, "tasks"))
|
||||
if err != nil {
|
||||
return taskList, err
|
||||
}
|
||||
defer taskFile.Close()
|
||||
|
||||
scanner := bufio.NewScanner(taskFile)
|
||||
for scanner.Scan() {
|
||||
pid, err := strconv.Atoi(scanner.Text())
|
||||
if c.cgCfg.isUnified {
|
||||
procs, err := c.cgMgrV2.Procs(false)
|
||||
if err != nil {
|
||||
return taskList, err
|
||||
return []int{}, err
|
||||
}
|
||||
for _, proc := range procs {
|
||||
taskList = append(taskList, int(proc))
|
||||
}
|
||||
} else {
|
||||
taskSet := make(map[int]struct{})
|
||||
for _, subsys := range c.cgMgrV1.Subsystems() {
|
||||
procs, err := c.cgMgrV1.Processes(subsys.Name(), false)
|
||||
if err != nil {
|
||||
return []int{}, err
|
||||
}
|
||||
for _, proc := range procs {
|
||||
taskSet[proc.Pid] = struct{}{}
|
||||
}
|
||||
}
|
||||
for proc := range taskSet {
|
||||
taskList = append(taskList, proc)
|
||||
}
|
||||
taskList = append(taskList, pid)
|
||||
}
|
||||
return taskList, nil
|
||||
}
|
||||
|
@ -1,40 +1,124 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
|
||||
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/moby/sys/reexec"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestCgroup(t *testing.T) {
|
||||
Convey("Cgroup Should Work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
cmdScript := filepath.Join(tmpDir, "cmd.sh")
|
||||
daemonScript := filepath.Join(tmpDir, "daemon.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
bgPidfile := filepath.Join(tmpDir, "bg.pid")
|
||||
func init() {
|
||||
_, testReexec := os.LookupEnv("TESTREEXEC")
|
||||
if !testReexec {
|
||||
reexec.Init()
|
||||
}
|
||||
}
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-cgroup",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: cmdScript + " " + daemonScript,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
env: map[string]string{
|
||||
"BG_PIDFILE": bgPidfile,
|
||||
},
|
||||
func TestReexec(t *testing.T) {
|
||||
testCase, testReexec := os.LookupEnv("TESTREEXEC")
|
||||
if !testReexec {
|
||||
return
|
||||
}
|
||||
for len(os.Args) > 1 {
|
||||
thisArg := os.Args[1]
|
||||
os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
|
||||
if thisArg == "--" {
|
||||
break
|
||||
}
|
||||
cmdScriptContent := `#!/bin/bash
|
||||
}
|
||||
switch testCase {
|
||||
case "1":
|
||||
Convey("Reexec should panic when command not found", t, func(ctx C) {
|
||||
So(func() {
|
||||
reexec.Init()
|
||||
}, ShouldPanicWith, exec.ErrNotFound)
|
||||
})
|
||||
case "2":
|
||||
Convey("Reexec should run when fd 3 is not open", t, func(ctx C) {
|
||||
So((func() error {
|
||||
pipe := os.NewFile(3, "pipe")
|
||||
if pipe == nil {
|
||||
return errors.New("pipe is nil")
|
||||
} else {
|
||||
_, err := pipe.Stat()
|
||||
return err
|
||||
}
|
||||
})(), ShouldNotBeNil)
|
||||
So(func() {
|
||||
reexec.Init()
|
||||
}, ShouldPanicWith, syscall.ENOEXEC)
|
||||
})
|
||||
case "3":
|
||||
Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C) {
|
||||
So(func() {
|
||||
reexec.Init()
|
||||
}, ShouldPanicWith, "Exited on request")
|
||||
})
|
||||
case "4":
|
||||
Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C) {
|
||||
So(func() {
|
||||
reexec.Init()
|
||||
}, ShouldPanicWith, syscall.ENOEXEC)
|
||||
})
|
||||
case "5":
|
||||
Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C) {
|
||||
So(func() {
|
||||
reexec.Init()
|
||||
}, ShouldNotPanic)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCgroup(t *testing.T) {
|
||||
var cgcf *cgroupConfig
|
||||
Convey("init cgroup", t, func(ctx C) {
|
||||
_, useCurrentCgroup := os.LookupEnv("USECURCGROUP")
|
||||
cgcf = &cgroupConfig{BasePath: "/sys/fs/cgroup", Group: "tunasync", Subsystem: "cpu"}
|
||||
if useCurrentCgroup {
|
||||
cgcf.Group = ""
|
||||
}
|
||||
err := initCgroup(cgcf)
|
||||
So(err, ShouldBeNil)
|
||||
if cgcf.isUnified {
|
||||
So(cgcf.cgMgrV2, ShouldNotBeNil)
|
||||
} else {
|
||||
So(cgcf.cgMgrV1, ShouldNotBeNil)
|
||||
}
|
||||
|
||||
Convey("Cgroup Should Work", func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
cmdScript := filepath.Join(tmpDir, "cmd.sh")
|
||||
daemonScript := filepath.Join(tmpDir, "daemon.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
bgPidfile := filepath.Join(tmpDir, "bg.pid")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-cgroup",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: cmdScript + " " + daemonScript,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
env: map[string]string{
|
||||
"BG_PIDFILE": bgPidfile,
|
||||
},
|
||||
}
|
||||
cmdScriptContent := `#!/bin/bash
|
||||
redirect-std() {
|
||||
[[ -t 0 ]] && exec </dev/null
|
||||
[[ -t 1 ]] && exec >/dev/null
|
||||
@ -45,13 +129,13 @@ redirect-std() {
|
||||
close-fds() {
|
||||
eval exec {3..255}\>\&-
|
||||
}
|
||||
|
||||
|
||||
# full daemonization of external command with setsid
|
||||
daemonize() {
|
||||
(
|
||||
redirect-std
|
||||
cd /
|
||||
close-fds
|
||||
redirect-std
|
||||
cd /
|
||||
close-fds
|
||||
exec setsid "$@"
|
||||
) &
|
||||
}
|
||||
@ -60,85 +144,180 @@ echo $$
|
||||
daemonize $@
|
||||
sleep 5
|
||||
`
|
||||
daemonScriptContent := `#!/bin/bash
|
||||
daemonScriptContent := `#!/bin/bash
|
||||
echo $$ > $BG_PIDFILE
|
||||
sleep 30
|
||||
`
|
||||
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
err = ioutil.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
initCgroup("/sys/fs/cgroup")
|
||||
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
|
||||
provider.AddHook(cg)
|
||||
|
||||
err = cg.preExec()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go func() {
|
||||
err = provider.Run()
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
// Daemon should be started
|
||||
daemonPidBytes, err := ioutil.ReadFile(bgPidfile)
|
||||
So(err, ShouldBeNil)
|
||||
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
|
||||
logger.Debug("daemon pid: %s", daemonPid)
|
||||
procDir := filepath.Join("/proc", daemonPid)
|
||||
_, err = os.Stat(procDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// Daemon won't be killed
|
||||
_, err = os.Stat(procDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// Daemon can be killed by cgroup killer
|
||||
cg.postExec()
|
||||
_, err = os.Stat(procDir)
|
||||
So(os.IsNotExist(err), ShouldBeTrue)
|
||||
|
||||
})
|
||||
|
||||
Convey("Rsync Memory Should Be Limited", t, func() {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := rsyncConfig{
|
||||
name: "tuna-cgroup",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
initCgroup("/sys/fs/cgroup")
|
||||
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
|
||||
provider.AddHook(cg)
|
||||
|
||||
cg.preExec()
|
||||
if cgSubsystem == "memory" {
|
||||
memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
|
||||
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(128*1024*1024))
|
||||
}
|
||||
cg.postExec()
|
||||
err = os.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cg := newCgroupHook(provider, *cgcf, 0)
|
||||
provider.AddHook(cg)
|
||||
|
||||
err = cg.preExec()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go func() {
|
||||
err := provider.Run(make(chan empty, 1))
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
// Daemon should be started
|
||||
daemonPidBytes, err := os.ReadFile(bgPidfile)
|
||||
So(err, ShouldBeNil)
|
||||
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
|
||||
logger.Debug("daemon pid: %s", daemonPid)
|
||||
procDir := filepath.Join("/proc", daemonPid)
|
||||
_, err = os.Stat(procDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// Daemon won't be killed
|
||||
_, err = os.Stat(procDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// Daemon can be killed by cgroup killer
|
||||
cg.postExec()
|
||||
_, err = os.Stat(procDir)
|
||||
So(os.IsNotExist(err), ShouldBeTrue)
|
||||
|
||||
})
|
||||
|
||||
Convey("Rsync Memory Should Be Limited", func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := rsyncConfig{
|
||||
name: "tuna-cgroup",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cg := newCgroupHook(provider, *cgcf, 512*units.MiB)
|
||||
provider.AddHook(cg)
|
||||
|
||||
err = cg.preExec()
|
||||
So(err, ShouldBeNil)
|
||||
if cgcf.isUnified {
|
||||
cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name())
|
||||
if useCurrentCgroup {
|
||||
group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name()))
|
||||
So(err, ShouldBeNil)
|
||||
cgpath = filepath.Join(cgcf.BasePath, group)
|
||||
}
|
||||
memoLimit, err := os.ReadFile(filepath.Join(cgpath, "memory.max"))
|
||||
So(err, ShouldBeNil)
|
||||
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
|
||||
} else {
|
||||
for _, subsys := range cg.cgMgrV1.Subsystems() {
|
||||
if subsys.Name() == cgv1.Memory {
|
||||
cgpath := filepath.Join(cgcf.Group, provider.Name())
|
||||
if useCurrentCgroup {
|
||||
p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory)
|
||||
So(err, ShouldBeNil)
|
||||
cgpath = p
|
||||
}
|
||||
memoLimit, err := os.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes"))
|
||||
So(err, ShouldBeNil)
|
||||
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
|
||||
}
|
||||
}
|
||||
}
|
||||
cg.postExec()
|
||||
So(cg.cgMgrV1, ShouldBeNil)
|
||||
})
|
||||
Reset(func() {
|
||||
if cgcf.isUnified {
|
||||
if cgcf.Group == "" {
|
||||
wkrg, err := cgv2.NestedGroupPath("")
|
||||
So(err, ShouldBeNil)
|
||||
wkrMgr, _ := cgv2.Load(wkrg)
|
||||
allCtrls, err := wkrMgr.Controllers()
|
||||
So(err, ShouldBeNil)
|
||||
err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable)
|
||||
So(err, ShouldBeNil)
|
||||
origMgr := cgcf.cgMgrV2
|
||||
for {
|
||||
logger.Debugf("Restoring pids")
|
||||
procs, err := wkrMgr.Procs(false)
|
||||
So(err, ShouldBeNil)
|
||||
if len(procs) == 0 {
|
||||
break
|
||||
}
|
||||
for _, p := range procs {
|
||||
if err := origMgr.AddProc(p); err != nil {
|
||||
if errors.Is(err, syscall.ESRCH) {
|
||||
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
|
||||
} else {
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
err = wkrMgr.Delete()
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
} else {
|
||||
if cgcf.Group == "" {
|
||||
pather := (func(p cgv1.Path) cgv1.Path {
|
||||
return func(subsys cgv1.Name) (string, error) {
|
||||
path, err := p(subsys)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if path == "/" {
|
||||
return "", cgv1.ErrControllerNotActive
|
||||
}
|
||||
return path, err
|
||||
}
|
||||
})(cgv1.NestedPath(""))
|
||||
wkrMgr, err := cgv1.Load(pather, func(cfg *cgv1.InitConfig) error {
|
||||
cfg.InitCheck = cgv1.AllowAny
|
||||
return nil
|
||||
})
|
||||
So(err, ShouldBeNil)
|
||||
origMgr := cgcf.cgMgrV1
|
||||
for _, subsys := range wkrMgr.Subsystems() {
|
||||
for {
|
||||
procs, err := wkrMgr.Processes(subsys.Name(), false)
|
||||
So(err, ShouldBeNil)
|
||||
if len(procs) == 0 {
|
||||
break
|
||||
}
|
||||
for _, proc := range procs {
|
||||
if err := origMgr.Add(proc); err != nil {
|
||||
if errors.Is(err, syscall.ESRCH) {
|
||||
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
|
||||
} else {
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
err = wkrMgr.Delete()
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -1,9 +1,13 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/anmitsu/go-shlex"
|
||||
"github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
type cmdConfig struct {
|
||||
@ -11,22 +15,34 @@ type cmdConfig struct {
|
||||
upstreamURL, command string
|
||||
workingDir, logDir, logFile string
|
||||
interval time.Duration
|
||||
retry int
|
||||
timeout time.Duration
|
||||
env map[string]string
|
||||
failOnMatch string
|
||||
sizePattern string
|
||||
}
|
||||
|
||||
type cmdProvider struct {
|
||||
baseProvider
|
||||
cmdConfig
|
||||
command []string
|
||||
command []string
|
||||
dataSize string
|
||||
failOnMatch *regexp.Regexp
|
||||
sizePattern *regexp.Regexp
|
||||
}
|
||||
|
||||
func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
|
||||
// TODO: check config options
|
||||
if c.retry == 0 {
|
||||
c.retry = defaultMaxRetry
|
||||
}
|
||||
provider := &cmdProvider{
|
||||
baseProvider: baseProvider{
|
||||
name: c.name,
|
||||
ctx: NewContext(),
|
||||
interval: c.interval,
|
||||
retry: c.retry,
|
||||
timeout: c.timeout,
|
||||
},
|
||||
cmdConfig: c,
|
||||
}
|
||||
@ -40,6 +56,22 @@ func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
|
||||
return nil, err
|
||||
}
|
||||
provider.command = cmd
|
||||
if len(c.failOnMatch) > 0 {
|
||||
var err error
|
||||
failOnMatch, err := regexp.Compile(c.failOnMatch)
|
||||
if err != nil {
|
||||
return nil, errors.New("fail-on-match regexp error: " + err.Error())
|
||||
}
|
||||
provider.failOnMatch = failOnMatch
|
||||
}
|
||||
if len(c.sizePattern) > 0 {
|
||||
var err error
|
||||
sizePattern, err := regexp.Compile(c.sizePattern)
|
||||
if err != nil {
|
||||
return nil, errors.New("size-pattern regexp error: " + err.Error())
|
||||
}
|
||||
provider.sizePattern = sizePattern
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
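The fail_on_match and size_pattern options added above are plain regular expressions that newCmdProvider compiles once at construction time. As a rough, hypothetical illustration of the idea (the pattern and the log line below are invented examples, not tunasync defaults), such a size pattern can be applied to a provider's log output like this:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical size pattern; real mirrors configure their own via size_pattern.
	sizePattern := regexp.MustCompile(`Total file size: ([0-9.]+[KMGTP]?) bytes`)
	logLine := "Total file size: 1.23G bytes" // made-up log line for illustration
	if m := sizePattern.FindStringSubmatch(logLine); len(m) > 1 {
		fmt.Println("extracted size:", m[1]) // prints: extracted size: 1.23G
	}
}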
|
||||
@ -52,25 +84,57 @@ func (p *cmdProvider) Upstream() string {
|
||||
return p.upstreamURL
|
||||
}
|
||||
|
||||
func (p *cmdProvider) Run() error {
|
||||
func (p *cmdProvider) DataSize() string {
|
||||
return p.dataSize
|
||||
}
|
||||
|
||||
func (p *cmdProvider) Run(started chan empty) error {
|
||||
p.dataSize = ""
|
||||
defer p.closeLogFile()
|
||||
if err := p.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Wait()
|
||||
started <- empty{}
|
||||
if err := p.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
if p.failOnMatch != nil {
|
||||
matches, err := internal.FindAllSubmatchInFile(p.LogFile(), p.failOnMatch)
|
||||
logger.Infof("FindAllSubmatchInFile: %q\n", matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(matches) != 0 {
|
||||
logger.Debug("Fail-on-match: %r", matches)
|
||||
return fmt.Errorf("Fail-on-match regexp found %d matches", len(matches))
|
||||
}
|
||||
}
|
||||
if p.sizePattern != nil {
|
||||
p.dataSize = internal.ExtractSizeFromLog(p.LogFile(), p.sizePattern)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *cmdProvider) Start() error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
if p.IsRunning() {
|
||||
return errors.New("provider is currently running")
|
||||
}
|
||||
|
||||
env := map[string]string{
|
||||
"TUNASYNC_MIRROR_NAME": p.Name(),
|
||||
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
|
||||
"TUNASYNC_UPSTREAM_URL": p.upstreamURL,
|
||||
"TUNASYNC_LOG_DIR": p.LogDir(),
|
||||
"TUNASYNC_LOG_FILE": p.LogFile(),
|
||||
}
|
||||
for k, v := range p.env {
|
||||
env[k] = v
|
||||
}
|
||||
p.cmd = newCmdJob(p, p.command, p.WorkingDir(), env)
|
||||
if err := p.prepareLogFile(); err != nil {
|
||||
if err := p.prepareLogFile(false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -78,5 +142,6 @@ func (p *cmdProvider) Start() error {
|
||||
return err
|
||||
}
|
||||
p.isRunning.Store(true)
|
||||
logger.Debugf("set isRunning to true: %s", p.Name())
|
||||
return nil
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
package worker
|
||||
|
||||
// put global viables and types here
|
||||
// put global variables and types here
|
||||
|
||||
import (
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
@ -8,6 +8,6 @@ import (
|
||||
|
||||
type empty struct{}
|
||||
|
||||
const maxRetry = 2
|
||||
const defaultMaxRetry = 2
|
||||
|
||||
var logger = logging.MustGetLogger("tunasync")
|
||||
|
176 worker/config.go
@ -6,6 +6,10 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
cgv1 "github.com/containerd/cgroups/v3/cgroup1"
|
||||
cgv2 "github.com/containerd/cgroups/v3/cgroup2"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/imdario/mergo"
|
||||
)
|
||||
|
||||
type providerEnum uint8
|
||||
@ -33,12 +37,16 @@ func (p *providerEnum) UnmarshalText(text []byte) error {
|
||||
|
||||
// Config represents worker config options
|
||||
type Config struct {
|
||||
Global globalConfig `toml:"global"`
|
||||
Manager managerConfig `toml:"manager"`
|
||||
Server serverConfig `toml:"server"`
|
||||
Cgroup cgroupConfig `toml:"cgroup"`
|
||||
Include includeConfig `toml:"include"`
|
||||
Mirrors []mirrorConfig `toml:"mirrors"`
|
||||
Global globalConfig `toml:"global"`
|
||||
Manager managerConfig `toml:"manager"`
|
||||
Server serverConfig `toml:"server"`
|
||||
Cgroup cgroupConfig `toml:"cgroup"`
|
||||
ZFS zfsConfig `toml:"zfs"`
|
||||
BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
|
||||
Docker dockerConfig `toml:"docker"`
|
||||
Include includeConfig `toml:"include"`
|
||||
MirrorsConf []mirrorConfig `toml:"mirrors"`
|
||||
Mirrors []mirrorConfig
|
||||
}
|
||||
|
||||
type globalConfig struct {
|
||||
@ -47,15 +55,32 @@ type globalConfig struct {
|
||||
MirrorDir string `toml:"mirror_dir"`
|
||||
Concurrent int `toml:"concurrent"`
|
||||
Interval int `toml:"interval"`
|
||||
Retry int `toml:"retry"`
|
||||
Timeout int `toml:"timeout"`
|
||||
|
||||
// appended to the options generated by rsync_provider, but before mirror-specific options
|
||||
RsyncOptions []string `toml:"rsync_options"`
|
||||
|
||||
ExecOnSuccess []string `toml:"exec_on_success"`
|
||||
ExecOnFailure []string `toml:"exec_on_failure"`
|
||||
|
||||
// merged with mirror-specific options. make sure you know what you are doing!
|
||||
SuccessExitCodes []int `toml:"dangerous_global_success_exit_codes"`
|
||||
}
|
||||
|
||||
type managerConfig struct {
|
||||
APIBase string `toml:"api_base"`
|
||||
CACert string `toml:"ca_cert"`
|
||||
Token string `toml:"token"`
|
||||
// this option overrides the APIBase
|
||||
APIList []string `toml:"api_base_list"`
|
||||
CACert string `toml:"ca_cert"`
|
||||
// Token string `toml:"token"`
|
||||
}
|
||||
|
||||
func (mc managerConfig) APIBaseList() []string {
|
||||
if len(mc.APIList) > 0 {
|
||||
return mc.APIList
|
||||
}
|
||||
return []string{mc.APIBase}
|
||||
}
|
||||
|
||||
type serverConfig struct {
|
||||
@ -67,9 +92,29 @@ type serverConfig struct {
|
||||
}
|
||||
|
||||
type cgroupConfig struct {
|
||||
Enable bool `toml:"enable"`
|
||||
BasePath string `toml:"base_path"`
|
||||
Group string `toml:"group"`
|
||||
Enable bool `toml:"enable"`
|
||||
BasePath string `toml:"base_path"`
|
||||
Group string `toml:"group"`
|
||||
Subsystem string `toml:"subsystem"`
|
||||
isUnified bool
|
||||
cgMgrV1 cgv1.Cgroup
|
||||
cgMgrV2 *cgv2.Manager
|
||||
}
|
||||
|
||||
type dockerConfig struct {
|
||||
Enable bool `toml:"enable"`
|
||||
Volumes []string `toml:"volumes"`
|
||||
Options []string `toml:"options"`
|
||||
}
|
||||
|
||||
type zfsConfig struct {
|
||||
Enable bool `toml:"enable"`
|
||||
Zpool string `toml:"zpool"`
|
||||
}
|
||||
|
||||
type btrfsSnapshotConfig struct {
|
||||
Enable bool `toml:"enable"`
|
||||
SnapshotPath string `toml:"snapshot_path"`
|
||||
}
|
||||
|
||||
type includeConfig struct {
|
||||
@ -80,30 +125,80 @@ type includedMirrorConfig struct {
|
||||
Mirrors []mirrorConfig `toml:"mirrors"`
|
||||
}
|
||||
|
||||
type MemBytes int64
|
||||
|
||||
// Set sets the value of the MemBytes by passing a string
|
||||
func (m *MemBytes) Set(value string) error {
|
||||
val, err := units.RAMInBytes(value)
|
||||
*m = MemBytes(val)
|
||||
return err
|
||||
}
|
||||
|
||||
// Type returns the type
|
||||
func (m *MemBytes) Type() string {
|
||||
return "bytes"
|
||||
}
|
||||
|
||||
// Value returns the value in int64
|
||||
func (m *MemBytes) Value() int64 {
|
||||
return int64(*m)
|
||||
}
|
||||
|
||||
// UnmarshalJSON is the customized unmarshaler for MemBytes
|
||||
func (m *MemBytes) UnmarshalText(s []byte) error {
|
||||
val, err := units.RAMInBytes(string(s))
|
||||
*m = MemBytes(val)
|
||||
return err
|
||||
}
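MemBytes delegates all parsing to go-units, so TOML values such as "256MiB" or "128M" end up as plain byte counts. A minimal, self-contained sketch of that parsing (the strings mirror the ones used in the config tests below; the program itself is only an illustration):

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"256MiB", "128M"} {
		n, err := units.RAMInBytes(s) // binary units: 1M == 1024*1024 bytes
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %d bytes\n", s, n)
	}
	// Output:
	// 256MiB -> 268435456 bytes
	// 128M -> 134217728 bytes
}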
|
||||
|
||||
type mirrorConfig struct {
|
||||
Name string `toml:"name"`
|
||||
Provider providerEnum `toml:"provider"`
|
||||
Upstream string `toml:"upstream"`
|
||||
Interval int `toml:"interval"`
|
||||
MirrorDir string `toml:"mirror_dir"`
|
||||
LogDir string `toml:"log_dir"`
|
||||
Env map[string]string `toml:"env"`
|
||||
Role string `toml:"role"`
|
||||
Name string `toml:"name"`
|
||||
Provider providerEnum `toml:"provider"`
|
||||
Upstream string `toml:"upstream"`
|
||||
Interval int `toml:"interval"`
|
||||
Retry int `toml:"retry"`
|
||||
Timeout int `toml:"timeout"`
|
||||
MirrorDir string `toml:"mirror_dir"`
|
||||
MirrorSubDir string `toml:"mirror_subdir"`
|
||||
LogDir string `toml:"log_dir"`
|
||||
Env map[string]string `toml:"env"`
|
||||
Role string `toml:"role"`
|
||||
|
||||
// These two options over-write the global options
|
||||
ExecOnSuccess []string `toml:"exec_on_success"`
|
||||
ExecOnFailure []string `toml:"exec_on_failure"`
|
||||
|
||||
// These two options the global options
|
||||
// These two options are appended to the global options
|
||||
ExecOnSuccessExtra []string `toml:"exec_on_success_extra"`
|
||||
ExecOnFailureExtra []string `toml:"exec_on_failure_extra"`
|
||||
|
||||
Command string `toml:"command"`
|
||||
UseIPv6 bool `toml:"use_ipv6"`
|
||||
ExcludeFile string `toml:"exclude_file"`
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
Stage1Profile string `toml:"stage1_profile"`
|
||||
// will be merged with global option
|
||||
SuccessExitCodes []int `toml:"success_exit_codes"`
|
||||
|
||||
Command string `toml:"command"`
|
||||
FailOnMatch string `toml:"fail_on_match"`
|
||||
SizePattern string `toml:"size_pattern"`
|
||||
UseIPv6 bool `toml:"use_ipv6"`
|
||||
UseIPv4 bool `toml:"use_ipv4"`
|
||||
ExcludeFile string `toml:"exclude_file"`
|
||||
Username string `toml:"username"`
|
||||
Password string `toml:"password"`
|
||||
RsyncNoTimeo bool `toml:"rsync_no_timeout"`
|
||||
RsyncTimeout int `toml:"rsync_timeout"`
|
||||
RsyncOptions []string `toml:"rsync_options"`
|
||||
RsyncOverride []string `toml:"rsync_override"`
|
||||
RsyncOverrideOnly bool `toml:"rsync_override_only"` // only use provided overridden options if true
|
||||
Stage1Profile string `toml:"stage1_profile"`
|
||||
|
||||
MemoryLimit MemBytes `toml:"memory_limit"`
|
||||
|
||||
DockerImage string `toml:"docker_image"`
|
||||
DockerVolumes []string `toml:"docker_volumes"`
|
||||
DockerOptions []string `toml:"docker_options"`
|
||||
|
||||
SnapshotPath string `toml:"snapshot_path"`
|
||||
|
||||
ChildMirrors []mirrorConfig `toml:"mirrors"`
|
||||
}
|
||||
|
||||
// LoadConfig loads configuration
|
||||
@ -130,9 +225,36 @@ func LoadConfig(cfgFile string) (*Config, error) {
|
||||
logger.Errorf(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
cfg.Mirrors = append(cfg.Mirrors, incMirCfg.Mirrors...)
|
||||
cfg.MirrorsConf = append(cfg.MirrorsConf, incMirCfg.Mirrors...)
|
||||
}
|
||||
}
|
||||
|
||||
for _, m := range cfg.MirrorsConf {
|
||||
if err := recursiveMirrors(cfg, nil, m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func recursiveMirrors(cfg *Config, parent *mirrorConfig, mirror mirrorConfig) error {
|
||||
var curMir mirrorConfig
|
||||
if parent != nil {
|
||||
curMir = *parent
|
||||
}
|
||||
curMir.ChildMirrors = nil
|
||||
if err := mergo.Merge(&curMir, mirror, mergo.WithOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
if mirror.ChildMirrors == nil {
|
||||
cfg.Mirrors = append(cfg.Mirrors, curMir)
|
||||
} else {
|
||||
for _, m := range mirror.ChildMirrors {
|
||||
if err := recursiveMirrors(cfg, &curMir, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
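recursiveMirrors flattens nested [[mirrors.mirrors]] tables by merging each child over a copy of its parent with mergo.WithOverride, so a child inherits every field it leaves unset and overrides the ones it sets. A self-contained sketch of that merge behaviour, using a made-up struct rather than the real mirrorConfig:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// entry is an illustrative stand-in for mirrorConfig.
type entry struct {
	Name     string
	Provider string
	UseIPv6  bool
}

func main() {
	parent := entry{Name: "ipv6s", Provider: "rsync", UseIPv6: true}
	child := entry{Name: "debian-security"} // only sets the name

	merged := parent // start from a copy of the parent, as recursiveMirrors does
	if err := mergo.Merge(&merged, child, mergo.WithOverride); err != nil {
		panic(err)
	}
	// Unset (zero-valued) child fields keep the parent's values:
	fmt.Printf("%+v\n", merged) // {Name:debian-security Provider:rsync UseIPv6:true}
}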
|
||||
|
@ -53,34 +53,43 @@ func diffMirrorConfig(oldList, newList []mirrorConfig) []mirrorCfgTrans {
|
||||
sort.Sort(sortableMirrorList(oList))
|
||||
sort.Sort(sortableMirrorList(nList))
|
||||
|
||||
// insert a tail node to both lists
|
||||
// as the maximum node
|
||||
lastOld, lastNew := oList[len(oList)-1], nList[len(nList)-1]
|
||||
maxName := lastOld.Name
|
||||
if lastNew.Name > lastOld.Name {
|
||||
maxName = lastNew.Name
|
||||
}
|
||||
Nil := mirrorConfig{Name: "~" + maxName}
|
||||
if Nil.Name <= maxName {
|
||||
panic("Nil.Name should be larger than maxName")
|
||||
}
|
||||
oList, nList = append(oList, Nil), append(nList, Nil)
|
||||
if len(oList) != 0 && len(nList) != 0 {
|
||||
// insert a tail node to both lists
|
||||
// as the maximum node
|
||||
lastOld, lastNew := oList[len(oList)-1], nList[len(nList)-1]
|
||||
maxName := lastOld.Name
|
||||
if lastNew.Name > lastOld.Name {
|
||||
maxName = lastNew.Name
|
||||
}
|
||||
Nil := mirrorConfig{Name: "~" + maxName}
|
||||
if Nil.Name <= maxName {
|
||||
panic("Nil.Name should be larger than maxName")
|
||||
}
|
||||
oList, nList = append(oList, Nil), append(nList, Nil)
|
||||
|
||||
// iterate over both lists to find the difference
|
||||
for i, j := 0, 0; i < len(oList) && j < len(nList); {
|
||||
o, n := oList[i], nList[j]
|
||||
if n.Name < o.Name {
|
||||
operations = append(operations, mirrorCfgTrans{diffAdd, n})
|
||||
j++
|
||||
} else if o.Name < n.Name {
|
||||
operations = append(operations, mirrorCfgTrans{diffDelete, o})
|
||||
i++
|
||||
} else {
|
||||
if !reflect.DeepEqual(o, n) {
|
||||
operations = append(operations, mirrorCfgTrans{diffModify, n})
|
||||
// iterate over both lists to find the difference
|
||||
for i, j := 0, 0; i < len(oList) && j < len(nList); {
|
||||
o, n := oList[i], nList[j]
|
||||
if n.Name < o.Name {
|
||||
operations = append(operations, mirrorCfgTrans{diffAdd, n})
|
||||
j++
|
||||
} else if o.Name < n.Name {
|
||||
operations = append(operations, mirrorCfgTrans{diffDelete, o})
|
||||
i++
|
||||
} else {
|
||||
if !reflect.DeepEqual(o, n) {
|
||||
operations = append(operations, mirrorCfgTrans{diffModify, n})
|
||||
}
|
||||
i++
|
||||
j++
|
||||
}
|
||||
i++
|
||||
j++
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < len(oList); i++ {
|
||||
operations = append(operations, mirrorCfgTrans{diffDelete, oList[i]})
|
||||
}
|
||||
for i := 0; i < len(nList); i++ {
|
||||
operations = append(operations, mirrorCfgTrans{diffAdd, nList[i]})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -10,12 +10,12 @@ import (
|
||||
func TestConfigDiff(t *testing.T) {
|
||||
Convey("When old and new configs are equal", t, func() {
|
||||
oldList := []mirrorConfig{
|
||||
mirrorConfig{Name: "debian"},
|
||||
mirrorConfig{Name: "debian-security"},
|
||||
mirrorConfig{Name: "fedora"},
|
||||
mirrorConfig{Name: "archlinux"},
|
||||
mirrorConfig{Name: "AOSP"},
|
||||
mirrorConfig{Name: "ubuntu"},
|
||||
{Name: "debian"},
|
||||
{Name: "debian-security"},
|
||||
{Name: "fedora"},
|
||||
{Name: "archlinux"},
|
||||
{Name: "AOSP"},
|
||||
{Name: "ubuntu"},
|
||||
}
|
||||
newList := make([]mirrorConfig, len(oldList))
|
||||
copy(newList, oldList)
|
||||
@ -23,21 +23,49 @@ func TestConfigDiff(t *testing.T) {
|
||||
difference := diffMirrorConfig(oldList, newList)
|
||||
So(len(difference), ShouldEqual, 0)
|
||||
})
|
||||
Convey("When old config is empty", t, func() {
|
||||
newList := []mirrorConfig{
|
||||
{Name: "debian"},
|
||||
{Name: "debian-security"},
|
||||
{Name: "fedora"},
|
||||
{Name: "archlinux"},
|
||||
{Name: "AOSP"},
|
||||
{Name: "ubuntu"},
|
||||
}
|
||||
oldList := make([]mirrorConfig, 0)
|
||||
|
||||
difference := diffMirrorConfig(oldList, newList)
|
||||
So(len(difference), ShouldEqual, len(newList))
|
||||
})
|
||||
Convey("When new config is empty", t, func() {
|
||||
oldList := []mirrorConfig{
|
||||
{Name: "debian"},
|
||||
{Name: "debian-security"},
|
||||
{Name: "fedora"},
|
||||
{Name: "archlinux"},
|
||||
{Name: "AOSP"},
|
||||
{Name: "ubuntu"},
|
||||
}
|
||||
newList := make([]mirrorConfig, 0)
|
||||
|
||||
difference := diffMirrorConfig(oldList, newList)
|
||||
So(len(difference), ShouldEqual, len(oldList))
|
||||
})
|
||||
Convey("When giving two config lists with different names", t, func() {
|
||||
oldList := []mirrorConfig{
|
||||
mirrorConfig{Name: "debian"},
|
||||
mirrorConfig{Name: "debian-security"},
|
||||
mirrorConfig{Name: "fedora"},
|
||||
mirrorConfig{Name: "archlinux"},
|
||||
mirrorConfig{Name: "AOSP", Env: map[string]string{"REPO": "/usr/bin/repo"}},
|
||||
mirrorConfig{Name: "ubuntu"},
|
||||
{Name: "debian"},
|
||||
{Name: "debian-security"},
|
||||
{Name: "fedora"},
|
||||
{Name: "archlinux"},
|
||||
{Name: "AOSP", Env: map[string]string{"REPO": "/usr/bin/repo"}},
|
||||
{Name: "ubuntu"},
|
||||
}
|
||||
newList := []mirrorConfig{
|
||||
mirrorConfig{Name: "debian"},
|
||||
mirrorConfig{Name: "debian-cd"},
|
||||
mirrorConfig{Name: "archlinuxcn"},
|
||||
mirrorConfig{Name: "AOSP", Env: map[string]string{"REPO": "/usr/local/bin/aosp-repo"}},
|
||||
mirrorConfig{Name: "ubuntu-ports"},
|
||||
{Name: "debian"},
|
||||
{Name: "debian-cd"},
|
||||
{Name: "archlinuxcn"},
|
||||
{Name: "AOSP", Env: map[string]string{"REPO": "/usr/local/bin/aosp-repo"}},
|
||||
{Name: "ubuntu-ports"},
|
||||
}
|
||||
|
||||
difference := diffMirrorConfig(oldList, newList)
|
||||
|
@ -2,10 +2,12 @@ package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
@ -18,6 +20,8 @@ log_dir = "/var/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/data/mirrors"
|
||||
concurrent = 10
|
||||
interval = 240
|
||||
retry = 3
|
||||
timeout = 86400
|
||||
|
||||
[manager]
|
||||
api_base = "https://127.0.0.1:5000"
|
||||
@ -35,6 +39,8 @@ name = "AOSP"
|
||||
provider = "command"
|
||||
upstream = "https://aosp.google.com/"
|
||||
interval = 720
|
||||
retry = 2
|
||||
timeout = 3600
|
||||
mirror_dir = "/data/git/AOSP"
|
||||
exec_on_success = [
|
||||
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
|
||||
@ -48,12 +54,15 @@ provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
upstream = "rsync://ftp.debian.org/debian/"
|
||||
use_ipv6 = true
|
||||
memory_limit = "256MiB"
|
||||
|
||||
[[mirrors]]
|
||||
name = "fedora"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://ftp.fedoraproject.org/fedora/"
|
||||
use_ipv6 = true
|
||||
memory_limit = "128M"
|
||||
|
||||
exclude_file = "/etc/tunasync.d/fedora-exclude.txt"
|
||||
exec_on_failure = [
|
||||
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
|
||||
@ -67,11 +76,11 @@ exec_on_failure = [
|
||||
})
|
||||
|
||||
Convey("Everything should work on valid config file", t, func() {
|
||||
tmpfile, err := ioutil.TempFile("", "tunasync")
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
So(err, ShouldBeNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
@ -81,9 +90,9 @@ exec_on_failure = [
|
||||
tmpDir,
|
||||
)
|
||||
|
||||
cfgBlob = cfgBlob + incSection
|
||||
curCfgBlob := cfgBlob + incSection
|
||||
|
||||
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
@ -107,15 +116,17 @@ provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
use_ipv6 = true
|
||||
`
|
||||
err = ioutil.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
err = ioutil.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Global.Name, ShouldEqual, "test_worker")
|
||||
So(cfg.Global.Interval, ShouldEqual, 240)
|
||||
So(cfg.Global.Retry, ShouldEqual, 3)
|
||||
So(cfg.Global.Timeout, ShouldEqual, 86400)
|
||||
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
|
||||
|
||||
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
|
||||
@ -126,6 +137,100 @@ use_ipv6 = true
|
||||
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
|
||||
So(m.Provider, ShouldEqual, provCommand)
|
||||
So(m.Interval, ShouldEqual, 720)
|
||||
So(m.Retry, ShouldEqual, 2)
|
||||
So(m.Timeout, ShouldEqual, 3600)
|
||||
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
|
||||
|
||||
m = cfg.Mirrors[1]
|
||||
So(m.Name, ShouldEqual, "debian")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, provTwoStageRsync)
|
||||
So(m.MemoryLimit.Value(), ShouldEqual, 256*units.MiB)
|
||||
|
||||
m = cfg.Mirrors[2]
|
||||
So(m.Name, ShouldEqual, "fedora")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, provRsync)
|
||||
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
|
||||
So(m.MemoryLimit.Value(), ShouldEqual, 128*units.MiB)
|
||||
|
||||
m = cfg.Mirrors[3]
|
||||
So(m.Name, ShouldEqual, "debian-cd")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, provTwoStageRsync)
|
||||
So(m.MemoryLimit.Value(), ShouldEqual, 0)
|
||||
|
||||
m = cfg.Mirrors[4]
|
||||
So(m.Name, ShouldEqual, "debian-security")
|
||||
|
||||
m = cfg.Mirrors[5]
|
||||
So(m.Name, ShouldEqual, "ubuntu")
|
||||
|
||||
So(len(cfg.Mirrors), ShouldEqual, 6)
|
||||
})
|
||||
|
||||
Convey("Everything should work on nested config file", t, func() {
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
So(err, ShouldBeNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
incSection := fmt.Sprintf(
|
||||
"\n[include]\n"+
|
||||
"include_mirrors = \"%s/*.conf\"",
|
||||
tmpDir,
|
||||
)
|
||||
|
||||
curCfgBlob := cfgBlob + incSection
|
||||
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
incBlob1 := `
|
||||
[[mirrors]]
|
||||
name = "ipv6s"
|
||||
use_ipv6 = true
|
||||
[[mirrors.mirrors]]
|
||||
name = "debians"
|
||||
mirror_subdir = "debian"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
|
||||
[[mirrors.mirrors.mirrors]]
|
||||
name = "debian-security"
|
||||
upstream = "rsync://test.host/debian-security/"
|
||||
[[mirrors.mirrors.mirrors]]
|
||||
name = "ubuntu"
|
||||
stage1_profile = "ubuntu"
|
||||
upstream = "rsync://test.host2/ubuntu/"
|
||||
[[mirrors.mirrors]]
|
||||
name = "debian-cd"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://test.host3/debian-cd/"
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(cfg.Global.Name, ShouldEqual, "test_worker")
|
||||
So(cfg.Global.Interval, ShouldEqual, 240)
|
||||
So(cfg.Global.Retry, ShouldEqual, 3)
|
||||
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
|
||||
|
||||
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
|
||||
So(cfg.Server.Hostname, ShouldEqual, "worker1.example.com")
|
||||
|
||||
m := cfg.Mirrors[0]
|
||||
So(m.Name, ShouldEqual, "AOSP")
|
||||
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
|
||||
So(m.Provider, ShouldEqual, provCommand)
|
||||
So(m.Interval, ShouldEqual, 720)
|
||||
So(m.Retry, ShouldEqual, 2)
|
||||
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
|
||||
|
||||
m = cfg.Mirrors[1]
|
||||
@ -140,25 +245,32 @@ use_ipv6 = true
|
||||
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
|
||||
|
||||
m = cfg.Mirrors[3]
|
||||
So(m.Name, ShouldEqual, "debian-cd")
|
||||
So(m.Name, ShouldEqual, "debian-security")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, provTwoStageRsync)
|
||||
So(m.UseIPv6, ShouldEqual, true)
|
||||
So(m.Stage1Profile, ShouldEqual, "debian")
|
||||
|
||||
m = cfg.Mirrors[4]
|
||||
So(m.Name, ShouldEqual, "debian-security")
|
||||
So(m.Name, ShouldEqual, "ubuntu")
|
||||
So(m.MirrorDir, ShouldEqual, "")
|
||||
So(m.Provider, ShouldEqual, provTwoStageRsync)
|
||||
So(m.UseIPv6, ShouldEqual, true)
|
||||
So(m.Stage1Profile, ShouldEqual, "ubuntu")
|
||||
|
||||
m = cfg.Mirrors[5]
|
||||
So(m.Name, ShouldEqual, "ubuntu")
|
||||
So(m.Name, ShouldEqual, "debian-cd")
|
||||
So(m.UseIPv6, ShouldEqual, true)
|
||||
So(m.Provider, ShouldEqual, provRsync)
|
||||
|
||||
So(len(cfg.Mirrors), ShouldEqual, 6)
|
||||
})
|
||||
|
||||
Convey("Providers can be inited from a valid config file", t, func() {
|
||||
tmpfile, err := ioutil.TempFile("", "tunasync")
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
@ -203,4 +315,266 @@ use_ipv6 = true
|
||||
So(rp.excludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
|
||||
|
||||
})
|
||||
|
||||
Convey("MirrorSubdir should work", t, func() {
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
cfgBlob1 := `
|
||||
[global]
|
||||
name = "test_worker"
|
||||
log_dir = "/var/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/data/mirrors"
|
||||
concurrent = 10
|
||||
interval = 240
|
||||
timeout = 86400
|
||||
retry = 3
|
||||
|
||||
[manager]
|
||||
api_base = "https://127.0.0.1:5000"
|
||||
token = "some_token"
|
||||
|
||||
[server]
|
||||
hostname = "worker1.example.com"
|
||||
listen_addr = "127.0.0.1"
|
||||
listen_port = 6000
|
||||
ssl_cert = "/etc/tunasync.d/worker1.cert"
|
||||
ssl_key = "/etc/tunasync.d/worker1.key"
|
||||
|
||||
[[mirrors]]
|
||||
name = "ipv6s"
|
||||
use_ipv6 = true
|
||||
[[mirrors.mirrors]]
|
||||
name = "debians"
|
||||
mirror_subdir = "debian"
|
||||
provider = "two-stage-rsync"
|
||||
stage1_profile = "debian"
|
||||
|
||||
[[mirrors.mirrors.mirrors]]
|
||||
name = "debian-security"
|
||||
upstream = "rsync://test.host/debian-security/"
|
||||
[[mirrors.mirrors.mirrors]]
|
||||
name = "ubuntu"
|
||||
stage1_profile = "ubuntu"
|
||||
upstream = "rsync://test.host2/ubuntu/"
|
||||
[[mirrors.mirrors]]
|
||||
name = "debian-cd"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://test.host3/debian-cd/"
|
||||
`
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
providers := map[string]mirrorProvider{}
|
||||
for _, m := range cfg.Mirrors {
|
||||
p := newMirrorProvider(m, cfg)
|
||||
providers[p.Name()] = p
|
||||
}
|
||||
|
||||
p := providers["debian-security"]
|
||||
So(p.Name(), ShouldEqual, "debian-security")
|
||||
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-security")
|
||||
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-security/latest.log")
|
||||
r2p, ok := p.(*twoStageRsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(r2p.stage1Profile, ShouldEqual, "debian")
|
||||
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/debian-security")
|
||||
|
||||
p = providers["ubuntu"]
|
||||
So(p.Name(), ShouldEqual, "ubuntu")
|
||||
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/ubuntu")
|
||||
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/ubuntu/latest.log")
|
||||
r2p, ok = p.(*twoStageRsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(r2p.stage1Profile, ShouldEqual, "ubuntu")
|
||||
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/ubuntu")
|
||||
|
||||
p = providers["debian-cd"]
|
||||
So(p.Name(), ShouldEqual, "debian-cd")
|
||||
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-cd")
|
||||
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-cd/latest.log")
|
||||
rp, ok := p.(*rsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd")
|
||||
So(p.Timeout(), ShouldEqual, 86400*time.Second)
|
||||
})
|
||||
|
||||
Convey("rsync_override_only should work", t, func() {
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
cfgBlob1 := `
|
||||
[global]
|
||||
name = "test_worker"
|
||||
log_dir = "/var/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/data/mirrors"
|
||||
concurrent = 10
|
||||
interval = 240
|
||||
retry = 3
|
||||
timeout = 86400
|
||||
|
||||
[manager]
|
||||
api_base = "https://127.0.0.1:5000"
|
||||
token = "some_token"
|
||||
|
||||
[server]
|
||||
hostname = "worker1.example.com"
|
||||
listen_addr = "127.0.0.1"
|
||||
listen_port = 6000
|
||||
ssl_cert = "/etc/tunasync.d/worker1.cert"
|
||||
ssl_key = "/etc/tunasync.d/worker1.key"
|
||||
|
||||
[[mirrors]]
|
||||
name = "foo"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://foo.bar/"
|
||||
interval = 720
|
||||
retry = 2
|
||||
timeout = 3600
|
||||
mirror_dir = "/data/foo"
|
||||
rsync_override = ["--bar", "baz"]
|
||||
rsync_override_only = true
|
||||
`
|
||||
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
providers := map[string]mirrorProvider{}
|
||||
for _, m := range cfg.Mirrors {
|
||||
p := newMirrorProvider(m, cfg)
|
||||
providers[p.Name()] = p
|
||||
}
|
||||
|
||||
p, ok := providers["foo"].(*rsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(p.options, ShouldResemble, []string{"--bar", "baz"})
|
||||
})
|
||||
|
||||
Convey("rsync global options should work", t, func() {
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
cfgBlob1 := `
|
||||
[global]
|
||||
name = "test_worker"
|
||||
log_dir = "/var/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/data/mirrors"
|
||||
concurrent = 10
|
||||
interval = 240
|
||||
retry = 3
|
||||
timeout = 86400
|
||||
rsync_options = ["--global"]
|
||||
|
||||
[manager]
|
||||
api_base = "https://127.0.0.1:5000"
|
||||
token = "some_token"
|
||||
|
||||
[server]
|
||||
hostname = "worker1.example.com"
|
||||
listen_addr = "127.0.0.1"
|
||||
listen_port = 6000
|
||||
ssl_cert = "/etc/tunasync.d/worker1.cert"
|
||||
ssl_key = "/etc/tunasync.d/worker1.key"
|
||||
|
||||
[[mirrors]]
|
||||
name = "foo"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://foo.bar/"
|
||||
interval = 720
|
||||
retry = 2
|
||||
timeout = 3600
|
||||
mirror_dir = "/data/foo"
|
||||
rsync_override = ["--override"]
|
||||
rsync_options = ["--local"]
|
||||
`
|
||||
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
providers := map[string]mirrorProvider{}
|
||||
for _, m := range cfg.Mirrors {
|
||||
p := newMirrorProvider(m, cfg)
|
||||
providers[p.Name()] = p
|
||||
}
|
||||
|
||||
p, ok := providers["foo"].(*rsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(p.options, ShouldResemble, []string{
|
||||
"--override", // from mirror.rsync_override
|
||||
"--timeout=120", // generated by newRsyncProvider
|
||||
"--global", // from global.rsync_options
|
||||
"--local", // from mirror.rsync_options
|
||||
})
|
||||
})
|
||||
|
||||
Convey("success_exit_codes should work globally and per mirror", t, func() {
|
||||
tmpfile, err := os.CreateTemp("", "tunasync")
|
||||
So(err, ShouldEqual, nil)
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
cfgBlob1 := `
|
||||
[global]
|
||||
name = "test_worker"
|
||||
log_dir = "/var/log/tunasync/{{.Name}}"
|
||||
mirror_dir = "/data/mirrors"
|
||||
concurrent = 10
|
||||
interval = 240
|
||||
retry = 3
|
||||
timeout = 86400
|
||||
dangerous_global_success_exit_codes = [10, 20]
|
||||
|
||||
[manager]
|
||||
api_base = "https://127.0.0.1:5000"
|
||||
token = "some_token"
|
||||
|
||||
[server]
|
||||
hostname = "worker1.example.com"
|
||||
listen_addr = "127.0.0.1"
|
||||
listen_port = 6000
|
||||
ssl_cert = "/etc/tunasync.d/worker1.cert"
|
||||
ssl_key = "/etc/tunasync.d/worker1.key"
|
||||
|
||||
[[mirrors]]
|
||||
name = "foo"
|
||||
provider = "rsync"
|
||||
upstream = "rsync://foo.bar/"
|
||||
interval = 720
|
||||
retry = 2
|
||||
timeout = 3600
|
||||
mirror_dir = "/data/foo"
|
||||
success_exit_codes = [30, 40]
|
||||
`
|
||||
|
||||
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
|
||||
So(err, ShouldEqual, nil)
|
||||
defer tmpfile.Close()
|
||||
|
||||
cfg, err := LoadConfig(tmpfile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
providers := map[string]mirrorProvider{}
|
||||
for _, m := range cfg.Mirrors {
|
||||
p := newMirrorProvider(m, cfg)
|
||||
providers[p.Name()] = p
|
||||
}
|
||||
|
||||
p, ok := providers["foo"].(*rsyncProvider)
|
||||
So(ok, ShouldBeTrue)
|
||||
So(p.successExitCodes, ShouldResemble, []int{10, 20, 30, 40})
|
||||
})
|
||||
}
|
||||
|
126 worker/docker.go Normal file
@ -0,0 +1,126 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/codeskyblue/go-sh"
|
||||
)
|
||||
|
||||
type dockerHook struct {
|
||||
emptyHook
|
||||
image string
|
||||
volumes []string
|
||||
options []string
|
||||
memoryLimit MemBytes
|
||||
}
|
||||
|
||||
func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
|
||||
volumes := []string{}
|
||||
volumes = append(volumes, gCfg.Volumes...)
|
||||
volumes = append(volumes, mCfg.DockerVolumes...)
|
||||
if len(mCfg.ExcludeFile) > 0 {
|
||||
arg := fmt.Sprintf("%s:%s:ro", mCfg.ExcludeFile, mCfg.ExcludeFile)
|
||||
volumes = append(volumes, arg)
|
||||
}
|
||||
|
||||
options := []string{}
|
||||
options = append(options, gCfg.Options...)
|
||||
options = append(options, mCfg.DockerOptions...)
|
||||
|
||||
return &dockerHook{
|
||||
emptyHook: emptyHook{
|
||||
provider: p,
|
||||
},
|
||||
image: mCfg.DockerImage,
|
||||
volumes: volumes,
|
||||
options: options,
|
||||
memoryLimit: mCfg.MemoryLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dockerHook) preExec() error {
|
||||
p := d.provider
|
||||
logDir := p.LogDir()
|
||||
logFile := p.LogFile()
|
||||
workingDir := p.WorkingDir()
|
||||
|
||||
if _, err := os.Stat(workingDir); os.IsNotExist(err) {
|
||||
logger.Debugf("Making dir %s", workingDir)
|
||||
if err = os.MkdirAll(workingDir, 0755); err != nil {
|
||||
return fmt.Errorf("Error making dir %s: %s", workingDir, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Override workingDir
|
||||
ctx := p.EnterContext()
|
||||
ctx.Set(
|
||||
"volumes", []string{
|
||||
fmt.Sprintf("%s:%s", logDir, logDir),
|
||||
fmt.Sprintf("%s:%s", logFile, logFile),
|
||||
fmt.Sprintf("%s:%s", workingDir, workingDir),
|
||||
},
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dockerHook) postExec() error {
|
||||
// sh.Command(
|
||||
// "docker", "rm", "-f", d.Name(),
|
||||
// ).Run()
|
||||
name := d.Name()
|
||||
retry := 10
|
||||
for ; retry > 0; retry-- {
|
||||
out, err := sh.Command(
|
||||
"docker", "ps", "-a",
|
||||
"--filter", "name=^"+name+"$",
|
||||
"--format", "{{.Status}}",
|
||||
).Output()
|
||||
if err != nil {
|
||||
logger.Errorf("docker ps failed: %v", err)
|
||||
break
|
||||
}
|
||||
if len(out) == 0 {
|
||||
break
|
||||
}
|
||||
logger.Debugf("container %s still exists: '%s'", name, string(out))
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
if retry == 0 {
|
||||
logger.Warningf("container %s not removed automatically, next sync may fail", name)
|
||||
}
|
||||
d.provider.ExitContext()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Volumes returns the configured volumes and
|
||||
// runtime-needed volumes, including mirror dirs
|
||||
// and log files
|
||||
func (d *dockerHook) Volumes() []string {
|
||||
vols := make([]string, len(d.volumes))
|
||||
copy(vols, d.volumes)
|
||||
|
||||
p := d.provider
|
||||
ctx := p.Context()
|
||||
if ivs, ok := ctx.Get("volumes"); ok {
|
||||
vs := ivs.([]string)
|
||||
vols = append(vols, vs...)
|
||||
}
|
||||
return vols
|
||||
}
|
||||
|
||||
func (d *dockerHook) LogFile() string {
|
||||
p := d.provider
|
||||
ctx := p.Context()
|
||||
if iv, ok := ctx.Get(_LogFileKey + ":docker"); ok {
|
||||
v := iv.(string)
|
||||
return v
|
||||
}
|
||||
return p.LogFile()
|
||||
}
|
||||
|
||||
func (d *dockerHook) Name() string {
|
||||
p := d.provider
|
||||
return "tunasync-job-" + p.Name()
|
||||
}
|
134 worker/docker_test.go Normal file
@ -0,0 +1,134 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
|
||||
"github.com/codeskyblue/go-sh"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func cmdRun(p string, args []string) {
|
||||
cmd := exec.Command(p, args...)
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
logger.Debugf("cmdRun failed %s", err)
|
||||
return
|
||||
}
|
||||
logger.Debugf("cmdRun: ", string(out))
|
||||
}
|
||||
|
||||
func getDockerByName(name string) (string, error) {
|
||||
// docker ps -f 'name=$name' --format '{{.Names}}'
|
||||
out, err := sh.Command(
|
||||
"docker", "ps", "-a",
|
||||
"--filter", "name="+name,
|
||||
"--format", "{{.Names}}",
|
||||
).Output()
|
||||
if err == nil {
|
||||
logger.Debugf("docker ps: '%s'", string(out))
|
||||
}
|
||||
return string(out), err
|
||||
}
|
||||
|
||||
func TestDocker(t *testing.T) {
|
||||
Convey("Docker Should Work", t, func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
cmdScript := filepath.Join(tmpDir, "cmd.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
expectedOutput := "HELLO_WORLD"
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-docker",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "/bin/cmd.sh",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
env: map[string]string{
|
||||
"TEST_CONTENT": expectedOutput,
|
||||
},
|
||||
}
|
||||
|
||||
cmdScriptContent := `#!/bin/sh
|
||||
echo ${TEST_CONTENT}
|
||||
sleep 20
|
||||
`
|
||||
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
d := &dockerHook{
|
||||
emptyHook: emptyHook{
|
||||
provider: provider,
|
||||
},
|
||||
image: "alpine:3.8",
|
||||
volumes: []string{
|
||||
fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
|
||||
},
|
||||
memoryLimit: 512 * units.MiB,
|
||||
}
|
||||
provider.AddHook(d)
|
||||
So(provider.Docker(), ShouldNotBeNil)
|
||||
|
||||
err = d.preExec()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
cmdRun("docker", []string{"images"})
|
||||
exitedErr := make(chan error, 1)
|
||||
go func() {
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
logger.Debugf("provider.Run() exited")
|
||||
if err != nil {
|
||||
logger.Errorf("provider.Run() failed: %v", err)
|
||||
}
|
||||
exitedErr <- err
|
||||
}()
|
||||
|
||||
// Wait for docker running
|
||||
for wait := 0; wait < 8; wait++ {
|
||||
names, err := getDockerByName(d.Name())
|
||||
So(err, ShouldBeNil)
|
||||
if names != "" {
|
||||
break
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
// cmdRun("ps", []string{"aux"})
|
||||
|
||||
// assert container running
|
||||
names, err := getDockerByName(d.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(names, ShouldEqual, d.Name()+"\n")
|
||||
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// cmdRun("ps", []string{"aux"})
|
||||
<-exitedErr
|
||||
|
||||
// container should be terminated and removed
|
||||
names, err = getDockerByName(d.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(names, ShouldEqual, "")
|
||||
|
||||
// check log content
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput+"\n")
|
||||
|
||||
d.postExec()
|
||||
})
|
||||
}
|
@ -18,7 +18,6 @@ const (
|
||||
|
||||
type execPostHook struct {
|
||||
emptyHook
|
||||
provider mirrorProvider
|
||||
|
||||
// exec on success or on failure
|
||||
execOn uint8
|
||||
@ -37,9 +36,11 @@ func newExecPostHook(provider mirrorProvider, execOn uint8, command string) (*ex
|
||||
}
|
||||
|
||||
return &execPostHook{
|
||||
provider: provider,
|
||||
execOn: execOn,
|
||||
command: cmd,
|
||||
emptyHook: emptyHook{
|
||||
provider: provider,
|
||||
},
|
||||
execOn: execOn,
|
||||
command: cmd,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -71,6 +72,7 @@ func (h *execPostHook) Do() error {
|
||||
"TUNASYNC_MIRROR_NAME": p.Name(),
|
||||
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
|
||||
"TUNASYNC_UPSTREAM_URL": p.Upstream(),
|
||||
"TUNASYNC_LOG_DIR": p.LogDir(),
|
||||
"TUNASYNC_LOG_FILE": p.LogFile(),
|
||||
"TUNASYNC_JOB_EXIT_STATUS": exitStatus,
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@ -13,7 +12,7 @@ import (
|
||||
|
||||
func TestExecPost(t *testing.T) {
|
||||
Convey("ExecPost should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
@ -46,7 +45,7 @@ echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
@ -64,7 +63,7 @@ echo $TUNASYNC_LOG_FILE
|
||||
|
||||
expectedOutput := "success\n"
|
||||
|
||||
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
|
||||
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(outputContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
@ -85,14 +84,14 @@ echo $TUNASYNC_LOG_FILE
|
||||
exit 1
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
for i := 0; i < maxRetry; i++ {
|
||||
for i := 0; i < defaultMaxRetry; i++ {
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
@ -105,7 +104,7 @@ exit 1
|
||||
|
||||
expectedOutput := "failure\n"
|
||||
|
||||
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
|
||||
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(outputContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
114 worker/job.go
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
tunasync "github.com/tuna/tunasync/internal"
|
||||
)
|
||||
@ -14,12 +15,13 @@ import (
|
||||
type ctrlAction uint8
|
||||
|
||||
const (
|
||||
jobStart ctrlAction = iota
|
||||
jobStop // stop syncing keep the job
|
||||
jobDisable // disable the job (stops goroutine)
|
||||
jobRestart // restart syncing
|
||||
jobPing // ensure the goroutine is alive
|
||||
jobHalt // worker halts
|
||||
jobStart ctrlAction = iota
|
||||
jobStop // stop syncing keep the job
|
||||
jobDisable // disable the job (stops goroutine)
|
||||
jobRestart // restart syncing
|
||||
jobPing // ensure the goroutine is alive
|
||||
jobHalt // worker halts
|
||||
jobForceStart // ignore concurrent limit
|
||||
)
|
||||
|
||||
type jobMessage struct {
|
||||
@ -51,6 +53,7 @@ type mirrorJob struct {
|
||||
ctrlChan chan ctrlAction
|
||||
disabled chan empty
|
||||
state uint32
|
||||
size string
|
||||
}
|
||||
|
||||
func newMirrorJob(provider mirrorProvider) *mirrorJob {
|
||||
@ -84,10 +87,12 @@ func (m *mirrorJob) SetProvider(provider mirrorProvider) error {
|
||||
|
||||
// runMirrorJob is the goroutine where syncing job runs in
|
||||
// arguments:
|
||||
// provider: mirror provider object
|
||||
// ctrlChan: receives messages from the manager
|
||||
// managerChan: push messages to the manager, this channel should have a larger buffer
|
||||
// semaphore: make sure the concurrent running syncing job won't explode
|
||||
//
|
||||
// provider: mirror provider object
|
||||
// ctrlChan: receives messages from the manager
|
||||
// managerChan: push messages to the manager, this channel should have a larger buffer
|
||||
// semaphore: make sure the concurrent running syncing job won't explode
|
||||
//
|
||||
// TODO: message struct for managerChan
|
||||
func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) error {
|
||||
jobsDone.Add(1)
|
||||
@ -110,7 +115,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
managerChan <- jobMessage{
|
||||
tunasync.Failed, m.Name(),
|
||||
fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
|
||||
false,
|
||||
true,
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -136,7 +141,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
return err
|
||||
}
|
||||
|
||||
for retry := 0; retry < maxRetry; retry++ {
|
||||
for retry := 0; retry < provider.Retry(); retry++ {
|
||||
stopASAP := false // stop job as soon as possible
|
||||
|
||||
if retry > 0 {
|
||||
@ -152,26 +157,43 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
|
||||
var syncErr error
|
||||
syncDone := make(chan error, 1)
|
||||
started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
|
||||
go func() {
|
||||
err := provider.Run()
|
||||
if !stopASAP {
|
||||
syncDone <- err
|
||||
}
|
||||
err := provider.Run(started)
|
||||
syncDone <- err
|
||||
}()
|
||||
|
||||
select { // Wait until provider started or error happened
|
||||
case err := <-syncDone:
|
||||
logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
|
||||
syncDone <- err // it will be read again later
|
||||
case <-started:
|
||||
logger.Debug("provider started")
|
||||
}
|
||||
// Now terminating the provider is feasible
|
||||
|
||||
var termErr error
|
||||
timeout := provider.Timeout()
|
||||
if timeout <= 0 {
|
||||
timeout = 100000 * time.Hour // never time out
|
||||
}
|
||||
select {
|
||||
case syncErr = <-syncDone:
|
||||
logger.Debug("syncing done")
|
||||
case <-time.After(timeout):
|
||||
logger.Notice("provider timeout")
|
||||
termErr = provider.Terminate()
|
||||
syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
|
||||
case <-kill:
|
||||
logger.Debug("received kill")
|
||||
stopASAP = true
|
||||
err := provider.Terminate()
|
||||
if err != nil {
|
||||
logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
termErr = provider.Terminate()
|
||||
syncErr = errors.New("killed by manager")
|
||||
}
|
||||
if termErr != nil {
|
||||
logger.Errorf("failed to terminate provider %s: %s", m.Name(), termErr.Error())
|
||||
return termErr
|
||||
}
|
||||
|
||||
// post-exec hooks
|
||||
herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
|
||||
@ -182,26 +204,33 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
if syncErr == nil {
|
||||
// syncing success
|
||||
logger.Noticef("succeeded syncing %s", m.Name())
|
||||
managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
|
||||
// post-success hooks
|
||||
logger.Debug("post-success hooks")
|
||||
err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
// syncing failed
|
||||
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
|
||||
// post-fail hooks
|
||||
logger.Debug("post-fail hooks")
|
||||
err := runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if syncErr == nil {
|
||||
// syncing success
|
||||
m.size = provider.DataSize()
|
||||
managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncing failed
|
||||
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
|
||||
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == maxRetry-1) && (m.State() == stateReady)}
|
||||
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == provider.Retry()-1) && (m.State() == stateReady)}
|
||||
|
||||
// post-fail hooks
|
||||
logger.Debug("post-fail hooks")
|
||||
err = runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// gracefully exit
|
||||
if stopASAP {
|
||||
logger.Debug("No retry, exit directly")
|
||||
@ -212,22 +241,26 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
return nil
|
||||
}

runJob := func(kill <-chan empty, jobDone chan<- empty) {
runJob := func(kill <-chan empty, jobDone chan<- empty, bypassSemaphore <-chan empty) {
select {
case semaphore <- empty{}:
defer func() { <-semaphore }()
runJobWrapper(kill, jobDone)
case <-bypassSemaphore:
logger.Noticef("Concurrent limit ignored by %s", m.Name())
runJobWrapper(kill, jobDone)
case <-kill:
jobDone <- empty{}
return
}
}
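The new bypassSemaphore branch is just a second way to win the select: a force-started job gets a token on its private channel and therefore no longer has to wait for a slot in the shared semaphore. A small stand-alone sketch, with all identifiers as placeholders rather than the worker's real names:

package main

import (
	"fmt"
	"time"
)

// acquireOrBypass sketches the select above: normally a job takes a
// slot from the shared semaphore, but a buffered "bypass" channel lets
// a force-started job run immediately, ignoring the concurrency limit.
func acquireOrBypass(sem chan struct{}, bypass <-chan struct{}, run func()) {
	select {
	case sem <- struct{}{}: // normal path: take a slot
		defer func() { <-sem }()
		run()
	case <-bypass: // forced path: skip the limit
		run()
	}
}

func main() {
	sem := make(chan struct{}, 2)    // at most 2 concurrent jobs
	bypass := make(chan struct{}, 1) // at most one pending "force start"
	bypass <- struct{}{}             // pretend the manager sent jobForceStart
	acquireOrBypass(sem, bypass, func() {
		fmt.Println("job running")
		time.Sleep(10 * time.Millisecond)
	})
}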
|
||||
|
||||
bypassSemaphore := make(chan empty, 1)
|
||||
for {
|
||||
if m.State() == stateReady {
|
||||
kill := make(chan empty)
|
||||
jobDone := make(chan empty)
|
||||
go runJob(kill, jobDone)
|
||||
go runJob(kill, jobDone, bypassSemaphore)
|
||||
|
||||
_wait_for_job:
|
||||
select {
|
||||
@ -248,7 +281,14 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
m.SetState(stateReady)
|
||||
close(kill)
|
||||
<-jobDone
|
||||
time.Sleep(time.Second) // Restart may fail if the process was not exited yet
|
||||
continue
|
||||
case jobForceStart:
|
||||
select { //non-blocking
|
||||
default:
|
||||
case bypassSemaphore <- empty{}:
|
||||
}
|
||||
fallthrough
|
||||
case jobStart:
|
||||
m.SetState(stateReady)
|
||||
goto _wait_for_job
|
||||
@ -272,8 +312,14 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
|
||||
case jobDisable:
|
||||
m.SetState(stateDisabled)
|
||||
return nil
|
||||
case jobForceStart:
|
||||
select { //non-blocking
|
||||
default:
|
||||
case bypassSemaphore <- empty{}:
|
||||
}
|
||||
fallthrough
|
||||
case jobRestart:
|
||||
m.SetState(stateReady)
|
||||
fallthrough
|
||||
case jobStart:
|
||||
m.SetState(stateReady)
|
||||
default:
|
||||
|
@ -2,7 +2,6 @@ package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@ -17,7 +16,7 @@ func TestMirrorJob(t *testing.T) {
|
||||
InitLogger(true, true, false)
|
||||
|
||||
Convey("MirrorJob should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
@ -31,6 +30,7 @@ func TestMirrorJob(t *testing.T) {
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 1 * time.Second,
|
||||
timeout: 7 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
@ -41,6 +41,7 @@ func TestMirrorJob(t *testing.T) {
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
So(provider.Timeout(), ShouldEqual, c.timeout)
|
||||
|
||||
Convey("For a normal mirror job", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
@ -56,9 +57,9 @@ func TestMirrorJob(t *testing.T) {
|
||||
provider.upstreamURL,
|
||||
provider.LogFile(),
|
||||
)
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := ioutil.ReadFile(scriptFile)
|
||||
readedScriptContent, err := os.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
@ -84,7 +85,7 @@ func TestMirrorJob(t *testing.T) {
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobStart
|
||||
@ -112,15 +113,26 @@ func TestMirrorJob(t *testing.T) {
|
||||
|
||||
})
|
||||
|
||||
Convey("When running long jobs", func(ctx C) {
|
||||
Convey("When running long jobs with post-fail hook", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo '++++++'
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
sleep 5
|
||||
echo $0 sleeping
|
||||
sleep 3
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
echo '------'
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
hookScriptFile := filepath.Join(tmpDir, "hook.sh")
|
||||
err = os.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
h, err := newExecPostHook(provider, execOnFailure, hookScriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
provider.AddHook(h)
|
||||
|
||||
managerChan := make(chan jobMessage, 10)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
@ -140,8 +152,67 @@ echo $TUNASYNC_WORKING_DIR
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
Convey("If we kill it then start it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobStop
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
logger.Debugf("Now starting...\n")
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
Convey("When running long jobs", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
sleep 5
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
`
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
managerChan := make(chan jobMessage, 10)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
Convey("If we kill it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobStart // should be ignored
|
||||
|
||||
job.ctrlChan <- jobStop
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
|
||||
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobDisable
|
||||
@ -164,14 +235,305 @@ echo $TUNASYNC_WORKING_DIR
|
||||
provider.WorkingDir(), provider.WorkingDir(),
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
Convey("If we restart it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobRestart
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
So(msg.msg, ShouldEqual, "killed by manager")
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n",
|
||||
provider.WorkingDir(), provider.WorkingDir(),
|
||||
)
|
||||
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
Convey("If we disable it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
So(msg.msg, ShouldEqual, "killed by manager")
|
||||
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
Convey("If we stop it twice, than start it", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobStop
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
So(msg.msg, ShouldEqual, "killed by manager")
|
||||
|
||||
job.ctrlChan <- jobStop // should be ignored
|
||||
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Success)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"%s\n%s\n",
|
||||
provider.WorkingDir(), provider.WorkingDir(),
|
||||
)
|
||||
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
})
|
||||
|
||||
Convey("When a job timed out", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
sleep 10
|
||||
echo $TUNASYNC_WORKING_DIR
|
||||
`
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
managerChan := make(chan jobMessage, 10)
|
||||
semaphore := make(chan empty, 1)
|
||||
job := newMirrorJob(provider)
|
||||
|
||||
Convey("It should be automatically terminated", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobStart // should be ignored
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
|
||||
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
|
||||
Convey("It should be retried", func(ctx C) {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
time.Sleep(1 * time.Second)
|
||||
msg := <-managerChan
|
||||
So(msg.status, ShouldEqual, PreSyncing)
|
||||
|
||||
for i := 0; i < defaultMaxRetry; i++ {
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Syncing)
|
||||
|
||||
job.ctrlChan <- jobStart // should be ignored
|
||||
|
||||
msg = <-managerChan
|
||||
So(msg.status, ShouldEqual, Failed)
|
||||
So(msg.msg, ShouldContainSubstring, "timeout after")
|
||||
// re-schedule after last try
|
||||
So(msg.schedule, ShouldEqual, i == defaultMaxRetry-1)
|
||||
}
|
||||
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestConcurrentMirrorJobs(t *testing.T) {
|
||||
|
||||
InitLogger(true, true, false)
|
||||
|
||||
Convey("Concurrent MirrorJobs should work", t, func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
const CONCURRENT = 5
|
||||
|
||||
var providers [CONCURRENT]*cmdProvider
|
||||
var jobs [CONCURRENT]*mirrorJob
|
||||
for i := 0; i < CONCURRENT; i++ {
|
||||
c := cmdConfig{
|
||||
name: fmt.Sprintf("job-%d", i),
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "sleep 2",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: "/dev/null",
|
||||
interval: 10 * time.Second,
|
||||
}
|
||||
|
||||
var err error
|
||||
providers[i], err = newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
jobs[i] = newMirrorJob(providers[i])
|
||||
}
|
||||
|
||||
managerChan := make(chan jobMessage, 10)
|
||||
semaphore := make(chan empty, CONCURRENT-2)
|
||||
|
||||
countingJobs := func(managerChan chan jobMessage, totalJobs, concurrentCheck int) (peakConcurrent, counterFailed int) {
|
||||
counterEnded := 0
|
||||
counterRunning := 0
|
||||
peakConcurrent = 0
|
||||
counterFailed = 0
|
||||
for counterEnded < totalJobs {
|
||||
msg := <-managerChan
|
||||
switch msg.status {
|
||||
case PreSyncing:
|
||||
counterRunning++
|
||||
case Syncing:
|
||||
case Failed:
|
||||
counterFailed++
|
||||
fallthrough
|
||||
case Success:
|
||||
counterEnded++
|
||||
counterRunning--
|
||||
default:
|
||||
So(0, ShouldEqual, 1)
|
||||
}
|
||||
// Test if semaphore works
|
||||
So(counterRunning, ShouldBeLessThanOrEqualTo, concurrentCheck)
|
||||
if counterRunning > peakConcurrent {
|
||||
peakConcurrent = counterRunning
|
||||
}
|
||||
}
|
||||
// select {
|
||||
// case msg := <-managerChan:
|
||||
// logger.Errorf("extra message received: %v", msg)
|
||||
// So(0, ShouldEqual, 1)
|
||||
// case <-time.After(2 * time.Second):
|
||||
// }
|
||||
return
|
||||
}
|
||||
|
||||
Convey("When we run them all", func(ctx C) {
|
||||
for _, job := range jobs {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
}
|
||||
|
||||
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
|
||||
|
||||
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
|
||||
So(counterFailed, ShouldEqual, 0)
|
||||
|
||||
for _, job := range jobs {
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
}
|
||||
})
|
||||
Convey("If we cancel one job", func(ctx C) {
|
||||
for _, job := range jobs {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobRestart
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Cancel the one waiting for semaphore
|
||||
jobs[len(jobs)-1].ctrlChan <- jobStop
|
||||
|
||||
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT-1, CONCURRENT-2)
|
||||
|
||||
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
|
||||
So(counterFailed, ShouldEqual, 0)
|
||||
|
||||
for _, job := range jobs {
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
}
|
||||
})
|
||||
Convey("If we override the concurrent limit", func(ctx C) {
|
||||
for _, job := range jobs {
|
||||
go job.Run(managerChan, semaphore)
|
||||
job.ctrlChan <- jobStart
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
jobs[len(jobs)-1].ctrlChan <- jobForceStart
|
||||
jobs[len(jobs)-2].ctrlChan <- jobForceStart
|
||||
|
||||
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT)
|
||||
|
||||
So(peakConcurrent, ShouldEqual, CONCURRENT)
|
||||
So(counterFailed, ShouldEqual, 0)
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// fmt.Println("Restart them")
|
||||
|
||||
for _, job := range jobs {
|
||||
job.ctrlChan <- jobStart
|
||||
}
|
||||
|
||||
peakConcurrent, counterFailed = countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
|
||||
|
||||
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
|
||||
So(counterFailed, ShouldEqual, 0)
|
||||
|
||||
for _, job := range jobs {
|
||||
job.ctrlChan <- jobDisable
|
||||
<-job.disabled
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -2,7 +2,6 @@ package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@ -14,12 +13,13 @@ import (
|
||||
|
||||
type logLimiter struct {
|
||||
emptyHook
|
||||
provider mirrorProvider
|
||||
}
|
||||
|
||||
func newLogLimiter(provider mirrorProvider) *logLimiter {
|
||||
return &logLimiter{
|
||||
provider: provider,
|
||||
emptyHook: emptyHook{
|
||||
provider: provider,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@ -38,7 +38,7 @@ func (l *logLimiter) preExec() error {
|
||||
}

logDir := p.LogDir()
files, err := ioutil.ReadDir(logDir)
files, err := os.ReadDir(logDir)
if err != nil {
if os.IsNotExist(err) {
os.MkdirAll(logDir, 0755)
@@ -49,7 +49,8 @@ func (l *logLimiter) preExec() error {
matchedFiles := []os.FileInfo{}
for _, f := range files {
if strings.HasPrefix(f.Name(), p.Name()) {
matchedFiles = append(matchedFiles, f)
info, _ := f.Info()
matchedFiles = append(matchedFiles, info)
}
}
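The extra Info() call is a consequence of the ioutil.ReadDir to os.ReadDir migration: os.ReadDir returns []os.DirEntry rather than []fs.FileInfo, so code that still needs sizes and modification times has to ask each entry for its FileInfo. For example:

package main

import (
	"fmt"
	"io/fs"
	"os"
)

func main() {
	// os.ReadDir (Go 1.16+) returns []os.DirEntry, not []fs.FileInfo,
	// so code that still needs FileInfo (size, mod time) calls Info().
	entries, err := os.ReadDir(".")
	if err != nil {
		panic(err)
	}
	var infos []fs.FileInfo
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			continue // the entry may have disappeared since ReadDir
		}
		infos = append(infos, info)
	}
	for _, i := range infos {
		fmt.Println(i.Name(), i.Size())
	}
}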
|
||||
|
||||
|
@ -2,7 +2,6 @@ package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@ -14,8 +13,8 @@ import (
|
||||
|
||||
func TestLogLimiter(t *testing.T) {
|
||||
Convey("LogLimiter should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpLogDir, err := ioutil.TempDir("", "tunasync-log")
|
||||
tmpDir, _ := os.MkdirTemp("", "tunasync")
|
||||
tmpLogDir, err := os.MkdirTemp("", "tunasync-log")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
defer os.RemoveAll(tmpLogDir)
|
||||
So(err, ShouldBeNil)
|
||||
@ -58,7 +57,7 @@ echo $TUNASYNC_UPSTREAM_URL
|
||||
echo $TUNASYNC_LOG_FILE
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
@ -86,7 +85,7 @@ echo $TUNASYNC_LOG_FILE
|
||||
logFile,
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
|
||||
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
@ -104,7 +103,7 @@ echo $TUNASYNC_LOG_FILE
|
||||
sleep 5
|
||||
`
|
||||
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
go job.Run(managerChan, semaphore)
|
||||
@ -134,10 +133,10 @@ sleep 5
|
||||
logFile,
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
|
||||
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest"))
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
loggedContent, err = ioutil.ReadFile(logFile + ".fail")
|
||||
loggedContent, err = os.ReadFile(logFile + ".fail")
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
@@ -24,9 +24,9 @@ type mirrorProvider interface {

Type() providerEnum

// run mirror job in background
Run() error
// run mirror job in background
// Start then Wait
Run(started chan empty) error
// Start the job
Start() error
// Wait job to finish
Wait() error
@@ -36,16 +36,23 @@ type mirrorProvider interface {
IsRunning() bool
// Cgroup
Cgroup() *cgroupHook
// ZFS
ZFS() *zfsHook
// Docker
Docker() *dockerHook

AddHook(hook jobHook)
Hooks() []jobHook

Interval() time.Duration
Retry() int
Timeout() time.Duration

WorkingDir() string
LogDir() string
LogFile() string
IsMaster() bool
DataSize() string

// enter context
EnterContext() *Context
@@ -53,6 +60,10 @@
ExitContext() *Context
// return context
Context() *Context

// set in newMirrorProvider, used by cmdJob.Wait
SetSuccessExitCodes(codes []int)
GetSuccessExitCodes() []int
}
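The comment "Start then Wait" summarises the contract: Run blocks until the job finishes, but signals started as soon as the underlying process is up, which is what lets mirrorJob.Run arm its kill and timeout handling at the right moment. A toy implementation of that contract; this is an illustrative stand-in rather than one of the real providers, though rsyncProvider later in this diff follows the same shape:

package main

import (
	"fmt"
	"time"
)

type empty struct{}

// fakeProvider demonstrates the Run-is-Start-then-Wait contract of the
// interface above.
type fakeProvider struct{}

func (p *fakeProvider) Start() error { return nil }

func (p *fakeProvider) Wait() error {
	time.Sleep(50 * time.Millisecond) // pretend the sync takes a while
	return nil
}

func (p *fakeProvider) Run(started chan empty) error {
	if err := p.Start(); err != nil {
		return err
	}
	started <- empty{} // the caller may now terminate or time us out
	return p.Wait()
}

func main() {
	p := &fakeProvider{}
	started := make(chan empty, 1)
	fmt.Println("run:", p.Run(started))
}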

// newProvider creates a mirrorProvider instance
@@ -76,12 +87,18 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
}
if mirrorDir == "" {
mirrorDir = filepath.Join(
cfg.Global.MirrorDir, mirror.Name,
cfg.Global.MirrorDir, mirror.MirrorSubDir, mirror.Name,
)
}
if mirror.Interval == 0 {
mirror.Interval = cfg.Global.Interval
}
if mirror.Retry == 0 {
mirror.Retry = cfg.Global.Retry
}
if mirror.Timeout == 0 {
mirror.Timeout = cfg.Global.Timeout
}
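Each of these fields falls back to the [global] section when the mirror leaves it at zero, so a mirror entry only has to spell out the values it wants to override. A rough worker config fragment is sketched below; the key names are assumed to follow the struct fields shown here (interval in minutes, retry, timeout in seconds), so treat them as an assumption and check the shipped configuration documentation before relying on them.

[global]
interval = 1440   # minutes between syncs
retry = 3
timeout = 86400   # seconds before a running job is terminated

[[mirrors]]
name = "example"
interval = 60     # overrides the global value
# retry and timeout omitted: inherit the global settings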
|
||||
logDir = formatLogDir(logDir, mirror)
|
||||
|
||||
// IsMaster
|
||||
@ -103,57 +120,78 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
|
||||
upstreamURL: mirror.Upstream,
|
||||
command: mirror.Command,
|
||||
workingDir: mirrorDir,
|
||||
failOnMatch: mirror.FailOnMatch,
|
||||
sizePattern: mirror.SizePattern,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
retry: mirror.Retry,
|
||||
timeout: time.Duration(mirror.Timeout) * time.Second,
|
||||
env: mirror.Env,
|
||||
}
|
||||
p, err := newCmdProvider(pc)
|
||||
p.isMaster = isMaster
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.isMaster = isMaster
|
||||
provider = p
|
||||
case provRsync:
|
||||
rc := rsyncConfig{
|
||||
name: mirror.Name,
|
||||
upstreamURL: mirror.Upstream,
|
||||
rsyncCmd: mirror.Command,
|
||||
username: mirror.Username,
|
||||
password: mirror.Password,
|
||||
excludeFile: mirror.ExcludeFile,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
useIPv6: mirror.UseIPv6,
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
name: mirror.Name,
|
||||
upstreamURL: mirror.Upstream,
|
||||
rsyncCmd: mirror.Command,
|
||||
username: mirror.Username,
|
||||
password: mirror.Password,
|
||||
excludeFile: mirror.ExcludeFile,
|
||||
extraOptions: mirror.RsyncOptions,
|
||||
rsyncNeverTimeout: mirror.RsyncNoTimeo,
|
||||
rsyncTimeoutValue: mirror.RsyncTimeout,
|
||||
globalOptions: cfg.Global.RsyncOptions,
|
||||
overriddenOptions: mirror.RsyncOverride,
|
||||
useOverrideOnly: mirror.RsyncOverrideOnly,
|
||||
rsyncEnv: mirror.Env,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
useIPv6: mirror.UseIPv6,
|
||||
useIPv4: mirror.UseIPv4,
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
retry: mirror.Retry,
|
||||
timeout: time.Duration(mirror.Timeout) * time.Second,
|
||||
}
|
||||
p, err := newRsyncProvider(rc)
|
||||
p.isMaster = isMaster
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.isMaster = isMaster
|
||||
provider = p
|
||||
case provTwoStageRsync:
|
||||
rc := twoStageRsyncConfig{
|
||||
name: mirror.Name,
|
||||
stage1Profile: mirror.Stage1Profile,
|
||||
upstreamURL: mirror.Upstream,
|
||||
rsyncCmd: mirror.Command,
|
||||
username: mirror.Username,
|
||||
password: mirror.Password,
|
||||
excludeFile: mirror.ExcludeFile,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
useIPv6: mirror.UseIPv6,
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
name: mirror.Name,
|
||||
stage1Profile: mirror.Stage1Profile,
|
||||
upstreamURL: mirror.Upstream,
|
||||
rsyncCmd: mirror.Command,
|
||||
username: mirror.Username,
|
||||
password: mirror.Password,
|
||||
excludeFile: mirror.ExcludeFile,
|
||||
extraOptions: mirror.RsyncOptions,
|
||||
rsyncNeverTimeout: mirror.RsyncNoTimeo,
|
||||
rsyncTimeoutValue: mirror.RsyncTimeout,
|
||||
rsyncEnv: mirror.Env,
|
||||
workingDir: mirrorDir,
|
||||
logDir: logDir,
|
||||
logFile: filepath.Join(logDir, "latest.log"),
|
||||
useIPv6: mirror.UseIPv6,
|
||||
useIPv4: mirror.UseIPv4,
|
||||
interval: time.Duration(mirror.Interval) * time.Minute,
|
||||
retry: mirror.Retry,
|
||||
timeout: time.Duration(mirror.Timeout) * time.Second,
|
||||
}
|
||||
p, err := newTwoStageRsyncProvider(rc)
|
||||
p.isMaster = isMaster
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.isMaster = isMaster
|
||||
provider = p
|
||||
default:
|
||||
panic(errors.New("Invalid mirror provider"))
|
||||
@ -162,10 +200,26 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
|
||||
// Add Logging Hook
|
||||
provider.AddHook(newLogLimiter(provider))
|
||||
|
||||
// Add Cgroup Hook
|
||||
if cfg.Cgroup.Enable {
|
||||
// Add ZFS Hook
|
||||
if cfg.ZFS.Enable {
|
||||
provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
|
||||
}
|
||||
|
||||
// Add Btrfs Snapshot Hook
|
||||
if cfg.BtrfsSnapshot.Enable {
|
||||
provider.AddHook(newBtrfsSnapshotHook(provider, cfg.BtrfsSnapshot.SnapshotPath, mirror))
|
||||
}
|
||||
|
||||
// Add Docker Hook
|
||||
if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
|
||||
provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))
|
||||
|
||||
} else if cfg.Cgroup.Enable {
|
||||
// Add Cgroup Hook
|
||||
provider.AddHook(
|
||||
newCgroupHook(provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group),
|
||||
newCgroupHook(
|
||||
provider, cfg.Cgroup, mirror.MemoryLimit,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
@ -199,5 +253,17 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
|
||||
}
|
||||
addHookFromCmdList(mirror.ExecOnFailureExtra, execOnFailure)
|
||||
|
||||
successExitCodes := []int{}
|
||||
if cfg.Global.SuccessExitCodes != nil {
|
||||
successExitCodes = append(successExitCodes, cfg.Global.SuccessExitCodes...)
|
||||
}
|
||||
if mirror.SuccessExitCodes != nil {
|
||||
successExitCodes = append(successExitCodes, mirror.SuccessExitCodes...)
|
||||
}
|
||||
if len(successExitCodes) > 0 {
|
||||
logger.Infof("Non-zero success exit codes set for mirror %s: %v", mirror.Name, successExitCodes)
|
||||
provider.SetSuccessExitCodes(successExitCodes)
|
||||
}
|
||||
|
||||
return provider
|
||||
}
|
||||
|
@ -2,9 +2,9 @@ package worker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -13,7 +13,7 @@ import (
|
||||
|
||||
func TestRsyncProvider(t *testing.T) {
|
||||
Convey("Rsync Provider should work", t, func() {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
@ -27,6 +27,7 @@ func TestRsyncProvider(t *testing.T) {
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
timeout: 100 * time.Second,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
@ -39,6 +40,7 @@ func TestRsyncProvider(t *testing.T) {
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
So(provider.Timeout(), ShouldEqual, c.timeout)
|
||||
|
||||
Convey("When entering a context (auto exit)", func() {
|
||||
func() {
|
||||
@ -73,55 +75,91 @@ func TestRsyncProvider(t *testing.T) {
|
||||
echo "syncing to $(pwd)"
|
||||
echo $RSYNC_PASSWORD $@
|
||||
sleep 1
|
||||
echo "Total file size: 1.33T bytes"
|
||||
echo "Done"
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
"Total file size: 1.33T bytes\n"+
|
||||
"Done\n",
|
||||
provider.WorkingDir(),
|
||||
targetDir,
|
||||
fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
|
||||
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
|
||||
"--delete --delete-after --delay-updates --safe-links "+
|
||||
"--timeout=120 --contimeout=120 -6 %s %s",
|
||||
"--timeout=120 -6 %s %s",
|
||||
provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
)
|
||||
|
||||
err = provider.Run()
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
So(provider.DataSize(), ShouldEqual, "1.33T")
|
||||
})
|
||||
|
||||
})
|
||||
Convey("If the rsync program fails", t, func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
Convey("in the rsyncProvider", func() {
|
||||
|
||||
c := rsyncConfig{
|
||||
name: "tuna",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
extraOptions: []string{"--somethine-invalid"},
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldNotBeNil)
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestRsyncProviderWithAuthentication(t *testing.T) {
|
||||
Convey("Rsync Provider with password should work", t, func() {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
proxyAddr := "127.0.0.1:1233"
|
||||
|
||||
c := rsyncConfig{
|
||||
name: "tuna",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
rsyncCmd: scriptFile,
|
||||
username: "tunasync",
|
||||
password: "tunasyncpassword",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
interval: 600 * time.Second,
|
||||
name: "tuna",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
rsyncCmd: scriptFile,
|
||||
username: "tunasync",
|
||||
password: "tunasyncpassword",
|
||||
workingDir: tmpDir,
|
||||
extraOptions: []string{"--delete-excluded"},
|
||||
rsyncTimeoutValue: 30,
|
||||
rsyncEnv: map[string]string{"RSYNC_PROXY": proxyAddr},
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv4: true,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newRsyncProvider(c)
|
||||
@ -136,30 +174,32 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
|
||||
Convey("Let's try a run", func() {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo "syncing to $(pwd)"
|
||||
echo $USER $RSYNC_PASSWORD $@
|
||||
echo $USER $RSYNC_PASSWORD $RSYNC_PROXY $@
|
||||
sleep 1
|
||||
echo "Done"
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
"Done\n",
|
||||
provider.WorkingDir(),
|
||||
targetDir,
|
||||
fmt.Sprintf(
|
||||
"%s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
|
||||
"%s %s %s -aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
|
||||
"--delete --delete-after --delay-updates --safe-links "+
|
||||
"--timeout=120 --contimeout=120 -6 %s %s",
|
||||
provider.username, provider.password, provider.upstreamURL, provider.WorkingDir(),
|
||||
"--timeout=30 -4 --delete-excluded %s %s",
|
||||
provider.username, provider.password, proxyAddr,
|
||||
provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
)
|
||||
|
||||
err = provider.Run()
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
@ -168,9 +208,144 @@ exit 0
|
||||
})
|
||||
}
|
||||
|
||||
func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
|
||||
Convey("Rsync Provider with overridden options should work", t, func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := rsyncConfig{
|
||||
name: "tuna",
|
||||
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
rsyncNeverTimeout: true,
|
||||
overriddenOptions: []string{"-aHvh", "--no-o", "--no-g", "--stats"},
|
||||
extraOptions: []string{"--delete-excluded"},
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
|
||||
Convey("Let's try a run", func() {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo "syncing to $(pwd)"
|
||||
echo $@
|
||||
sleep 1
|
||||
echo "Done"
|
||||
exit 0
|
||||
`
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"syncing to %s\n"+
|
||||
"-aHvh --no-o --no-g --stats -6 --delete-excluded %s %s\n"+
|
||||
"Done\n",
|
||||
targetDir,
|
||||
provider.upstreamURL,
|
||||
provider.WorkingDir(),
|
||||
)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func TestRsyncProviderWithDocker(t *testing.T) {
|
||||
Convey("Rsync in Docker should work", t, func() {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
excludeFile := filepath.Join(tmpDir, "exclude.txt")
|
||||
|
||||
g := &Config{
|
||||
Global: globalConfig{
|
||||
Retry: 2,
|
||||
},
|
||||
Docker: dockerConfig{
|
||||
Enable: true,
|
||||
Volumes: []string{
|
||||
scriptFile + ":/bin/myrsync",
|
||||
"/etc/gai.conf:/etc/gai.conf:ro",
|
||||
},
|
||||
},
|
||||
}
|
||||
c := mirrorConfig{
|
||||
Name: "tuna",
|
||||
Provider: provRsync,
|
||||
Upstream: "rsync://rsync.tuna.moe/tuna/",
|
||||
Command: "/bin/myrsync",
|
||||
ExcludeFile: excludeFile,
|
||||
DockerImage: "alpine:3.8",
|
||||
LogDir: tmpDir,
|
||||
MirrorDir: tmpDir,
|
||||
UseIPv6: true,
|
||||
Timeout: 100,
|
||||
Interval: 600,
|
||||
}
|
||||
|
||||
provider := newMirrorProvider(c, g)
|
||||
|
||||
So(provider.Type(), ShouldEqual, provRsync)
|
||||
So(provider.Name(), ShouldEqual, c.Name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.MirrorDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.LogDir)
|
||||
|
||||
cmdScriptContent := `#!/bin/sh
|
||||
#echo "$@"
|
||||
while [[ $# -gt 0 ]]; do
|
||||
if [[ "$1" = "--exclude-from" ]]; then
|
||||
cat "$2"
|
||||
shift
|
||||
fi
|
||||
shift
|
||||
done
|
||||
`
|
||||
err = os.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
err = os.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
for _, hook := range provider.Hooks() {
|
||||
err = hook.preExec()
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
for _, hook := range provider.Hooks() {
|
||||
err = hook.postExec()
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, "__some_pattern")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCmdProvider(t *testing.T) {
|
||||
Convey("Command Provider should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
@ -215,71 +390,245 @@ echo $AOSP_REPO_BIN
|
||||
provider.LogFile(),
|
||||
"/usr/local/bin/repo",
|
||||
)
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := ioutil.ReadFile(scriptFile)
|
||||
readedScriptContent, err := os.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
err = provider.Run()
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
})
|
||||
|
||||
Convey("If a command fails", func() {
|
||||
scriptContent := `exit 1`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := ioutil.ReadFile(scriptFile)
|
||||
readedScriptContent, err := os.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
err = provider.Run()
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
})
|
||||
|
||||
Convey("If a long job is killed", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
sleep 5
|
||||
sleep 10
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
started := make(chan empty, 1)
|
||||
go func() {
|
||||
err = provider.Run()
|
||||
err := provider.Run(started)
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
<-started
|
||||
So(provider.IsRunning(), ShouldBeTrue)
|
||||
time.Sleep(1 * time.Second)
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
})
|
||||
})
|
||||
Convey("Command Provider without log file should work", t, func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
c := cmdConfig{
|
||||
name: "run-ls",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "ls",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: "/dev/null",
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.IsMaster(), ShouldEqual, false)
|
||||
So(provider.ZFS(), ShouldBeNil)
|
||||
So(provider.Type(), ShouldEqual, provCommand)
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
|
||||
Convey("Run the command", func() {
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
})
|
||||
})
|
||||
Convey("Command Provider with RegExprs should work", t, func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "run-uptime",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "uptime",
|
||||
failOnMatch: "",
|
||||
sizePattern: "",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
Convey("when fail-on-match regexp matches", func() {
|
||||
c.failOnMatch = `[a-z]+`
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldNotBeNil)
|
||||
So(provider.DataSize(), ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("when fail-on-match regexp does not match", func() {
|
||||
c.failOnMatch = `load average_`
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("when fail-on-match regexp meets /dev/null", func() {
|
||||
c.failOnMatch = `load average_`
|
||||
c.logFile = "/dev/null"
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("when size-pattern regexp matches", func() {
|
||||
c.sizePattern = `load average: ([\d\.]+)`
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
So(provider.DataSize(), ShouldNotBeEmpty)
|
||||
_, err = strconv.ParseFloat(provider.DataSize(), 32)
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("when size-pattern regexp does not match", func() {
|
||||
c.sizePattern = `load ave: ([\d\.]+)`
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
So(provider.DataSize(), ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("when size-pattern regexp meets /dev/null", func() {
|
||||
c.sizePattern = `load ave: ([\d\.]+)`
|
||||
c.logFile = "/dev/null"
|
||||
provider, err := newCmdProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldNotBeNil)
|
||||
So(provider.DataSize(), ShouldBeEmpty)
|
||||
})
|
||||
})
|
||||
Convey("Command Provider with successExitCodes should work", t, func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "cmd.sh")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := cmdConfig{
|
||||
name: "tuna-cmd",
|
||||
upstreamURL: "http://mirrors.tuna.moe/",
|
||||
command: "bash " + scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
interval: 600 * time.Second,
|
||||
}
|
||||
|
||||
provider, err := newCmdProvider(c)
|
||||
provider.SetSuccessExitCodes([]int{199, 200})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(provider.Type(), ShouldEqual, provCommand)
|
||||
So(provider.Name(), ShouldEqual, c.name)
|
||||
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
|
||||
So(provider.LogDir(), ShouldEqual, c.logDir)
|
||||
So(provider.LogFile(), ShouldEqual, c.logFile)
|
||||
So(provider.Interval(), ShouldEqual, c.interval)
|
||||
So(provider.GetSuccessExitCodes(), ShouldResemble, []int{199, 200})
|
||||
|
||||
Convey("Command exits with configured successExitCodes", func() {
|
||||
scriptContent := `exit 199`
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := os.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("Command exits with unknown exit code", func() {
|
||||
scriptContent := `exit 201`
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
readedScriptContent, err := os.ReadFile(scriptFile)
|
||||
So(err, ShouldBeNil)
|
||||
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
|
||||
|
||||
err = provider.Run(make(chan empty, 1))
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestTwoStageRsyncProvider(t *testing.T) {
|
||||
Convey("TwoStageRsync Provider should work", t, func(ctx C) {
|
||||
tmpDir, err := ioutil.TempDir("", "tunasync")
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
scriptFile := filepath.Join(tmpDir, "myrsync")
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
c := twoStageRsyncConfig{
|
||||
name: "tuna-two-stage-rsync",
|
||||
upstreamURL: "rsync://mirrors.tuna.moe/",
|
||||
stage1Profile: "debian",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
excludeFile: tmpFile,
|
||||
name: "tuna-two-stage-rsync",
|
||||
upstreamURL: "rsync://mirrors.tuna.moe/",
|
||||
stage1Profile: "debian",
|
||||
rsyncCmd: scriptFile,
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
useIPv6: true,
|
||||
excludeFile: tmpFile,
|
||||
rsyncTimeoutValue: 30,
|
||||
extraOptions: []string{"--delete-excluded", "--cache"},
|
||||
username: "hello",
|
||||
password: "world",
|
||||
}
|
||||
|
||||
provider, err := newTwoStageRsyncProvider(c)
|
||||
@ -300,12 +649,13 @@ sleep 1
|
||||
echo "Done"
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run()
|
||||
err = provider.Run(make(chan empty, 2))
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
@ -313,23 +663,23 @@ exit 0
|
||||
"syncing to %s\n"+
|
||||
"%s\n"+
|
||||
"Done\n",
|
||||
provider.WorkingDir(),
|
||||
targetDir,
|
||||
fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
|
||||
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
|
||||
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
|
||||
"--include=*.diff/ --include=by-hash/ --exclude=*.diff/Index --exclude=Contents* --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --exclude=i18n/* --exclude=dep11/* --exclude=installer-*/current --exclude=ls-lR* --timeout=30 -6 "+
|
||||
"--exclude-from %s %s %s",
|
||||
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
provider.WorkingDir(),
|
||||
targetDir,
|
||||
fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
|
||||
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
|
||||
"--delete --delete-after --delay-updates --safe-links "+
|
||||
"--timeout=120 --contimeout=120 -6 --exclude-from %s %s %s",
|
||||
"--delete-excluded --cache --timeout=30 -6 --exclude-from %s %s %s",
|
||||
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
|
||||
),
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
@ -338,32 +688,65 @@ exit 0
|
||||
Convey("Try terminating", func(ctx C) {
|
||||
scriptContent := `#!/bin/bash
|
||||
echo $@
|
||||
sleep 4
|
||||
sleep 10
|
||||
exit 0
|
||||
`
|
||||
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
started := make(chan empty, 2)
|
||||
go func() {
|
||||
err = provider.Run()
|
||||
err := provider.Run(started)
|
||||
ctx.So(err, ShouldNotBeNil)
|
||||
}()
|
||||
|
||||
<-started
|
||||
So(provider.IsRunning(), ShouldBeTrue)
|
||||
time.Sleep(1 * time.Second)
|
||||
err = provider.Terminate()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
expectedOutput := fmt.Sprintf(
|
||||
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
|
||||
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
|
||||
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
|
||||
"--include=*.diff/ --include=by-hash/ --exclude=*.diff/Index --exclude=Contents* --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --exclude=i18n/* --exclude=dep11/* --exclude=installer-*/current --exclude=ls-lR* --timeout=30 -6 "+
|
||||
"--exclude-from %s %s %s\n",
|
||||
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
|
||||
)
|
||||
|
||||
loggedContent, err := ioutil.ReadFile(provider.LogFile())
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldEqual, expectedOutput)
|
||||
So(string(loggedContent), ShouldStartWith, expectedOutput)
|
||||
// fmt.Println(string(loggedContent))
|
||||
})
|
||||
})
|
||||
|
||||
Convey("If the rsync program fails", t, func(ctx C) {
|
||||
tmpDir, err := os.MkdirTemp("", "tunasync")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
So(err, ShouldBeNil)
|
||||
tmpFile := filepath.Join(tmpDir, "log_file")
|
||||
|
||||
Convey("in the twoStageRsyncProvider", func() {
|
||||
|
||||
c := twoStageRsyncConfig{
|
||||
name: "tuna-two-stage-rsync",
|
||||
upstreamURL: "rsync://0.0.0.1/",
|
||||
stage1Profile: "debian",
|
||||
workingDir: tmpDir,
|
||||
logDir: tmpDir,
|
||||
logFile: tmpFile,
|
||||
excludeFile: tmpFile,
|
||||
}
|
||||
|
||||
provider, err := newTwoStageRsyncProvider(c)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = provider.Run(make(chan empty, 2))
|
||||
So(err, ShouldNotBeNil)
|
||||
loggedContent, err := os.ReadFile(provider.LogFile())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(loggedContent), ShouldContainSubstring, "Error in socket I/O")
|
||||
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -2,24 +2,37 @@ package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tuna/tunasync/internal"
|
||||
)
|
||||
|
||||
type rsyncConfig struct {
|
||||
name string
|
||||
rsyncCmd string
|
||||
upstreamURL, username, password, excludeFile string
|
||||
extraOptions []string
|
||||
globalOptions []string
|
||||
overriddenOptions []string
|
||||
useOverrideOnly bool
|
||||
rsyncNeverTimeout bool
|
||||
rsyncTimeoutValue int
|
||||
rsyncEnv map[string]string
|
||||
workingDir, logDir, logFile string
|
||||
useIPv6 bool
|
||||
useIPv6, useIPv4 bool
|
||||
interval time.Duration
|
||||
retry int
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// An RsyncProvider provides the implementation to rsync-based syncing jobs
|
||||
type rsyncProvider struct {
|
||||
baseProvider
|
||||
rsyncConfig
|
||||
options []string
|
||||
options []string
|
||||
dataSize string
|
||||
}
|
||||
|
||||
func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
|
||||
@ -27,11 +40,16 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
|
||||
if !strings.HasSuffix(c.upstreamURL, "/") {
|
||||
return nil, errors.New("rsync upstream URL should ends with /")
|
||||
}
|
||||
if c.retry == 0 {
|
||||
c.retry = defaultMaxRetry
|
||||
}
|
||||
provider := &rsyncProvider{
|
||||
baseProvider: baseProvider{
|
||||
name: c.name,
|
||||
ctx: NewContext(),
|
||||
interval: c.interval,
|
||||
retry: c.retry,
|
||||
timeout: c.timeout,
|
||||
},
|
||||
rsyncConfig: c,
|
||||
}
|
||||
@ -39,20 +57,56 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
|
||||
if c.rsyncCmd == "" {
|
||||
provider.rsyncCmd = "rsync"
|
||||
}
|
||||
if c.rsyncEnv == nil {
|
||||
provider.rsyncEnv = map[string]string{}
|
||||
}
|
||||
if c.username != "" {
|
||||
provider.rsyncEnv["USER"] = c.username
|
||||
}
|
||||
if c.password != "" {
|
||||
provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
|
||||
}

options := []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links", "--timeout=120", "--contimeout=120",
"--safe-links",
}
if c.overriddenOptions != nil {
options = c.overriddenOptions
}

if c.useIPv6 {
options = append(options, "-6")
}
if c.useOverrideOnly {
if c.overriddenOptions == nil {
return nil, errors.New("rsync_override_only is set but no rsync_override provided")
}
// use overridden options only
} else {
if !c.rsyncNeverTimeout {
timeo := 120
if c.rsyncTimeoutValue > 0 {
timeo = c.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}

if c.excludeFile != "" {
options = append(options, "--exclude-from", c.excludeFile)
if c.useIPv6 {
options = append(options, "-6")
} else if c.useIPv4 {
options = append(options, "-4")
}

if c.excludeFile != "" {
options = append(options, "--exclude-from", c.excludeFile)
}

if c.globalOptions != nil {
options = append(options, c.globalOptions...)
}
if c.extraOptions != nil {
options = append(options, c.extraOptions...)
}
}
provider.options = options
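For a mirror that keeps the defaults (no overridden options, no custom rsync timeout, IPv6 enabled, no exclude file and no extra options), the slice built above turns into an invocation of roughly this shape, with the upstream URL and working directory as placeholders:

rsync -aHvh --no-o --no-g --stats --filter "risk .~tmp~/" --exclude .~tmp~/ --delete --delete-after --delay-updates --safe-links --timeout=120 -6 rsync://example.org/module/ /data/mirrors/example

This is the same argument string the rsync provider test earlier in this diff asserts against; each option is a separate argv element, and the quotes around the filter rule are only needed when writing it as a shell command.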

@@ -71,28 +125,45 @@ func (p *rsyncProvider) Upstream() string {
return p.upstreamURL
}

func (p *rsyncProvider) Run() error {
func (p *rsyncProvider) DataSize() string {
return p.dataSize
}

func (p *rsyncProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
if err := p.Start(); err != nil {
return err
}
return p.Wait()
started <- empty{}
if err := p.Wait(); err != nil {
code, msg := internal.TranslateRsyncErrorCode(err)
if code != 0 {
logger.Debug("Rsync exitcode %d (%s)", code, msg)
if p.logFileFd != nil {
p.logFileFd.WriteString(msg + "\n")
}
}
return err
}
p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
return nil
}
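internal.ExtractSizeFromRsyncLog itself is not part of this diff. Based on the provider test above, where a log containing "Total file size: 1.33T bytes" makes DataSize() return "1.33T", a helper along the following lines would do the job; this is a guess at its behaviour, not the actual implementation:

package main

import (
	"fmt"
	"os"
	"regexp"
)

// extractSizeFromRsyncLog scans rsync --stats output for
// "Total file size: <N> bytes" and returns the human-readable number.
func extractSizeFromRsyncLog(logFile string) string {
	content, err := os.ReadFile(logFile)
	if err != nil {
		return ""
	}
	re := regexp.MustCompile(`Total file size: ([0-9.]+[KMGT]?) bytes`)
	if m := re.FindSubmatch(content); m != nil {
		return string(m[1])
	}
	return ""
}

func main() {
	f, _ := os.CreateTemp("", "rsync-log")
	f.WriteString("Total file size: 1.33T bytes\n")
	f.Close()
	fmt.Println(extractSizeFromRsyncLog(f.Name())) // prints: 1.33T
}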
|
||||
|
||||
func (p *rsyncProvider) Start() error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
env := map[string]string{}
|
||||
if p.username != "" {
|
||||
env["USER"] = p.username
|
||||
}
|
||||
if p.password != "" {
|
||||
env["RSYNC_PASSWORD"] = p.password
|
||||
if p.IsRunning() {
|
||||
return errors.New("provider is currently running")
|
||||
}
|
||||
|
||||
command := []string{p.rsyncCmd}
|
||||
command = append(command, p.options...)
|
||||
command = append(command, p.upstreamURL, p.WorkingDir())
|
||||
|
||||
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
|
||||
if err := p.prepareLogFile(); err != nil {
|
||||
p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
|
||||
if err := p.prepareLogFile(false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -100,5 +171,6 @@ func (p *rsyncProvider) Start() error {
|
||||
return err
|
||||
}
|
||||
p.isRunning.Store(true)
|
||||
logger.Debugf("set isRunning to true: %s", p.Name())
|
||||
return nil
|
||||
}
|
||||
|
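The hunk above replaces the fixed rsync option list with per-mirror assembly: an override list can substitute for the defaults entirely (rsync_override_only), while timeout, IPv4/IPv6 selection, the exclude file, and global/extra options are appended only in the non-override case. A minimal standalone sketch of that decision flow, using hypothetical field and function names rather than tunasync's actual config struct:

package main

import (
	"errors"
	"fmt"
)

// rsyncOpts mirrors the knobs used above; the field names are illustrative only.
type rsyncOpts struct {
	overridden   []string // replaces the default option set if non-nil
	overrideOnly bool     // refuse to add anything besides the override
	neverTimeout bool
	timeoutSec   int
	useIPv6      bool
	useIPv4      bool
	excludeFile  string
	extra        []string
}

func buildOptions(c rsyncOpts) ([]string, error) {
	options := []string{
		"-aHvh", "--no-o", "--no-g", "--stats",
		"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
		"--delete", "--delete-after", "--delay-updates",
		"--safe-links",
	}
	if c.overridden != nil {
		options = c.overridden
	}
	if c.overrideOnly {
		if c.overridden == nil {
			return nil, errors.New("override-only is set but no override provided")
		}
		return options, nil // use the overridden options as-is
	}
	if !c.neverTimeout {
		timeo := 120
		if c.timeoutSec > 0 {
			timeo = c.timeoutSec
		}
		options = append(options, fmt.Sprintf("--timeout=%d", timeo))
	}
	if c.useIPv6 {
		options = append(options, "-6")
	} else if c.useIPv4 {
		options = append(options, "-4")
	}
	if c.excludeFile != "" {
		options = append(options, "--exclude-from", c.excludeFile)
	}
	options = append(options, c.extra...)
	return options, nil
}

func main() {
	// Example: a mirror with a 300s timeout, IPv4 only, and an exclude file (paths are made up).
	opts, _ := buildOptions(rsyncOpts{timeoutSec: 300, useIPv4: true, excludeFile: "/etc/excludes.txt"})
	fmt.Println(opts)
}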
141 worker/runner.go
@@ -2,13 +2,18 @@ package worker

import (
    "errors"
    "fmt"
    "os"
    "os/exec"
    "slices"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/codeskyblue/go-sh"
    cgv1 "github.com/containerd/cgroups/v3/cgroup1"
    "github.com/moby/sys/reexec"
    "golang.org/x/sys/unix"
)

@@ -31,11 +36,45 @@ type cmdJob struct {
func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, env map[string]string) *cmdJob {
    var cmd *exec.Cmd

    if provider.Cgroup() != nil {
        c := "cgexec"
        args := []string{"-g", provider.Cgroup().Cgroup()}
    if d := provider.Docker(); d != nil {
        c := "docker"
        args := []string{
            "run", "--rm",
            "-a", "STDOUT", "-a", "STDERR",
            "--name", d.Name(),
            "-w", workingDir,
        }
        // specify user
        args = append(
            args, "-u",
            fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()),
        )
        // add volumes
        for _, vol := range d.Volumes() {
            logger.Debugf("volume: %s", vol)
            args = append(args, "-v", vol)
        }
        // set env
        for k, v := range env {
            kv := fmt.Sprintf("%s=%s", k, v)
            args = append(args, "-e", kv)
        }
        // set memlimit
        if d.memoryLimit != 0 {
            args = append(args, "-m", fmt.Sprint(d.memoryLimit.Value()))
        }
        // apply options
        args = append(args, d.options...)
        // apply image and command
        args = append(args, d.image)
        // apply command
        args = append(args, cmdAndArgs...)

        cmd = exec.Command(c, args...)

    } else if provider.Cgroup() != nil {
        cmd = reexec.Command(append([]string{"tunasync-exec"}, cmdAndArgs...)...)

    } else {
        if len(cmdAndArgs) == 1 {
            cmd = exec.Command(cmdAndArgs[0])
@@ -48,27 +87,80 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
        }
    }

    logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
    if _, err := os.Stat(workingDir); os.IsNotExist(err) {
        logger.Debugf("Making dir %s", workingDir)
        if err = os.MkdirAll(workingDir, 0755); err != nil {
            logger.Errorf("Error making dir %s", workingDir)
    if provider.Docker() == nil {
        logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
        if _, err := os.Stat(workingDir); os.IsNotExist(err) {
            logger.Debugf("Making dir %s", workingDir)
            if err = os.MkdirAll(workingDir, 0755); err != nil {
                logger.Errorf("Error making dir %s: %s", workingDir, err.Error())
            }
        }
        cmd.Dir = workingDir
        cmd.Env = newEnviron(env, true)
    }

    cmd.Dir = workingDir
    cmd.Env = newEnviron(env, true)

    return &cmdJob{
        cmd: cmd,
        workingDir: workingDir,
        env: env,
        provider: provider,
    }
}
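In the newCmdJob hunk above, the Docker branch wraps the provider's command in a docker run argv that carries the caller's UID/GID, bind mounts, environment variables, and memory limit. The following sketch shows the same argv assembly in isolation; dockerSpec and wrapInDocker are illustrative names, not tunasync's API:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// dockerSpec is an illustrative stand-in for a provider's docker config.
type dockerSpec struct {
	name    string
	image   string
	volumes []string // "host:container" bind mounts
	memoryB int64    // memory limit in bytes, 0 means unlimited
	extra   []string // extra "docker run" options
}

// wrapInDocker builds an *exec.Cmd that runs cmdAndArgs inside a container.
func wrapInDocker(d dockerSpec, workingDir string, env map[string]string, cmdAndArgs []string) *exec.Cmd {
	args := []string{
		"run", "--rm",
		"-a", "STDOUT", "-a", "STDERR",
		"--name", d.name,
		"-w", workingDir,
		// run as the invoking user so files on the bind mount keep sane ownership
		"-u", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()),
	}
	for _, vol := range d.volumes {
		args = append(args, "-v", vol)
	}
	for k, v := range env {
		args = append(args, "-e", fmt.Sprintf("%s=%s", k, v))
	}
	if d.memoryB != 0 {
		args = append(args, "-m", fmt.Sprint(d.memoryB))
	}
	args = append(args, d.extra...)
	args = append(args, d.image)
	args = append(args, cmdAndArgs...)
	return exec.Command("docker", args...)
}

func main() {
	// Hypothetical job: rsync inside an alpine container with one bind mount.
	cmd := wrapInDocker(
		dockerSpec{name: "tunasync-job", image: "alpine:latest", volumes: []string{"/srv/mirror:/data"}},
		"/data",
		map[string]string{"RSYNC_PASSWORD": "secret"},
		[]string{"rsync", "-a", "rsync://example.org/pub/", "."},
	)
	fmt.Println(cmd.Args)
}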

func (c *cmdJob) Start() error {
    cg := c.provider.Cgroup()
    var (
        pipeR *os.File
        pipeW *os.File
    )
    if cg != nil {
        logger.Debugf("Preparing cgroup sync pipes for job %s", c.provider.Name())
        var err error
        pipeR, pipeW, err = os.Pipe()
        if err != nil {
            return err
        }
        c.cmd.ExtraFiles = []*os.File{pipeR}
        defer pipeR.Close()
        defer pipeW.Close()
    }

    logger.Debugf("Command start: %v", c.cmd.Args)
    c.finished = make(chan empty, 1)
    return c.cmd.Start()

    if err := c.cmd.Start(); err != nil {
        return err
    }
    if cg != nil {
        if err := pipeR.Close(); err != nil {
            return err
        }
        if c.cmd == nil || c.cmd.Process == nil {
            return errProcessNotStarted
        }
        pid := c.cmd.Process.Pid
        if cg.cgCfg.isUnified {
            if err := cg.cgMgrV2.AddProc(uint64(pid)); err != nil {
                if errors.Is(err, syscall.ESRCH) {
                    logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring")
                } else {
                    return err
                }
            }
        } else {
            if err := cg.cgMgrV1.Add(cgv1.Process{Pid: pid}); err != nil {
                if errors.Is(err, syscall.ESRCH) {
                    logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring")
                } else {
                    return err
                }
            }
        }
        if _, err := pipeW.WriteString(string(cmdCont)); err != nil {
            return err
        }
    }
    return nil
}

func (c *cmdJob) Wait() error {
@@ -80,9 +172,18 @@ func (c *cmdJob) Wait() error {
        return c.retErr
    default:
        err := c.cmd.Wait()
        c.retErr = err
        close(c.finished)
        return err
        if err != nil {
            code := err.(*exec.ExitError).ExitCode()
            allowedCodes := c.provider.GetSuccessExitCodes()
            if slices.Contains(allowedCodes, code) {
                // process exited with non-success status
                logger.Infof("Command %s exited with code %d: treated as success (allowed: %v)", c.cmd.Args, code, allowedCodes)
            } else {
                c.retErr = err
            }
        }
        return c.retErr
    }
}

@@ -95,6 +196,14 @@ func (c *cmdJob) Terminate() error {
    if c.cmd == nil || c.cmd.Process == nil {
        return errProcessNotStarted
    }

    if d := c.provider.Docker(); d != nil {
        sh.Command(
            "docker", "stop", "-t", "2", d.Name(),
        ).Run()
        return nil
    }

    err := unix.Kill(c.cmd.Process.Pid, syscall.SIGTERM)
    if err != nil {
        return err
@@ -103,10 +212,10 @@ func (c *cmdJob) Terminate() error {
    select {
    case <-time.After(2 * time.Second):
        unix.Kill(c.cmd.Process.Pid, syscall.SIGKILL)
        return errors.New("SIGTERM failed to kill the job")
        logger.Warningf("SIGTERM failed to kill the job in 2s. SIGKILL sent")
    case <-c.finished:
        return nil
    }
    return nil
}

// Copied from go-sh
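The Terminate change above stops treating a SIGTERM timeout as an error: it escalates to SIGKILL, logs a warning, and waits on the job's completion channel. A self-contained sketch of that signal-then-escalate pattern, assuming a plain os/exec command and a finished channel closed by the goroutine that calls Wait:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// terminate asks the process to exit with SIGTERM and escalates to SIGKILL
// if it has not finished within the grace period.
func terminate(cmd *exec.Cmd, finished <-chan struct{}, grace time.Duration) error {
	if cmd == nil || cmd.Process == nil {
		return fmt.Errorf("process not started")
	}
	if err := syscall.Kill(cmd.Process.Pid, syscall.SIGTERM); err != nil {
		return err
	}
	select {
	case <-time.After(grace):
		// The job ignored SIGTERM; force-kill it but do not treat this as an error.
		syscall.Kill(cmd.Process.Pid, syscall.SIGKILL)
		fmt.Println("SIGTERM timed out, SIGKILL sent")
	case <-finished:
	}
	return nil
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	finished := make(chan struct{})
	go func() { cmd.Wait(); close(finished) }()
	if err := terminate(cmd, finished, 2*time.Second); err != nil {
		fmt.Println("terminate failed:", err)
	}
	<-finished
}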
@@ -15,6 +15,11 @@ type scheduleQueue struct {
    jobs map[string]bool
}

type jobScheduleInfo struct {
    jobName string
    nextScheduled time.Time
}

func timeLessThan(l, r interface{}) bool {
    tl := l.(time.Time)
    tr := r.(time.Time)
@@ -28,6 +33,20 @@ func newScheduleQueue() *scheduleQueue {
    return queue
}

func (q *scheduleQueue) GetJobs() (jobs []jobScheduleInfo) {
    cur := q.list.Iterator()
    defer cur.Close()

    for cur.Next() {
        cj := cur.Value().(*mirrorJob)
        jobs = append(jobs, jobScheduleInfo{
            cj.Name(),
            cur.Key().(time.Time),
        })
    }
    return
}

func (q *scheduleQueue) AddJob(schedTime time.Time, job *mirrorJob) {
    q.Lock()
    defer q.Unlock()
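The GetJobs addition above walks the sorted schedule and copies out (job name, next run time) pairs so the worker can report them to the manager. A toy sketch of the same snapshot idea, using a sorted slice instead of the skip list the worker actually uses:

package main

import (
	"fmt"
	"sort"
	"time"
)

type jobScheduleInfo struct {
	jobName       string
	nextScheduled time.Time
}

// scheduleQueue is a toy stand-in for the worker's sorted schedule structure.
type scheduleQueue struct {
	entries []jobScheduleInfo
}

func (q *scheduleQueue) AddJob(t time.Time, name string) {
	q.entries = append(q.entries, jobScheduleInfo{jobName: name, nextScheduled: t})
	sort.Slice(q.entries, func(i, j int) bool {
		return q.entries[i].nextScheduled.Before(q.entries[j].nextScheduled)
	})
}

// GetJobs returns a snapshot of the queue, earliest job first,
// which is the shape of data the worker periodically reports.
func (q *scheduleQueue) GetJobs() []jobScheduleInfo {
	out := make([]jobScheduleInfo, len(q.entries))
	copy(out, q.entries)
	return out
}

func main() {
	q := &scheduleQueue{}
	q.AddJob(time.Now().Add(10*time.Minute), "debian")
	q.AddJob(time.Now().Add(2*time.Minute), "ubuntu")
	for _, j := range q.GetJobs() {
		fmt.Printf("%s -> %s\n", j.jobName, j.nextScheduled.Format(time.RFC3339))
	}
}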
@@ -5,6 +5,8 @@ import (
    "fmt"
    "strings"
    "time"

    "github.com/tuna/tunasync/internal"
)

type twoStageRsyncConfig struct {
@@ -12,9 +14,15 @@ type twoStageRsyncConfig struct {
    rsyncCmd string
    stage1Profile string
    upstreamURL, username, password, excludeFile string
    extraOptions []string
    rsyncNeverTimeout bool
    rsyncTimeoutValue int
    rsyncEnv map[string]string
    workingDir, logDir, logFile string
    useIPv6 bool
    useIPv6, useIPv4 bool
    interval time.Duration
    retry int
    timeout time.Duration
}

// An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -23,13 +31,28 @@ type twoStageRsyncProvider struct {
    twoStageRsyncConfig
    stage1Options []string
    stage2Options []string
    dataSize string
}

// ref: https://salsa.debian.org/mirror-team/archvsync/-/blob/master/bin/ftpsync#L431
var rsyncStage1Profiles = map[string]([]string){
    "debian": []string{"dists/"},
    "debian": []string{
        "--include=*.diff/",
        "--include=by-hash/",
        "--exclude=*.diff/Index",
        "--exclude=Contents*",
        "--exclude=Packages*",
        "--exclude=Sources*",
        "--exclude=Release*",
        "--exclude=InRelease",
        "--exclude=i18n/*",
        "--exclude=dep11/*",
        "--exclude=installer-*/current",
        "--exclude=ls-lR*",
    },
    "debian-oldstyle": []string{
        "Packages*", "Sources*", "Release*",
        "InRelease", "i18n/*", "ls-lR*", "dep11/*",
        "--exclude=Packages*", "--exclude=Sources*", "--exclude=Release*",
        "--exclude=InRelease", "--exclude=i18n/*", "--exclude=ls-lR*", "--exclude=dep11/*",
    },
}

@@ -38,27 +61,41 @@ func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, er
    if !strings.HasSuffix(c.upstreamURL, "/") {
        return nil, errors.New("rsync upstream URL should ends with /")
    }
    if c.retry == 0 {
        c.retry = defaultMaxRetry
    }

    provider := &twoStageRsyncProvider{
        baseProvider: baseProvider{
            name: c.name,
            ctx: NewContext(),
            interval: c.interval,
            retry: c.retry,
            timeout: c.timeout,
        },
        twoStageRsyncConfig: c,
        stage1Options: []string{
            "-aHvh", "--no-o", "--no-g", "--stats",
            "--exclude", ".~tmp~/",
            "--safe-links", "--timeout=120", "--contimeout=120",
            "--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
            "--safe-links",
        },
        stage2Options: []string{
            "-aHvh", "--no-o", "--no-g", "--stats",
            "--exclude", ".~tmp~/",
            "--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
            "--delete", "--delete-after", "--delay-updates",
            "--safe-links", "--timeout=120", "--contimeout=120",
            "--safe-links",
        },
    }

    if c.rsyncEnv == nil {
        provider.rsyncEnv = map[string]string{}
    }
    if c.username != "" {
        provider.rsyncEnv["USER"] = c.username
    }
    if c.password != "" {
        provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
    }
    if c.rsyncCmd == "" {
        provider.rsyncCmd = "rsync"
    }
@@ -78,26 +115,41 @@ func (p *twoStageRsyncProvider) Upstream() string {
    return p.upstreamURL
}

func (p *twoStageRsyncProvider) DataSize() string {
    return p.dataSize
}

func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
    var options []string
    if stage == 1 {
        options = append(options, p.stage1Options...)
        stage1Excludes, ok := rsyncStage1Profiles[p.stage1Profile]
        stage1Profile, ok := rsyncStage1Profiles[p.stage1Profile]
        if !ok {
            return nil, errors.New("Invalid Stage 1 Profile")
        }
        for _, exc := range stage1Excludes {
            options = append(options, "--exclude", exc)
        }
        options = append(options, stage1Profile...)

    } else if stage == 2 {
        options = append(options, p.stage2Options...)
        if p.extraOptions != nil {
            options = append(options, p.extraOptions...)
        }
    } else {
        return []string{}, fmt.Errorf("Invalid stage: %d", stage)
    }

    if !p.rsyncNeverTimeout {
        timeo := 120
        if p.rsyncTimeoutValue > 0 {
            timeo = p.rsyncTimeoutValue
        }
        options = append(options, fmt.Sprintf("--timeout=%d", timeo))
    }

    if p.useIPv6 {
        options = append(options, "-6")
    } else if p.useIPv4 {
        options = append(options, "-4")
    }

    if p.excludeFile != "" {
@@ -107,17 +159,15 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
    return options, nil
}
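The Options hunk selects a different option set per stage: stage 1 appends the profile's filter list (the Debian profile now carries explicit --include/--exclude rules), stage 2 adds the deleting and finalizing options plus any extras, and both then receive the timeout and address-family flags. A compressed sketch of that selection with hypothetical names and a reduced profile table:

package main

import (
	"fmt"
)

// stage1Profiles holds an illustrative subset of the Debian ftpsync-style stage-1 filters.
var stage1Profiles = map[string][]string{
	"debian": {"--include=*.diff/", "--exclude=Packages*", "--exclude=Sources*", "--exclude=Release*"},
}

func stageOptions(stage int, profile string, extra []string) ([]string, error) {
	base := []string{"-aHvh", "--no-o", "--no-g", "--stats", "--safe-links"}
	switch stage {
	case 1:
		// Stage 1 transfers the bulk of the pool while index files are filtered out.
		filters, ok := stage1Profiles[profile]
		if !ok {
			return nil, fmt.Errorf("invalid stage 1 profile: %q", profile)
		}
		return append(base, filters...), nil
	case 2:
		// Stage 2 syncs everything and deletes stale files after the transfer.
		opts := append(base, "--delete", "--delete-after", "--delay-updates")
		return append(opts, extra...), nil
	default:
		return nil, fmt.Errorf("invalid stage: %d", stage)
	}
}

func main() {
	o1, _ := stageOptions(1, "debian", nil)
	o2, _ := stageOptions(2, "debian", []string{"--exclude=ls-lR*"})
	fmt.Println("stage 1:", o1)
	fmt.Println("stage 2:", o2)
}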

func (p *twoStageRsyncProvider) Run() error {
    defer p.Wait()
func (p *twoStageRsyncProvider) Run(started chan empty) error {
    p.Lock()
    defer p.Unlock()

    env := map[string]string{}
    if p.username != "" {
        env["USER"] = p.username
    }
    if p.password != "" {
        env["RSYNC_PASSWORD"] = p.password
    if p.IsRunning() {
        return errors.New("provider is currently running")
    }

    p.dataSize = ""
    stages := []int{1, 2}
    for _, stage := range stages {
        command := []string{p.rsyncCmd}
@@ -128,21 +178,33 @@ func (p *twoStageRsyncProvider) Run() error {
        command = append(command, options...)
        command = append(command, p.upstreamURL, p.WorkingDir())

        p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
        if err := p.prepareLogFile(); err != nil {
        p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
        if err := p.prepareLogFile(stage > 1); err != nil {
            return err
        }
        defer p.closeLogFile()

        if err = p.cmd.Start(); err != nil {
            return err
        }
        p.isRunning.Store(true)
        logger.Debugf("set isRunning to true: %s", p.Name())
        started <- empty{}

        err = p.cmd.Wait()
        p.isRunning.Store(false)
        p.Unlock()
        err = p.Wait()
        p.Lock()
        if err != nil {
            code, msg := internal.TranslateRsyncErrorCode(err)
            if code != 0 {
                logger.Debug("Rsync exitcode %d (%s)", code, msg)
                if p.logFileFd != nil {
                    p.logFileFd.WriteString(msg + "\n")
                }
            }
            return err
        }
    }
    p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
    return nil
}
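Both providers' Run methods now accept a started channel and send on it only after the underlying command has actually launched, so the caller can tell a start failure apart from a later sync failure. A generic sketch of that handshake, independent of tunasync's provider types:

package main

import (
	"fmt"
	"os/exec"
)

type empty struct{}

// run starts cmd, signals the caller on started once the process is up,
// and only then blocks waiting for completion.
func run(cmd *exec.Cmd, started chan<- empty) error {
	if err := cmd.Start(); err != nil {
		return err // never signals started: the job never ran
	}
	started <- empty{}
	return cmd.Wait()
}

func main() {
	started := make(chan empty, 1)
	errc := make(chan error, 1)
	go func() { errc <- run(exec.Command("ls"), started) }()

	select {
	case <-started:
		fmt.Println("job is running")
		if err := <-errc; err != nil {
			fmt.Println("job finished with error:", err)
		} else {
			fmt.Println("job finished successfully")
		}
	case err := <-errc:
		fmt.Println("job failed to start:", err)
	}
}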
107 worker/worker.go
@@ -12,8 +12,6 @@ import (
    . "github.com/tuna/tunasync/internal"
)

var tunasyncWorker *Worker

// A Worker is a instance of tunasync worker
type Worker struct {
    L sync.Mutex
@@ -29,10 +27,11 @@ type Worker struct {
    httpClient *http.Client
}

// GetTUNASyncWorker returns a singalton worker
func GetTUNASyncWorker(cfg *Config) *Worker {
    if tunasyncWorker != nil {
        return tunasyncWorker
// NewTUNASyncWorker creates a worker
func NewTUNASyncWorker(cfg *Config) *Worker {

    if cfg.Global.Retry == 0 {
        cfg.Global.Retry = defaultMaxRetry
    }

    w := &Worker{
@@ -56,17 +55,19 @@ func GetTUNASyncWorker(cfg *Config) *Worker {
    }

    if cfg.Cgroup.Enable {
        initCgroup(cfg.Cgroup.BasePath)
        if err := initCgroup(&cfg.Cgroup); err != nil {
            logger.Errorf("Error initializing Cgroup: %s", err.Error())
            return nil
        }
    }
    w.initJobs()
    w.makeHTTPServer()
    tunasyncWorker = w
    return w
}

// Run runs worker forever
func (w *Worker) Run() {
    w.registorWorker()
    w.registerWorker()
    go w.runHTTPServer()
    w.runSchedule()
}
@@ -222,7 +223,11 @@ func (w *Worker) makeHTTPServer() {
    }
    switch cmd.Cmd {
    case CmdStart:
        job.ctrlChan <- jobStart
        if cmd.Options["force"] {
            job.ctrlChan <- jobForceStart
        } else {
            job.ctrlChan <- jobStart
        }
    case CmdRestart:
        job.ctrlChan <- jobRestart
    case CmdStop:
@@ -309,7 +314,10 @@ func (w *Worker) runSchedule() {

    w.L.Unlock()

    tick := time.Tick(5 * time.Second)
    schedInfo := w.schedule.GetJobs()
    w.updateSchedInfo(schedInfo)

    tick := time.NewTicker(5 * time.Second).C
    for {
        select {
        case jobMsg := <-w.managerChan:
@@ -345,6 +353,9 @@ func (w *Worker) runSchedule() {
                w.schedule.AddJob(schedTime, job)
            }

            schedInfo = w.schedule.GetJobs()
            w.updateSchedInfo(schedInfo)

        case <-tick:
            // check schedule every 5 seconds
            if job := w.schedule.Pop(); job != nil {
@@ -388,29 +399,31 @@ func (w *Worker) URL() string {
    return fmt.Sprintf("%s://%s:%d/", proto, w.cfg.Server.Hostname, w.cfg.Server.Port)
}

func (w *Worker) registorWorker() {
    url := fmt.Sprintf(
        "%s/workers",
        w.cfg.Manager.APIBase,
    )

func (w *Worker) registerWorker() {
    msg := WorkerStatus{
        ID: w.Name(),
        URL: w.URL(),
    }

    if _, err := PostJSON(url, msg, w.httpClient); err != nil {
        logger.Errorf("Failed to register worker")
    for _, root := range w.cfg.Manager.APIBaseList() {
        url := fmt.Sprintf("%s/workers", root)
        logger.Debugf("register on manager url: %s", url)
        for retry := 10; retry > 0; {
            if _, err := PostJSON(url, msg, w.httpClient); err != nil {
                logger.Errorf("Failed to register worker")
                retry--
                if retry > 0 {
                    time.Sleep(1 * time.Second)
                    logger.Noticef("Retrying... (%d)", retry)
                }
            } else {
                break
            }
        }
    }
}
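registerWorker above now posts the worker's status to every configured manager endpoint and retries each one up to ten times with a one-second pause. A standalone sketch of that loop; postJSON and the endpoint list here are stand-ins for the internal helpers, not tunasync's actual code:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type workerStatus struct {
	ID  string `json:"id"`
	URL string `json:"url"`
}

// postJSON is a minimal stand-in for an internal JSON POST helper.
func postJSON(url string, v interface{}) error {
	body, err := json.Marshal(v)
	if err != nil {
		return err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	return nil
}

// registerWorker announces the worker to every manager, retrying each a few times.
func registerWorker(apiBases []string, msg workerStatus) {
	for _, root := range apiBases {
		url := fmt.Sprintf("%s/workers", root)
		for retry := 3; retry > 0; retry-- {
			if err := postJSON(url, msg); err != nil {
				fmt.Printf("register on %s failed: %v (retries left: %d)\n", url, err, retry-1)
				time.Sleep(1 * time.Second)
				continue
			}
			break
		}
	}
}

func main() {
	// Hypothetical manager endpoints; both URLs are placeholders.
	registerWorker(
		[]string{"https://manager-a.example.com", "https://manager-b.example.com"},
		workerStatus{ID: "dut", URL: "https://worker.example.com:6000/"},
	)
}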

func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
    url := fmt.Sprintf(
        "%s/workers/%s/jobs/%s",
        w.cfg.Manager.APIBase,
        w.Name(),
        jobMsg.name,
    )
    p := job.provider
    smsg := MirrorStatus{
        Name: jobMsg.name,
@@ -422,19 +435,49 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
        ErrorMsg: jobMsg.msg,
    }

    if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
        logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
    // Certain Providers (rsync for example) may know the size of mirror,
    // so we report it to Manager here
    if len(job.size) != 0 {
        smsg.Size = job.size
    }

    for _, root := range w.cfg.Manager.APIBaseList() {
        url := fmt.Sprintf(
            "%s/workers/%s/jobs/%s", root, w.Name(), jobMsg.name,
        )
        logger.Debugf("reporting on manager url: %s", url)
        if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
            logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
        }
    }
}

func (w *Worker) updateSchedInfo(schedInfo []jobScheduleInfo) {
    var s []MirrorSchedule
    for _, sched := range schedInfo {
        s = append(s, MirrorSchedule{
            MirrorName: sched.jobName,
            NextSchedule: sched.nextScheduled,
        })
    }
    msg := MirrorSchedules{Schedules: s}

    for _, root := range w.cfg.Manager.APIBaseList() {
        url := fmt.Sprintf(
            "%s/workers/%s/schedules", root, w.Name(),
        )
        logger.Debugf("reporting on manager url: %s", url)
        if _, err := PostJSON(url, msg, w.httpClient); err != nil {
            logger.Errorf("Failed to upload schedules: %s", err.Error())
        }
    }
}

func (w *Worker) fetchJobStatus() []MirrorStatus {
    var mirrorList []MirrorStatus
    apiBase := w.cfg.Manager.APIBaseList()[0]

    url := fmt.Sprintf(
        "%s/workers/%s/jobs",
        w.cfg.Manager.APIBase,
        w.Name(),
    )
    url := fmt.Sprintf("%s/workers/%s/jobs", apiBase, w.Name())

    if _, err := GetJSON(url, &mirrorList, w.httpClient); err != nil {
        logger.Errorf("Failed to fetch job status: %s", err.Error())
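runSchedule above switches from time.Tick to time.NewTicker and reports a fresh schedule snapshot whenever jobs are re-queued. The loop shape, reduced to its select skeleton with placeholder channels:

package main

import (
	"fmt"
	"time"
)

func main() {
	// time.NewTicker is used instead of time.Tick so the ticker could be
	// stopped explicitly if the loop ever needs to shut down cleanly.
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	jobDone := make(chan string)
	go func() {
		time.Sleep(1500 * time.Millisecond)
		jobDone <- "job-ls"
	}()

	deadline := time.After(3 * time.Second)
	for {
		select {
		case name := <-jobDone:
			// On a job state change, re-queue it and report the new schedule.
			fmt.Printf("%s finished, rescheduling and reporting to manager\n", name)
		case <-ticker.C:
			// Every tick, pop any job whose scheduled time has arrived and start it.
			fmt.Println("tick: checking schedule queue")
		case <-deadline:
			return
		}
	}
}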
256 worker/worker_test.go (new file)
@@ -0,0 +1,256 @@
package worker

import (
    "net/http"
    "strconv"
    "testing"
    "time"

    "github.com/gin-gonic/gin"
    . "github.com/smartystreets/goconvey/convey"
    . "github.com/tuna/tunasync/internal"
)

type workTestFunc func(*Worker)

var managerPort = 5001
var workerPort = 5002

func makeMockManagerServer(recvData chan interface{}) *gin.Engine {
    r := gin.Default()
    r.GET("/ping", func(c *gin.Context) {
        c.JSON(http.StatusOK, gin.H{"_infoKey": "pong"})
    })
    r.POST("/workers", func(c *gin.Context) {
        var _worker WorkerStatus
        c.BindJSON(&_worker)
        _worker.LastOnline = time.Now()
        _worker.LastRegister = time.Now()
        recvData <- _worker
        c.JSON(http.StatusOK, _worker)
    })
    r.POST("/workers/dut/schedules", func(c *gin.Context) {
        var _sch MirrorSchedules
        c.BindJSON(&_sch)
        recvData <- _sch
        c.JSON(http.StatusOK, empty{})
    })
    r.POST("/workers/dut/jobs/:job", func(c *gin.Context) {
        var status MirrorStatus
        c.BindJSON(&status)
        recvData <- status
        c.JSON(http.StatusOK, status)
    })
    r.GET("/workers/dut/jobs", func(c *gin.Context) {
        mirrorStatusList := []MirrorStatus{}
        c.JSON(http.StatusOK, mirrorStatusList)
    })

    return r
}

func startWorkerThenStop(cfg *Config, tester workTestFunc) {
    exitedChan := make(chan int)
    w := NewTUNASyncWorker(cfg)
    So(w, ShouldNotBeNil)
    go func() {
        w.Run()
        exitedChan <- 1
    }()

    tester(w)

    w.Halt()
    select {
    case exited := <-exitedChan:
        So(exited, ShouldEqual, 1)
    case <-time.After(2 * time.Second):
        So(0, ShouldEqual, 1)
    }

}
func sendCommandToWorker(workerURL string, httpClient *http.Client, cmd CmdVerb, mirror string) {
    workerCmd := WorkerCmd{
        Cmd: cmd,
        MirrorID: mirror,
    }
    logger.Debugf("POST to %s with cmd %s", workerURL, cmd)
    _, err := PostJSON(workerURL, workerCmd, httpClient)
    So(err, ShouldBeNil)
}

func TestWorker(t *testing.T) {
    InitLogger(false, true, false)

    recvDataChan := make(chan interface{})
    _s := makeMockManagerServer(recvDataChan)
    httpServer := &http.Server{
        Addr: "localhost:" + strconv.Itoa(managerPort),
        Handler: _s,
        ReadTimeout: 2 * time.Second,
        WriteTimeout: 2 * time.Second,
    }
    go func() {
        err := httpServer.ListenAndServe()
        So(err, ShouldBeNil)
    }()
    // Wait for http server starting
    time.Sleep(500 * time.Millisecond)

    Convey("Worker should work", t, func(ctx C) {

        httpClient, err := CreateHTTPClient("")
        So(err, ShouldBeNil)

        workerPort++

        workerCfg := Config{
            Global: globalConfig{
                Name: "dut",
                LogDir: "/tmp",
                MirrorDir: "/tmp",
                Concurrent: 2,
                Interval: 1,
            },
            Server: serverConfig{
                Hostname: "localhost",
                Addr: "127.0.0.1",
                Port: workerPort,
            },
            Manager: managerConfig{
                APIBase: "http://localhost:" + strconv.Itoa(managerPort),
            },
        }
        logger.Debugf("worker port %d", workerPort)
        Convey("with no job", func(ctx C) {
            dummyTester := func(*Worker) {
                registered := false
                for {
                    select {
                    case data := <-recvDataChan:
                        if reg, ok := data.(WorkerStatus); ok {
                            So(reg.ID, ShouldEqual, "dut")
                            registered = true
                            time.Sleep(500 * time.Millisecond)
                            sendCommandToWorker(reg.URL, httpClient, CmdStart, "foobar")
                        } else if sch, ok := data.(MirrorSchedules); ok {
                            So(len(sch.Schedules), ShouldEqual, 0)
                        }
                    case <-time.After(2 * time.Second):
                        So(registered, ShouldBeTrue)
                        return
                    }
                }
            }

            startWorkerThenStop(&workerCfg, dummyTester)
        })
        Convey("with one job", func(ctx C) {
            workerCfg.Mirrors = []mirrorConfig{
                {
                    Name: "job-ls",
                    Provider: provCommand,
                    Command: "ls",
                },
            }

            dummyTester := func(*Worker) {
                url := ""
                jobRunning := false
                lastStatus := SyncStatus(None)
                for {
                    select {
                    case data := <-recvDataChan:
                        if reg, ok := data.(WorkerStatus); ok {
                            So(reg.ID, ShouldEqual, "dut")
                            url = reg.URL
                            time.Sleep(500 * time.Millisecond)
                            sendCommandToWorker(url, httpClient, CmdStart, "job-ls")
                        } else if sch, ok := data.(MirrorSchedules); ok {
                            if !jobRunning {
                                So(len(sch.Schedules), ShouldEqual, 1)
                                So(sch.Schedules[0].MirrorName, ShouldEqual, "job-ls")
                                So(sch.Schedules[0].NextSchedule,
                                    ShouldHappenBetween,
                                    time.Now().Add(-2*time.Second),
                                    time.Now().Add(1*time.Minute))
                            }
                        } else if status, ok := data.(MirrorStatus); ok {
                            logger.Noticef("Job %s status %s", status.Name, status.Status.String())
                            jobRunning = status.Status == PreSyncing || status.Status == Syncing
                            So(status.Status, ShouldNotEqual, Failed)
                            lastStatus = status.Status
                        }
                    case <-time.After(2 * time.Second):
                        So(url, ShouldNotEqual, "")
                        So(jobRunning, ShouldBeFalse)
                        So(lastStatus, ShouldEqual, Success)
                        return
                    }
                }
            }

            startWorkerThenStop(&workerCfg, dummyTester)
        })
        Convey("with several jobs", func(ctx C) {
            workerCfg.Mirrors = []mirrorConfig{
                {
                    Name: "job-ls-1",
                    Provider: provCommand,
                    Command: "ls",
                },
                {
                    Name: "job-fail",
                    Provider: provCommand,
                    Command: "non-existent-command-xxxx",
                },
                {
                    Name: "job-ls-2",
                    Provider: provCommand,
                    Command: "ls",
                },
            }

            dummyTester := func(*Worker) {
                url := ""
                lastStatus := make(map[string]SyncStatus)
                nextSch := make(map[string]time.Time)
                for {
                    select {
                    case data := <-recvDataChan:
                        if reg, ok := data.(WorkerStatus); ok {
                            So(reg.ID, ShouldEqual, "dut")
                            url = reg.URL
                            time.Sleep(500 * time.Millisecond)
                            sendCommandToWorker(url, httpClient, CmdStart, "job-fail")
                            sendCommandToWorker(url, httpClient, CmdStart, "job-ls-1")
                            sendCommandToWorker(url, httpClient, CmdStart, "job-ls-2")
                        } else if sch, ok := data.(MirrorSchedules); ok {
                            //So(len(sch.Schedules), ShouldEqual, 3)
                            for _, item := range sch.Schedules {
                                nextSch[item.MirrorName] = item.NextSchedule
                            }
                        } else if status, ok := data.(MirrorStatus); ok {
                            logger.Noticef("Job %s status %s", status.Name, status.Status.String())
                            jobRunning := status.Status == PreSyncing || status.Status == Syncing
                            if !jobRunning {
                                if status.Name == "job-fail" {
                                    So(status.Status, ShouldEqual, Failed)
                                } else {
                                    So(status.Status, ShouldNotEqual, Failed)
                                }
                            }
                            lastStatus[status.Name] = status.Status
                        }
                    case <-time.After(2 * time.Second):
                        So(len(lastStatus), ShouldEqual, 3)
                        So(len(nextSch), ShouldEqual, 3)
                        return
                    }
                }
            }

            startWorkerThenStop(&workerCfg, dummyTester)
        })
    })
}
54 worker/zfs_hook.go (new file)
@@ -0,0 +1,54 @@
package worker

import (
    "fmt"
    "os"
    "os/user"
    "strings"

    "github.com/codeskyblue/go-sh"
)

type zfsHook struct {
    emptyHook
    zpool string
}

func newZfsHook(provider mirrorProvider, zpool string) *zfsHook {
    return &zfsHook{
        emptyHook: emptyHook{
            provider: provider,
        },
        zpool: zpool,
    }
}

func (z *zfsHook) printHelpMessage() {
    zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
    zfsDataset = strings.ToLower(zfsDataset)
    workingDir := z.provider.WorkingDir()
    logger.Infof("You may create the ZFS dataset with:")
    logger.Infof(" zfs create '%s'", zfsDataset)
    logger.Infof(" zfs set mountpoint='%s' '%s'", workingDir, zfsDataset)
    usr, err := user.Current()
    if err != nil || usr.Uid == "0" {
        return
    }
    logger.Infof(" chown %s '%s'", usr.Uid, workingDir)
}

// check if working directory is a zfs dataset
func (z *zfsHook) preJob() error {
    workingDir := z.provider.WorkingDir()
    if _, err := os.Stat(workingDir); os.IsNotExist(err) {
        logger.Errorf("Directory %s doesn't exist", workingDir)
        z.printHelpMessage()
        return err
    }
    if err := sh.Command("mountpoint", "-q", workingDir).Run(); err != nil {
        logger.Errorf("%s is not a mount point", workingDir)
        z.printHelpMessage()
        return err
    }
    return nil
}
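The new zfs_hook.go refuses to start a sync unless the working directory exists and is an actual mount point, guarding against writing into the parent filesystem when the ZFS dataset is not mounted. The check itself is a `mountpoint -q` invocation; a standalone sketch of the same guard (the directory path below is a made-up example):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// ensureMounted fails unless dir exists and is a mount point,
// mirroring the guard the ZFS hook performs before a sync starts.
func ensureMounted(dir string) error {
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		return fmt.Errorf("directory %s doesn't exist", dir)
	}
	// `mountpoint -q` exits non-zero when dir is not a mount point.
	if err := exec.Command("mountpoint", "-q", dir).Run(); err != nil {
		return fmt.Errorf("%s is not a mount point: %w", dir, err)
	}
	return nil
}

func main() {
	if err := ensureMounted("/srv/mirror/debian"); err != nil {
		fmt.Println("refusing to sync:", err)
		return
	}
	fmt.Println("working directory is mounted, safe to sync")
}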
47 worker/zfs_hook_test.go (new file)
@@ -0,0 +1,47 @@
package worker

import (
    "os"
    "path/filepath"
    "testing"
    "time"

    . "github.com/smartystreets/goconvey/convey"
)

func TestZFSHook(t *testing.T) {

    Convey("ZFS Hook should work", t, func(ctx C) {
        tmpDir, _ := os.MkdirTemp("", "tunasync")
        tmpFile := filepath.Join(tmpDir, "log_file")

        c := cmdConfig{
            name: "tuna_zfs_hook_test",
            upstreamURL: "http://mirrors.tuna.moe/",
            command: "ls",
            workingDir: tmpDir,
            logDir: tmpDir,
            logFile: tmpFile,
            interval: 1 * time.Second,
        }

        provider, err := newCmdProvider(c)
        So(err, ShouldBeNil)
        Convey("When working directory doesn't exist", func(ctx C) {

            errRm := os.RemoveAll(tmpDir)
            So(errRm, ShouldBeNil)

            hook := newZfsHook(provider, "test_pool")
            err := hook.preJob()
            So(err, ShouldNotBeNil)
        })
        Convey("When working directory is not a mount point", func(ctx C) {
            defer os.RemoveAll(tmpDir)

            hook := newZfsHook(provider, "test_pool")
            err := hook.preJob()
            So(err, ShouldNotBeNil)
        })
    })
}