Fix more linter issues

Signed-off-by: Shengqi Chen <harry-chen@outlook.com>
Shengqi Chen 2025-01-11 15:53:42 +08:00
parent ab416f6545
commit 99c7ab6b65
19 changed files with 378 additions and 386 deletions
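
Most of the changes below migrate off the io/ioutil package, which has been deprecated since Go 1.16, with the remainder being gofmt and staticcheck cleanups. As a quick reference, here is a minimal sketch of the ioutil replacements used throughout this commit (the URL and file names are illustrative placeholders, not values from the diff):

```go
package main

import (
	"io"
	"net/http"
	"os"
	"path/filepath"
)

// ioutilMigration demonstrates the Go 1.16+ replacements applied in this
// commit. The URL and file names are illustrative placeholders.
func ioutilMigration() error {
	resp, err := http.Get("https://example.com/jobs")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// ioutil.ReadAll(r) -> io.ReadAll(r)
	if _, err := io.ReadAll(resp.Body); err != nil {
		return err
	}

	// ioutil.TempDir -> os.MkdirTemp; ioutil.TempFile -> os.CreateTemp
	tmpDir, err := os.MkdirTemp("", "tunasync")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)

	// ioutil.WriteFile / ioutil.ReadFile -> os.WriteFile / os.ReadFile
	cfg := filepath.Join(tmpDir, "worker.conf")
	if err := os.WriteFile(cfg, []byte("[global]"), 0644); err != nil {
		return err
	}
	_, err = os.ReadFile(cfg)
	return err
}

func main() {
	if err := ioutilMigration(); err != nil {
		panic(err)
	}
}
```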

View File

@ -3,7 +3,7 @@ package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"strconv"
@ -122,7 +122,7 @@ func initialize(c *cli.Context) error {
var err error
client, err = tunasync.CreateHTTPClient(cfg.CACert)
if err != nil {
err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
err = fmt.Errorf("error initializing HTTP client: %s", err.Error())
// logger.Error(err.Error())
return err
@ -292,7 +292,7 @@ func updateMirrorSize(c *cli.Context) error {
1)
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
return cli.NewExitError(
fmt.Sprintf("Manager failed to update mirror size: %s", body), 1,
@ -338,7 +338,7 @@ func removeWorker(c *cli.Context) error {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
@ -351,7 +351,7 @@ func removeWorker(c *cli.Context) error {
}
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
_ = json.NewDecoder(resp.Body).Decode(&res)
if res["message"] == "deleted" {
fmt.Println("Successfully removed the worker")
} else {
@ -376,7 +376,7 @@ func flushDisabledJobs(c *cli.Context) error {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
@ -430,7 +430,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
@ -468,7 +468,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
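
Two linter conventions show up in this file: staticcheck ST1005 requires error strings to start lowercase (they are routinely wrapped into longer messages), and errcheck wants deliberately ignored errors assigned to _ explicitly, as in the json.NewDecoder change above. A small sketch of the error-string rule, with a hypothetical wrapping caller:

```go
package main

import (
	"errors"
	"fmt"
)

// ST1005: error strings start lowercase and carry no trailing punctuation,
// so they compose cleanly when wrapped.
func initClient() error {
	return errors.New("error initializing HTTP client: bad CA cert")
}

func main() {
	if err := initClient(); err != nil {
		// Prints: tunasynctl: error initializing HTTP client: bad CA cert
		fmt.Println(fmt.Errorf("tunasynctl: %w", err))
	}
}
```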

View File

@ -7,8 +7,9 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"os/exec"
"regexp"
"time"
@ -39,19 +40,19 @@ var rsyncExitValues = map[int]string{
// GetTLSConfig generate tls.Config from CAFile
func GetTLSConfig(CAFile string) (*tls.Config, error) {
caCert, err := ioutil.ReadFile(CAFile)
caCert, err := os.ReadFile(CAFile)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
return nil, errors.New("Failed to add CA to pool")
return nil, errors.New("failed to add CA to pool")
}
tlsConfig := &tls.Config{
RootCAs: caCertPool,
}
tlsConfig.BuildNameToCertificate()
// tlsConfig.BuildNameToCertificate()
return tlsConfig, nil
}
@ -104,7 +105,7 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
return resp, errors.New("HTTP status code is not 200")
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return resp, err
}
@ -114,10 +115,10 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
// FindAllSubmatchInFile calls re.FindAllSubmatch to find matches in given file
func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
if fileName == "/dev/null" {
err = errors.New("Invalid log file")
err = errors.New("invalid log file")
return
}
if content, err := ioutil.ReadFile(fileName); err == nil {
if content, err := os.ReadFile(fileName); err == nil {
matches = re.FindAllSubmatch(content, -1)
// fmt.Printf("FindAllSubmatchInFile: %q\n", matches)
}
@ -127,7 +128,7 @@ func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]by
// ExtractSizeFromLog uses a regexp to extract the size from log files
func ExtractSizeFromLog(logFile string, re *regexp.Regexp) string {
matches, _ := FindAllSubmatchInFile(logFile, re)
if matches == nil || len(matches) == 0 {
if len(matches) == 0 {
return ""
}
// return the first capture group of the last occurrence
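
Two other fixes land in this file: tlsConfig.BuildNameToCertificate() is commented out because crypto/tls deprecated it in Go 1.14 (the library now selects certificates automatically), and the matches == nil guard is dropped per gosimple S1009, since len of a nil slice is already 0. A sketch of the latter, mirroring ExtractSizeFromLog:

```go
package main

// firstGroupOfLast returns the first capture group of the last match;
// the len check alone covers both nil and empty slices (gosimple S1009).
func firstGroupOfLast(matches [][][]byte) string {
	if len(matches) == 0 {
		return ""
	}
	return string(matches[len(matches)-1][1])
}

func main() {
	println(firstGroupOfLast(nil)) // ""
	println(firstGroupOfLast([][][]byte{{[]byte("size: 1G"), []byte("1G")}}))
}
```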

View File

@ -1,7 +1,6 @@
package internal
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -29,11 +28,11 @@ sent 7.55M bytes received 823.25M bytes 5.11M bytes/sec
total size is 1.33T speedup is 1,604.11
`
Convey("Log parser should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
logFile := filepath.Join(tmpDir, "rs.log")
err = ioutil.WriteFile(logFile, []byte(realLogContent), 0755)
err = os.WriteFile(logFile, []byte(realLogContent), 0755)
So(err, ShouldBeNil)
res := ExtractSizeFromRsyncLog(logFile)

View File

@ -2,7 +2,6 @@ package manager
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
@ -37,11 +36,11 @@ func TestConfig(t *testing.T) {
Convey("load Config should work", t, func() {
Convey("create config file & cli context", func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()

View File

@ -141,7 +141,7 @@ func (b *kvDBAdapter) ListWorkers() (ws []WorkerStatus, err error) {
func (b *kvDBAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
var v []byte
v, err = b.db.Get(_workerBucketKey, workerID)
v, _ = b.db.Get(_workerBucketKey, workerID)
if v == nil {
err = fmt.Errorf("invalid workerID %s", workerID)
} else {

View File

@ -3,7 +3,6 @@ package manager
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -32,7 +31,7 @@ func DBAdapterTest(db dbAdapter) {
LastOnline: time.Now(),
LastRegister: time.Now(),
}
w, err = db.CreateWorker(w)
_, err = db.CreateWorker(w)
So(err, ShouldBeNil)
}
@ -73,7 +72,7 @@ func DBAdapterTest(db dbAdapter) {
Convey("update mirror status", func() {
status := []MirrorStatus{
MirrorStatus{
{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
@ -84,7 +83,7 @@ func DBAdapterTest(db dbAdapter) {
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
MirrorStatus{
{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
@ -95,7 +94,7 @@ func DBAdapterTest(db dbAdapter) {
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
MirrorStatus{
{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
@ -159,12 +158,11 @@ func DBAdapterTest(db dbAdapter) {
})
})
return
}
func TestDBAdapter(t *testing.T) {
Convey("boltAdapter should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
@ -200,7 +198,7 @@ func TestDBAdapter(t *testing.T) {
})
Convey("badgerAdapter should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
@ -218,7 +216,7 @@ func TestDBAdapter(t *testing.T) {
})
Convey("leveldbAdapter should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
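
The MirrorStatus{...} → {...} rewrites here (and the WorkerStatus/MirrorSchedule ones in a later file) are gofmt -s simplifications: inside a slice or map literal the element type may be omitted. A sketch with a stripped-down struct:

```go
package main

import "fmt"

type MirrorStatus struct{ Name, Worker string }

func main() {
	// Before gofmt -s: []MirrorStatus{MirrorStatus{Name: ...}, ...}
	status := []MirrorStatus{
		{Name: "arch-sync1", Worker: "w1"},
		{Name: "arch-sync2", Worker: "w2"},
	}
	fmt.Println(len(status))
}
```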

View File

@ -267,7 +267,7 @@ func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
if len(mirrorName) == 0 {
s.returnErrJSON(
c, http.StatusBadRequest,
errors.New("Mirror Name should not be empty"),
errors.New("mirror Name should not be empty"),
)
}
@ -312,7 +312,7 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
if len(mirrorName) == 0 {
s.returnErrJSON(
c, http.StatusBadRequest,
errors.New("Mirror Name should not be empty"),
errors.New("mirror Name should not be empty"),
)
}

View File

@ -3,7 +3,7 @@ package manager
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"math/rand"
"net/http"
"strings"
@ -36,7 +36,7 @@ func TestHTTPServer(t *testing.T) {
So(s, ShouldNotBeNil)
s.setDBAdapter(&mockDBAdapter{
workerStore: map[string]WorkerStatus{
_magicBadWorkerID: WorkerStatus{
_magicBadWorkerID: {
ID: _magicBadWorkerID,
}},
statusStore: make(map[string]MirrorStatus),
@ -48,7 +48,7 @@ func TestHTTPServer(t *testing.T) {
So(resp.StatusCode, ShouldEqual, http.StatusOK)
So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8")
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
So(err, ShouldBeNil)
var p map[string]string
err = json.Unmarshal(body, &p)
@ -268,8 +268,8 @@ func TestHTTPServer(t *testing.T) {
Convey("Update schedule of valid mirrors", func(ctx C) {
msg := MirrorSchedules{
Schedules: []MirrorSchedule{
MirrorSchedule{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
MirrorSchedule{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
},
}
@ -346,8 +346,8 @@ func TestHTTPServer(t *testing.T) {
invalidWorker := "test_worker2"
sch := MirrorSchedules{
Schedules: []MirrorSchedule{
MirrorSchedule{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
MirrorSchedule{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
{MirrorName: "arch-sync1", NextSchedule: time.Now().Add(time.Minute * 10)},
{MirrorName: "arch-sync2", NextSchedule: time.Now().Add(time.Minute * 7)},
},
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules",

View File

@ -3,7 +3,7 @@ package worker
import (
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"os/exec"
"path/filepath"
@ -12,32 +12,32 @@ import (
"golang.org/x/sys/unix"
"github.com/moby/moby/pkg/reexec"
cgv1 "github.com/containerd/cgroups"
cgv2 "github.com/containerd/cgroups/v2"
"github.com/moby/moby/pkg/reexec"
contspecs "github.com/opencontainers/runtime-spec/specs-go"
)
type cgroupHook struct {
emptyHook
cgCfg cgroupConfig
memLimit MemBytes
cgMgrV1 cgv1.Cgroup
cgMgrV2 *cgv2.Manager
cgCfg cgroupConfig
memLimit MemBytes
cgMgrV1 cgv1.Cgroup
cgMgrV2 *cgv2.Manager
}
type execCmd string
const (
cmdCont execCmd = "cont"
cmdAbrt execCmd = "abrt"
cmdCont execCmd = "cont"
cmdAbrt execCmd = "abrt"
)
func init () {
func init() {
reexec.Register("tunasync-exec", waitExec)
}
func waitExec () {
func waitExec() {
binary, err := exec.LookPath(os.Args[1])
if err != nil {
panic(err)
@ -46,7 +46,7 @@ func waitExec () {
pipe := os.NewFile(3, "pipe")
if pipe != nil {
if _, err := pipe.Stat(); err == nil {
cmdBytes, err := ioutil.ReadAll(pipe)
cmdBytes, err := io.ReadAll(pipe)
if err != nil {
panic(err)
}
@ -54,11 +54,11 @@ func waitExec () {
}
cmd := execCmd(string(cmdBytes))
switch cmd {
case cmdAbrt:
fallthrough
default:
panic("Exited on request")
case cmdCont:
case cmdAbrt:
fallthrough
default:
panic("Exited on request")
case cmdCont:
}
}
}
@ -71,7 +71,7 @@ func waitExec () {
panic("Exec failed.")
}
func initCgroup(cfg *cgroupConfig) (error) {
func initCgroup(cfg *cgroupConfig) error {
logger.Debugf("Initializing cgroup")
baseGroup := cfg.Group
@ -103,7 +103,7 @@ func initCgroup(cfg *cgroupConfig) (error) {
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil);
wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil)
if err != nil {
return err
}
@ -117,8 +117,8 @@ func initCgroup(cfg *cgroupConfig) (error) {
if len(procs) == 0 {
break
}
for _, p := range(procs) {
if err := wkrMgr.AddProc(p); err != nil{
for _, p := range procs {
if err := wkrMgr.AddProc(p); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
@ -129,7 +129,7 @@ func initCgroup(cfg *cgroupConfig) (error) {
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV2.NewChild("__test", nil);
testMgr, err := cfg.cgMgrV2.NewChild("__test", nil)
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
@ -152,9 +152,9 @@ func initCgroup(cfg *cgroupConfig) (error) {
if baseGroup != "" {
pather = cgv1.StaticPath(baseGroup)
} else {
pather = (func(p cgv1.Path) (cgv1.Path){
return func(subsys cgv1.Name) (string, error){
path, err := p(subsys);
pather = (func(p cgv1.Path) cgv1.Path {
return func(subsys cgv1.Name) (string, error) {
path, err := p(subsys)
if err != nil {
return "", err
}
@ -167,14 +167,14 @@ func initCgroup(cfg *cgroupConfig) (error) {
}
logger.Infof("Loading cgroup")
var err error
if cfg.cgMgrV1, err = cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error{
if cfg.cgMgrV1, err = cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error {
cfg.InitCheck = cgv1.AllowAny
return nil
}); err != nil {
return err
}
logger.Debugf("Available subsystems:")
for _, subsys := range(cfg.cgMgrV1.Subsystems()) {
for _, subsys := range cfg.cgMgrV1.Subsystems() {
p, err := pather(subsys.Name())
if err != nil {
return err
@ -183,11 +183,11 @@ func initCgroup(cfg *cgroupConfig) (error) {
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{});
wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{})
if err != nil {
return err
}
for _, subsys := range(cfg.cgMgrV1.Subsystems()) {
for _, subsys := range cfg.cgMgrV1.Subsystems() {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
for {
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
@ -202,7 +202,7 @@ func initCgroup(cfg *cgroupConfig) (error) {
if len(procs) == 0 {
break
}
for _, proc := range(procs) {
for _, proc := range procs {
if err := wkrMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
@ -215,7 +215,7 @@ func initCgroup(cfg *cgroupConfig) (error) {
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{});
testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{})
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
@ -223,7 +223,7 @@ func initCgroup(cfg *cgroupConfig) (error) {
if err := testMgr.Delete(); err != nil {
return err
}
for _, subsys := range(cfg.cgMgrV1.Subsystems()) {
for _, subsys := range cfg.cgMgrV1.Subsystems() {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
@ -253,7 +253,7 @@ func newCgroupHook(p mirrorProvider, cfg cgroupConfig, memLimit MemBytes) *cgrou
emptyHook: emptyHook{
provider: p,
},
cgCfg: cfg,
cgCfg: cfg,
memLimit: memLimit,
}
}
@ -263,7 +263,7 @@ func (c *cgroupHook) preExec() error {
logger.Debugf("Creating v2 cgroup for task %s", c.provider.Name())
var resSet *cgv2.Resources
if c.memLimit != 0 {
resSet = &cgv2.Resources {
resSet = &cgv2.Resources{
Memory: &cgv2.Memory{
Max: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
@ -279,7 +279,7 @@ func (c *cgroupHook) preExec() error {
logger.Debugf("Creating v1 cgroup for task %s", c.provider.Name())
var resSet contspecs.LinuxResources
if c.memLimit != 0 {
resSet = contspecs.LinuxResources {
resSet = contspecs.LinuxResources{
Memory: &contspecs.LinuxMemory{
Limit: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
@ -334,7 +334,7 @@ func (c *cgroupHook) killAll() error {
taskList := []int{}
if c.cgCfg.isUnified {
procs, err := c.cgMgrV2.Procs(false)
if (err != nil) {
if err != nil {
return []int{}, err
}
for _, proc := range procs {
@ -342,16 +342,16 @@ func (c *cgroupHook) killAll() error {
}
} else {
taskSet := make(map[int]struct{})
for _, subsys := range(c.cgMgrV1.Subsystems()) {
for _, subsys := range c.cgMgrV1.Subsystems() {
procs, err := c.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
return []int{}, err
}
for _, proc := range(procs) {
for _, proc := range procs {
taskSet[proc.Pid] = struct{}{}
}
}
for proc := range(taskSet) {
for proc := range taskSet {
taskList = append(taskList, proc)
}
}
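
Aside from the io.ReadAll swap and the reordered imports, the changes in this file are mechanical gofmt fixes: range takes no parentheses, statements take no trailing semicolons, a single unnamed return type needs no parentheses, and composite literals lose the space before the brace. A condensed illustration of the patterns rewritten above:

```go
package main

import "fmt"

// collectPids shows the gofmt-clean forms: `... []int` rather than
// `(...) ([]int)`, `range taskSet` rather than `range(taskSet)`, and no
// trailing semicolons.
func collectPids(taskSet map[int]struct{}) []int {
	taskList := []int{}
	for proc := range taskSet {
		taskList = append(taskList, proc)
	}
	return taskList
}

func main() {
	fmt.Println(len(collectPids(map[int]struct{}{1: {}, 2: {}})))
}
```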

View File

@ -1,16 +1,16 @@
package worker
import (
"io/ioutil"
"errors"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"testing"
"time"
"errors"
"syscall"
cgv1 "github.com/containerd/cgroups"
cgv2 "github.com/containerd/cgroups/v2"
units "github.com/docker/go-units"
@ -21,14 +21,14 @@ import (
func init() {
_, testReexec := os.LookupEnv("TESTREEXEC")
if ! testReexec {
if !testReexec {
reexec.Init()
}
}
func TestReexec(t *testing.T) {
testCase, testReexec := os.LookupEnv("TESTREEXEC")
if ! testReexec {
if !testReexec {
return
}
for len(os.Args) > 1 {
@ -39,51 +39,51 @@ func TestReexec(t *testing.T) {
}
}
switch testCase {
case "1":
Convey("Reexec should panic when command not found", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldPanicWith, exec.ErrNotFound)
})
case "2":
Convey("Reexec should run when fd 3 is not open", t, func(ctx C){
So((func() error{
pipe := os.NewFile(3, "pipe")
if pipe == nil {
return errors.New("pipe is nil")
} else {
_, err := pipe.Stat()
return err
}
})(), ShouldNotBeNil)
So(func(){
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "3":
Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldPanicWith, "Exited on request")
})
case "4":
Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "5":
Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldNotPanic)
})
case "1":
Convey("Reexec should panic when command not found", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, exec.ErrNotFound)
})
case "2":
Convey("Reexec should run when fd 3 is not open", t, func(ctx C) {
So((func() error {
pipe := os.NewFile(3, "pipe")
if pipe == nil {
return errors.New("pipe is nil")
} else {
_, err := pipe.Stat()
return err
}
})(), ShouldNotBeNil)
So(func() {
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "3":
Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, "Exited on request")
})
case "4":
Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "5":
Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C) {
So(func() {
reexec.Init()
}, ShouldNotPanic)
})
}
}
func TestCgroup(t *testing.T) {
var cgcf *cgroupConfig
Convey("init cgroup", t, func(ctx C){
Convey("init cgroup", t, func(ctx C) {
_, useCurrentCgroup := os.LookupEnv("USECURCGROUP")
cgcf = &cgroupConfig{BasePath: "/sys/fs/cgroup", Group: "tunasync", Subsystem: "cpu"}
if useCurrentCgroup {
@ -97,28 +97,28 @@ func TestCgroup(t *testing.T) {
So(cgcf.cgMgrV1, ShouldNotBeNil)
}
Convey("Cgroup Should Work", func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
daemonScript := filepath.Join(tmpDir, "daemon.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
bgPidfile := filepath.Join(tmpDir, "bg.pid")
Convey("Cgroup Should Work", func(ctx C) {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
daemonScript := filepath.Join(tmpDir, "daemon.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
bgPidfile := filepath.Join(tmpDir, "bg.pid")
c := cmdConfig{
name: "tuna-cgroup",
upstreamURL: "http://mirrors.tuna.moe/",
command: cmdScript + " " + daemonScript,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"BG_PIDFILE": bgPidfile,
},
}
cmdScriptContent := `#!/bin/bash
c := cmdConfig{
name: "tuna-cgroup",
upstreamURL: "http://mirrors.tuna.moe/",
command: cmdScript + " " + daemonScript,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"BG_PIDFILE": bgPidfile,
},
}
cmdScriptContent := `#!/bin/bash
redirect-std() {
[[ -t 0 ]] && exec </dev/null
[[ -t 1 ]] && exec >/dev/null
@ -144,167 +144,127 @@ echo $$
daemonize $@
sleep 5
`
daemonScriptContent := `#!/bin/bash
daemonScriptContent := `#!/bin/bash
echo $$ > $BG_PIDFILE
sleep 30
`
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = ioutil.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 0)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
go func() {
err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()
time.Sleep(1 * time.Second)
// Daemon should be started
daemonPidBytes, err := ioutil.ReadFile(bgPidfile)
So(err, ShouldBeNil)
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
logger.Debug("daemon pid: %s", daemonPid)
procDir := filepath.Join("/proc", daemonPid)
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
err = provider.Terminate()
So(err, ShouldBeNil)
// Daemon won't be killed
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
// Daemon can be killed by cgroup killer
cg.postExec()
_, err = os.Stat(procDir)
So(os.IsNotExist(err), ShouldBeTrue)
})
Convey("Rsync Memory Should Be Limited", func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna-cgroup",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 512 * units.MiB)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
if cgcf.isUnified {
cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name())
if useCurrentCgroup {
group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name()))
So(err, ShouldBeNil)
cgpath = filepath.Join(cgcf.BasePath, group)
}
memoLimit, err := ioutil.ReadFile(filepath.Join(cgpath, "memory.max"))
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
} else {
for _, subsys := range(cg.cgMgrV1.Subsystems()) {
if subsys.Name() == cgv1.Memory {
cgpath := filepath.Join(cgcf.Group, provider.Name())
if useCurrentCgroup {
p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory)
err = os.WriteFile(daemonScript, []byte(daemonScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 0)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
go func() {
err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()
time.Sleep(1 * time.Second)
// Daemon should be started
daemonPidBytes, err := os.ReadFile(bgPidfile)
So(err, ShouldBeNil)
daemonPid := strings.Trim(string(daemonPidBytes), " \n")
logger.Debug("daemon pid: %s", daemonPid)
procDir := filepath.Join("/proc", daemonPid)
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
err = provider.Terminate()
So(err, ShouldBeNil)
// Daemon won't be killed
_, err = os.Stat(procDir)
So(err, ShouldBeNil)
// Daemon can be killed by cgroup killer
cg.postExec()
_, err = os.Stat(procDir)
So(os.IsNotExist(err), ShouldBeTrue)
})
Convey("Rsync Memory Should Be Limited", func() {
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna-cgroup",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, *cgcf, 512*units.MiB)
provider.AddHook(cg)
err = cg.preExec()
So(err, ShouldBeNil)
if cgcf.isUnified {
cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name())
if useCurrentCgroup {
group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name()))
So(err, ShouldBeNil)
cgpath = filepath.Join(cgcf.BasePath, group)
}
memoLimit, err := os.ReadFile(filepath.Join(cgpath, "memory.max"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
} else {
for _, subsys := range cg.cgMgrV1.Subsystems() {
if subsys.Name() == cgv1.Memory {
cgpath := filepath.Join(cgcf.Group, provider.Name())
if useCurrentCgroup {
p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory)
So(err, ShouldBeNil)
cgpath = p
}
memoLimit, err := os.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes"))
So(err, ShouldBeNil)
cgpath = p
}
memoLimit, err := ioutil.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
}
}
cg.postExec()
So(cg.cgMgrV1, ShouldBeNil)
})
Reset(func() {
if cgcf.isUnified {
if cgcf.Group == "" {
wkrg, err := cgv2.NestedGroupPath("");
So(err, ShouldBeNil)
wkrMgr, err := cgv2.LoadManager("/sys/fs/cgroup", wkrg);
allCtrls, err := wkrMgr.Controllers()
So(err, ShouldBeNil)
err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable)
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV2
for {
logger.Debugf("Restoring pids")
procs, err := wkrMgr.Procs(false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, p := range(procs) {
if err := origMgr.AddProc(p); err != nil{
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
} else {
if cgcf.Group == "" {
pather := (func(p cgv1.Path) (cgv1.Path){
return func(subsys cgv1.Name) (string, error){
path, err := p(subsys);
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
wkrMgr, err := cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error{
cfg.InitCheck = cgv1.AllowAny
return nil
})
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV1
for _, subsys := range(wkrMgr.Subsystems()){
cg.postExec()
So(cg.cgMgrV1, ShouldBeNil)
})
Reset(func() {
if cgcf.isUnified {
if cgcf.Group == "" {
wkrg, err := cgv2.NestedGroupPath("")
So(err, ShouldBeNil)
wkrMgr, err := cgv2.LoadManager("/sys/fs/cgroup", wkrg)
allCtrls, err := wkrMgr.Controllers()
So(err, ShouldBeNil)
err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable)
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV2
for {
procs, err := wkrMgr.Processes(subsys.Name(), false)
logger.Debugf("Restoring pids")
procs, err := wkrMgr.Procs(false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, proc := range(procs) {
if err := origMgr.Add(proc); err != nil {
for _, p := range procs {
if err := origMgr.AddProc(p); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
@ -313,11 +273,51 @@ sleep 30
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
} else {
if cgcf.Group == "" {
pather := (func(p cgv1.Path) cgv1.Path {
return func(subsys cgv1.Name) (string, error) {
path, err := p(subsys)
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
wkrMgr, err := cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error {
cfg.InitCheck = cgv1.AllowAny
return nil
})
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV1
for _, subsys := range wkrMgr.Subsystems() {
for {
procs, err := wkrMgr.Processes(subsys.Name(), false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, proc := range procs {
if err := origMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
}
})
})
})
}

View File

@ -2,11 +2,11 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
units "github.com/docker/go-units"
. "github.com/smartystreets/goconvey/convey"
@ -76,11 +76,11 @@ exec_on_failure = [
})
Convey("Everything should work on valid config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
@ -92,7 +92,7 @@ exec_on_failure = [
curCfgBlob := cfgBlob + incSection
err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@ -116,9 +116,9 @@ provider = "two-stage-rsync"
stage1_profile = "debian"
use_ipv6 = true
`
err = ioutil.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
err = os.WriteFile(filepath.Join(tmpDir, "debian.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil)
err = ioutil.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
err = os.WriteFile(filepath.Join(tmpDir, "ubuntu.conf"), []byte(incBlob2), 0644)
So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name())
@ -145,20 +145,20 @@ use_ipv6 = true
So(m.Name, ShouldEqual, "debian")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 256 * units.MiB)
So(m.MemoryLimit.Value(), ShouldEqual, 256*units.MiB)
m = cfg.Mirrors[2]
So(m.Name, ShouldEqual, "fedora")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provRsync)
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
So(m.MemoryLimit.Value(), ShouldEqual, 128 * units.MiB)
So(m.MemoryLimit.Value(), ShouldEqual, 128*units.MiB)
m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-cd")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 0)
So(m.MemoryLimit.Value(), ShouldEqual, 0)
m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "debian-security")
@ -170,11 +170,11 @@ use_ipv6 = true
})
Convey("Everything should work on nested config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
@ -186,7 +186,7 @@ use_ipv6 = true
curCfgBlob := cfgBlob + incSection
err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
err = os.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@ -212,7 +212,7 @@ use_ipv6 = true
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = ioutil.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
err = os.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name())
@ -266,11 +266,11 @@ use_ipv6 = true
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Providers can be inited from a valid config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@ -317,7 +317,7 @@ use_ipv6 = true
})
Convey("MirrorSubdir should work", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
tmpfile, err := os.CreateTemp("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
@ -363,7 +363,7 @@ use_ipv6 = true
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
err = os.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()

View File

@ -2,12 +2,12 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
units "github.com/docker/go-units"
"github.com/codeskyblue/go-sh"
@ -40,7 +40,7 @@ func getDockerByName(name string) (string, error) {
func TestDocker(t *testing.T) {
Convey("Docker Should Work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
@ -64,7 +64,7 @@ func TestDocker(t *testing.T) {
echo ${TEST_CONTENT}
sleep 20
`
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
err = os.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
@ -125,7 +125,7 @@ sleep 20
So(names, ShouldEqual, "")
// check log content
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput+"\n")

View File

@ -1,7 +1,6 @@
package worker
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -13,7 +12,7 @@ import (
func TestExecPost(t *testing.T) {
Convey("ExecPost should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -46,7 +45,7 @@ echo $TUNASYNC_UPSTREAM_URL
echo $TUNASYNC_LOG_FILE
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -64,7 +63,7 @@ echo $TUNASYNC_LOG_FILE
expectedOutput := "success\n"
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
So(err, ShouldBeNil)
So(string(outputContent), ShouldEqual, expectedOutput)
})
@ -85,7 +84,7 @@ echo $TUNASYNC_LOG_FILE
exit 1
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -105,7 +104,7 @@ exit 1
expectedOutput := "failure\n"
outputContent, err := ioutil.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
outputContent, err := os.ReadFile(filepath.Join(provider.WorkingDir(), "exit_status"))
So(err, ShouldBeNil)
So(string(outputContent), ShouldEqual, expectedOutput)
})

View File

@ -2,7 +2,6 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -17,7 +16,7 @@ func TestMirrorJob(t *testing.T) {
InitLogger(true, true, false)
Convey("MirrorJob should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -58,9 +57,9 @@ func TestMirrorJob(t *testing.T) {
provider.upstreamURL,
provider.LogFile(),
)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := ioutil.ReadFile(scriptFile)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
@ -86,7 +85,7 @@ func TestMirrorJob(t *testing.T) {
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobStart
@ -123,11 +122,11 @@ sleep 3
echo $TUNASYNC_WORKING_DIR
echo '------'
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
hookScriptFile := filepath.Join(tmpDir, "hook.sh")
err = ioutil.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
h, err := newExecPostHook(provider, execOnFailure, hookScriptFile)
@ -188,7 +187,7 @@ echo $TUNASYNC_WORKING_DIR
sleep 5
echo $TUNASYNC_WORKING_DIR
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
@ -213,7 +212,7 @@ echo $TUNASYNC_WORKING_DIR
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
@ -236,7 +235,7 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
@ -270,7 +269,7 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
@ -326,7 +325,7 @@ echo $TUNASYNC_WORKING_DIR
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
@ -341,7 +340,7 @@ echo $TUNASYNC_WORKING_DIR
sleep 10
echo $TUNASYNC_WORKING_DIR
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
@ -364,7 +363,7 @@ echo $TUNASYNC_WORKING_DIR
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
@ -404,7 +403,7 @@ func TestConcurrentMirrorJobs(t *testing.T) {
InitLogger(true, true, false)
Convey("Concurrent MirrorJobs should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)

View File

@ -2,7 +2,6 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -39,7 +38,7 @@ func (l *logLimiter) preExec() error {
}
logDir := p.LogDir()
files, err := ioutil.ReadDir(logDir)
files, err := os.ReadDir(logDir)
if err != nil {
if os.IsNotExist(err) {
os.MkdirAll(logDir, 0755)
@ -50,7 +49,8 @@ func (l *logLimiter) preExec() error {
matchedFiles := []os.FileInfo{}
for _, f := range files {
if strings.HasPrefix(f.Name(), p.Name()) {
matchedFiles = append(matchedFiles, f)
info, _ := f.Info()
matchedFiles = append(matchedFiles, info)
}
}
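
os.ReadDir (Go 1.16) returns []fs.DirEntry rather than the []os.FileInfo that ioutil.ReadDir produced, so the limiter calls DirEntry.Info() to recover the FileInfo it needs; note the diff discards the Info() error. A sketch of the same pattern with the error handled:

```go
package main

import (
	"os"
	"strings"
)

// matchedLogs lists logDir and upgrades matching entries to os.FileInfo
// via DirEntry.Info(); the diff above uses `info, _ := f.Info()` instead.
func matchedLogs(logDir, prefix string) ([]os.FileInfo, error) {
	entries, err := os.ReadDir(logDir)
	if err != nil {
		return nil, err
	}
	matched := []os.FileInfo{}
	for _, e := range entries {
		if strings.HasPrefix(e.Name(), prefix) {
			info, err := e.Info()
			if err != nil {
				continue // entry vanished between ReadDir and Info
			}
			matched = append(matched, info)
		}
	}
	return matched, nil
}

func main() {
	files, _ := matchedLogs(os.TempDir(), "tunasync")
	for _, f := range files {
		println(f.Name(), f.Size())
	}
}
```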

View File

@ -2,7 +2,6 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -14,8 +13,8 @@ import (
func TestLogLimiter(t *testing.T) {
Convey("LogLimiter should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpLogDir, err := ioutil.TempDir("", "tunasync-log")
tmpDir, _ := os.MkdirTemp("", "tunasync")
tmpLogDir, err := os.MkdirTemp("", "tunasync-log")
defer os.RemoveAll(tmpDir)
defer os.RemoveAll(tmpLogDir)
So(err, ShouldBeNil)
@ -58,7 +57,7 @@ echo $TUNASYNC_UPSTREAM_URL
echo $TUNASYNC_LOG_FILE
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -86,7 +85,7 @@ echo $TUNASYNC_LOG_FILE
logFile,
)
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest"))
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
})
@ -104,7 +103,7 @@ echo $TUNASYNC_LOG_FILE
sleep 5
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
go job.Run(managerChan, semaphore)
@ -134,10 +133,10 @@ sleep 5
logFile,
)
loggedContent, err := ioutil.ReadFile(filepath.Join(provider.LogDir(), "latest"))
loggedContent, err := os.ReadFile(filepath.Join(provider.LogDir(), "latest"))
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
loggedContent, err = ioutil.ReadFile(logFile + ".fail")
loggedContent, err = os.ReadFile(logFile + ".fail")
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
})

View File

@ -2,7 +2,6 @@ package worker
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@ -14,7 +13,7 @@ import (
func TestRsyncProvider(t *testing.T) {
Convey("Rsync Provider should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
@ -80,7 +79,7 @@ echo "Total file size: 1.33T bytes"
echo "Done"
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@ -100,7 +99,7 @@ exit 0
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
@ -109,7 +108,7 @@ exit 0
})
Convey("If the rsync program fails", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
@ -131,7 +130,7 @@ exit 0
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error")
})
@ -140,7 +139,7 @@ exit 0
func TestRsyncProviderWithAuthentication(t *testing.T) {
Convey("Rsync Provider with password should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
@ -180,7 +179,7 @@ sleep 1
echo "Done"
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@ -200,7 +199,7 @@ exit 0
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
@ -211,7 +210,7 @@ exit 0
func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
Convey("Rsync Provider with overridden options should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
@ -248,7 +247,7 @@ sleep 1
echo "Done"
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@ -263,7 +262,7 @@ exit 0
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
@ -274,7 +273,7 @@ exit 0
func TestRsyncProviderWithDocker(t *testing.T) {
Convey("Rsync in Docker should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
@ -323,9 +322,9 @@ fi
shift
done
`
err = ioutil.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = ioutil.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
err = os.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
@ -338,7 +337,7 @@ done
err = hook.postExec()
So(err, ShouldBeNil)
}
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, "__some_pattern")
})
@ -346,7 +345,7 @@ done
func TestCmdProvider(t *testing.T) {
Convey("Command Provider should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "cmd.sh")
@ -391,25 +390,25 @@ echo $AOSP_REPO_BIN
provider.LogFile(),
"/usr/local/bin/repo",
)
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := ioutil.ReadFile(scriptFile)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
})
Convey("If a command fails", func() {
scriptContent := `exit 1`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
readedScriptContent, err := ioutil.ReadFile(scriptFile)
readedScriptContent, err := os.ReadFile(scriptFile)
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
@ -422,7 +421,7 @@ echo $AOSP_REPO_BIN
scriptContent := `#!/bin/bash
sleep 10
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 1)
@ -440,7 +439,7 @@ sleep 10
})
})
Convey("Command Provider without log file should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
@ -474,7 +473,7 @@ sleep 10
})
})
Convey("Command Provider with RegExprs should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
@ -557,7 +556,7 @@ sleep 10
func TestTwoStageRsyncProvider(t *testing.T) {
Convey("TwoStageRsync Provider should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
@ -597,7 +596,7 @@ sleep 1
echo "Done"
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 2))
@ -627,7 +626,7 @@ exit 0
),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
@ -639,7 +638,7 @@ echo $@
sleep 10
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
err = os.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 2)
@ -661,7 +660,7 @@ exit 0
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldStartWith, expectedOutput)
// fmt.Println(string(loggedContent))
@ -669,7 +668,7 @@ exit 0
})
Convey("If the rsync program fails", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
@ -691,7 +690,7 @@ exit 0
err = provider.Run(make(chan empty, 2))
So(err, ShouldNotBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
loggedContent, err := os.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Error in socket I/O")

View File

@ -317,7 +317,7 @@ func (w *Worker) runSchedule() {
schedInfo := w.schedule.GetJobs()
w.updateSchedInfo(schedInfo)
tick := time.Tick(5 * time.Second)
tick := time.NewTicker(5 * time.Second).C
for {
select {
case jobMsg := <-w.managerChan:
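
time.Tick is flagged by staticcheck SA1015 because the underlying time.Ticker can never be stopped and is therefore leaked. The one-liner in the diff keeps only the channel, which satisfies the linter but still cannot stop the ticker; a fuller sketch with an explicit lifetime (the done channel is hypothetical):

```go
package main

import (
	"fmt"
	"time"
)

// runSchedule ticks every 5 seconds until done is closed; unlike
// time.Tick, the explicit Ticker is stopped when the loop exits.
func runSchedule(done <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case t := <-ticker.C:
			fmt.Println("schedule tick at", t)
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	go runSchedule(done)
	time.Sleep(11 * time.Second)
	close(done)
}
```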

View File

@ -1,7 +1,6 @@
package worker
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -13,7 +12,7 @@ import (
func TestZFSHook(t *testing.T) {
Convey("ZFS Hook should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpDir, err := os.MkdirTemp("", "tunasync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
@ -45,4 +44,4 @@ func TestZFSHook(t *testing.T) {
So(err, ShouldNotBeNil)
})
})
}
}