commit b4fe4db82a
Merge remote-tracking branch 'origin/dev'

@@ -2,7 +2,7 @@

First, make sure the config file for tunasynctl is already written: `~/.config/tunasync/ctl.conf`

```toml
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""

@@ -10,7 +10,7 @@ ca_cert = ""

Then:

```shell
$ tunasynctl disable -w <worker_id> <mirror_name>
$ tunasynctl flush
```

@@ -18,8 +18,9 @@ $ tunasynctl flush

## Hot-reload `worker.conf`

```shell
$ tunasynctl reload -w <worker_id>
```

e.g., to remove the `elvish` mirror of `test_worker`:

@@ -29,7 +30,7 @@ e.g., to remove the `elvish` mirror of `test_worker`:

3. Then run:

```shell
$ tunasynctl reload -w test_worker
$ tunasynctl disable -w test_worker elvish
$ tunasynctl flush

@@ -40,15 +41,53 @@ $ tunasynctl flush

## Remove a worker

```shell
$ tunasynctl rm-worker -w <worker_id>
```

e.g.

```shell
$ tunasynctl rm-worker -w test_worker
```

## Update a mirror's size

```shell
$ tunasynctl set-size -w <worker_id> <mirror_name> <size>
```

Here the trailing `<size>` argument is set by the operator, or generated by some scheduled script.

Since `du -s` is rather time-consuming, the mirror size can instead be read directly from rsync's log file.
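
A minimal sketch of such a script, assuming the mirror is synced with rsync run with `--stats` (so the log contains a `Total file size` line); the log path and the worker/mirror names are placeholders, not tunasync defaults:

```shell
# Sketch only: read the mirror size from an rsync --stats log instead of `du -s`.
# The log path below is hypothetical; point it at wherever your worker keeps
# the rsync log for this mirror.
LOG=/var/log/tunasync/elvish/latest
# rsync --stats prints a line like "Total file size: 1,234,567,890 bytes"
SIZE=$(grep 'Total file size' "$LOG" | grep -oE '[0-9,]+' | head -n 1 | tr -d ,)
tunasynctl set-size -w test_worker elvish "$SIZE"
```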

## Btrfs filesystem snapshots

If the mirror files are stored on a partition formatted with Btrfs, the snapshot feature provided by Btrfs can be enabled. For each mirror, tunasync updates its snapshot after every successful sync.

Add the following configuration to `worker.conf` to enable Btrfs snapshots:

```toml
[btrfs_snapshot]
enable = true
snapshot_path = "/path/to/snapshot/directory"
```

Here `snapshot_path` is the directory where the snapshots live. If this directory is what gets published, the sync process becomes atomic from the mirror users' point of view: users can never receive files that are still in an intermediate (incompletely synced) state.

A snapshot path can also be set for a specific mirror in its `[[mirrors]]` section, e.g.:

```toml
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elvish.io/elvish/"
interval = 1440
snapshot_path = "/data/publish/elvish"
```

**Tip:**

If the user running tunasync has no root privileges, make sure that this user has write and execute permissions on both the mirror sync directory and the snapshot directory, and mount the corresponding Btrfs partition with the [`user_subvol_rm_allowed` option](https://btrfs.wiki.kernel.org/index.php/Manpage/btrfs(5)#MOUNT_OPTIONS).
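
For example (a sketch only; the device and mount point are placeholders):

```shell
# one-off mount with the option enabled
$ sudo mount -o user_subvol_rm_allowed /dev/sdb1 /data/mirrors
# or persistently, via a line like this in /etc/fstab:
# /dev/sdb1  /data/mirrors  btrfs  defaults,user_subvol_rm_allowed  0  0
```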

worker/btrfs_snapshot_hook.go (new file, 90 lines)

@@ -0,0 +1,90 @@
package worker

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/dennwc/btrfs"
)

type btrfsSnapshotHook struct {
    provider           mirrorProvider
    mirrorSnapshotPath string
}

// the user who runs the jobs (typically `tunasync`) should be granted the permission to run btrfs commands
// TODO: check if the filesystem is Btrfs
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
    mirrorSnapshotPath := mirror.SnapshotPath
    if mirrorSnapshotPath == "" {
        mirrorSnapshotPath = filepath.Join(snapshotPath, provider.Name())
    }
    return &btrfsSnapshotHook{
        provider:           provider,
        mirrorSnapshotPath: mirrorSnapshotPath,
    }
}

// preJob checks whether the job's working directory exists:
// Case 1: does not exist => create a new subvolume
// Case 2: exists as a subvolume => nothing to do
// Case 3: exists as a plain directory => report an error
func (h *btrfsSnapshotHook) preJob() error {
    path := h.provider.WorkingDir()
    if _, err := os.Stat(path); os.IsNotExist(err) {
        // create subvolume
        err := btrfs.CreateSubVolume(path)
        if err != nil {
            logger.Errorf("failed to create Btrfs subvolume %s: %s", path, err.Error())
            return err
        }
        logger.Noticef("created new Btrfs subvolume %s", path)
    } else {
        if is, err := btrfs.IsSubVolume(path); err != nil {
            return err
        } else if !is {
            return fmt.Errorf("path %s exists but isn't a Btrfs subvolume", path)
        }
    }
    return nil
}

// nothing to do before the sync command starts
func (h *btrfsSnapshotHook) preExec() error {
    return nil
}

// nothing to do right after the sync command exits
func (h *btrfsSnapshotHook) postExec() error {
    return nil
}

// delete the old snapshot if it exists, then create a new one
func (h *btrfsSnapshotHook) postSuccess() error {
    if _, err := os.Stat(h.mirrorSnapshotPath); !os.IsNotExist(err) {
        isSubVol, err := btrfs.IsSubVolume(h.mirrorSnapshotPath)
        if err != nil {
            return err
        } else if !isSubVol {
            return fmt.Errorf("path %s exists and isn't a Btrfs snapshot", h.mirrorSnapshotPath)
        }
        // it is an old snapshot => delete it
        if err := btrfs.DeleteSubVolume(h.mirrorSnapshotPath); err != nil {
            logger.Errorf("failed to delete old Btrfs snapshot %s", h.mirrorSnapshotPath)
            return err
        }
        logger.Noticef("deleted old snapshot %s", h.mirrorSnapshotPath)
    }
    // create a new writable snapshot
    // (the snapshot is writable so that it can be deleted easily)
    if err := btrfs.SnapshotSubVolume(h.provider.WorkingDir(), h.mirrorSnapshotPath, false); err != nil {
        logger.Errorf("failed to create new Btrfs snapshot %s", h.mirrorSnapshotPath)
        return err
    }
    logger.Noticef("created new Btrfs snapshot %s", h.mirrorSnapshotPath)
    return nil
}

// on failure, keep the old snapshot => nothing to do
func (h *btrfsSnapshotHook) postFail() error {
    return nil
}
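
The snapshots this hook manages are ordinary Btrfs subvolumes, so a deployment can be sanity-checked with the standard btrfs tooling, e.g. (paths are placeholders):

```shell
$ sudo btrfs subvolume list /data/publish
$ sudo btrfs subvolume show /data/publish/elvish
```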

@@ -33,14 +33,15 @@ func (p *providerEnum) UnmarshalText(text []byte) error {

// Config represents worker config options
type Config struct {
    Global        globalConfig        `toml:"global"`
    Manager       managerConfig       `toml:"manager"`
    Server        serverConfig        `toml:"server"`
    Cgroup        cgroupConfig        `toml:"cgroup"`
    ZFS           zfsConfig           `toml:"zfs"`
    BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
    Docker        dockerConfig        `toml:"docker"`
    Include       includeConfig       `toml:"include"`
    Mirrors       []mirrorConfig      `toml:"mirrors"`
}

type globalConfig struct {

@@ -96,6 +97,11 @@ type zfsConfig struct {
    Zpool string `toml:"zpool"`
}

type btrfsSnapshotConfig struct {
    Enable       bool   `toml:"enable"`
    SnapshotPath string `toml:"snapshot_path"`
}

type includeConfig struct {
    IncludeMirrors string `toml:"include_mirrors"`
}

@@ -136,6 +142,8 @@ type mirrorConfig struct {
    DockerImage   string   `toml:"docker_image"`
    DockerVolumes []string `toml:"docker_volumes"`
    DockerOptions []string `toml:"docker_options"`

    SnapshotPath string `toml:"snapshot_path"`
}

// LoadConfig loads configuration

@@ -180,6 +180,11 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
        provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
    }

    // Add Btrfs Snapshot Hook
    if cfg.BtrfsSnapshot.Enable {
        provider.AddHook(newBtrfsSnapshotHook(provider, cfg.BtrfsSnapshot.SnapshotPath, mirror))
    }

    // Add Docker Hook
    if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
        provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))