Skip to content

Commit

Permalink
Merge branch 'master' into add-lint-action
Browse files Browse the repository at this point in the history
  • Loading branch information
moberghammer committed Mar 11, 2024
2 parents cd4e1f8 + 0d953a6 commit d226eed
Show file tree
Hide file tree
Showing 22 changed files with 431 additions and 259 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
/zfs-cleaner
/.idea/
/debian/.debhelper
12 changes: 12 additions & 0 deletions debian/changelog
Original file line number Diff line number Diff line change
@@ -1,3 +1,15 @@
zfs-cleaner (0.9alpha-1) any; urgency=low

  * Performance rewrite

-- Jesper Broge Jørgensen <[email protected]> Wed, 13 Jan 2021 17:01:31 +0000

zfs-cleaner (0.8alpha-1) any; urgency=low

  * Added age to destroy comment
  * Print configuration on verbose
  * Do not use args before checking if they exist

-- Anders Brander <[email protected]> Tue, 27 Oct 2020 15:23:31 +0000

zfs-cleaner (0.7alpha-1) any; urgency=low

* Add include keyword
Expand Down
1 change: 1 addition & 0 deletions droplet-test/all.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
include cleaner_*.conf
23 changes: 23 additions & 0 deletions droplet-test/clean.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
#!/bin/bash
# Tear down the droplet-test environment: destroy the test zpools, detach
# the loop devices backing them, and remove all test artifacts from the
# remote host. Companion to setup.sh / run.sh.
#
# BUGFIX: the original `set -x pipefail` only enabled -x and left the word
# "pipefail" as a positional parameter ($1); `-o pipefail` must be passed
# via -o. Deliberately no -e: every step is best-effort so a partially
# built environment can still be cleaned up.
set -xo pipefail

REMOTE=$(cat remotehost)

# Destroy the three test pools created by setup.sh (may already be gone).
ssh "$REMOTE" zpool destroy datastore0
ssh "$REMOTE" zpool destroy datastore1
ssh "$REMOTE" zpool destroy datastore2

# Detach the file-backed loop devices that served as pool vdevs.
ssh "$REMOTE" losetup -v -d /dev/loop10
ssh "$REMOTE" losetup -v -d /dev/loop11
ssh "$REMOTE" losetup -v -d /dev/loop12
ssh "$REMOTE" losetup -v -d /dev/loop13
ssh "$REMOTE" losetup -v -d /dev/loop14
ssh "$REMOTE" losetup -v -d /dev/loop15
ssh "$REMOTE" losetup -v -d /dev/loop16
ssh "$REMOTE" losetup -v -d /dev/loop17

# Remove the backing disk images.
ssh "$REMOTE" rm -rf /zfsmnt

# Remove configs, protect files and the deployed binary.
ssh "$REMOTE" rm -f /root/*.conf
ssh "$REMOTE" rm -f /root/*.protect
ssh "$REMOTE" rm -f /usr/local/bin/zfs-cleaner
11 changes: 11 additions & 0 deletions droplet-test/cleaner_datastore_0-1.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
plan cego-registry {
path datastore0
path datastore1

keep latest 100
keep 0s for 4h
keep 1h for 48h
keep 1d for 30d
protect </root/1.protect
protect </root/2.protect
}
9 changes: 9 additions & 0 deletions droplet-test/cleaner_datastore_2.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
plan mg-local-backup {
path datastore2

keep latest 2
keep 0s for 4h
keep 1h for 36h
keep 1d for 30d
keep 30d for 180d
}
9 changes: 9 additions & 0 deletions droplet-test/cleaner_non_existing_path.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
plan non_existing {
path datastore_non_existing

keep latest 2
keep 0s for 4h
keep 1h for 36h
keep 1d for 30d
keep 30d for 180d
}
1 change: 1 addition & 0 deletions droplet-test/remotehost
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
167.172.188.13
28 changes: 28 additions & 0 deletions droplet-test/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/bin/bash
# Run the droplet-test suite: deploy the freshly built zfs-cleaner binary
# and the test configs to the remote host (see remotehost), then exercise
# plancheck and the cleaner itself against the pools built by setup.sh.
# -e: abort on first failure, -x: trace commands, pipefail: fail on any
# element of a pipeline failing.
set -exo pipefail

REMOTE=$(cat remotehost)

# Deploy the locally built binary and every test config to the remote.
scp ../zfs-cleaner "$REMOTE:/usr/local/bin"
scp *.conf "$REMOTE:"

echo "without ignore empty"

ssh "$REMOTE" zfs-cleaner plancheck cleaner_datastore_0-1.conf

echo "with ignore empty"

ssh "$REMOTE" zfs-cleaner plancheck --ignore-empty cleaner_datastore_0-1.conf

echo "all"

# all.conf includes cleaner_*.conf, so this checks every plan at once.
ssh "$REMOTE" zfs-cleaner plancheck all.conf

echo "clean"

# Actually run the cleaner (destructive) against datastore0/1.
ssh "$REMOTE" zfs-cleaner cleaner_datastore_0-1.conf

echo "non existing"
# Plan referencing a dataset that does not exist; exercises error handling.
ssh "$REMOTE" zfs-cleaner cleaner_non_existing_path.conf

echo "done"
45 changes: 45 additions & 0 deletions droplet-test/setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
#!/bin/bash
# Provision the droplet-test environment on the remote host: install ZFS,
# create file-backed loop devices, build three raidz test pools, take a few
# snapshots, and write the snapshot-protection files referenced by the
# cleaner configs. Companion to run.sh / clean.sh.
set -exo pipefail

REMOTE=$(cat remotehost)

ssh "$REMOTE" mkdir -p /zfsmnt/
ssh "$REMOTE" apt install --yes zfsutils-linux

# datastore0: three 128 MiB file-backed vdevs in a raidz, with 3 snapshots.
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk0 bs=1M count=128
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk1 bs=1M count=128
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk2 bs=1M count=128

ssh "$REMOTE" losetup /dev/loop10 /zfsmnt/disk0
ssh "$REMOTE" losetup /dev/loop11 /zfsmnt/disk1
ssh "$REMOTE" losetup /dev/loop12 /zfsmnt/disk2

ssh "$REMOTE" zpool create -f datastore0 raidz /dev/loop10 /dev/loop11 /dev/loop12

ssh "$REMOTE" zfs snapshot datastore0@0
ssh "$REMOTE" zfs snapshot datastore0@1
ssh "$REMOTE" zfs snapshot datastore0@2

# datastore1: two vdevs, two snapshots.
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk3 bs=1M count=128
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk4 bs=1M count=128

ssh "$REMOTE" losetup /dev/loop13 /zfsmnt/disk3
ssh "$REMOTE" losetup /dev/loop14 /zfsmnt/disk4

ssh "$REMOTE" zpool create -f datastore1 raidz /dev/loop13 /dev/loop14

ssh "$REMOTE" zfs snapshot datastore1@0
ssh "$REMOTE" zfs snapshot datastore1@1

# Protection files: each names snapshot @1 so the cleaner must keep it.
# BUGFIX: quote the whole remote command as ONE ssh argument. The original
# `ssh "$REMOTE" bash -c "echo @1 > file"` had ssh join the words, so the
# remote ran `bash -c echo` (prints nothing) with its output redirected,
# leaving the protect files empty instead of containing "@1".
ssh "$REMOTE" "echo @1 > /root/1.protect"
ssh "$REMOTE" "echo @1 > /root/2.protect"

# datastore2: three vdevs, no snapshots yet.
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk5 bs=1M count=128
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk6 bs=1M count=128
ssh "$REMOTE" dd if=/dev/zero of=/zfsmnt/disk7 bs=1M count=128

ssh "$REMOTE" losetup /dev/loop15 /zfsmnt/disk5
ssh "$REMOTE" losetup /dev/loop16 /zfsmnt/disk6
ssh "$REMOTE" losetup /dev/loop17 /zfsmnt/disk7

ssh "$REMOTE" zpool create -f datastore2 raidz /dev/loop15 /dev/loop16 /dev/loop17
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ module github.com/cego/zfs-cleaner
go 1.12

require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3 // indirect
)
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
Expand Down
74 changes: 16 additions & 58 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ import (
"fmt"
"io"
"os"
"os/exec"
"sync"
"syscall"
"time"
Expand All @@ -18,94 +17,67 @@ var (
verbose = false
dryrun = false
showVersion = false

commandName = "/sbin/zfs"
commandArguments = []string{"list", "-t", "snapshot", "-o", "name,creation", "-s", "creation", "-r", "-H", "-p"}

// This can be set to a specific time for testing.
now = time.Now()

// tasks can be added to this for testing.
mainWaitGroup sync.WaitGroup

// This can be changed to true when testing.
panicBail = false

rootCmd = &cobra.Command{
rootCmd = &cobra.Command{
Use: "zfs-cleaner [config file]",
Short: "Tool for destroying ZFS snapshots after predefined retention periods",
RunE: clean,
}

// Can be overridden when running tests.
stdout io.Writer = os.Stdout
stdout io.Writer = os.Stdout
zfsExecutor zfs.Executor
)

func init() {
rootCmd.PersistentFlags().BoolVarP(&dryrun, "dryrun", "n", false, "Do nothing destructive, only print")
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Be more verbose")
rootCmd.PersistentFlags().BoolVarP(&showVersion, "version", "V", false, "Show version and exit")
rootCmd.TraverseChildren = true
zfsExecutor = zfs.NewExecutor()
}

// getList shells out to the configured zfs binary (commandName with
// commandArguments) and returns its raw stdout for later parsing.
func getList() ([]byte, error) {
	out, err := exec.Command(commandName, commandArguments...).Output()
	if err != nil {
		// Expose no partial output on failure — callers get (nil, err).
		return nil, err
	}

	return out, nil
}

func readConf(r *os.File) (*conf.Config, error) {
conf := &conf.Config{}

err := conf.Read(r)
func readConfig(r *os.File) (*conf.Config, error) {
config := &conf.Config{}
err := config.Read(r)
if err != nil {
return nil, fmt.Errorf("failed to parse %s: %s", r.Name(), err.Error())
}

return conf, nil
return config, nil
}

func processAll(now time.Time, conf *conf.Config) ([]zfs.SnapshotList, error) {
list, err := getList()
if err != nil {
return nil, err
}

func processAll(now time.Time, conf *conf.Config, zfsExecutor zfs.Executor) ([]zfs.SnapshotList, error) {
lists := []zfs.SnapshotList{}

for _, plan := range conf.Plans {
for _, path := range plan.Paths {
list, err := zfs.NewSnapshotListFromOutput(list, path)
for _, dataset := range plan.Paths {
list := zfs.SnapshotList{}
list, err := list.NewSnapshotListFromDataset(zfsExecutor, dataset)
if err != nil {
return nil, err
}

list.KeepNamed(plan.Protect)
list.KeepLatest(plan.Latest)

for _, period := range plan.Periods {
start := now.Add(-period.Age)

list.Sieve(start, period.Frequency)
}

lists = append(lists, list)
}
}

return lists, nil
}

func main() {
AddPlanCheckCommand(zfsExecutor)
err := rootCmd.Execute()
if err != nil {
if panicBail {
panic(err.Error())
}

fmt.Fprintf(os.Stderr, "%s\n", err.Error())
os.Exit(1)
}
Expand All @@ -115,72 +87,58 @@ func clean(cmd *cobra.Command, args []string) error {
if showVersion {
printVersion()
}

if len(args) != 1 {
return fmt.Errorf("%s /path/to/config.conf", cmd.Name())
}

configPath := args[0]

confFile, err := os.Open(configPath)
if err != nil {
return fmt.Errorf("failed to open %s: %s", args[0], err.Error())
}
defer confFile.Close()

conf, err := readConf(confFile)
conf, err := readConfig(confFile)
if err != nil {
return err
}

fd := int(confFile.Fd())

err = syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil {
return fmt.Errorf("could not acquire lock on '%s'", confFile.Name())
}

// make sure to unlock :)
defer func() {
// We can ignore errors here, we're exiting anyway.
_ = syscall.Flock(fd, syscall.LOCK_UN)
}()

lists, err := processAll(now, conf)
lists, err := processAll(now, conf, zfsExecutor)
if err != nil {
return err
}

// Start by generating a list of stuff to do.
todos := []todo{}

// Print plan when verbose.
if verbose {
todos = append(todos, newComment("Config: '%s'", configPath))
for _, plan := range conf.Plans {
todos = append(todos, newComment("Plan: %+v", plan))
}
}

for _, list := range lists {
for _, snapshot := range list {
if !snapshot.Keep {
todos = append(todos, newDestroy(snapshot))
todos = append(todos, newDestroy(zfsExecutor, snapshot))
} else {
todos = append(todos, newComment("Keep %s (Age %s)", snapshot.Name, now.Sub(snapshot.Creation)))
}
}
}

// And then do it! :-)
for _, todo := range todos {
err := todo.Do()
if err != nil {
return err
}
}

mainWaitGroup.Wait()

return nil
}
Loading

0 comments on commit d226eed

Please sign in to comment.