From cd2292ba3aefdf97fba0a39db8657a613512b296 Mon Sep 17 00:00:00 2001
From: Andy Zhou
Date: Sat, 11 May 2019 21:14:50 -0700
Subject: [PATCH] Automatically reclaim k3s container volumes after a cluster
 is deleted

Thanks @zeerorg for the suggestion about the possible container volume leak.

Without this fix, the k3s container volumes are left in the reclaimable
state. This experiment confirms it:

$ docker system df
TYPE                TOTAL               ACTIVE              SIZE                RECLAIMABLE
Images              14                  0                   2.131GB             2.131GB (100%)
Containers          0                   0                   0B                  0B
Local Volumes       0                   0                   0B                  0B
Build Cache         0                   0                   0B                  0B

$ bin/k3d create; sleep 5; bin/k3d delete

$ docker system df
TYPE                TOTAL               ACTIVE              SIZE                RECLAIMABLE
Images              14                  0                   2.131GB             2.131GB (100%)
Containers          0                   0                   0B                  0B
Local Volumes       3                   0                   2.366MB             2.366MB (100%)
Build Cache         0                   0                   0B                  0B

In this case, 2.366MB is left in the reclaimable state. The number can be
larger for a larger cluster.

With this fix, the output of "docker system df" no longer shows any
reclaimable volumes:

TYPE                TOTAL               ACTIVE              SIZE                RECLAIMABLE
Images              14                  0                   2.131GB             2.131GB (100%)
Containers          0                   0                   0B                  0B
Local Volumes       0                   0                   0B                  0B
Build Cache         0                   0                   0B                  0B
---
 cli/container.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/cli/container.go b/cli/container.go
index b780508e..1e4cd2aa 100644
--- a/cli/container.go
+++ b/cli/container.go
@@ -196,7 +196,13 @@ func removeContainer(ID string) error {
 	if err != nil {
 		return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
 	}
-	if err := docker.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{Force: true}); err != nil {
+
+	options := types.ContainerRemoveOptions{
+		RemoveVolumes: true,
+		Force:         true,
+	}
+
+	if err := docker.ContainerRemove(ctx, ID, options); err != nil {
 		return fmt.Errorf("FAILURE: couldn't delete container [%s] -> %+v", ID, err)
 	}
 	return nil
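For reference, a self-contained sketch of how the patched removeContainer
could look in full. The client construction via client.NewClientWithOpts,
the package/import layout, and the container ID in main are assumptions for
illustration only, not the exact k3d code:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// removeContainer force-removes a container together with its anonymous
// volumes, mirroring the patched cli/container.go. RemoveVolumes is what
// lets Docker reclaim the k3s volumes when a cluster is deleted.
func removeContainer(ID string) error {
	ctx := context.Background()

	// Assumption: the real k3d code creates its client elsewhere; FromEnv is
	// used here only to keep the sketch self-contained.
	docker, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err)
	}

	options := types.ContainerRemoveOptions{
		RemoveVolumes: true, // also delete the container's anonymous volumes
		Force:         true, // remove even if the container is still running
	}

	if err := docker.ContainerRemove(ctx, ID, options); err != nil {
		return fmt.Errorf("FAILURE: couldn't delete container [%s] -> %+v", ID, err)
	}
	return nil
}

func main() {
	// Hypothetical container name, for illustration only.
	if err := removeContainer("k3d-k3s-default-server"); err != nil {
		log.Fatal(err)
	}
}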