Add compaction docs, update website, add compaction switch (#482)
* Update yarn

* Add CompactionForceDelete option

* Add docs

* update comment
badrishc committed Jun 24, 2024
1 parent ab7d54e commit ccc078e
Showing 10 changed files with 1,680 additions and 705 deletions.
@@ -130,10 +130,10 @@ public bool TryReplicateFromPrimary(out ReadOnlySpan<byte> errorMessage, bool ba
private async Task<string> InitiateReplicaSync()
{
// Send request to primary
// primary will initiate background task and start sending checkpoint data
// Primary will initiate background task and start sending checkpoint data
//
// Replica waits for retrieval to complete before moving forward to recovery
// retrieval completion coordinated by remoteCheckpointRetrievalCompleted
// Retrieval completion coordinated by remoteCheckpointRetrievalCompleted
var current = clusterProvider.clusterManager.CurrentConfig;
var (address, port) = current.GetLocalNodePrimaryAddress();
GarnetClientSession gcs = null;
15 changes: 14 additions & 1 deletion libs/host/Configuration/Options.cs
@@ -198,9 +198,13 @@ internal sealed class Options
[Option("compaction-freq", Required = false, HelpText = "Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead)")]
public int CompactionFrequencySecs { get; set; }

[Option("compaction-type", Required = false, HelpText = "Hybrid log compaction type. Value options: None - No compaction, Shift - shift begin address without compaction (data loss), ShiftForced - shift begin address without compaction (data loss). Immediately deletes files - do not use if you plan to recover after failure, Scan - scan old pages and move live records to tail (no data loss - take a checkpoint to actually delete the older data files from disk), Lookup - Lookup each record in compaction range, for record liveness checking using hash chain (no data loss - take a checkpoint to actually delete the older data files from disk)")]
[Option("compaction-type", Required = false, HelpText = "Hybrid log compaction type. Value options: None - no compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss)")]
public LogCompactionType CompactionType { get; set; }

[OptionValidation]
[Option("compaction-force-delete", Required = false, HelpText = "Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied. If false, take a checkpoint to actually delete the older data files from disk.")]
public bool? CompactionForceDelete { get; set; }

[IntRangeValidation(0, int.MaxValue)]
[Option("compaction-max-segments", Required = false, HelpText = "Number of log segments created on disk before compaction triggers.")]
public int CompactionMaxSegments { get; set; }
@@ -529,6 +533,14 @@ public GarnetServerOptions GetServerOptions(ILogger logger = null)
throw new Exception("Revivification cannot specify RevivifiableFraction without specifying bins.");
}

// For backwards compatibility
if (CompactionType == LogCompactionType.ShiftForced)
{
logger?.LogWarning("Compaction type ShiftForced is deprecated. Use Shift instead along with CompactionForceDelete.");
CompactionType = LogCompactionType.Shift;
CompactionForceDelete = true;
}

return new GarnetServerOptions(logger)
{
Port = Port,
@@ -567,6 +579,7 @@ public GarnetServerOptions GetServerOptions(ILogger logger = null)
AofSizeLimit = AofSizeLimit,
CompactionFrequencySecs = CompactionFrequencySecs,
CompactionType = CompactionType,
CompactionForceDelete = CompactionForceDelete.GetValueOrDefault(),
CompactionMaxSegments = CompactionMaxSegments,
ObjectStoreCompactionMaxSegments = ObjectStoreCompactionMaxSegments,
GossipSamplePercent = GossipSamplePercent,
13 changes: 8 additions & 5 deletions libs/host/defaults.conf
@@ -136,13 +136,16 @@
"CompactionFrequencySecs" : 0,

/* Hybrid log compaction type. Value options: */
/* None - No compaction */
/* Shift - shift begin address without compaction (data loss) */
/* ShiftForced - shift begin address without compaction (data loss). Immediately deletes files - do not use if you plan to recover after failure */
/* Scan - scan old pages and move live records to tail (no data loss - take a checkpoint to actually delete the older data files from disk) */
/* Lookup - Lookup each record in compaction range, for record liveness checking using hash chain (no data loss - take a checkpoint to actually delete the older data files from disk) */
/* None - no compaction */
/* Shift - shift begin address without compaction (data loss) */
/* Scan - scan old pages and move live records to tail (no data loss) */
/* Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss) */
"CompactionType" : "None",

/* Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied. */
/* If false, take a checkpoint to actually delete the older data files from disk. */
"CompactionForceDelete": false,

/* Number of log segments created on disk before compaction triggers. */
"CompactionMaxSegments" : 32,

14 changes: 12 additions & 2 deletions libs/server/Servers/GarnetServerOptions.cs
@@ -120,9 +120,19 @@ public class GarnetServerOptions : ServerOptions
public int CompactionFrequencySecs = 0;

/// <summary>
/// Hybrid log compaction type. Shift = shift begin address without compaction (data loss), Scan = scan old pages and move live records to tail (no data loss - take a checkpoint to actually delete the older data files from disk).
/// Hybrid log compaction type.
/// None - no compaction.
/// Shift - shift begin address without compaction (data loss).
/// Scan - scan old pages and move live records to tail (no data loss).
/// Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss).
/// </summary>
public LogCompactionType CompactionType = LogCompactionType.Shift;
public LogCompactionType CompactionType = LogCompactionType.None;

/// <summary>
/// Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied.
/// If false, take a checkpoint to actually delete the older data files from disk.
/// </summary>
public bool CompactionForceDelete = false;

/// <summary>
/// Number of log segments created on disk before compaction triggers.
69 changes: 47 additions & 22 deletions libs/server/StoreWrapper.cs
@@ -342,11 +342,11 @@ async Task CompactionTask(int compactionFrequencySecs, CancellationToken token =
while (true)
{
if (token.IsCancellationRequested) return;
DoCompaction(serverOptions.CompactionMaxSegments, serverOptions.ObjectStoreCompactionMaxSegments, 1, serverOptions.CompactionType);
if (serverOptions.CompactionType != LogCompactionType.ShiftForced)
DoCompaction(serverOptions.CompactionMaxSegments, serverOptions.ObjectStoreCompactionMaxSegments, 1, serverOptions.CompactionType, serverOptions.CompactionForceDelete);
if (!serverOptions.CompactionForceDelete)
logger?.LogInformation("NOTE: Take a checkpoint (SAVE/BGSAVE) in order to actually delete the older data segments (files) from disk");
else
logger?.LogInformation("NOTE: ShiftForced compaction type - make sure checkpoint/recovery is not being used");
logger?.LogInformation("NOTE: Compaction will delete files, make sure checkpoint/recovery is not being used");

await Task.Delay(compactionFrequencySecs * 1000, token);
}
@@ -361,15 +361,8 @@ void DoCompaction()
{
// Periodic compaction -> no need to compact before checkpointing
if (serverOptions.CompactionFrequencySecs > 0) return;
if (serverOptions.CompactionType == LogCompactionType.ShiftForced)
{
string error = "Cannot use ShiftForced with checkpointing";
logger.LogError(error);
Debug.Fail(error);
return;
}

DoCompaction(serverOptions.CompactionMaxSegments, serverOptions.ObjectStoreCompactionMaxSegments, 1, serverOptions.CompactionType);
DoCompaction(serverOptions.CompactionMaxSegments, serverOptions.ObjectStoreCompactionMaxSegments, 1, serverOptions.CompactionType, serverOptions.CompactionForceDelete);
}

/// <summary>
@@ -388,7 +381,7 @@ public void EnqueueCommit(bool isMainStore, long version)
appendOnlyFile?.Enqueue(header, out _);
}

void DoCompaction(int mainStoreMaxSegments, int objectStoreMaxSegments, int numSegmentsToCompact, LogCompactionType compactionType)
void DoCompaction(int mainStoreMaxSegments, int objectStoreMaxSegments, int numSegmentsToCompact, LogCompactionType compactionType, bool compactionForceDelete)
{
if (compactionType == LogCompactionType.None) return;

@@ -404,19 +397,25 @@ void DoCompaction(int mainStoreMaxSegments, int objectStoreMaxSegments, int numS
switch (compactionType)
{
case LogCompactionType.Shift:
store.Log.ShiftBeginAddress(untilAddress, true, false);
break;

case LogCompactionType.ShiftForced:
store.Log.ShiftBeginAddress(untilAddress, true, true);
store.Log.ShiftBeginAddress(untilAddress, true, compactionForceDelete);
break;

case LogCompactionType.Scan:
store.Log.Compact<SpanByte, Empty, Empty, SpanByteFunctions<Empty, Empty>>(new SpanByteFunctions<Empty, Empty>(), untilAddress, CompactionType.Scan);
if (compactionForceDelete)
{
CompactionCommitAof();
store.Log.Truncate();
}
break;

case LogCompactionType.Lookup:
store.Log.Compact<SpanByte, Empty, Empty, SpanByteFunctions<Empty, Empty>>(new SpanByteFunctions<Empty, Empty>(), untilAddress, CompactionType.Lookup);
if (compactionForceDelete)
{
CompactionCommitAof();
store.Log.Truncate();
}
break;

default:
@@ -440,21 +439,27 @@ void DoCompaction(int mainStoreMaxSegments, int objectStoreMaxSegments, int numS
switch (compactionType)
{
case LogCompactionType.Shift:
objectStore.Log.ShiftBeginAddress(untilAddress, true, false);
break;

case LogCompactionType.ShiftForced:
objectStore.Log.ShiftBeginAddress(untilAddress, true, true);
objectStore.Log.ShiftBeginAddress(untilAddress, compactionForceDelete);
break;

case LogCompactionType.Scan:
objectStore.Log.Compact<IGarnetObject, IGarnetObject, Empty, SimpleSessionFunctions<byte[], IGarnetObject, Empty>>(
new SimpleSessionFunctions<byte[], IGarnetObject, Empty>(), untilAddress, CompactionType.Scan);
if (compactionForceDelete)
{
CompactionCommitAof();
objectStore.Log.Truncate();
}
break;

case LogCompactionType.Lookup:
objectStore.Log.Compact<IGarnetObject, IGarnetObject, Empty, SimpleSessionFunctions<byte[], IGarnetObject, Empty>>(
new SimpleSessionFunctions<byte[], IGarnetObject, Empty>(), untilAddress, CompactionType.Lookup);
if (compactionForceDelete)
{
CompactionCommitAof();
objectStore.Log.Truncate();
}
break;

default:
@@ -465,6 +470,26 @@ void DoCompaction(int mainStoreMaxSegments, int objectStoreMaxSegments, int numS
}
}

void CompactionCommitAof()
{
// If we are the primary, we commit the AOF.
// If we are the replica, we commit the AOF only if fast commit is disabled
// because we do not want to clobber AOF addresses.
// TODO: replica should instead wait until the next AOF commit is done via primary
if (serverOptions.EnableAOF)
{
if (serverOptions.EnableCluster && clusterProvider.IsReplica())
{
if (!serverOptions.EnableFastCommit)
appendOnlyFile?.CommitAsync().ConfigureAwait(false).GetAwaiter().GetResult();
}
else
{
appendOnlyFile?.CommitAsync().ConfigureAwait(false).GetAwaiter().GetResult();
}
}
}

internal void Start()
{
monitor?.Start();
38 changes: 38 additions & 0 deletions website/docs/getting-started/compaction.md
@@ -0,0 +1,38 @@
---
id: compaction
sidebar_label: Compaction
title: Log Compaction
---

When Garnet is configured to run with storage using `EnableStorageTier` or `--storage-tier`, data that does not fit in memory will spill to disk storage.
Data on disk is split into segments, with one physical file per segment. The size of each segment is configured using `SegmentSize` or `--segment` for
the main store, and `ObjectStoreSegmentSize` or `--obj-segment` for the object store.
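
For reference, a minimal sketch of the relevant storage settings in the JSON configuration format used by `defaults.conf` (the segment sizes below are illustrative values, not recommendations):

```
/* Enable tiering of records (hybrid log) to storage */
"EnableStorageTier" : true,

/* Size of each main store log segment, i.e., one file per segment */
"SegmentSize" : "1g",

/* Size of each object store log segment, i.e., one file per segment */
"ObjectStoreSegmentSize" : "32m",
```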

File segments continue to get created and added over time, so we need a way to delete older segments. This is where compaction comes in.

## Triggering Compaction

You can configure `CompactionFrequencySecs` or `--compaction-freq`, which creates a background task that wakes up at that interval and attempts compaction. If the number
of segments on disk exceeds `CompactionMaxSegments` or `--compaction-max-segments`, compaction runs using the specified strategy so that we end up with
at most `CompactionMaxSegments` active segments. The oldest segments are the candidates for compaction. For the object store, the corresponding
switch is `ObjectStoreCompactionMaxSegments` or `--obj-compaction-max-segments`.
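
As a sketch, enabling periodic background compaction might look like the following configuration excerpt (the frequency shown is illustrative):

```
/* Background hybrid log compaction frequency in seconds. 0 = disabled */
"CompactionFrequencySecs" : 600,

/* Number of log segments created on disk before compaction triggers */
"CompactionMaxSegments" : 32,

/* Number of object store log segments created on disk before compaction triggers */
"ObjectStoreCompactionMaxSegments" : 32,
```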


## Compaction Strategy

The candidate segments for compaction are processed according to a strategy, specified via the
`CompactionType` or `--compaction-type` switch. Available options are:
* None: No compaction is performed.
* Shift: The inactive segments are simply marked as ready for deletion.
* Scan: The entire log is scanned to check which records in the candidate segments to be compacted are "live", and these live records are copied to the tail of the log (in memory).
* Lookup: For every record in the candidate segments to be compacted, we perform a point lookup (using the hash chain) to check if it is live. As before, the live records are copied to the tail of the log (in memory).

## Segment Deletion

After the compaction strategy is applied on the candidate segments, they are inactive and eligible for deletion. However, the inactive segments are not
deleted from disk immediately by default, since doing so can cause data loss if the server crashes before the next checkpoint is taken and the AOF is disabled.
Instead, the next checkpoint will automatically cause the deletion of the inactive segments.

If you are not taking checkpoints and want to force the physical deletion of inactive segments immediately after the compaction strategy is applied, you can specify
the `CompactionForceDelete` or `--compaction-force-delete` switch. Note that this option can cause data loss when recovering to the previous
checkpoint, if the AOF is disabled.
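
As a hedged example, pairing a non-destructive strategy with forced deletion might look like the excerpt below; this is only advisable if you can tolerate losing the compacted range when recovering to an older checkpoint, or if the AOF is enabled:

```
/* Move live records to the tail, then immediately delete the inactive segments */
"CompactionType" : "Scan",
"CompactionForceDelete" : true,
```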
3 changes: 2 additions & 1 deletion website/docs/getting-started/configuration.md
@@ -112,7 +112,8 @@ For all available command line settings, run `GarnetServer.exe -h` or `GarnetSer
| **WaitForCommit** | ```--aof-commit-wait``` | ```bool``` | | Wait for AOF to flush the commit before returning results to client. Warning: will greatly increase operation latency. |
| **AofSizeLimit** | ```--aof-size-limit``` | ```string``` | Memory size | Maximum size of AOF (rounds down to power of 2) after which unsafe truncation will be applied. Left empty AOF will grow without bound unless a checkpoint is taken |
| **CompactionFrequencySecs** | ```--compaction-freq``` | ```int``` | Integer in range:<br/>[0, MaxValue] | Background hybrid log compaction frequency in seconds. 0 = disabled (compaction performed before checkpointing instead) |
| **CompactionType** | ```--compaction-type``` | ```LogCompactionType``` | None, Shift, ShiftForced, Scan, Lookup | Hybrid log compaction type. Value options: None - No compaction, Shift - shift begin address without compaction (data loss), ShiftForced - shift begin address without compaction (data loss). Immediately deletes files - do not use if you plan to recover after failure, Scan - scan old pages and move live records to tail (no data loss - take a checkpoint to actually delete the older data files from disk), Lookup - Lookup each record in compaction range, for record liveness checking using hash chain (no data loss - take a checkpoint to actually delete the older data files from disk) |
| **CompactionType** | ```--compaction-type``` | ```LogCompactionType``` | None, Shift, Scan, Lookup | Hybrid log compaction type. Value options: None - No compaction, Shift - shift begin address without compaction (data loss), Scan - scan old pages and move live records to tail (no data loss), Lookup - lookup each record in compaction range, for record liveness checking using hash chain (no data loss) |
| **CompactionForceDelete** | ```--compaction-force-delete``` | ```bool``` | | Forcefully delete the inactive segments immediately after the compaction strategy (type) is applied. If false, take a checkpoint to actually delete the older data files from disk. |
| **CompactionMaxSegments** | ```--compaction-max-segments``` | ```int``` | Integer in range:<br/>[0, MaxValue] | Number of log segments created on disk before compaction triggers. |
| **ObjectStoreCompactionMaxSegments** | ```--obj-compaction-max-segments``` | ```int``` | Integer in range:<br/>[0, MaxValue] | Number of object store log segments created on disk before compaction triggers. |
| **GossipSamplePercent** | ```--gossip-sp``` | ```int``` | | Percent of cluster nodes to gossip with at each gossip iteration. |
16 changes: 8 additions & 8 deletions website/package.json
@@ -14,20 +14,20 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.0.1",
"@docusaurus/preset-classic": "^3.0.1",
"@docusaurus/theme-mermaid": "^3.0.1",
"@docusaurus/core": "^3.4.0",
"@docusaurus/preset-classic": "^3.4.0",
"@docusaurus/theme-mermaid": "^3.4.0",
"@mdx-js/react": "^3.0.0",
"clsx": "^1.2.1",
"clsx": "^2.1.1",
"docusaurus-plugin-clarity": "^2.1.0",
"docusaurus-theme-github-codeblock": "^2.0.2",
"prism-react-renderer": "^2.1.0",
"react": "^18.0.0",
"react-dom": "^18.0.0"
"react": "^18.3.1",
"react-dom": "^18.3.1"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "3.0.0",
"@docusaurus/types": "3.0.0"
"@docusaurus/module-type-aliases": "3.4.0",
"@docusaurus/types": "3.4.0"
},
"browserslist": {
"production": [