Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
112 changes: 92 additions & 20 deletions lib/storage.ts
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,7 @@ export function formatStorageErrorHint(error: unknown, path: string): string {
}

let storageMutex: Promise<void> = Promise.resolve();
let accountFileMutex: Promise<void> = Promise.resolve();

function withStorageLock<T>(fn: () => Promise<T>): Promise<T> {
const previousMutex = storageMutex;
Expand All @@ -119,6 +120,24 @@ function withStorageLock<T>(fn: () => Promise<T>): Promise<T> {
return previousMutex.then(fn).finally(() => releaseLock());
}

function withAccountFileMutex<T>(fn: () => Promise<T>): Promise<T> {
	// Queue this caller behind the current tail of the account-file mutex chain
	// so account-file operations in this process run strictly one at a time.
	const waitFor = accountFileMutex;
	let release!: () => void;
	accountFileMutex = new Promise<void>((resolve) => {
		release = resolve;
	});
	// Run fn once the predecessor settles; always release, even if fn rejects.
	return waitFor.then(fn).finally(() => release());
}

async function withStorageSerializedFileLock<T>(path: string, fn: () => Promise<T>): Promise<T> {
	// Serialize file-lock acquisition to keep save ordering deterministic.
	// Acquisition order: in-process account-file mutex -> on-disk file lock ->
	// in-process storage mutex, so every account-storage mutation path shares
	// the same acquisition sequence and cannot deadlock against another path.
	return withAccountFileMutex(() =>
		withAccountFileLock(path, () => withStorageLock(fn)),
	);
}
Comment on lines +123 to +139
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

lock order comment contradicts actual implementation.

the comment at line 133-135 says "preserve the historical lock order (file lock -> in-process mutex)" but the actual acquisition order is: withAccountFileMutex → withAccountFileLock → withStorageLock, which is memory mutex first, then file lock. this is the opposite of what the comment claims.

if the intent was to reverse lock order (as pr description suggests), update the comment. if the comment is correct, the code is wrong.

-	// Serialize file-lock acquisition to keep save ordering deterministic, then
-	// preserve the historical lock order (file lock -> in-process mutex) so all
-	// account-storage mutation paths share the same acquisition sequence.
+	// Serialize file-lock acquisition to keep save ordering deterministic.
+	// Acquisition order: in-process mutex -> file lock -> storage mutex.
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
function withAccountFileMutex<T>(fn: () => Promise<T>): Promise<T> {
const previousMutex = accountFileMutex;
let releaseLock: () => void;
accountFileMutex = new Promise<void>((resolve) => {
releaseLock = resolve;
});
return previousMutex.then(fn).finally(() => releaseLock());
}
async function withStorageSerializedFileLock<T>(path: string, fn: () => Promise<T>): Promise<T> {
// Serialize file-lock acquisition to keep save ordering deterministic, then
// preserve the historical lock order (file lock -> in-process mutex) so all
// account-storage mutation paths share the same acquisition sequence.
return withAccountFileMutex(() =>
withAccountFileLock(path, () => withStorageLock(fn)),
);
}
function withAccountFileMutex<T>(fn: () => Promise<T>): Promise<T> {
const previousMutex = accountFileMutex;
let releaseLock: () => void;
accountFileMutex = new Promise<void>((resolve) => {
releaseLock = resolve;
});
return previousMutex.then(fn).finally(() => releaseLock());
}
async function withStorageSerializedFileLock<T>(path: string, fn: () => Promise<T>): Promise<T> {
// Serialize file-lock acquisition to keep save ordering deterministic.
// Acquisition order: in-process mutex -> file lock -> storage mutex.
return withAccountFileMutex(() =>
withAccountFileLock(path, () => withStorageLock(fn)),
);
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 123 - 139, The comment claiming "file lock ->
in-process mutex" contradicts the implementation in
withStorageSerializedFileLock (which currently calls withAccountFileMutex →
withAccountFileLock → withStorageLock), so either update the comment to state
the actual acquisition order (in-process mutex -> file lock -> storage lock) or
reorder the calls to match the comment; specifically, either change the comment
near withStorageSerializedFileLock to reflect the real sequence or change the
implementation to call withAccountFileLock(path, () => withAccountFileMutex(()
=> withStorageLock(fn))) so the acquisition becomes file lock -> in-process
mutex -> storage lock (and ensure variable/closure usage still compiles).


type AnyAccountStorage = AccountStorageV1 | AccountStorageV3;

type AccountLike = {
Expand Down Expand Up @@ -318,16 +337,70 @@ function getAccountsLockPath(path: string): string {
return `${path}.lock`;
}

async function releaseStorageLockFallback(lockPath: string): Promise<void> {
	// Best-effort cleanup: remove the lock file if possible, but never let a
	// failure here (permissions, transient IO error) propagate to the caller.
	// `force: true` already tolerates a missing file; the catch absorbs the rest.
	await fs.rm(lockPath, { force: true }).catch(() => undefined);
}
Comment on lines +340 to +346
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

releaseStorageLockFallback swallows all errors silently.

using fs.rm(lockPath, { force: true }) with an empty catch means any error (permission denied, io error) is silently ignored. this is intentional for "best-effort" but makes debugging difficult when lock cleanup actually fails.

consider logging at debug level for observability:

 async function releaseStorageLockFallback(lockPath: string): Promise<void> {
 	try {
 		await fs.rm(lockPath, { force: true });
-	} catch {
-		// Best-effort lock cleanup fallback.
+	} catch (error) {
+		log.debug("best-effort lock cleanup failed", { path: lockPath, error: String(error) });
 	}
 }
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 340 - 346, The catch block in
releaseStorageLockFallback currently swallows all errors from fs.rm; update it
to log a debug-level message including the lockPath and the caught error so
failed cleanup is observable (e.g., use the existing logger or processLogger if
available), while preserving the "best-effort" behavior by not rethrowing;
ensure you reference releaseStorageLockFallback and the fs.rm call so the log
includes both the path and error details.


async function cleanupDeadProcessStorageLock(lockPath: string): Promise<void> {
try {
const raw = await fs.readFile(lockPath, "utf-8");
const parsed = JSON.parse(raw) as { pid?: number; acquiredAt?: number };
const lockPid = Number(parsed?.pid);
const lockAcquiredAt = Number(parsed?.acquiredAt);

if (Number.isFinite(lockPid) && lockPid > 0) {
let isDeadProcess = false;
try {
process.kill(lockPid, 0);
} catch (error) {
const code = (error as NodeJS.ErrnoException).code;
isDeadProcess = code === "ESRCH";
}
Comment on lines +355 to +362
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

add observability for dead-process lock cleanup.

when a stale lock from a dead process is cleaned up (line 365), there's no logging. this conflicts with the ops runbook expectations mentioned in the pr description. operators need visibility into when stale locks are being cleaned.

 			if (isDeadProcess) {
+				log.info("cleaning up stale lock from dead process", { path: lockPath, pid: lockPid });
 				await releaseStorageLockFallback(lockPath);
 				return;
 			}

similarly for age-based cleanup at line 371:

 		if (Number.isFinite(lockAcquiredAt) && Date.now() - lockAcquiredAt > ACCOUNT_STORAGE_LOCK_OPTIONS.staleAfterMs) {
+			log.info("cleaning up stale lock by age", { path: lockPath, acquiredAt: lockAcquiredAt, ageMs: Date.now() - lockAcquiredAt });
 			await releaseStorageLockFallback(lockPath);
 		}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 355 - 362, When detecting and removing a stale
lock (the branch that computes lockPid and isDeadProcess using process.kill) and
the separate age-based cleanup branch, add observability logs using the existing
logger (e.g., processLogger or storage logger used elsewhere in this module)
that record the action and context: include the lock path/name, lockPid, whether
isDeadProcess was true, the lock age (timestamp or computed age), and a concise
reason ("stale: dead PID" or "stale: age threshold exceeded"). Place one log
right before or immediately after the dead-process cleanup path (where
isDeadProcess is true) and another log in the age-based cleanup path to make
both events visible to operators. Ensure the log messages are structured and
include these fields so they match ops runbook expectations.


if (isDeadProcess) {
await releaseStorageLockFallback(lockPath);
return;
}
}

if (Number.isFinite(lockAcquiredAt) && Date.now() - lockAcquiredAt > ACCOUNT_STORAGE_LOCK_OPTIONS.staleAfterMs) {
await releaseStorageLockFallback(lockPath);
}
} catch (error) {
const code = (error as NodeJS.ErrnoException).code;
if (code === "ENOENT") {
return;
}
await releaseStorageLockFallback(lockPath);
}
Comment on lines +373 to +379
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

EBUSY not handled in catch block.

when fs.readFile or JSON.parse fails with EBUSY (common on windows when another process holds the lock file), the code falls through to releaseStorageLockFallback at line 378. this could delete a lock held by another active process.

consider only calling fallback cleanup for parse errors, not for EBUSY:

 	} catch (error) {
 		const code = (error as NodeJS.ErrnoException).code;
 		if (code === "ENOENT") {
 			return;
 		}
+		if (code === "EBUSY") {
+			// another process is actively using this lock
+			return;
+		}
 		await releaseStorageLockFallback(lockPath);
 	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
} catch (error) {
const code = (error as NodeJS.ErrnoException).code;
if (code === "ENOENT") {
return;
}
await releaseStorageLockFallback(lockPath);
}
} catch (error) {
const code = (error as NodeJS.ErrnoException).code;
if (code === "ENOENT") {
return;
}
if (code === "EBUSY") {
// another process is actively using this lock
return;
}
await releaseStorageLockFallback(lockPath);
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 373 - 379, The catch block around the storage
read should not call releaseStorageLockFallback when the error is an EBUSY from
fs.readFile/JSON.parse; update the catch in the function that reads the lock
(the block that currently inspects (error as NodeJS.ErrnoException).code) to
explicitly handle error.code === "ENOENT" (return) and error.code === "EBUSY"
(return or rethrow as appropriate) and only call
releaseStorageLockFallback(lockPath) for other error types (e.g., parse errors).
Reference the existing catch scope that uses (error as
NodeJS.ErrnoException).code and the releaseStorageLockFallback(lockPath) call to
implement this conditional behavior.

}
Comment on lines +347 to +380
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

non-atomic TOCTOU sequence in dead process detection. between reading lock file, checking PID, and removing lock, the legitimate owner could still be using it or another process could recreate it.

additionally, process.kill(lockPid, 0) fails across user boundaries and in some container environments where PIDs are namespaced.

windows filesystem concern: if antivirus locks the file between stat check and unlink, this could thrash or leave stale locks.

Prompt To Fix With AI
This is a comment left during a code review.
Path: lib/storage.ts
Line: 347-380

Comment:
non-atomic TOCTOU sequence in dead process detection. between reading lock file, checking PID, and removing lock, the legitimate owner could still be using it or another process could recreate it.

additionally, `process.kill(lockPid, 0)` fails across user boundaries and in some container environments where PIDs are namespaced.

windows filesystem concern: if antivirus locks the file between stat check and unlink, this could thrash or leave stale locks.

How can I resolve this? If you propose a fix, please make it concise.

Fix in Codex

Comment on lines +348 to +380
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🌐 Web query:

Node.js process.kill(pid, 0) ESRCH EPERM Windows behavior

💡 Result:

On Windows, process.kill(pid, 0) in Node.js is implemented as an existence/health check: it does not send a real signal, but it throws if the target can’t be checked. [1]

What the two common errors mean

  • ESRCH (“no such process”): the PID is not valid / no longer running. In libuv’s Windows implementation, this happens when OpenProcess(...) fails with ERROR_INVALID_PARAMETER, or when the process handle indicates the process is no longer active. [2]

  • EPERM (sometimes surfaced as EACCES): the PID likely exists, but Node/libuv can’t open/query it due to permissions/access restrictions (Windows OpenProcess(...) can fail with access denied; libuv then translates that Win32 error to a permission error). [2][3]

Practical interpretation

So on Windows, for process.kill(pid, 0):

  • returns normally ⇒ process is running and accessible
  • throws ESRCH ⇒ process is not running (or PID invalid)
  • throws EPERM/EACCES ⇒ process may be running, but you don’t have rights to query/terminate it (try elevated privileges or different user/session). [2][3]

Sources: Node.js docs for process.kill(..., 0) [1]; libuv Windows uv_kill / signal 0 and OpenProcess behavior [2]; libuv error translation / meaning of UV_EACCES (“permission denied”) [3].


🏁 Script executed:

# Find tests for cleanupDeadProcessStorageLock
rg -i "cleanupDeadProcessStorageLock" --type ts --type js

# Check test files in the repository
git ls-files test/ lib/ | head -20

Repository: ndycode/codex-multi-auth

Length of output: 650


🏁 Script executed:

# Find test files for storage
fd -e test.ts -e spec.ts | rg -i storage

# Also search for any test files
fd -e test.ts -e spec.ts | head -20

Repository: ndycode/codex-multi-auth

Length of output: 750


🏁 Script executed:

# Check for logging in the storage.ts file and around the function
rg "logger\.|log\(" lib/storage.ts -B 2 -A 2

# Look at broader context around cleanupDeadProcessStorageLock
sed -n '320,400p' lib/storage.ts

Repository: ndycode/codex-multi-auth

Length of output: 2509


🏁 Script executed:

# Search for tests of cleanupDeadProcessStorageLock in test files
rg "cleanupDeadProcessStorageLock" test/

# Also check what the storage test files cover
rg -A 10 -B 2 "process\.kill\|deadProcess" test/storage*.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 50


🏁 Script executed:

# Check logging imports in storage.ts
head -50 lib/storage.ts | rg "import.*log"

Repository: ndycode/codex-multi-auth

Length of output: 111


🏁 Script executed:

# Check for log variable definition in storage.ts
rg "^const log|^let log" lib/storage.ts

# Check entire test/storage.test.ts to see what functions ARE tested
head -100 test/storage.test.ts

Repository: ndycode/codex-multi-auth

Length of output: 3239


add logging to cleanupDeadProcessStorageLock and write regression tests for lock cleanup scenarios.

the process.kill(pid, 0) check with ESRCH actually works on windows — that part is fine. but the function has zero visibility: no logging when skipping dead-process cleanup, no logging when releasing stale locks. this makes debugging lock contention issues in production much harder.

also, this critical lock management function lib/storage.ts:348-380 has no test coverage at all — no vitest regression tests for dead-process detection, stale lock scenarios, or windows edge cases. add tests covering:

  • process exists, lock is fresh (should not release)
  • process dead (ESRCH thrown), lock should be released
  • process exists but permissions denied (EPERM), lock is fresh vs stale (should only release if stale)
  • lock file missing/corrupted json (should fallback)

without logging and tests here, you won't know when lock cleanup fails in the field.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 348 - 380, Add explicit debug/info/error logs
inside cleanupDeadProcessStorageLock to record: when the lock file is read and
parsed (include pid and acquiredAt), when process.kill(pid, 0) indicates the
process is alive (skipping cleanup) or throws ESRCH (releasing lock), when EPERM
or other errors occur (and whether lock is stale), when
releaseStorageLockFallback(lockPath) is invoked, and when ENOENT or JSON parse
errors are encountered; use the existing processLogger (or the module logger)
and include lockPath and relevant metadata in each message. Also add vitest
regression tests for cleanupDeadProcessStorageLock covering: (1) process exists
and lock is fresh — assert no releaseStorageLockFallback call, (2) process dead
(mock process.kill to throw ESRCH) — assert releaseStorageLockFallback called,
(3) process exists but process.kill throws EPERM — assert no release unless lock
is stale (simulate acquiredAt older than
ACCOUNT_STORAGE_LOCK_OPTIONS.staleAfterMs), and (4) missing or corrupted lock
file — assert fallback release or ENOENT path; mock fs.readFile, process.kill,
and releaseStorageLockFallback to verify log calls and behavior.


async function withAccountFileLock<T>(path: string, fn: () => Promise<T>): Promise<T> {
const lockPath = getAccountsLockPath(path);
await cleanupDeadProcessStorageLock(lockPath);
await fs.mkdir(dirname(path), { recursive: true });
const lock = await acquireFileLock(getAccountsLockPath(path), ACCOUNT_STORAGE_LOCK_OPTIONS);
const lock = await acquireFileLock(lockPath, ACCOUNT_STORAGE_LOCK_OPTIONS);
Comment on lines 382 to +386
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

cleanup before acquire creates race window.

calling cleanupDeadProcessStorageLock(lockPath) at line 384 before acquireFileLock at line 386 opens a race: process A cleans up a stale lock, process B also sees the stale lock and cleans up, both then race to acquire. this is likely benign since acquireFileLock handles contention, but the cleanup could delete a lock that was just acquired by another process between cleanup and acquire.

consider moving cleanup inside acquireFileLock or relying solely on removeIfStale which already exists in lib/file-lock.ts:91-93.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 382 - 386, withAccountFileLock currently calls
cleanupDeadProcessStorageLock(lockPath) before calling
acquireFileLock(lockPath,...), which creates a race where two processes can both
remove a stale lock then race to create it; instead remove the external cleanup
call and rely on acquireFileLock to handle stale-lock removal (or move the
cleanup logic into acquireFileLock). Update withAccountFileLock to stop calling
cleanupDeadProcessStorageLock and ensure acquireFileLock (in lib/file-lock.ts)
invokes removeIfStale/remove-or-cleanup atomically so stale locks are handled
inside acquireFileLock rather than externally.

try {
return await fn();
} finally {
await lock.release();
try {
await lock.release();
} catch (error) {
const code = (error as NodeJS.ErrnoException).code;
if (code !== "ENOENT") {
log.warn("Failed to release account storage lock", {
path: lockPath,
error: String(error),
});
}
}
await releaseStorageLockFallback(lockPath);
Comment on lines +393 to +401
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

race condition: releaseStorageLockFallback executes unconditionally after successful lock.release(). between the unlink in release() and this fallback call, another process can acquire the lock. the fallback then removes that process's valid lock file, breaking cross-process serialization.

should only call fallback if lock.release() threw a non-ENOENT error:

Suggested change
const code = (error as NodeJS.ErrnoException).code;
if (code !== "ENOENT") {
log.warn("Failed to release account storage lock", {
path: lockPath,
error: String(error),
});
}
}
await releaseStorageLockFallback(lockPath);
try {
await lock.release();
} catch (error) {
const code = (error as NodeJS.ErrnoException).code;
if (code !== "ENOENT") {
log.warn("Failed to release account storage lock", {
path: lockPath,
error: String(error),
});
await releaseStorageLockFallback(lockPath);
}
}
}
Prompt To Fix With AI
This is a comment left during a code review.
Path: lib/storage.ts
Line: 393-401

Comment:
race condition: `releaseStorageLockFallback` executes unconditionally after successful `lock.release()`. between the unlink in `release()` and this fallback call, another process can acquire the lock. the fallback then removes that process's valid lock file, breaking cross-process serialization.

should only call fallback if `lock.release()` threw a non-ENOENT error:

```suggestion
		try {
			await lock.release();
		} catch (error) {
			const code = (error as NodeJS.ErrnoException).code;
			if (code !== "ENOENT") {
				log.warn("Failed to release account storage lock", {
					path: lockPath,
					error: String(error),
				});
				await releaseStorageLockFallback(lockPath);
			}
		}
	}
```

How can I resolve this? If you propose a fix, please make it concise.

Fix in Codex

Comment on lines +390 to +401
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

double cleanup: both lock.release() and releaseStorageLockFallback always run.

the finally block at lines 390-401 calls lock.release() then unconditionally calls releaseStorageLockFallback(lockPath) at line 401. if lock.release() succeeds, the file is already deleted, so the fallback is redundant. if it fails and logs a warning, the fallback might mask the actual error by forcibly deleting the lock.

this seems intentional for robustness but could hide issues. consider only calling fallback when release actually fails:

 	} finally {
+		let releaseSucceeded = false;
 		try {
 			await lock.release();
+			releaseSucceeded = true;
 		} catch (error) {
 			const code = (error as NodeJS.ErrnoException).code;
 			if (code !== "ENOENT") {
 				log.warn("Failed to release account storage lock", {
 					path: lockPath,
 					error: String(error),
 				});
 			}
 		}
-		await releaseStorageLockFallback(lockPath);
+		if (!releaseSucceeded) {
+			await releaseStorageLockFallback(lockPath);
+		}
 	}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/storage.ts` around lines 390 - 401, The current finally always calls
releaseStorageLockFallback(lockPath) even when lock.release() succeeds; change
the flow so the fallback is only invoked when lock.release() throws or otherwise
fails: try calling await lock.release() and on catch (error) check the error
code like in the existing block, log the warning, then call await
releaseStorageLockFallback(lockPath) from inside that catch (or when a boolean
“failedToRelease” flag is set) so the fallback only runs on failure; update
references to lock.release, releaseStorageLockFallback, lockPath and preserve
the ENOENT handling/logging behavior.

}
}

async function copyFileWithRetry(
sourcePath: string,
destinationPath: string,
Expand Down Expand Up @@ -1242,14 +1315,13 @@ export async function withAccountStorageTransaction<T>(
) => Promise<T>,
): Promise<T> {
const path = getStoragePath();
return withAccountFileLock(path, () =>
withStorageLock(async () => {
const current = await loadAccountsInternal(saveAccountsUnlocked);
return handler(current, saveAccountsUnlocked);
}),
);
return withStorageSerializedFileLock(path, async () => {
const current = await loadAccountsInternal(saveAccountsUnlocked);
return handler(current, saveAccountsUnlocked);
});
}


/**
* Persists account storage to disk using atomic write (temp file + rename).
* Creates the Codex multi-auth storage directory if it doesn't exist.
Expand All @@ -1259,23 +1331,21 @@ export async function withAccountStorageTransaction<T>(
*/
export async function saveAccounts(storage: AccountStorageV3): Promise<void> {
	// Persists account storage to disk under the full serialized lock sequence
	// (in-process mutex -> file lock -> storage mutex) so concurrent savers in
	// this process or other processes cannot interleave partial writes.
	const path = getStoragePath();
	// Note: the old direct withAccountFileLock/withStorageLock nesting was an
	// unreachable duplicate left over from a merge; the serialized helper is
	// the single supported mutation path.
	return withStorageSerializedFileLock(path, async () => {
		await saveAccountsUnlocked(storage);
	});
}


/**
* Deletes the account storage file from disk.
* Silently ignores if file doesn't exist.
*/
export async function clearAccounts(): Promise<void> {
const path = getStoragePath();
return withAccountFileLock(path, () =>
withStorageLock(async () => {
const walPath = getAccountsWalPath(path);
const backupPaths = getAccountsBackupRecoveryCandidates(path);
return withStorageSerializedFileLock(path, async () => {
const walPath = getAccountsWalPath(path);
const backupPaths = getAccountsBackupRecoveryCandidates(path);
const clearPath = async (targetPath: string): Promise<void> => {
try {
await fs.unlink(targetPath);
Expand All @@ -1295,10 +1365,10 @@ export async function clearAccounts(): Promise<void> {
} catch {
// Individual path cleanup is already best-effort with per-artifact logging.
}
}),
);
});
}


function normalizeFlaggedStorage(data: unknown): FlaggedAccountStorageV1 {
if (!isRecord(data) || data.version !== 1 || !Array.isArray(data.accounts)) {
return { version: 1, accounts: [] };
Expand Down Expand Up @@ -1597,3 +1667,5 @@ export async function rotateStoredSecretEncryption(): Promise<{
flaggedAccounts: flaggedCount,
};
}