From 26c1a711eb2af3244591cdf7ad63c9855d944276 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Fri, 6 Mar 2026 18:49:06 +0100 Subject: [PATCH 01/16] Add retries, workers, and other parameters to EndToEnd MCP tool --- developer-cli/Commands/McpCommand.cs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/developer-cli/Commands/McpCommand.cs b/developer-cli/Commands/McpCommand.cs index 3482c359c..83cbedf9e 100644 --- a/developer-cli/Commands/McpCommand.cs +++ b/developer-cli/Commands/McpCommand.cs @@ -141,8 +141,18 @@ public static async Task EndToEnd( [Description("Search terms")] string[]? searchTerms = null, [Description("Browser")] string browser = "all", [Description("Smoke only")] bool smoke = false, - [Description("Wait for Aspire to start (retries server check up to 50 seconds)")] - bool waitForAspire = false) + [Description("Wait for Aspire to start (retries server check up to 2 minutes)")] + bool waitForAspire = false, + [Description("Maximum retry count for flaky tests, zero for no retries")] + int? retries = null, + [Description("Stop after the first failure")] + bool stopOnFirstFailure = false, + [Description("Number of times to repeat each test")] + int? repeatEach = null, + [Description("Only re-run the failures")] + bool lastFailed = false, + [Description("Number of worker processes to use for running tests")] + int? 
workers = null) { var args = new List { "e2e", "--quiet" }; if (searchTerms is { Length: > 0 }) args.AddRange(searchTerms); @@ -154,6 +164,11 @@ public static async Task EndToEnd( if (smoke) args.Add("--smoke"); if (waitForAspire) args.Add("--wait-for-aspire"); + if (retries.HasValue) args.Add($"--retries={retries.Value}"); + if (stopOnFirstFailure) args.Add("--stop-on-first-failure"); + if (repeatEach.HasValue) args.Add($"--repeat-each={repeatEach.Value}"); + if (lastFailed) args.Add("--last-failed"); + if (workers.HasValue) args.Add($"--workers={workers.Value}"); return await ExecuteCliCommandAsync(args.ToArray()); } From aaf0b7346acb5a966cf56182bf9f7cec52a4e2d8 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Tue, 31 Mar 2026 16:46:13 +0200 Subject: [PATCH 02/16] Make wait-for-Aspire the default for end-to-end tests --- developer-cli/Commands/End2EndCommand.cs | 36 ++++++++++++++++-------- developer-cli/Commands/McpCommand.cs | 6 ++-- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/developer-cli/Commands/End2EndCommand.cs b/developer-cli/Commands/End2EndCommand.cs index e3a392046..ebd84da5c 100644 --- a/developer-cli/Commands/End2EndCommand.cs +++ b/developer-cli/Commands/End2EndCommand.cs @@ -34,7 +34,7 @@ public class End2EndCommand : Command var stopOnFirstFailureOption = new Option("--stop-on-first-failure", "-x") { Description = "Stop after the first failure" }; var uiOption = new Option("--ui") { Description = "Run tests in interactive UI mode with time-travel debugging" }; var workersOption = new Option("--workers", "-w") { Description = "Number of worker processes to use for running tests" }; - var waitForAspireOption = new Option("--wait-for-aspire") { Description = "Wait for Aspire to start (retries server check up to 50 seconds)" }; + var noWaitForAspireOption = new Option("--no-wait-for-aspire") { Description = "Skip waiting for Aspire to start (by default, retries server check up to 3 minutes)" }; 
Arguments.Add(searchTermsArgument); Options.Add(browserOption); @@ -55,7 +55,7 @@ public class End2EndCommand : Command Options.Add(stopOnFirstFailureOption); Options.Add(uiOption); Options.Add(workersOption); - Options.Add(waitForAspireOption); + Options.Add(noWaitForAspireOption); // SetHandler only supports up to 8 parameters, so we use SetAction for this complex command SetAction(parseResult => Execute( @@ -78,7 +78,7 @@ public class End2EndCommand : Command parseResult.GetValue(stopOnFirstFailureOption), parseResult.GetValue(uiOption), parseResult.GetValue(workersOption), - parseResult.GetValue(waitForAspireOption) + parseResult.GetValue(noWaitForAspireOption) ) ); } @@ -105,7 +105,7 @@ private static void Execute( bool stopOnFirstFailure, bool ui, int? workers, - bool waitForAspire) + bool noWaitForAspire) { Prerequisite.Ensure(Prerequisite.Node); @@ -117,7 +117,7 @@ private static void Execute( } AnsiConsole.MarkupLine("[blue]Checking server availability...[/]"); - CheckWebsiteAccessibility(waitForAspire); + CheckWebsiteAccessibility(!noWaitForAspire); PlaywrightInstaller.EnsurePlaywrightBrowsers(); @@ -347,14 +347,19 @@ private static bool RunTestsForSystem( private static void CheckWebsiteAccessibility(bool waitForAspire) { - var maxRetries = waitForAspire ? 10 : 1; + var maxAttempts = waitForAspire ? 
36 : 1; // 36 * 5s = 3 minutes var retryDelaySeconds = 5; - for (var attempt = 1; attempt <= maxRetries; attempt++) + for (var attempt = 1; attempt <= maxAttempts; attempt++) { try { - using var httpClient = new HttpClient(); + using var httpClient = new HttpClient(new HttpClientHandler + { + AllowAutoRedirect = true, + ServerCertificateCustomValidationCallback = (_, _, _, _) => true + } + ); httpClient.Timeout = TimeSpan.FromSeconds(5); var response = httpClient.Send(new HttpRequestMessage(HttpMethod.Head, BaseUrl)); @@ -364,13 +369,20 @@ private static void CheckWebsiteAccessibility(bool waitForAspire) AnsiConsole.MarkupLine($"[green]Server is accessible at {BaseUrl}[/]"); return; } + + if (attempt < maxAttempts) + { + AnsiConsole.MarkupLine($"[yellow]Server returned HTTP {(int)response.StatusCode} ({response.StatusCode}), retrying in {retryDelaySeconds}s... (attempt {attempt}/{maxAttempts})[/]"); + Thread.Sleep(TimeSpan.FromSeconds(retryDelaySeconds)); + } } - catch + catch (Exception exception) { - // Retry if waiting for Aspire and not the last attempt - if (waitForAspire && attempt < maxRetries) + var reason = exception.InnerException?.Message ?? exception.Message; + + if (attempt < maxAttempts) { - AnsiConsole.MarkupLine($"[yellow]Server not ready yet, retrying in {retryDelaySeconds} seconds... (attempt {attempt}/{maxRetries})[/]"); + AnsiConsole.MarkupLine($"[yellow]Server not ready ({reason}), retrying in {retryDelaySeconds}s... (attempt {attempt}/{maxAttempts})[/]"); Thread.Sleep(TimeSpan.FromSeconds(retryDelaySeconds)); } } diff --git a/developer-cli/Commands/McpCommand.cs b/developer-cli/Commands/McpCommand.cs index 83cbedf9e..1234993e6 100644 --- a/developer-cli/Commands/McpCommand.cs +++ b/developer-cli/Commands/McpCommand.cs @@ -141,8 +141,8 @@ public static async Task EndToEnd( [Description("Search terms")] string[]? 
searchTerms = null, [Description("Browser")] string browser = "all", [Description("Smoke only")] bool smoke = false, - [Description("Wait for Aspire to start (retries server check up to 2 minutes)")] - bool waitForAspire = false, + [Description("Skip waiting for Aspire to start (by default, retries server check up to 3 minutes)")] + bool noWaitForAspire = false, [Description("Maximum retry count for flaky tests, zero for no retries")] int? retries = null, [Description("Stop after the first failure")] @@ -163,7 +163,7 @@ public static async Task EndToEnd( } if (smoke) args.Add("--smoke"); - if (waitForAspire) args.Add("--wait-for-aspire"); + if (noWaitForAspire) args.Add("--no-wait-for-aspire"); if (retries.HasValue) args.Add($"--retries={retries.Value}"); if (stopOnFirstFailure) args.Add("--stop-on-first-failure"); if (repeatEach.HasValue) args.Add($"--repeat-each={repeatEach.Value}"); From 28b0f3dc4c40550005a0f9aa4068809d6dbdd107 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 12:39:27 +0200 Subject: [PATCH 03/16] Default end-to-end tests to chromium browser --- developer-cli/Commands/End2EndCommand.cs | 2 +- developer-cli/Commands/McpCommand.cs | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/developer-cli/Commands/End2EndCommand.cs b/developer-cli/Commands/End2EndCommand.cs index ebd84da5c..fda5d4634 100644 --- a/developer-cli/Commands/End2EndCommand.cs +++ b/developer-cli/Commands/End2EndCommand.cs @@ -16,7 +16,7 @@ public class End2EndCommand : Command public End2EndCommand() : base("e2e", "Run end-to-end tests using Playwright") { var searchTermsArgument = new Argument("search-terms") { Description = "Search terms for test filtering (e.g., 'user management', '@smoke', 'smoke', 'comprehensive', 'user-management-flows.spec.ts')", DefaultValueFactory = _ => [] }; - var browserOption = new Option("--browser", "-b") { Description = "Browser to use for tests (chromium, firefox, webkit, safari, all)", 
DefaultValueFactory = _ => "all" }; + var browserOption = new Option("--browser", "-b") { Description = "Browser to use for tests (chromium, firefox, webkit, safari, all). Defaults to chromium", DefaultValueFactory = _ => "chromium" }; var debugOption = new Option("--debug") { Description = "Start with Playwright Inspector for debugging (automatically enables headed mode)" }; var debugTimingOption = new Option("--debug-timing") { Description = "Show step timing output with color coding during test execution" }; var headedOption = new Option("--headed") { Description = "Show browser UI while running tests (automatically enables sequential execution)" }; diff --git a/developer-cli/Commands/McpCommand.cs b/developer-cli/Commands/McpCommand.cs index 1234993e6..722860ece 100644 --- a/developer-cli/Commands/McpCommand.cs +++ b/developer-cli/Commands/McpCommand.cs @@ -139,7 +139,8 @@ public static string Run() [Description("Run end-to-end tests")] public static async Task EndToEnd( [Description("Search terms")] string[]? searchTerms = null, - [Description("Browser")] string browser = "all", + [Description("Browser (chromium, firefox, webkit, safari, all). 
Defaults to chromium")] + string browser = "chromium", [Description("Smoke only")] bool smoke = false, [Description("Skip waiting for Aspire to start (by default, retries server check up to 3 minutes)")] bool noWaitForAspire = false, @@ -156,11 +157,8 @@ public static async Task EndToEnd( { var args = new List { "e2e", "--quiet" }; if (searchTerms is { Length: > 0 }) args.AddRange(searchTerms); - if (browser != "all") - { - args.Add("--browser"); - args.Add(browser); - } + args.Add("--browser"); + args.Add(browser); if (smoke) args.Add("--smoke"); if (noWaitForAspire) args.Add("--no-wait-for-aspire"); From be98075ff861e5d413c4c093a3db9cd7cbc6bec6 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 12:52:02 +0200 Subject: [PATCH 04/16] Run end-to-end tests for all systems in a single Playwright invocation --- .gitignore | 3 + .../tests/e2e/fixtures/page-auth.ts | 8 +- .../tests/e2e/fixtures/worker-auth.ts | 13 +- developer-cli/Commands/End2EndCommand.cs | 159 ++++++++++++++++-- 4 files changed, 165 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index fdb1ca225..d1dc45b0b 100644 --- a/.gitignore +++ b/.gitignore @@ -424,3 +424,6 @@ playwright-report/ # PlatformPlatform agent workspace .workspace + +# Generated Playwright combined config (created by pp e2e) +application/playwright.combined.config.ts diff --git a/application/shared-webapp/tests/e2e/fixtures/page-auth.ts b/application/shared-webapp/tests/e2e/fixtures/page-auth.ts index 68ee3cd7e..2b4a49d69 100644 --- a/application/shared-webapp/tests/e2e/fixtures/page-auth.ts +++ b/application/shared-webapp/tests/e2e/fixtures/page-auth.ts @@ -164,7 +164,7 @@ export const test = base.extend({ ownerPage: async ({ browser }, use, testInfo) => { const workerIndex = testInfo.parallelIndex; const browserName = testInfo.project.name; - const systemPrefix = getSelfContainedSystemPrefix(); + const systemPrefix = getSelfContainedSystemPrefix(testInfo.file); // Get tenant for this worker 
const tenant = await getWorkerTenant(workerIndex, systemPrefix); @@ -188,7 +188,7 @@ export const test = base.extend({ adminPage: async ({ browser }, use, testInfo) => { const workerIndex = testInfo.parallelIndex; const browserName = testInfo.project.name; - const systemPrefix = getSelfContainedSystemPrefix(); + const systemPrefix = getSelfContainedSystemPrefix(testInfo.file); // Get tenant for this worker const tenant = await getWorkerTenant(workerIndex, systemPrefix); @@ -212,7 +212,7 @@ export const test = base.extend({ memberPage: async ({ browser }, use, testInfo) => { const workerIndex = testInfo.parallelIndex; const browserName = testInfo.project.name; - const systemPrefix = getSelfContainedSystemPrefix(); + const systemPrefix = getSelfContainedSystemPrefix(testInfo.file); // Get tenant for this worker const tenant = await getWorkerTenant(workerIndex, systemPrefix); @@ -235,7 +235,7 @@ export const test = base.extend({ anonymousPage: async ({ browser }, use, testInfo) => { const workerIndex = testInfo.parallelIndex; - const systemPrefix = getSelfContainedSystemPrefix(); + const systemPrefix = getSelfContainedSystemPrefix(testInfo.file); // Get tenant for this worker - ensure users exist for testing existing user flows const tenant = await getWorkerTenant(workerIndex, systemPrefix, { diff --git a/application/shared-webapp/tests/e2e/fixtures/worker-auth.ts b/application/shared-webapp/tests/e2e/fixtures/worker-auth.ts index 3112122d4..c72c5616e 100644 --- a/application/shared-webapp/tests/e2e/fixtures/worker-auth.ts +++ b/application/shared-webapp/tests/e2e/fixtures/worker-auth.ts @@ -46,11 +46,18 @@ export async function getWorkerTenant( } /** - * Extract the self-contained system prefix from the current working directory or test context + * Extract the self-contained system prefix from the test file path or current working directory + * @param testFilePath Optional test file path from testInfo.file * @returns The self-contained system prefix (e.g., "account" 
or "back-office") */ -export function getSelfContainedSystemPrefix(): string | undefined { - // Try to extract from current working directory +export function getSelfContainedSystemPrefix(testFilePath?: string): string | undefined { + // Prefer the test file path (works in combined runner where cwd is application/) + if (testFilePath) { + const fileMatch = testFilePath.match(/application\/([^/]+)\/WebApp/); + if (fileMatch) return fileMatch[1]; + } + + // Fall back to current working directory (works in per-SCS runner) const cwd = process.cwd(); const match = cwd.match(/application\/([^/]+)\/WebApp/); return match ? match[1] : undefined; diff --git a/developer-cli/Commands/End2EndCommand.cs b/developer-cli/Commands/End2EndCommand.cs index fda5d4634..35100c62c 100644 --- a/developer-cli/Commands/End2EndCommand.cs +++ b/developer-cli/Commands/End2EndCommand.cs @@ -168,22 +168,30 @@ private static void Execute( var stopwatch = Stopwatch.StartNew(); var overallSuccess = true; var failedSelfContainedSystems = new List(); + var showBrowser = headed || debug || slowMo; + var useCombinedRun = selfContainedSystemsToTest.Length > 1 && !debug && !ui && !showBrowser; - foreach (var currentSelfContainedSystem in selfContainedSystemsToTest) + if (useCombinedRun) { - var selfContainedSystemSuccess = RunTestsForSystem(currentSelfContainedSystem, testPatterns, browser, debug, debugTiming, searchGrep, headed, includeSlow, lastFailed, - onlyChanged, repeatEach, retries, showReport, slowMo, smoke, stopOnFirstFailure, ui, workers + overallSuccess = RunTestsCombined(selfContainedSystemsToTest, testPatterns, browser, debugTiming, searchGrep, includeSlow, lastFailed, + onlyChanged, repeatEach, retries, showReport, smoke, stopOnFirstFailure, workers ); - - if (!selfContainedSystemSuccess) + if (!overallSuccess) failedSelfContainedSystems.AddRange(selfContainedSystemsToTest); + } + else + { + foreach (var currentSelfContainedSystem in selfContainedSystemsToTest) { - overallSuccess = false; 
- failedSelfContainedSystems.Add(currentSelfContainedSystem); + var selfContainedSystemSuccess = RunTestsForSystem(currentSelfContainedSystem, testPatterns, browser, debug, debugTiming, searchGrep, headed, includeSlow, lastFailed, + onlyChanged, repeatEach, retries, showReport, slowMo, smoke, stopOnFirstFailure, ui, workers + ); - // If stop on first failure is enabled, exit the loop after the first failure - if (stopOnFirstFailure) + if (!selfContainedSystemSuccess) { - break; + overallSuccess = false; + failedSelfContainedSystems.Add(currentSelfContainedSystem); + + if (stopOnFirstFailure) break; } } } @@ -197,7 +205,11 @@ private static void Execute( if (!quiet) { - if (showReport) + if (useCombinedRun) + { + if (showReport || !overallSuccess) OpenCombinedHtmlReport(); + } + else if (showReport) { foreach (var currentSelfContainedSystem in selfContainedSystemsToTest) { @@ -257,6 +269,111 @@ private static (string[] testPatterns, string? grep) ProcessSearchTerms(string[] return (testPatterns.ToArray(), finalGrep); } + private static bool RunTestsCombined( + string[] selfContainedSystems, + string[] testPatterns, + string browser, + bool debugTiming, + string? searchGrep, + bool includeSlow, + bool lastFailed, + bool onlyChanged, + int? repeatEach, + int? retries, + bool showReport, + bool smoke, + bool stopOnFirstFailure, + int? workers) + { + // Build test directory arguments for all SCSs so Playwright runs everything in one invocation + var testDirs = new List(); + foreach (var scs in selfContainedSystems) + { + var end2EndTestsPath = Path.Combine(scs, "WebApp", "tests", "e2e"); + var fullPath = Path.Combine(Configuration.ApplicationFolder, end2EndTestsPath); + if (!Directory.Exists(fullPath)) + { + AnsiConsole.MarkupLine($"[yellow]No end-to-end tests found for {scs}. 
Skipping...[/]"); + continue; + } + + testDirs.Add(end2EndTestsPath); + } + + if (testDirs.Count == 0) + { + AnsiConsole.MarkupLine("[yellow]No end-to-end tests found for any system.[/]"); + return true; + } + + AnsiConsole.MarkupLine($"[blue]Running tests for {string.Join(", ", selfContainedSystems)} in a single Playwright invocation...[/]"); + + // Write a temporary combined Playwright config + var combinedConfigPath = Path.Combine(Configuration.ApplicationFolder, "playwright.combined.config.ts"); + var testMatchEntries = string.Join(", ", testDirs.Select(dir => $"\"{dir}/**/*.spec.ts\"")); + var configContent = $$""" + import { defineConfig } from "@playwright/test"; + import baseConfig from "./shared-webapp/tests/e2e/playwright.config"; + + export default defineConfig({ + ...baseConfig, + testDir: ".", + testMatch: [{{testMatchEntries}}] + }); + """; + File.WriteAllText(combinedConfigPath, configContent); + + try + { + // Clean up report directory if we're going to show it + var reportDirectory = Path.Combine(Configuration.ApplicationFolder, "tests", "test-results", "playwright-report"); + if (showReport && Directory.Exists(reportDirectory)) + { + Directory.Delete(reportDirectory, true); + } + + var runSequential = debugTiming; + var isLocalhost = BaseUrl.Contains("localhost", StringComparison.OrdinalIgnoreCase); + + var playwrightArgs = BuildPlaywrightArgs( + testPatterns, browser, false, searchGrep, false, includeSlow, lastFailed, onlyChanged, repeatEach, + retries, runSequential, smoke, stopOnFirstFailure, false, workers + ); + + var processStartInfo = new ProcessStartInfo + { + FileName = Configuration.IsWindows ? "cmd.exe" : "npx", + Arguments = $"{(Configuration.IsWindows ? 
"/C npx" : string.Empty)} playwright test --config=./playwright.combined.config.ts {playwrightArgs}", + WorkingDirectory = Configuration.ApplicationFolder, + UseShellExecute = false + }; + + AnsiConsole.MarkupLine($"[cyan]Running: npx playwright test --config=./playwright.combined.config.ts {playwrightArgs}[/]"); + + processStartInfo.EnvironmentVariables["PUBLIC_URL"] = BaseUrl; + if (isLocalhost) processStartInfo.EnvironmentVariables["PLAYWRIGHT_VIDEO_MODE"] = "on"; + if (debugTiming) processStartInfo.EnvironmentVariables["PLAYWRIGHT_SHOW_DEBUG_TIMING"] = "true"; + processStartInfo.EnvironmentVariables["PLAYWRIGHT_HTML_OPEN"] = "never"; + + try + { + ProcessHelper.StartProcess(processStartInfo, throwOnError: true); + AnsiConsole.MarkupLine("[green]All tests completed successfully[/]"); + return true; + } + catch + { + AnsiConsole.MarkupLine("[red]Some tests failed[/]"); + return false; + } + } + finally + { + // Clean up the temporary config + if (File.Exists(combinedConfigPath)) File.Delete(combinedConfigPath); + } + } + private static bool RunTestsForSystem( string selfContainedSystem, string[] testPatterns, @@ -536,6 +653,26 @@ private static void OpenHtmlReport(string selfContainedSystem) } } + private static void OpenCombinedHtmlReport() + { + // The combined run uses the application-level report path from the base config (test-results/playwright-report) + var reportPath = Path.Combine(Configuration.ApplicationFolder, "test-results", "playwright-report", "index.html"); + + if (File.Exists(reportPath)) + { + AnsiConsole.MarkupLine("[green]Opening combined test report...[/]"); + ProcessHelper.OpenBrowser(reportPath); + } + else + { + // Fall back to per-SCS reports + foreach (var scs in AvailableSelfContainedSystems) + { + OpenHtmlReport(scs); + } + } + } + private static void DeleteAllTestArtifacts() { AnsiConsole.MarkupLine("[blue]Deleting test artifacts...[/]"); From cd59d71a15abac0bf14e262372cfa2ff36454b7f Mon Sep 17 00:00:00 2001 From: Thomas Jespersen 
Date: Wed, 1 Apr 2026 12:57:55 +0200 Subject: [PATCH 05/16] Increase Playwright test timeout to 3 minutes and assertion timeout to 20 seconds --- application/shared-webapp/tests/e2e/playwright.config.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/application/shared-webapp/tests/e2e/playwright.config.ts b/application/shared-webapp/tests/e2e/playwright.config.ts index bef7d3dad..f4078af41 100644 --- a/application/shared-webapp/tests/e2e/playwright.config.ts +++ b/application/shared-webapp/tests/e2e/playwright.config.ts @@ -61,12 +61,12 @@ export default defineConfig({ // Global timeout for each test (double timeout for slow motion) timeout: (() => { - const baseTimeout = process.env.PLAYWRIGHT_TIMEOUT ? Number.parseInt(process.env.PLAYWRIGHT_TIMEOUT, 10) : 60000; + const baseTimeout = process.env.PLAYWRIGHT_TIMEOUT ? Number.parseInt(process.env.PLAYWRIGHT_TIMEOUT, 10) : 180_000; const isSlowMotion = !!process.env.PLAYWRIGHT_SLOW_MO; return isSlowMotion ? baseTimeout * 2 : baseTimeout; })(), expect: { - timeout: 10000 + timeout: 20_000 }, // Output directories - centralized test artifacts From e007aecc4518439d60b281eb5426044950db706d Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 13:11:05 +0200 Subject: [PATCH 06/16] Suppress all console output in quiet mode for end-to-end tests --- developer-cli/Commands/End2EndCommand.cs | 154 ++++++++++++------ .../Installation/PlaywrightInstaller.cs | 36 +++- 2 files changed, 128 insertions(+), 62 deletions(-) diff --git a/developer-cli/Commands/End2EndCommand.cs b/developer-cli/Commands/End2EndCommand.cs index 35100c62c..83f49c272 100644 --- a/developer-cli/Commands/End2EndCommand.cs +++ b/developer-cli/Commands/End2EndCommand.cs @@ -1,12 +1,13 @@ using System.CommandLine; using System.Diagnostics; +using System.Text.RegularExpressions; using DeveloperCli.Installation; using DeveloperCli.Utilities; using Spectre.Console; namespace DeveloperCli.Commands; -public class End2EndCommand 
: Command +public partial class End2EndCommand : Command { private static readonly string[] ValidBrowsers = ["chromium", "firefox", "webkit", "safari", "all"]; @@ -112,14 +113,14 @@ private static void Execute( if (deleteArtifacts) { DeleteAllTestArtifacts(); - AnsiConsole.MarkupLine("[yellow]Note: --delete-artifacts is a standalone operation and exits after cleaning artifacts.[/]"); + if (!quiet) AnsiConsole.MarkupLine("[yellow]Note: --delete-artifacts is a standalone operation and exits after cleaning artifacts.[/]"); Environment.Exit(0); } - AnsiConsole.MarkupLine("[blue]Checking server availability...[/]"); - CheckWebsiteAccessibility(!noWaitForAspire); + if (!quiet) AnsiConsole.MarkupLine("[blue]Checking server availability...[/]"); + CheckWebsiteAccessibility(!noWaitForAspire, quiet); - PlaywrightInstaller.EnsurePlaywrightBrowsers(); + PlaywrightInstaller.EnsurePlaywrightBrowsers(quiet); // Convert search terms to test patterns and grep patterns var (testPatterns, searchGrep) = ProcessSearchTerms(searchTerms); @@ -130,7 +131,7 @@ private static void Execute( { if (!AvailableSelfContainedSystems.Contains(selfContainedSystem)) { - AnsiConsole.MarkupLine($"[red]Invalid self-contained system '{selfContainedSystem}'. Available systems: {string.Join(", ", AvailableSelfContainedSystems)}[/]"); + Console.WriteLine($"Invalid self-contained system '{selfContainedSystem}'. Available systems: {string.Join(", ", AvailableSelfContainedSystems)}"); Environment.Exit(1); } @@ -161,7 +162,7 @@ private static void Execute( // Validate browser option if (!ValidBrowsers.Contains(browser.ToLower())) { - AnsiConsole.MarkupLine($"[red]Invalid browser '{browser}'. Valid options are: {string.Join(", ", ValidBrowsers)}[/]"); + Console.WriteLine($"Invalid browser '{browser}'. 
Valid options are: {string.Join(", ", ValidBrowsers)}"); Environment.Exit(1); } @@ -174,7 +175,7 @@ private static void Execute( if (useCombinedRun) { overallSuccess = RunTestsCombined(selfContainedSystemsToTest, testPatterns, browser, debugTiming, searchGrep, includeSlow, lastFailed, - onlyChanged, repeatEach, retries, showReport, smoke, stopOnFirstFailure, workers + onlyChanged, repeatEach, retries, showReport, smoke, stopOnFirstFailure, workers, quiet ); if (!overallSuccess) failedSelfContainedSystems.AddRange(selfContainedSystemsToTest); } @@ -183,7 +184,7 @@ private static void Execute( foreach (var currentSelfContainedSystem in selfContainedSystemsToTest) { var selfContainedSystemSuccess = RunTestsForSystem(currentSelfContainedSystem, testPatterns, browser, debug, debugTiming, searchGrep, headed, includeSlow, lastFailed, - onlyChanged, repeatEach, retries, showReport, slowMo, smoke, stopOnFirstFailure, ui, workers + onlyChanged, repeatEach, retries, showReport, slowMo, smoke, stopOnFirstFailure, ui, workers, quiet ); if (!selfContainedSystemSuccess) @@ -198,13 +199,13 @@ private static void Execute( stopwatch.Stop(); - AnsiConsole.MarkupLine(overallSuccess - ? $"[green]All tests completed in {stopwatch.Elapsed.TotalSeconds:F1} seconds[/]" - : $"[red]Some tests failed in {stopwatch.Elapsed.TotalSeconds:F1} seconds[/]" - ); - if (!quiet) { + AnsiConsole.MarkupLine(overallSuccess + ? $"[green]All tests completed in {stopwatch.Elapsed.TotalSeconds:F1} seconds[/]" + : $"[red]Some tests failed in {stopwatch.Elapsed.TotalSeconds:F1} seconds[/]" + ); + if (useCombinedRun) { if (showReport || !overallSuccess) OpenCombinedHtmlReport(); @@ -283,17 +284,18 @@ private static bool RunTestsCombined( bool showReport, bool smoke, bool stopOnFirstFailure, - int? workers) + int? 
workers, + bool quiet) { // Build test directory arguments for all SCSs so Playwright runs everything in one invocation var testDirs = new List(); - foreach (var scs in selfContainedSystems) + foreach (var system in selfContainedSystems) { - var end2EndTestsPath = Path.Combine(scs, "WebApp", "tests", "e2e"); + var end2EndTestsPath = Path.Combine(system, "WebApp", "tests", "e2e"); var fullPath = Path.Combine(Configuration.ApplicationFolder, end2EndTestsPath); if (!Directory.Exists(fullPath)) { - AnsiConsole.MarkupLine($"[yellow]No end-to-end tests found for {scs}. Skipping...[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[yellow]No end-to-end tests found for {system}. Skipping...[/]"); continue; } @@ -302,11 +304,11 @@ private static bool RunTestsCombined( if (testDirs.Count == 0) { - AnsiConsole.MarkupLine("[yellow]No end-to-end tests found for any system.[/]"); + if (!quiet) AnsiConsole.MarkupLine("[yellow]No end-to-end tests found for any system.[/]"); return true; } - AnsiConsole.MarkupLine($"[blue]Running tests for {string.Join(", ", selfContainedSystems)} in a single Playwright invocation...[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[blue]Running tests for {string.Join(", ", selfContainedSystems)} in a single Playwright invocation...[/]"); // Write a temporary combined Playwright config var combinedConfigPath = Path.Combine(Configuration.ApplicationFolder, "playwright.combined.config.ts"); @@ -340,20 +342,33 @@ private static bool RunTestsCombined( retries, runSequential, smoke, stopOnFirstFailure, false, workers ); + var command = $"{(Configuration.IsWindows ? 
"cmd.exe /C npx" : "npx")} playwright test --config=./playwright.combined.config.ts {playwrightArgs}"; + + if (!quiet) AnsiConsole.MarkupLine($"[cyan]Running: npx playwright test --config=./playwright.combined.config.ts {playwrightArgs}[/]"); + + var environmentVariables = new List<(string Name, string Value)> { ("PUBLIC_URL", BaseUrl), ("PLAYWRIGHT_HTML_OPEN", "never") }; + if (isLocalhost) environmentVariables.Add(("PLAYWRIGHT_VIDEO_MODE", "on")); + if (debugTiming) environmentVariables.Add(("PLAYWRIGHT_SHOW_DEBUG_TIMING", "true")); + + if (quiet) + { + var result = ProcessHelper.ExecuteQuietly(command, Configuration.ApplicationFolder, environmentVariables.ToArray()); + Console.WriteLine(ExtractPlaywrightSummary(result.CombinedOutput) ?? (result.Success ? "All tests passed." : "Tests failed.")); + if (!result.Success) Console.WriteLine($"Full output: {result.TempFilePathWithSize}"); + return result.Success; + } + var processStartInfo = new ProcessStartInfo { FileName = Configuration.IsWindows ? "cmd.exe" : "npx", Arguments = $"{(Configuration.IsWindows ? "/C npx" : string.Empty)} playwright test --config=./playwright.combined.config.ts {playwrightArgs}", WorkingDirectory = Configuration.ApplicationFolder, - UseShellExecute = false + UseShellExecute = false, + Environment = { ["PUBLIC_URL"] = BaseUrl, ["PLAYWRIGHT_HTML_OPEN"] = "never" } }; - AnsiConsole.MarkupLine($"[cyan]Running: npx playwright test --config=./playwright.combined.config.ts {playwrightArgs}[/]"); - - processStartInfo.EnvironmentVariables["PUBLIC_URL"] = BaseUrl; if (isLocalhost) processStartInfo.EnvironmentVariables["PLAYWRIGHT_VIDEO_MODE"] = "on"; if (debugTiming) processStartInfo.EnvironmentVariables["PLAYWRIGHT_SHOW_DEBUG_TIMING"] = "true"; - processStartInfo.EnvironmentVariables["PLAYWRIGHT_HTML_OPEN"] = "never"; try { @@ -392,18 +407,19 @@ private static bool RunTestsForSystem( bool smoke, bool stopOnFirstFailure, bool ui, - int? workers) + int? 
workers, + bool quiet) { var systemPath = Path.Combine(Configuration.ApplicationFolder, selfContainedSystem, "WebApp"); var end2EndTestsPath = Path.Combine(systemPath, "tests/e2e"); if (!Directory.Exists(end2EndTestsPath)) { - AnsiConsole.MarkupLine($"[yellow]No end-to-end tests found for {selfContainedSystem}. Skipping...[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[yellow]No end-to-end tests found for {selfContainedSystem}. Skipping...[/]"); return true; } - AnsiConsole.MarkupLine($"[blue]Running tests for {selfContainedSystem}...[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[blue]Running tests for {selfContainedSystem}...[/]"); // Clean up report directory if we're going to show it if (showReport) @@ -411,7 +427,7 @@ private static bool RunTestsForSystem( var reportDirectory = Path.Combine(systemPath, "tests", "test-results", "playwright-report"); if (Directory.Exists(reportDirectory)) { - AnsiConsole.MarkupLine("[blue]Cleaning up previous test report...[/]"); + if (!quiet) AnsiConsole.MarkupLine("[blue]Cleaning up previous test report...[/]"); Directory.Delete(reportDirectory, true); } } @@ -425,6 +441,23 @@ private static bool RunTestsForSystem( retries, runSequential, smoke, stopOnFirstFailure, ui, workers ); + var command = $"{(Configuration.IsWindows ? 
"cmd.exe /C npx" : "npx")} playwright test --config=./tests/playwright.config.ts {playwrightArgs}"; + + if (!quiet) AnsiConsole.MarkupLine($"[cyan]Running command in {selfContainedSystem}: npx playwright test --config=./tests/playwright.config.ts {playwrightArgs}[/]"); + + var environmentVariables = new List<(string Name, string Value)> { ("PUBLIC_URL", BaseUrl), ("PLAYWRIGHT_HTML_OPEN", "never") }; + if (slowMo) environmentVariables.Add(("PLAYWRIGHT_SLOW_MO", "500")); + if (isLocalhost) environmentVariables.Add(("PLAYWRIGHT_VIDEO_MODE", "on")); + if (debugTiming) environmentVariables.Add(("PLAYWRIGHT_SHOW_DEBUG_TIMING", "true")); + + if (quiet) + { + var result = ProcessHelper.ExecuteQuietly(command, systemPath, environmentVariables.ToArray()); + Console.WriteLine(ExtractPlaywrightSummary(result.CombinedOutput) ?? (result.Success ? $"{selfContainedSystem}: all tests passed." : $"{selfContainedSystem}: tests failed.")); + if (!result.Success) Console.WriteLine($"Full output: {result.TempFilePathWithSize}"); + return result.Success; + } + var processStartInfo = new ProcessStartInfo { FileName = Configuration.IsWindows ? 
"cmd.exe" : "npx", @@ -433,25 +466,16 @@ private static bool RunTestsForSystem( UseShellExecute = false }; - AnsiConsole.MarkupLine($"[cyan]Running command in {selfContainedSystem}: npx playwright test --config=./tests/playwright.config.ts {playwrightArgs}[/]"); - - processStartInfo.EnvironmentVariables["PUBLIC_URL"] = BaseUrl; - - if (slowMo) processStartInfo.EnvironmentVariables["PLAYWRIGHT_SLOW_MO"] = "500"; - if (isLocalhost) processStartInfo.EnvironmentVariables["PLAYWRIGHT_VIDEO_MODE"] = "on"; - if (debugTiming) processStartInfo.EnvironmentVariables["PLAYWRIGHT_SHOW_DEBUG_TIMING"] = "true"; - - // Prevent HTML report from opening automatically - processStartInfo.EnvironmentVariables["PLAYWRIGHT_HTML_OPEN"] = "never"; + foreach (var (name, value) in environmentVariables) + { + processStartInfo.EnvironmentVariables[name] = value; + } var testsFailed = false; try { ProcessHelper.StartProcess(processStartInfo, throwOnError: true); - AnsiConsole.MarkupLine(testsFailed - ? $"[red]Tests for {selfContainedSystem} failed[/]" - : $"[green]Tests for {selfContainedSystem} completed successfully[/]" - ); + AnsiConsole.MarkupLine($"[green]Tests for {selfContainedSystem} completed successfully[/]"); } catch (Exception) { @@ -462,9 +486,9 @@ private static bool RunTestsForSystem( return !testsFailed; } - private static void CheckWebsiteAccessibility(bool waitForAspire) + private static void CheckWebsiteAccessibility(bool waitForAspire, bool quiet = false) { - var maxAttempts = waitForAspire ? 36 : 1; // 36 * 5s = 3 minutes + var maxAttempts = waitForAspire ? 
6 : 1; // 6 * 5s = 30 seconds var retryDelaySeconds = 5; for (var attempt = 1; attempt <= maxAttempts; attempt++) @@ -483,13 +507,13 @@ private static void CheckWebsiteAccessibility(bool waitForAspire) if (response.IsSuccessStatusCode) { - AnsiConsole.MarkupLine($"[green]Server is accessible at {BaseUrl}[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[green]Server is accessible at {BaseUrl}[/]"); return; } if (attempt < maxAttempts) { - AnsiConsole.MarkupLine($"[yellow]Server returned HTTP {(int)response.StatusCode} ({response.StatusCode}), retrying in {retryDelaySeconds}s... (attempt {attempt}/{maxAttempts})[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[yellow]Server returned HTTP {(int)response.StatusCode} ({response.StatusCode}), retrying in {retryDelaySeconds}s... (attempt {attempt}/{maxAttempts})[/]"); Thread.Sleep(TimeSpan.FromSeconds(retryDelaySeconds)); } } @@ -499,14 +523,13 @@ private static void CheckWebsiteAccessibility(bool waitForAspire) if (attempt < maxAttempts) { - AnsiConsole.MarkupLine($"[yellow]Server not ready ({reason}), retrying in {retryDelaySeconds}s... (attempt {attempt}/{maxAttempts})[/]"); + if (!quiet) AnsiConsole.MarkupLine($"[yellow]Server not ready ({reason}), retrying in {retryDelaySeconds}s... (attempt {attempt}/{maxAttempts})[/]"); Thread.Sleep(TimeSpan.FromSeconds(retryDelaySeconds)); } } } - AnsiConsole.MarkupLine($"[red]Server is not accessible at {BaseUrl}[/]"); - AnsiConsole.MarkupLine($"[yellow]Please start AppHost in your IDE before running '{Configuration.AliasName} e2e'[/]"); + Console.WriteLine($"Server is not accessible at {BaseUrl}. 
Please start AppHost before running '{Configuration.AliasName} e2e'."); Environment.Exit(1); } @@ -665,14 +688,39 @@ private static void OpenCombinedHtmlReport() } else { - // Fall back to per-SCS reports - foreach (var scs in AvailableSelfContainedSystems) + // Fall back to per-system reports + foreach (var selfContainedSystem in AvailableSelfContainedSystems) { - OpenHtmlReport(scs); + OpenHtmlReport(selfContainedSystem); } } } + private static string? ExtractPlaywrightSummary(string output) + { + // Playwright summary lines: "8 failed" and "5 passed (31.3s)" near the end, possibly separated by test names + // Match lines like "N passed", "N failed", "N passed (Xs)", "N skipped" + var summaryPattern = SummaryLineRegex(); + var parts = new List(); + + foreach (var line in output.Split('\n').Reverse()) + { + var cleaned = AnsiEscapeRegex().Replace(line, "").Trim(); + if (summaryPattern.IsMatch(cleaned)) + { + parts.Insert(0, cleaned); + } + } + + return parts.Count > 0 ? string.Join(", ", parts) : null; + } + + [GeneratedRegex(@"^\d+ (passed|failed|skipped)")] + private static partial Regex SummaryLineRegex(); + + [GeneratedRegex(@"\x1B\[[0-9;]*m")] + private static partial Regex AnsiEscapeRegex(); + private static void DeleteAllTestArtifacts() { AnsiConsole.MarkupLine("[blue]Deleting test artifacts...[/]"); diff --git a/developer-cli/Installation/PlaywrightInstaller.cs b/developer-cli/Installation/PlaywrightInstaller.cs index f4afdddfe..e7724e247 100644 --- a/developer-cli/Installation/PlaywrightInstaller.cs +++ b/developer-cli/Installation/PlaywrightInstaller.cs @@ -6,18 +6,36 @@ namespace DeveloperCli.Installation; public static class PlaywrightInstaller { - public static void EnsurePlaywrightBrowsers() + public static void EnsurePlaywrightBrowsers(bool quiet = false) { - AnsiConsole.MarkupLine("[blue]Ensuring Playwright browsers are installed...[/]"); + if (!quiet) AnsiConsole.MarkupLine("[blue]Ensuring Playwright browsers are installed...[/]"); - var 
processStartInfo = new ProcessStartInfo + var command = Configuration.IsWindows + ? "cmd.exe" + : Configuration.IsLinux + ? "sudo" + : "npx"; + var arguments = Configuration.IsWindows + ? "/C npx --yes playwright install --with-deps" + : Configuration.IsLinux + ? "npx --yes playwright install --with-deps" + : "--yes playwright install --with-deps"; + + if (quiet) + { + ProcessHelper.ExecuteQuietly($"{command} {arguments}", Configuration.ApplicationFolder); + } + else { - FileName = Configuration.IsWindows ? "cmd.exe" : "npx", - Arguments = $"{(Configuration.IsWindows ? "/C npx" : string.Empty)} --yes playwright install --with-deps", - WorkingDirectory = Configuration.ApplicationFolder, - UseShellExecute = false - }; + var processStartInfo = new ProcessStartInfo + { + FileName = command, + Arguments = arguments, + WorkingDirectory = Configuration.ApplicationFolder, + UseShellExecute = false + }; - ProcessHelper.StartProcess(processStartInfo, throwOnError: true); + ProcessHelper.StartProcess(processStartInfo, throwOnError: true); + } } } From c2512dbabb482d049d628ee35b61db52c16e8da1 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 14:22:41 +0200 Subject: [PATCH 07/16] Add configurable e2e performance profiles with interactive setup --- .../tests/e2e/playwright.config.ts | 11 +- developer-cli/Commands/End2EndCommand.cs | 100 +++++++++++++++++- 2 files changed, 101 insertions(+), 10 deletions(-) diff --git a/application/shared-webapp/tests/e2e/playwright.config.ts b/application/shared-webapp/tests/e2e/playwright.config.ts index f4078af41..4c6204b1b 100644 --- a/application/shared-webapp/tests/e2e/playwright.config.ts +++ b/application/shared-webapp/tests/e2e/playwright.config.ts @@ -2,14 +2,7 @@ import { defineConfig, devices } from "@playwright/test"; import { getBaseUrl, isLinux, isWindows } from "./utils/constants"; -let workers: number | undefined; -if (process.env.CI) { - workers = 1; // Limit to 1 worker on CI -} else if (isWindows) { - 
workers = 4; // Limit to 4 workers on Windows to avoid performance issues -} else { - workers = undefined; // On non-Windows systems, use all available CPUs -} +const workers = process.env.CI ? 1 : undefined; /** * See https://playwright.dev/docs/test-configuration. @@ -66,7 +59,7 @@ export default defineConfig({ return isSlowMotion ? baseTimeout * 2 : baseTimeout; })(), expect: { - timeout: 20_000 + timeout: process.env.PLAYWRIGHT_EXPECT_TIMEOUT ? Number.parseInt(process.env.PLAYWRIGHT_EXPECT_TIMEOUT, 10) : 20_000 }, // Output directories - centralized test artifacts diff --git a/developer-cli/Commands/End2EndCommand.cs b/developer-cli/Commands/End2EndCommand.cs index 83f49c272..418d17c8c 100644 --- a/developer-cli/Commands/End2EndCommand.cs +++ b/developer-cli/Commands/End2EndCommand.cs @@ -35,7 +35,8 @@ public partial class End2EndCommand : Command var stopOnFirstFailureOption = new Option("--stop-on-first-failure", "-x") { Description = "Stop after the first failure" }; var uiOption = new Option("--ui") { Description = "Run tests in interactive UI mode with time-travel debugging" }; var workersOption = new Option("--workers", "-w") { Description = "Number of worker processes to use for running tests" }; - var noWaitForAspireOption = new Option("--no-wait-for-aspire") { Description = "Skip waiting for Aspire to start (by default, retries server check up to 3 minutes)" }; + var configOption = new Option("--config") { Description = "Configure test performance profile for this machine (workers, timeouts)" }; + var noWaitForAspireOption = new Option("--no-wait-for-aspire") { Description = "Skip waiting for Aspire to start (by default, retries server check up to 30 seconds)" }; Arguments.Add(searchTermsArgument); Options.Add(browserOption); @@ -56,6 +57,7 @@ public partial class End2EndCommand : Command Options.Add(stopOnFirstFailureOption); Options.Add(uiOption); Options.Add(workersOption); + Options.Add(configOption); Options.Add(noWaitForAspireOption); // 
SetHandler only supports up to 8 parameters, so we use SetAction for this complex command @@ -79,6 +81,7 @@ public partial class End2EndCommand : Command parseResult.GetValue(stopOnFirstFailureOption), parseResult.GetValue(uiOption), parseResult.GetValue(workersOption), + parseResult.GetValue(configOption) || parseResult.GetValue(searchTermsArgument) is ["config"], parseResult.GetValue(noWaitForAspireOption) ) ); @@ -86,6 +89,8 @@ public partial class End2EndCommand : Command private static string BaseUrl => Environment.GetEnvironmentVariable("PUBLIC_URL") ?? "https://localhost:9000"; + private static string DefaultsFilePath => Path.Combine(Configuration.SourceCodeFolder, ".workspace", "developer-cli", "end-to-end-tests", "e2e-defaults.json"); + private static void Execute( string[] searchTerms, string browser, @@ -106,10 +111,25 @@ private static void Execute( bool stopOnFirstFailure, bool ui, int? workers, + bool configure, bool noWaitForAspire) { Prerequisite.Ensure(Prerequisite.Node); + if (configure) + { + ConfigurePerformanceProfile(); + Environment.Exit(0); + } + + // Apply saved defaults if not explicitly provided + if (!File.Exists(DefaultsFilePath) && !quiet) + { + AnsiConsole.MarkupLine($"[yellow]Tip: Run '{Configuration.AliasName} e2e config' to set worker count and timeouts for this machine.[/]"); + } + + workers ??= LoadDefault("workers"); + if (deleteArtifacts) { DeleteAllTestArtifacts(); @@ -349,6 +369,10 @@ private static bool RunTestsCombined( var environmentVariables = new List<(string Name, string Value)> { ("PUBLIC_URL", BaseUrl), ("PLAYWRIGHT_HTML_OPEN", "never") }; if (isLocalhost) environmentVariables.Add(("PLAYWRIGHT_VIDEO_MODE", "on")); if (debugTiming) environmentVariables.Add(("PLAYWRIGHT_SHOW_DEBUG_TIMING", "true")); + var assertionTimeout = LoadDefault("assertionTimeout"); + if (assertionTimeout is not null) environmentVariables.Add(("PLAYWRIGHT_EXPECT_TIMEOUT", (assertionTimeout.Value * 1000).ToString())); + var testTimeout = 
LoadDefault("testTimeout"); + if (testTimeout is not null) environmentVariables.Add(("PLAYWRIGHT_TIMEOUT", (testTimeout.Value * 1000).ToString())); if (quiet) { @@ -449,6 +473,8 @@ private static bool RunTestsForSystem( if (slowMo) environmentVariables.Add(("PLAYWRIGHT_SLOW_MO", "500")); if (isLocalhost) environmentVariables.Add(("PLAYWRIGHT_VIDEO_MODE", "on")); if (debugTiming) environmentVariables.Add(("PLAYWRIGHT_SHOW_DEBUG_TIMING", "true")); + var assertionTimeout = LoadDefault("assertionTimeout"); + if (assertionTimeout is not null) environmentVariables.Add(("PLAYWRIGHT_EXPECT_TIMEOUT", (assertionTimeout.Value * 1000).ToString())); if (quiet) { @@ -721,6 +747,78 @@ private static void OpenCombinedHtmlReport() [GeneratedRegex(@"\x1B\[[0-9;]*m")] private static partial Regex AnsiEscapeRegex(); + private static void ConfigurePerformanceProfile() + { + var profiles = new Dictionary + { + ["High-end (8 workers, 15s assertions, 2m tests)"] = (8, 15, 120), + ["Mid-range (6 workers, 20s assertions, 3m tests)"] = (6, 20, 180), + ["Low-spec (4 workers, 30s assertions, 4m tests)"] = (4, 30, 240), + ["CI runner (1 worker, 30s assertions, 4m tests)"] = (1, 30, 240) + }; + + var currentWorkers = LoadDefault("workers"); + var currentAssertionTimeout = LoadDefault("assertionTimeout"); + var currentTestTimeout = LoadDefault("testTimeout"); + + if (currentWorkers is not null) + { + AnsiConsole.MarkupLine($"[blue]Current settings: {currentWorkers} workers, {currentAssertionTimeout ?? 20}s assertions, {currentTestTimeout ?? 
180}s tests[/]"); + } + + var selection = AnsiConsole.Prompt( + new SelectionPrompt() + .Title("Select a [green]performance profile[/] for this machine:") + .AddChoices(profiles.Keys) + ); + + var (workers, assertionTimeout, testTimeout) = profiles[selection]; + SaveDefaults("workers", workers); + SaveDefaults("assertionTimeout", assertionTimeout); + SaveDefaults("testTimeout", testTimeout); + + AnsiConsole.MarkupLine($"[green]Profile saved: {workers} workers, {assertionTimeout}s assertion timeout, {testTimeout}s test timeout[/]"); + } + + private static void SaveDefaults(string key, int value) + { + var directory = Path.GetDirectoryName(DefaultsFilePath)!; + if (!Directory.Exists(directory)) Directory.CreateDirectory(directory); + + var defaults = LoadAllDefaults(); + defaults[key] = value; + + var entries = defaults.Select(kvp => $"\"{kvp.Key}\":{kvp.Value}"); + File.WriteAllText(DefaultsFilePath, $"{{{string.Join(",", entries)}}}"); + } + + private static int? LoadDefault(string key) + { + var defaults = LoadAllDefaults(); + return defaults.TryGetValue(key, out var value) ? 
value : null; + } + + private static Dictionary LoadAllDefaults() + { + if (!File.Exists(DefaultsFilePath)) return new Dictionary(); + + try + { + var json = File.ReadAllText(DefaultsFilePath); + var result = new Dictionary(); + foreach (Match match in Regex.Matches(json, "\"(\\w+)\":(\\d+)")) + { + result[match.Groups[1].Value] = int.Parse(match.Groups[2].Value); + } + + return result; + } + catch + { + return new Dictionary(); + } + } + private static void DeleteAllTestArtifacts() { AnsiConsole.MarkupLine("[blue]Deleting test artifacts...[/]"); From f22545d5228ed64f147a8c9e1a7d3278c5fef4dc Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Sat, 28 Mar 2026 20:46:17 +0100 Subject: [PATCH 08/16] Show test failures in red and prevent stderr deadlock in test runner --- developer-cli/Commands/TestCommand.cs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/developer-cli/Commands/TestCommand.cs b/developer-cli/Commands/TestCommand.cs index 22505f19d..5d836aeff 100644 --- a/developer-cli/Commands/TestCommand.cs +++ b/developer-cli/Commands/TestCommand.cs @@ -103,6 +103,9 @@ private static void RunTestsWithFilteredOutput(string command, string? workingDi using var process = Process.Start(processStartInfo)!; + // Drain stderr asynchronously to prevent deadlock when the buffer fills + var stderrTask = process.StandardError.ReadToEndAsync(); + // Stream stdout in real-time while (!process.StandardOutput.EndOfStream) { @@ -123,7 +126,7 @@ private static void RunTestsWithFilteredOutput(string command, string? workingDi { stats.Failed++; stats.FailedTests.Add(ExtractTestName(line)); - Console.WriteLine(line); + AnsiConsole.MarkupLine($"[red]{Markup.Escape(line)}[/]"); } else if (trimmedLine.StartsWith("Skipped ")) { @@ -138,12 +141,20 @@ private static void RunTestsWithFilteredOutput(string command, string? 
workingDi } process.WaitForExit(); + stderrTask.GetAwaiter().GetResult(); stopwatch.Stop(); var duration = stopwatch.Elapsed.TotalSeconds; // Print our summary Console.WriteLine(); - Console.WriteLine($"Test summary: total: {stats.Total}; failed: {stats.Failed}; succeeded: {stats.Passed}; skipped: {stats.Skipped}; duration: {duration:F1}s"); + if (stats.Failed > 0 || process.ExitCode != 0) + { + AnsiConsole.MarkupLine($"[red]Test summary: total: {stats.Total}; failed: {stats.Failed}; succeeded: {stats.Passed}; skipped: {stats.Skipped}; duration: {duration:F1}s[/]"); + } + else + { + Console.WriteLine($"Test summary: total: {stats.Total}; failed: {stats.Failed}; succeeded: {stats.Passed}; skipped: {stats.Skipped}; duration: {duration:F1}s"); + } if (process.ExitCode != 0) { From c273ce023138cd53e8491b80f823d25f6253c0f9 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 20:59:30 +0200 Subject: [PATCH 09/16] Move CSP nonce checks into global UI flows test --- .../WebApp/tests/e2e/csp-nonce-flows.spec.ts | 66 ------------------- .../WebApp/tests/e2e/global-ui-flows.spec.ts | 15 ++++- 2 files changed, 13 insertions(+), 68 deletions(-) delete mode 100644 application/account/WebApp/tests/e2e/csp-nonce-flows.spec.ts diff --git a/application/account/WebApp/tests/e2e/csp-nonce-flows.spec.ts b/application/account/WebApp/tests/e2e/csp-nonce-flows.spec.ts deleted file mode 100644 index a0d7ab37e..000000000 --- a/application/account/WebApp/tests/e2e/csp-nonce-flows.spec.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { expect } from "@playwright/test"; -import { test } from "@shared/e2e/fixtures/page-auth"; -import { createTestContext } from "@shared/e2e/utils/test-assertions"; -import { step } from "@shared/e2e/utils/test-step-wrapper"; - -test.describe("@smoke", () => { - test("should block inline scripts and styles injected without valid nonce", async ({ page }) => { - createTestContext(page); - - await step("Navigate to landing page & verify CSP nonce 
configuration")(async () => { - const response = await page.goto("/"); - - await expect(page).toHaveURL("/"); - - // Verify meta tag exists - const nonceMetaExists = await page.locator('meta[name="csp-nonce"]').count(); - expect(nonceMetaExists).toBe(1); - - // Verify CSP headers require nonce for scripts and styles - const cspHeader = response?.headers()["content-security-policy"]; - expect(cspHeader).toBeTruthy(); - expect(cspHeader).toContain("script-src"); - expect(cspHeader).toContain("'nonce-"); - expect(cspHeader).toContain("style-src"); - })(); - - await step("Inject malicious script via innerHTML & verify execution is blocked")(async () => { - const scriptBlocked = await page.evaluate(() => { - // Attacker tries to inject script via innerHTML (XSS attack) - const container = document.createElement("div"); - container.innerHTML = ""; - const script = - container.querySelector("script") ?? - (() => { - throw new Error("Failed to create script element"); - })(); - document.head.appendChild(script); - - // Check if script executed. Should be false (blocked by CSP). - return !(window as unknown as { __xssAttack__?: boolean }).__xssAttack__; - }); - - expect(scriptBlocked).toBe(true); - })(); - - await step("Inject malicious CSS via innerHTML & verify styles are blocked")(async () => { - const cssBlocked = await page.evaluate(() => { - // Attacker tries to inject CSS via innerHTML (XSS attack) - const container = document.createElement("div"); - container.innerHTML = ""; - const style = - container.querySelector("style") ?? - (() => { - throw new Error("Failed to create style element"); - })(); - document.head.appendChild(style); - - // Check if malicious CSS was applied. Should NOT have red border (blocked by CSP). 
- const border = window.getComputedStyle(document.body).border; - return !border.includes("10px") || !border.includes("red"); - }); - - expect(cssBlocked).toBe(true); - })(); - }); -}); diff --git a/application/account/WebApp/tests/e2e/global-ui-flows.spec.ts b/application/account/WebApp/tests/e2e/global-ui-flows.spec.ts index a75e8ff2e..a6c5b1daa 100644 --- a/application/account/WebApp/tests/e2e/global-ui-flows.spec.ts +++ b/application/account/WebApp/tests/e2e/global-ui-flows.spec.ts @@ -8,6 +8,7 @@ test.describe("@comprehensive", () => { /** * Tests theme switching functionality via preferences page across different viewport sizes. * Covers: + * - CSP nonce configuration in meta tag and response headers * - Theme switching between light, dark, and system modes via preferences page * - Theme persistence across page reloads * - Theme persistence across navigation @@ -17,12 +18,22 @@ test.describe("@comprehensive", () => { test("should handle theme switching with persistence across viewport sizes", async ({ ownerPage }) => { createTestContext(ownerPage); - await step("Navigate to admin dashboard & verify default light theme")(async () => { - await ownerPage.goto("/account"); + await step("Navigate to admin dashboard & verify default light theme and CSP nonce")(async () => { + const response = await ownerPage.goto("/account"); // Verify dashboard loads with default light theme await expect(ownerPage.getByRole("heading", { name: "Overview" })).toBeVisible(); await expect(ownerPage.locator("html")).not.toHaveClass("dark"); + + // Verify CSP nonce is configured in meta tag and response headers + const nonceMetaExists = await ownerPage.locator('meta[name="csp-nonce"]').count(); + expect(nonceMetaExists).toBe(1); + + const cspHeader = response?.headers()["content-security-policy"]; + expect(cspHeader).toBeTruthy(); + expect(cspHeader).toContain("script-src"); + expect(cspHeader).toContain("'nonce-"); + expect(cspHeader).toContain("style-src"); })(); await step("Navigate 
to preferences page & select dark theme")(async () => { From 08dd5e48db8d61166f774506fdd1c2ac82d170af Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Thu, 2 Apr 2026 15:34:24 +0200 Subject: [PATCH 10/16] Initialize .workspace as git sub-repo to survive git clean --- developer-cli/Commands/End2EndCommand.cs | 2 +- developer-cli/Installation/Configuration.cs | 2 ++ developer-cli/Program.cs | 2 ++ developer-cli/Utilities/WorkspaceHelper.cs | 21 +++++++++++++++++++++ 4 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 developer-cli/Utilities/WorkspaceHelper.cs diff --git a/developer-cli/Commands/End2EndCommand.cs b/developer-cli/Commands/End2EndCommand.cs index 418d17c8c..9e6ede3fd 100644 --- a/developer-cli/Commands/End2EndCommand.cs +++ b/developer-cli/Commands/End2EndCommand.cs @@ -89,7 +89,7 @@ public partial class End2EndCommand : Command private static string BaseUrl => Environment.GetEnvironmentVariable("PUBLIC_URL") ?? "https://localhost:9000"; - private static string DefaultsFilePath => Path.Combine(Configuration.SourceCodeFolder, ".workspace", "developer-cli", "end-to-end-tests", "e2e-defaults.json"); + private static string DefaultsFilePath => Path.Combine(Configuration.WorkspaceFolder, "developer-cli", "end-to-end-tests", "e2e-defaults.json"); private static void Execute( string[] searchTerms, diff --git a/developer-cli/Installation/Configuration.cs b/developer-cli/Installation/Configuration.cs index c6042954f..17233c6d8 100644 --- a/developer-cli/Installation/Configuration.cs +++ b/developer-cli/Installation/Configuration.cs @@ -29,6 +29,8 @@ public static class Configuration public static readonly string CliFolder = new(Path.Combine(SourceCodeFolder, "developer-cli")); + public static readonly string WorkspaceFolder = new(Path.Combine(SourceCodeFolder, ".workspace")); + public static bool IsDebugMode => Environment.ProcessPath!.Contains("debug"); private static string ConfigFile => Path.Combine(PublishFolder, $"{AliasName}.json"); 
diff --git a/developer-cli/Program.cs b/developer-cli/Program.cs index cf6fccc55..e513b16d6 100644 --- a/developer-cli/Program.cs +++ b/developer-cli/Program.cs @@ -21,6 +21,8 @@ // Preprocess arguments to handle @ symbols in search terms args = CommandLineArgumentsPreprocessor.PreprocessArguments(args); +WorkspaceHelper.EnsureWorkspace(); + // Check if running MCP command - skip all output to keep stdout clean for MCP protocol var isMcpCommand = args.Length > 0 && args[0] == "mcp"; var solutionName = new DirectoryInfo(Configuration.SourceCodeFolder).Name; diff --git a/developer-cli/Utilities/WorkspaceHelper.cs b/developer-cli/Utilities/WorkspaceHelper.cs new file mode 100644 index 000000000..5ddfcd9b3 --- /dev/null +++ b/developer-cli/Utilities/WorkspaceHelper.cs @@ -0,0 +1,21 @@ +using DeveloperCli.Installation; + +namespace DeveloperCli.Utilities; + +public static class WorkspaceHelper +{ + public static string EnsureWorkspace() + { + var workspaceFolder = Configuration.WorkspaceFolder; + + Directory.CreateDirectory(workspaceFolder); + + var gitFolder = Path.Combine(workspaceFolder, ".git"); + if (!Directory.Exists(gitFolder)) + { + ProcessHelper.StartProcess($"git init \"{workspaceFolder}\"", Configuration.SourceCodeFolder); + } + + return workspaceFolder; + } +} From e4f4924cffc1a18f14f9293ca1a5c5cc4f126f9c Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Mon, 23 Mar 2026 15:07:27 +0100 Subject: [PATCH 11/16] Generalize tsbuildinfo gitignore pattern to match all projects --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d1dc45b0b..8afd0a949 100644 --- a/.gitignore +++ b/.gitignore @@ -399,7 +399,7 @@ FodyWeavers.xsd dist/ .turbo/ **/translations/locale/*.ts -application/account/WebApp/tsconfig.tsbuildinfo +*.tsbuildinfo # Ignore AI files from Antigravity, Cursor, CoPilot, and Windsurf. 
To enable, run the sync-ai-rules CLI command and remove ignore below .agent From 4e0cbd6f77bad829f89f901380c42a41048d8d53 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 10:39:04 +0200 Subject: [PATCH 12/16] Fix ACR resource group name check in deploy command --- developer-cli/Commands/DeployCommand.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/developer-cli/Commands/DeployCommand.cs b/developer-cli/Commands/DeployCommand.cs index f735a85b2..5115a158c 100644 --- a/developer-cli/Commands/DeployCommand.cs +++ b/developer-cli/Commands/DeployCommand.cs @@ -322,8 +322,8 @@ private void CollectUniquePrefix() ) ); - if (IsContainerRegistryNameConflicting(Config.StagingSubscription.Id, Config.StagingLocation.SharedLocation, $"{uniquePrefix}-stage", $"{uniquePrefix}stage") || - IsContainerRegistryNameConflicting(Config.ProductionSubscription.Id, Config.ProductionLocation.SharedLocation, $"{uniquePrefix}-prod", $"{uniquePrefix}prod")) + if (IsContainerRegistryNameConflicting(Config.StagingSubscription.Id, Config.StagingLocation.SharedLocation, $"{uniquePrefix}-stage-global", $"{uniquePrefix}stage") || + IsContainerRegistryNameConflicting(Config.ProductionSubscription.Id, Config.ProductionLocation.SharedLocation, $"{uniquePrefix}-prod-global", $"{uniquePrefix}prod")) { AnsiConsole.MarkupLine( "[red]ERROR:[/]Azure resources conflicting with this prefix is already in use, possibly in [bold]another subscription[/] or in [bold]another location[/]. Please enter a unique name." 
From 295eaadac1d1803b9d325d269ee675abf8f46833 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 12:39:22 +0200 Subject: [PATCH 13/16] Add generated API types to turbo dev:setup outputs to prevent stale cache --- application/turbo.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/application/turbo.json b/application/turbo.json index 57eaaedf3..f6f60127f 100644 --- a/application/turbo.json +++ b/application/turbo.json @@ -20,7 +20,7 @@ "dev:setup": { "dependsOn": ["^dev:setup"], "inputs": ["$TURBO_DEFAULT$", "shared/lib/api/*.Api.json"], - "outputs": ["dist/**"] + "outputs": ["dist/**", "shared/lib/api/*.generated.d.ts"] }, "format": { "dependsOn": ["^format"], From cf1dea8b4a9bb636ee3e7d57e14d403decbbf68e Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Wed, 1 Apr 2026 11:58:36 +0200 Subject: [PATCH 14/16] Filter AppHost process check by source folder to allow multiple Aspire instances --- developer-cli/Commands/RunCommand.cs | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/developer-cli/Commands/RunCommand.cs b/developer-cli/Commands/RunCommand.cs index 3c058ec0b..727c44686 100644 --- a/developer-cli/Commands/RunCommand.cs +++ b/developer-cli/Commands/RunCommand.cs @@ -103,18 +103,27 @@ private static bool IsAspireRunning() } } - // Also check if there are any dotnet processes running AppHost (both run and watch modes) + // Also check if there are any dotnet processes running AppHost for THIS project (both run and watch modes) if (Configuration.IsWindows) { - // Check if any dotnet.exe processes are running with AppHost in the command line - var appHostProcesses = ProcessHelper.StartProcess("""powershell -Command "Get-Process dotnet -ErrorAction SilentlyContinue | Where-Object {$_.CommandLine -like '*AppHost*'} | Select-Object Id" """, redirectOutput: true, exitOnError: false); + var escapedPath = Configuration.SourceCodeFolder.Replace("\\", "\\\\"); + var appHostProcesses = 
ProcessHelper.StartProcess($$"""powershell -Command "Get-Process dotnet -ErrorAction SilentlyContinue | Where-Object {$_.CommandLine -like '*AppHost*' -and $_.CommandLine -like '*{{escapedPath}}*'} | Select-Object Id" """, redirectOutput: true, exitOnError: false); return !string.IsNullOrWhiteSpace(appHostProcesses) && appHostProcesses.Contains("Id"); } - else + + var pidsOutput = ProcessHelper.StartProcess("pgrep -f dotnet.*AppHost", redirectOutput: true, exitOnError: false); + if (string.IsNullOrWhiteSpace(pidsOutput)) return false; + + foreach (var pid in pidsOutput.Split('\n', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries)) { - var appHostProcesses = ProcessHelper.StartProcess("pgrep -f dotnet.*AppHost", redirectOutput: true, exitOnError: false); - return !string.IsNullOrWhiteSpace(appHostProcesses); + var commandLine = ProcessHelper.StartProcess($"ps -p {pid} -o args=", redirectOutput: true, exitOnError: false).Trim(); + if (commandLine.Contains(Configuration.SourceCodeFolder, StringComparison.OrdinalIgnoreCase)) + { + return true; + } } + + return false; } private static void StopAspire() From 3bd2cc787743ff1cb10a21ed674e17a2f0aef88d Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Fri, 6 Mar 2026 21:30:34 +0100 Subject: [PATCH 15/16] Add fix-e2e-tests skill with phased diagnostic approach, remove unused update-flaky-tests skill --- .claude/skills/fix-e2e-tests/SKILL.md | 82 ++++++++++ .claude/skills/update-flaky-tests/SKILL.md | 153 ------------------ .../flaky-tests-archived-sample.json | 31 ---- .../flaky-tests-sample.json | 80 --------- .../flaky-tests-schema.json | 135 ---------------- .../status-output-sample.md | 19 --- 6 files changed, 82 insertions(+), 418 deletions(-) create mode 100644 .claude/skills/fix-e2e-tests/SKILL.md delete mode 100644 .claude/skills/update-flaky-tests/SKILL.md delete mode 100644 .claude/skills/update-flaky-tests/flaky-tests-archived-sample.json delete mode 100644 
.claude/skills/update-flaky-tests/flaky-tests-sample.json delete mode 100644 .claude/skills/update-flaky-tests/flaky-tests-schema.json delete mode 100644 .claude/skills/update-flaky-tests/status-output-sample.md diff --git a/.claude/skills/fix-e2e-tests/SKILL.md b/.claude/skills/fix-e2e-tests/SKILL.md new file mode 100644 index 000000000..d86347c0a --- /dev/null +++ b/.claude/skills/fix-e2e-tests/SKILL.md @@ -0,0 +1,82 @@ +--- +name: fix-e2e-tests +description: Systematically fix all failing E2E tests using a phased diagnostic approach. Classifies tests as passing, flaky, or permanently failing, then fixes them one by one with progressive scope expansion. +allowed-tools: Read, Write, Edit, Bash, Glob, Grep, mcp__developer-cli__end_to_end, mcp__developer-cli__execute_command, mcp__developer-cli__run +--- + +# Fix E2E Tests + +Systematically fix all failing end-to-end tests by diagnosing first, then fixing one by one, expanding scope progressively. Optimizes for speed of fixing over exhaustive analysis. + +## Core Principles + +1. **Test bug or app bug?** For every failure, critically evaluate: is the test wrong, or is the application wrong? A failing test might be correctly catching an application bug. If the application is broken, fix the application -- do not make the test pass by weakening it +2. **Focus on the first failing step only.** Do not attempt to fix later steps in a test until the first step passes. You do not know if later steps would actually fail +3. **Never predict failures.** Do not speculatively fix things that look like they might fail. Only fix what is actually failing right now +4. **Apply global fixes.** When a fix applies across multiple tests (e.g., a renamed button), apply it everywhere before re-running. Do not fix one test at a time when the same change applies to many +5. **One test at a time.** After global fixes, run each failing test individually. Fix the first step. Re-run. 
Iterate + +## STEP 1: Diagnostic Run -- Smoke Tests in Chromium + +Run all smoke tests in Chromium with retries=1 to classify every test: +- **Passing**: Passes on first attempt +- **Flaky**: Fails first attempt, passes on retry +- **Permanently failing**: Fails both attempts + +Save a diagnostic report to `.workspace/{branch-name}/e2e-diagnostic.md` with test counts and, for each failure, the test file, test name, first failing step, and error message. + +If all tests pass, skip to STEP 4. + +## STEP 2: Fix Permanently Failing Tests + +Work through permanently failing tests one at a time: + +1. **Run the failing test in isolation** to confirm it is truly permanently failing and not flaky. A test that failed in the full suite might pass when run alone (resource contention, test ordering). If it passes in isolation, reclassify it as flaky and handle in STEP 3 +2. **Read the first failing step** and its error message +3. **Evaluate: test bug or app bug?** Did we deliberately change something the test is catching? Would a real user see this as broken? If it is an app bug, report it and move on -- do not fix the test +4. **Check for global applicability.** Does the same fix apply to other tests? Apply globally first, then re-run diagnostics +5. **Fix the first step.** Make the minimal change. Do not touch later steps +6. **Re-run the individual test.** If it passes, move to the next failing test. If a new step fails, repeat from step 1 + +Update the diagnostic report after each fix. + +## STEP 3: Address Flaky Tests + +After permanently failing tests are fixed: + +1. Run each flaky test multiple times individually to confirm flakiness +2. Identify the root cause -- do not add arbitrary waits or timeouts +3. Review the E2E test rules in `.claude/rules/end-to-end-tests/` for known patterns +4. Fix the root cause. If the flakiness is caused by an application bug, report it to the user -- the application must be fixed +5.
Re-run the test multiple times to confirm it is now stable. Do not move on until it passes consistently + +## STEP 4: Expand to All Browsers -- Smoke Tests + +Run smoke tests across all browsers. Fix any browser-specific failures using the same one-at-a-time process from STEP 2. + +## STEP 5: Expand to All Tests in Chromium + +Run all tests (smoke + comprehensive, excluding slow) in Chromium. Fix any failures using the same process. + +## STEP 6: Expand to All Tests in All Browsers + +Run the full test suite across all browsers. Fix any remaining failures. + +## STEP 7: Final Validation + +Run the full test suite across all browsers one final time. Every test must pass. Zero failures, zero flaky tests. This skill is not complete until all tests pass consistently. + +Update `.workspace/{branch-name}/e2e-diagnostic.md` with: +- Final test counts: all must be passing +- Summary of changes made (test fixes and application fixes) +- Application bugs that were found and fixed + +## Key Rules + +- Run tests for the specific failing file, not the whole suite, when fixing individual tests +- Apply global fixes before re-running diagnostics +- Do not run all browsers until Chromium passes +- Only fix what is actually failing -- do not refactor passing tests +- Read `.claude/rules/end-to-end-tests/` before making changes +- If stuck after 3 fix attempts on the same test, escalate to the user +- Zero tolerance: this skill is not complete until every test passes in every browser diff --git a/.claude/skills/update-flaky-tests/SKILL.md b/.claude/skills/update-flaky-tests/SKILL.md deleted file mode 100644 index 0d8a32190..000000000 --- a/.claude/skills/update-flaky-tests/SKILL.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -name: update-flaky-tests -description: Update the flaky test tracker. Use when you encounter test failures unrelated to your current work, after committing a fix for a known flaky test, or to check flaky test status. 
-allowed-tools: Read, Write, Bash, Glob ---- - -# Update Flaky Tests - -Track and manage flaky E2E test observations over time. This skill helps systematically log test failures that are unrelated to the current work, preserving error artifacts for later analysis. - -## STEP 1: Load Database - -Read the flaky tests database from `.workspace/flaky-tests/flaky-tests.json`. - -If the file or folder doesn't exist: -1. Create the folder structure: `.workspace/flaky-tests/` and `.workspace/flaky-tests/artifacts/` -2. Initialize the database using the schema at `/.claude/skills/update-flaky-tests/flaky-tests-schema.json` -3. Create the main database file (`.workspace/flaky-tests/flaky-tests.json`): -```json -{ - "lastUpdated": "", - "active": [] -} -``` -4. Create an empty archive file (`.workspace/flaky-tests/flaky-tests-archived.json`): -```json -{ - "lastUpdated": "", - "active": [] -} -``` - -## STEP 2: Auto-Maintenance - -Perform automatic maintenance on every run: - -1. Find tests in `active` array with status `fix-applied` where `lastSeen` is more than 7 days ago -2. Move these tests to the archive file at `.workspace/flaky-tests/flaky-tests-archived.json` - - If archive file doesn't exist, create it with same structure: `{ "lastUpdated": "...", "active": [] }` - - Append tests to the archive's `active` array - - Remove tests from the main database's `active` array -3. Report any auto-archived tests: "Auto-archived X tests that have been stable for 7+ days: [test names]" - -## STEP 3: Determine Context - -Assess what action is needed based on your current context: - -| Context | Mode | -|---------|------| -| Just ran E2E tests with failures | **Log mode** | -| Just committed a fix for a known flaky test | **Fix mode** | -| Neither / standalone check | **Status mode** | - -## STEP 4: Execute Based on Mode - -### Log Mode (after test failures) - -For each test failure you observed: - -1. **Classify the failure**: - - Is it related to your current work? 
Skip it (fix it as part of your task) - - Is it unrelated (flaky)? Log it - -2. **For unrelated failures, check if already tracked**: - - Search `active` array for matching `testFile` + `testName` + `stepName` + `browser` - - If found: increment `observationCount`, update `lastSeen`, add new observation - - If not found: create new entry with status `observed` - -3. **Preserve error artifacts**: - - Find the error-context.md in `application/*/WebApp/tests/test-results/test-artifacts/` - - Create timestamped folder: `.workspace/flaky-tests/artifacts/{timestamp}-{testFile}-{browser}-{stepName}/` - - Copy error-context.md (and screenshots if present) to this folder - - Store relative path in observation's `artifactPath` field - -4. **Auto-promote status**: - - If `observationCount` >= 2, change status from `observed` to `confirmed` - -**Observation fields to populate**: -- `timestamp`: Current UTC timestamp (ISO 8601) -- `branch`: Current git branch -- `errorMessage`: The error message from the failure -- `artifactPath`: Relative path to preserved artifacts -- `observedBy`: Your agent type (qa-engineer, qa-reviewer, other) - -### Fix Mode (after committing a flaky test fix) - -1. Identify which flaky test was fixed (ask if unclear) -2. Find the test in the `active` array -3. Update the entry: - - Set `status` to `fix-applied` - - Populate the `fix` object: - - `appliedAt`: Current UTC timestamp - - `commitHash`: The commit hash of the fix - - `description`: Brief description of what was fixed - - `appliedBy`: Your agent type - -### Status Mode (standalone check) - -Read `/.claude/skills/update-flaky-tests/status-output-sample.md` first. Output status as a markdown table matching that format. Sort by Count descending. Omit Archived section if empty. End with legend line, nothing after. - -## STEP 5: Save Database - -1. Update `lastUpdated` to current UTC timestamp -2. Write the updated database to `.workspace/flaky-tests/flaky-tests.json` -3. 
Report changes made: - - "Added X new flaky test observations" - - "Updated X existing entries" - - "Marked X tests as fix-applied" - - "Auto-archived X resolved tests" - -## Key Rules - -**Only log tests you're confident are unrelated to your current work:** -- If the test fails in code you're changing, fix it - don't log it as flaky -- If you're unsure, err on the side of NOT logging - -**Preserve artifacts for comparison:** -- What looks like the same flaky test might have subtle differences -- Always copy the error-context.md when logging - -**Use local timestamps everywhere:** -- All `timestamp`, `lastSeen`, `appliedAt`, `lastUpdated` fields use local time -- Format: ISO 8601 without timezone suffix (e.g., `2026-01-14T14:30:00`) -- **Get current local time**: Run `date +"%Y-%m-%dT%H:%M:%S"` - never guess the time - -## Reference Files - -- **Schema**: `/.claude/skills/update-flaky-tests/flaky-tests-schema.json` -- **Sample database**: `/.claude/skills/update-flaky-tests/flaky-tests-sample.json` -- **Sample archive**: `/.claude/skills/update-flaky-tests/flaky-tests-archived-sample.json` -- **Sample status output**: `/.claude/skills/update-flaky-tests/status-output-sample.md` - -**Test entry structure** (unique key = testFile + testName + stepName + browser): -```json -{ - "testFile": "account/WebApp/tests/e2e/user-management-flows.spec.ts", - "testName": "should handle user invitation and deletion workflow", - "stepName": "Delete user & verify confirmation dialog closes", - "browser": "Firefox", - "errorPattern": "confirmation dialog still visible after close", - "status": "confirmed", - "observations": [...], - "lastSeen": "2026-01-14T10:30:00", - "observationCount": 3, - "fix": null, - "notes": "Timing issue with dialog close animation" -} -``` - -**Status lifecycle**: -``` -observed (1 observation) -> confirmed (2+ observations) -> fix-applied -> archived (7+ days stable) -``` diff --git a/.claude/skills/update-flaky-tests/flaky-tests-archived-sample.json 
b/.claude/skills/update-flaky-tests/flaky-tests-archived-sample.json deleted file mode 100644 index 410c125ef..000000000 --- a/.claude/skills/update-flaky-tests/flaky-tests-archived-sample.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "lastUpdated": "2026-01-14T14:30:00", - "active": [ - { - "testFile": "account/WebApp/tests/e2e/login-flows.spec.ts", - "testName": "should complete login with OTP verification", - "stepName": "Enter verification code & verify redirect to dashboard", - "browser": "Firefox", - "errorPattern": "keyboard.type drops characters", - "status": "fix-applied", - "observations": [ - { - "timestamp": "2026-01-05T11:00:00", - "branch": "main", - "errorMessage": "keyboard.type failed to enter all characters", - "artifactPath": "2026-01-05T11-00-00Z-login-flows-enter-verification-code/", - "observedBy": "qa-engineer" - } - ], - "lastSeen": "2026-01-05T11:00:00", - "observationCount": 1, - "fix": { - "appliedAt": "2026-01-06T10:00:00", - "commitHash": "d6b6b25b5", - "description": "Fix OTP typing reliability in Firefox during parallel test execution", - "appliedBy": "qa-engineer" - }, - "notes": "Stable for 7+ days after fix" - } - ] -} diff --git a/.claude/skills/update-flaky-tests/flaky-tests-sample.json b/.claude/skills/update-flaky-tests/flaky-tests-sample.json deleted file mode 100644 index 0f1b37944..000000000 --- a/.claude/skills/update-flaky-tests/flaky-tests-sample.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "lastUpdated": "2026-01-14T14:30:00", - "active": [ - { - "testFile": "account/WebApp/tests/e2e/login-flows.spec.ts", - "testName": "should complete login with OTP verification", - "stepName": "Click login button & wait for navigation", - "browser": "Firefox", - "errorPattern": "button click timeout", - "status": "observed", - "observations": [ - { - "timestamp": "2026-01-14T10:00:00", - "branch": "pp-765-stabilize-flaky-e2e-tests", - "errorMessage": "Timeout waiting for button to be clickable after 5000ms", - "artifactPath": 
"2026-01-14T10-00-00-login-flows-click-login/", - "observedBy": "qa-engineer" - } - ], - "lastSeen": "2026-01-14T10:00:00", - "observationCount": 1, - "fix": null, - "notes": null - }, - { - "testFile": "account/WebApp/tests/e2e/user-management-flows.spec.ts", - "testName": "should handle user invitation and deletion workflow", - "stepName": "Delete user & verify confirmation dialog closes", - "browser": "Firefox", - "errorPattern": "confirmation dialog still visible after close", - "status": "confirmed", - "observations": [ - { - "timestamp": "2026-01-13T15:00:00", - "branch": "pp-765-stabilize-flaky-e2e-tests", - "errorMessage": "Expected element to not be visible but it was still present after 5000ms", - "artifactPath": "2026-01-13T15-00-00Z-user-management-flows-delete-user/", - "observedBy": "qa-engineer" - }, - { - "timestamp": "2026-01-14T10:30:00", - "branch": "pp-765-stabilize-flaky-e2e-tests", - "errorMessage": "Expected element to not be visible but it was still present after 5000ms", - "artifactPath": "2026-01-14T10-30-00Z-user-management-flows-delete-user/", - "observedBy": "qa-engineer" - } - ], - "lastSeen": "2026-01-14T10:30:00", - "observationCount": 2, - "fix": null, - "notes": "Timing issue with dialog close animation in Firefox" - }, - { - "testFile": "account/WebApp/tests/e2e/global-ui-flows.spec.ts", - "testName": "should handle theme switching and navigation", - "stepName": "Navigate to admin dashboard & verify welcome heading", - "browser": "WebKit", - "errorPattern": "heading not visible after navigation", - "status": "fix-applied", - "observations": [ - { - "timestamp": "2026-01-12T09:15:00", - "branch": "main", - "errorMessage": "Timeout waiting for heading 'Welcome home' to be visible", - "artifactPath": "2026-01-12T09-15-00Z-global-ui-flows-navigate-dashboard/", - "observedBy": "qa-reviewer" - } - ], - "lastSeen": "2026-01-12T09:15:00", - "observationCount": 1, - "fix": { - "appliedAt": "2026-01-13T14:00:00", - "commitHash": 
"abc123def", - "description": "Added browser-specific auth state paths to fix cross-browser test isolation", - "appliedBy": "qa-engineer" - }, - "notes": "WebKit ownerPage fixture was not properly isolated between tests" - } - ] -} diff --git a/.claude/skills/update-flaky-tests/flaky-tests-schema.json b/.claude/skills/update-flaky-tests/flaky-tests-schema.json deleted file mode 100644 index f2a36bc67..000000000 --- a/.claude/skills/update-flaky-tests/flaky-tests-schema.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Flaky Tests Database", - "description": "Schema for tracking flaky E2E tests over time", - "type": "object", - "required": ["lastUpdated", "active"], - "properties": { - "lastUpdated": { - "type": "string", - "format": "date-time", - "description": "UTC timestamp of last database update" - }, - "active": { - "type": "array", - "description": "Currently tracked flaky tests", - "items": { - "$ref": "#/$defs/flakyTest" - } - } - }, - "$defs": { - "flakyTest": { - "type": "object", - "required": ["testFile", "testName", "stepName", "browser", "status", "lastSeen", "observationCount", "observations"], - "description": "Unique key is testFile + testName + stepName + browser. 
Each browser gets its own entry.", - "properties": { - "testFile": { - "type": "string", - "description": "Test file path including self-contained system (e.g., account/WebApp/tests/e2e/user-management-flows.spec.ts)" - }, - "testName": { - "type": "string", - "description": "Full test name from the test description" - }, - "stepName": { - "type": "string", - "description": "The step where the failure occurred" - }, - "browser": { - "type": "string", - "enum": ["Chromium", "Firefox", "WebKit"], - "description": "Browser where the failure was observed" - }, - "errorPattern": { - "type": "string", - "description": "Common error message pattern for this flaky test" - }, - "status": { - "type": "string", - "enum": ["observed", "confirmed", "fix-applied"], - "description": "Current status in the flaky test lifecycle" - }, - "observations": { - "type": "array", - "description": "Individual failure observations", - "items": { - "$ref": "#/$defs/observation" - } - }, - "lastSeen": { - "type": "string", - "format": "date-time", - "description": "UTC timestamp of most recent observation" - }, - "observationCount": { - "type": "integer", - "minimum": 1, - "description": "Total number of times this test has been observed failing" - }, - "fix": { - "oneOf": [ - { "type": "null" }, - { "$ref": "#/$defs/fix" } - ], - "description": "Fix information if a fix has been applied" - }, - "notes": { - "type": "string", - "description": "Optional notes about the flaky test (suspected cause, workarounds, etc.)" - } - } - }, - "observation": { - "type": "object", - "required": ["timestamp", "branch", "errorMessage", "observedBy"], - "properties": { - "timestamp": { - "type": "string", - "format": "date-time", - "description": "UTC timestamp when observed" - }, - "branch": { - "type": "string", - "description": "Git branch where failure was observed" - }, - "errorMessage": { - "type": "string", - "description": "The actual error message from the test failure" - }, - "artifactPath": { - 
"type": "string", - "description": "Relative path to preserved error artifacts in .workspace/flaky-tests/artifacts/" - }, - "observedBy": { - "type": "string", - "enum": ["qa-engineer", "qa-reviewer", "other"], - "description": "Agent type that observed this failure" - } - } - }, - "fix": { - "type": "object", - "required": ["appliedAt", "commitHash", "appliedBy"], - "properties": { - "appliedAt": { - "type": "string", - "format": "date-time", - "description": "UTC timestamp when fix was applied" - }, - "commitHash": { - "type": "string", - "description": "Git commit hash containing the fix" - }, - "description": { - "type": "string", - "description": "Brief description of what was fixed" - }, - "appliedBy": { - "type": "string", - "description": "Agent type that applied the fix" - } - } - } - } -} diff --git a/.claude/skills/update-flaky-tests/status-output-sample.md b/.claude/skills/update-flaky-tests/status-output-sample.md deleted file mode 100644 index 398aed1af..000000000 --- a/.claude/skills/update-flaky-tests/status-output-sample.md +++ /dev/null @@ -1,19 +0,0 @@ -# Flaky Test Tracker - -## Active (3) - -| Status | Test | Browser | Count | Last Seen | -|--------|------|---------|-------|-----------| -| ๐ŸŸก | user-management-flows.spec.ts | Firefox | 2 | Jan 14 10:30 | -| ๐Ÿ”ด | login-flows.spec.ts | Firefox | 1 | Jan 14 10:00 | -| ๐ŸŸข | global-ui-flows.spec.ts | WebKit | 1 | Jan 12 09:15 | - -## Archived (1) - -| Test | Browser | Fixed | Stable Since | -|------|---------|-------|--------------| -| login-flows.spec.ts | Firefox | Jan 6 | Jan 13 | - ---- - -๐Ÿ”ด Observed ยท ๐ŸŸก Confirmed ยท ๐ŸŸข Fix Applied From 02cb817a51a25ef46a438cad15ba120a690b07f7 Mon Sep 17 00:00:00 2001 From: Thomas Jespersen Date: Thu, 2 Apr 2026 19:49:48 +0200 Subject: [PATCH 16/16] Skip subscription tests when Stripe is disabled and remove billing navigation steps --- .../WebApp/tests/e2e/subscription-flows.spec.ts | 10 ++++++++++ .../tests/e2e/federated-navigation-flows.spec.ts 
| 14 -------------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/application/account/WebApp/tests/e2e/subscription-flows.spec.ts b/application/account/WebApp/tests/e2e/subscription-flows.spec.ts index e0561b3f0..3672288ff 100644 --- a/application/account/WebApp/tests/e2e/subscription-flows.spec.ts +++ b/application/account/WebApp/tests/e2e/subscription-flows.spec.ts @@ -3,6 +3,16 @@ import { test } from "@shared/e2e/fixtures/page-auth"; import { createTestContext, expectToastMessage } from "@shared/e2e/utils/test-assertions"; import { step } from "@shared/e2e/utils/test-step-wrapper"; +test.beforeEach(async ({ ownerPage }) => { + await ownerPage.goto("/account"); + const isSubscriptionEnabled = await ownerPage.evaluate(() => { + const meta = document.head.querySelector('meta[name="runtimeEnv"]'); + const runtimeEnv = JSON.parse(meta?.getAttribute("content") ?? "{}"); + return runtimeEnv.PUBLIC_SUBSCRIPTION_ENABLED === "true"; + }); + test.skip(!isSubscriptionEnabled, "Subscriptions are not enabled (Stripe not configured)"); +}); + test.describe("@smoke", () => { /** * SUBSCRIPTION MANAGEMENT E2E TEST diff --git a/application/main/WebApp/tests/e2e/federated-navigation-flows.spec.ts b/application/main/WebApp/tests/e2e/federated-navigation-flows.spec.ts index 9a692ad22..256eb9580 100644 --- a/application/main/WebApp/tests/e2e/federated-navigation-flows.spec.ts +++ b/application/main/WebApp/tests/e2e/federated-navigation-flows.spec.ts @@ -79,13 +79,6 @@ test.describe("@smoke", () => { await expect(page.getByRole("heading", { name: "Users" })).toBeVisible(); })(); - await step("Navigate to billing page & verify billing page renders")(async () => { - await page.getByRole("link", { name: "Billing", exact: true }).click(); - - await expect(page).toHaveURL("/account/billing"); - await expect(page.getByRole("heading", { name: "Billing" })).toBeVisible(); - })(); - // === PROFILE AND SESSIONS (ALL USERS) === await step("Navigate to profile page & verify 
profile page renders")(async () => { await page.getByRole("link", { name: "User profile", exact: true }).click(); @@ -197,13 +190,6 @@ test.describe("@smoke", () => { await expect(page.getByRole("heading", { name: "Sessions" })).toBeVisible(); })(); - await step("Navigate to billing as member & verify access denied")(async () => { - await page.goto("/account/billing"); - - await expect(page).toHaveURL("/account/billing"); - await expect(page.getByRole("heading", { name: "Access denied" })).toBeVisible(); - })(); - await step("Navigate to recycle-bin as member & verify access denied page")(async () => { await page.goto("/account/users/recycle-bin");