From a524ee11ebd170437a5486e66701eda502d90756 Mon Sep 17 00:00:00 2001
From: It Apilium
Date: Wed, 11 Mar 2026 19:32:15 +0100
Subject: [PATCH 1/4] =?UTF-8?q?feat:=20v0.1.14=20=E2=80=94=20intelligent?=
=?UTF-8?q?=20routing,=20code=20transforms,=20governance,=20and=20cross-pl?=
=?UTF-8?q?atform=20coordination?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Phase 1 — Intelligent Routing:
- Eruberu: Q-Learning model router with contextual bandit exploration
- Miteru: task-to-agent routing with capability scoring
- Kimeru: multi-strategy consensus engine (majority, weighted, arbitrate,
pbft-local, leader-score)
- Tomeru: rate limiting, loop detection, and emergency stop guard
- Token economy: response cache, budget bridge, cost tracking
Phase 2 — Competitive Parity:
- Hayameru: deterministic code transforms (var-to-const, remove-console,
sort-imports, add-semicolons, remove-comments) via before_agent_run hook
- Osameru: governance control plane with HMAC-chained audit trail,
policy compilation from MAYROS.md, trust tiers with persistence
- Kakeru: dual-platform bridge with file locking and workflow coordination
- Vector search client wired to Cortex HNSW endpoints
Hardening (22 fixes):
- Security: timing-safe HMAC comparison, path traversal validation
- Data integrity: promise-based mutex for audit trail, ENOSPC handling,
atomic file writes with backup
- Transforms: escaped quote handling, side-effect imports, destructuring
detection, string-aware paren counting, expanded skip patterns
- Honesty: file-size-based token estimates, renamed consensus strategies
to reflect local simulation, honest bridge stubs, git-diff file detection
- Governance: require-approval enforcement, dynamic trust scoring,
dead code removal, policy file size limits
Bump REQUIRED_CORTEX_VERSION to 0.4.2 for HNSW vector search support.
---
.github/workflows/release.yml | 40 ++
CHANGELOG.md | 183 +++++++
README.md | 8 +
.../agent-mesh/byzantine-validator.test.ts | 133 +++++
extensions/agent-mesh/byzantine-validator.ts | 216 ++++++++
extensions/agent-mesh/config.ts | 130 ++++-
.../agent-mesh/consensus-engine.test.ts | 95 ++++
extensions/agent-mesh/consensus-engine.ts | 392 ++++++++++++++
extensions/agent-mesh/index.ts | 228 ++++++++
extensions/agent-mesh/mesh-protocol.ts | 18 +-
extensions/agent-mesh/package.json | 2 +-
.../agent-mesh/performance-tracker.test.ts | 97 ++++
extensions/agent-mesh/performance-tracker.ts | 192 +++++++
extensions/agent-mesh/raft-leader.test.ts | 82 +++
extensions/agent-mesh/raft-leader.ts | 130 +++++
extensions/agent-mesh/task-router.test.ts | 82 +++
extensions/agent-mesh/task-router.ts | 387 ++++++++++++++
.../agent-mesh/workflow-orchestrator.ts | 123 ++++-
extensions/agent-mesh/workflows/types.ts | 22 +
extensions/analytics/package.json | 2 +-
extensions/bash-sandbox/package.json | 2 +-
extensions/bluebubbles/package.json | 2 +-
extensions/ci-plugin/package.json | 2 +-
extensions/code-indexer/package.json | 2 +-
extensions/code-tools/package.json | 2 +-
extensions/copilot-proxy/package.json | 2 +-
extensions/cortex-sync/package.json | 2 +-
extensions/diagnostics-otel/package.json | 2 +-
extensions/discord/package.json | 2 +-
extensions/eruberu/config.ts | 179 +++++++
extensions/eruberu/cortex-persistence.ts | 125 +++++
extensions/eruberu/index.ts | 487 ++++++++++++++++++
extensions/eruberu/mayros.plugin.json | 26 +
extensions/eruberu/package.json | 18 +
extensions/eruberu/q-learning.test.ts | 131 +++++
extensions/eruberu/q-learning.ts | 201 ++++++++
extensions/eruberu/task-classifier.test.ts | 53 ++
extensions/eruberu/task-classifier.ts | 133 +++++
extensions/feishu/package.json | 2 +-
.../google-antigravity-auth/package.json | 2 +-
.../google-gemini-cli-auth/package.json | 2 +-
extensions/googlechat/package.json | 2 +-
extensions/hayameru/atomic-write.test.ts | 78 +++
extensions/hayameru/config.ts | 79 +++
extensions/hayameru/index.ts | 198 +++++++
extensions/hayameru/intent-detector.test.ts | 41 ++
extensions/hayameru/intent-detector.ts | 144 ++++++
extensions/hayameru/mayros.plugin.json | 28 +
extensions/hayameru/metrics.ts | 65 +++
extensions/hayameru/package.json | 18 +
extensions/hayameru/path-safety.test.ts | 41 ++
.../transforms/add-semicolons.test.ts | 111 ++++
.../hayameru/transforms/add-semicolons.ts | 55 ++
extensions/hayameru/transforms/index.ts | 34 ++
.../transforms/remove-comments.test.ts | 74 +++
.../hayameru/transforms/remove-comments.ts | 118 +++++
.../transforms/remove-console.test.ts | 71 +++
.../hayameru/transforms/remove-console.ts | 77 +++
.../hayameru/transforms/sort-imports.test.ts | 113 ++++
.../hayameru/transforms/sort-imports.ts | 164 ++++++
.../hayameru/transforms/var-to-const.test.ts | 70 +++
.../hayameru/transforms/var-to-const.ts | 90 ++++
extensions/imessage/package.json | 2 +-
.../interactive-permissions/package.json | 2 +-
extensions/iot-bridge/package.json | 2 +-
extensions/irc/package.json | 2 +-
.../kakeru-bridge/bridges/claude-bridge.ts | 61 +++
.../bridges/codex-bridge.test.ts | 48 ++
.../kakeru-bridge/bridges/codex-bridge.ts | 181 +++++++
extensions/kakeru-bridge/config.ts | 61 +++
extensions/kakeru-bridge/coordinator.test.ts | 62 +++
extensions/kakeru-bridge/coordinator.ts | 132 +++++
extensions/kakeru-bridge/index.ts | 204 ++++++++
extensions/kakeru-bridge/mayros.plugin.json | 21 +
extensions/kakeru-bridge/package.json | 18 +
extensions/kakeru-bridge/platform-bridge.ts | 36 ++
extensions/line/package.json | 2 +-
extensions/llm-hooks/package.json | 2 +-
extensions/llm-task/package.json | 2 +-
extensions/lobster/package.json | 2 +-
extensions/lsp-bridge/package.json | 2 +-
extensions/matrix/CHANGELOG.md | 6 +
extensions/matrix/package.json | 2 +-
extensions/mattermost/package.json | 2 +-
extensions/mcp-client/package.json | 2 +-
extensions/mcp-server/package.json | 2 +-
extensions/memory-core/package.json | 2 +-
extensions/memory-lancedb/package.json | 2 +-
extensions/memory-semantic/ineru-client.ts | 25 +
extensions/memory-semantic/package.json | 2 +-
extensions/minimax-portal-auth/package.json | 2 +-
extensions/msteams/CHANGELOG.md | 6 +
extensions/msteams/package.json | 2 +-
extensions/nextcloud-talk/package.json | 2 +-
extensions/nostr/CHANGELOG.md | 6 +
extensions/nostr/package.json | 2 +-
extensions/open-prose/package.json | 2 +-
.../osameru-governance/audit-trail.test.ts | 126 +++++
extensions/osameru-governance/audit-trail.ts | 181 +++++++
extensions/osameru-governance/config.ts | 85 +++
.../enforcement-gate.test.ts | 95 ++++
.../osameru-governance/enforcement-gate.ts | 107 ++++
extensions/osameru-governance/index.ts | 274 ++++++++++
.../osameru-governance/mayros.plugin.json | 25 +
extensions/osameru-governance/package.json | 18 +
.../policy-compiler.test.ts | 45 ++
.../osameru-governance/policy-compiler.ts | 217 ++++++++
.../osameru-governance/trust-tiers.test.ts | 41 ++
extensions/osameru-governance/trust-tiers.ts | 104 ++++
.../semantic-observability/package.json | 2 +-
extensions/semantic-skills/package.json | 2 +-
extensions/shared/budget-bridge.ts | 36 ++
extensions/shared/cortex-client.ts | 25 +
extensions/shared/cortex-version.ts | 2 +-
extensions/signal/package.json | 2 +-
extensions/skill-hub/package.json | 2 +-
extensions/slack/package.json | 2 +-
extensions/telegram/package.json | 2 +-
extensions/tlon/package.json | 2 +-
extensions/token-economy/budget-tracker.ts | 10 +
extensions/token-economy/config.ts | 23 +
extensions/token-economy/index.ts | 90 +++-
extensions/token-economy/package.json | 2 +-
extensions/token-economy/response-cache.ts | 126 +++++
extensions/tomeru-guard/config.ts | 154 ++++++
extensions/tomeru-guard/index.ts | 284 ++++++++++
extensions/tomeru-guard/loop-breaker.test.ts | 130 +++++
extensions/tomeru-guard/loop-breaker.ts | 196 +++++++
extensions/tomeru-guard/mayros.plugin.json | 33 ++
extensions/tomeru-guard/package.json | 18 +
extensions/tomeru-guard/rate-limiter.test.ts | 117 +++++
extensions/tomeru-guard/rate-limiter.ts | 197 +++++++
extensions/twitch/CHANGELOG.md | 6 +
extensions/twitch/package.json | 2 +-
extensions/voice-call/CHANGELOG.md | 6 +
extensions/voice-call/package.json | 2 +-
extensions/whatsapp/package.json | 2 +-
extensions/zalo/CHANGELOG.md | 6 +
extensions/zalo/package.json | 2 +-
extensions/zalouser/CHANGELOG.md | 6 +
extensions/zalouser/package.json | 2 +-
package.json | 2 +-
src/agents/pi-embedded-runner/run.ts | 32 ++
src/cli/headless-cli.test.ts | 12 +
src/infra/ensure-services.test.ts | 14 +-
src/plugins/hooks.ts | 44 ++
src/plugins/types.ts | 18 +
src/routing/model-router.ts | 55 ++
148 files changed, 9421 insertions(+), 84 deletions(-)
create mode 100644 .github/workflows/release.yml
create mode 100644 extensions/agent-mesh/byzantine-validator.test.ts
create mode 100644 extensions/agent-mesh/byzantine-validator.ts
create mode 100644 extensions/agent-mesh/consensus-engine.test.ts
create mode 100644 extensions/agent-mesh/consensus-engine.ts
create mode 100644 extensions/agent-mesh/performance-tracker.test.ts
create mode 100644 extensions/agent-mesh/performance-tracker.ts
create mode 100644 extensions/agent-mesh/raft-leader.test.ts
create mode 100644 extensions/agent-mesh/raft-leader.ts
create mode 100644 extensions/agent-mesh/task-router.test.ts
create mode 100644 extensions/agent-mesh/task-router.ts
create mode 100644 extensions/eruberu/config.ts
create mode 100644 extensions/eruberu/cortex-persistence.ts
create mode 100644 extensions/eruberu/index.ts
create mode 100644 extensions/eruberu/mayros.plugin.json
create mode 100644 extensions/eruberu/package.json
create mode 100644 extensions/eruberu/q-learning.test.ts
create mode 100644 extensions/eruberu/q-learning.ts
create mode 100644 extensions/eruberu/task-classifier.test.ts
create mode 100644 extensions/eruberu/task-classifier.ts
create mode 100644 extensions/hayameru/atomic-write.test.ts
create mode 100644 extensions/hayameru/config.ts
create mode 100644 extensions/hayameru/index.ts
create mode 100644 extensions/hayameru/intent-detector.test.ts
create mode 100644 extensions/hayameru/intent-detector.ts
create mode 100644 extensions/hayameru/mayros.plugin.json
create mode 100644 extensions/hayameru/metrics.ts
create mode 100644 extensions/hayameru/package.json
create mode 100644 extensions/hayameru/path-safety.test.ts
create mode 100644 extensions/hayameru/transforms/add-semicolons.test.ts
create mode 100644 extensions/hayameru/transforms/add-semicolons.ts
create mode 100644 extensions/hayameru/transforms/index.ts
create mode 100644 extensions/hayameru/transforms/remove-comments.test.ts
create mode 100644 extensions/hayameru/transforms/remove-comments.ts
create mode 100644 extensions/hayameru/transforms/remove-console.test.ts
create mode 100644 extensions/hayameru/transforms/remove-console.ts
create mode 100644 extensions/hayameru/transforms/sort-imports.test.ts
create mode 100644 extensions/hayameru/transforms/sort-imports.ts
create mode 100644 extensions/hayameru/transforms/var-to-const.test.ts
create mode 100644 extensions/hayameru/transforms/var-to-const.ts
create mode 100644 extensions/kakeru-bridge/bridges/claude-bridge.ts
create mode 100644 extensions/kakeru-bridge/bridges/codex-bridge.test.ts
create mode 100644 extensions/kakeru-bridge/bridges/codex-bridge.ts
create mode 100644 extensions/kakeru-bridge/config.ts
create mode 100644 extensions/kakeru-bridge/coordinator.test.ts
create mode 100644 extensions/kakeru-bridge/coordinator.ts
create mode 100644 extensions/kakeru-bridge/index.ts
create mode 100644 extensions/kakeru-bridge/mayros.plugin.json
create mode 100644 extensions/kakeru-bridge/package.json
create mode 100644 extensions/kakeru-bridge/platform-bridge.ts
create mode 100644 extensions/osameru-governance/audit-trail.test.ts
create mode 100644 extensions/osameru-governance/audit-trail.ts
create mode 100644 extensions/osameru-governance/config.ts
create mode 100644 extensions/osameru-governance/enforcement-gate.test.ts
create mode 100644 extensions/osameru-governance/enforcement-gate.ts
create mode 100644 extensions/osameru-governance/index.ts
create mode 100644 extensions/osameru-governance/mayros.plugin.json
create mode 100644 extensions/osameru-governance/package.json
create mode 100644 extensions/osameru-governance/policy-compiler.test.ts
create mode 100644 extensions/osameru-governance/policy-compiler.ts
create mode 100644 extensions/osameru-governance/trust-tiers.test.ts
create mode 100644 extensions/osameru-governance/trust-tiers.ts
create mode 100644 extensions/shared/budget-bridge.ts
create mode 100644 extensions/token-economy/response-cache.ts
create mode 100644 extensions/tomeru-guard/config.ts
create mode 100644 extensions/tomeru-guard/index.ts
create mode 100644 extensions/tomeru-guard/loop-breaker.test.ts
create mode 100644 extensions/tomeru-guard/loop-breaker.ts
create mode 100644 extensions/tomeru-guard/mayros.plugin.json
create mode 100644 extensions/tomeru-guard/package.json
create mode 100644 extensions/tomeru-guard/rate-limiter.test.ts
create mode 100644 extensions/tomeru-guard/rate-limiter.ts
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000..93c21a16
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,40 @@
+name: Release
+
+on:
+ push:
+ tags:
+ - "v*"
+
+permissions:
+ contents: write
+
+jobs:
+ create-release:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Extract version from tag
+ id: version
+ run: echo "version=${GITHUB_REF_NAME#v}" >> "$GITHUB_OUTPUT"
+
+ - name: Extract changelog section
+ id: changelog
+ run: |
+ version="${{ steps.version.outputs.version }}"
+ body=$(awk "/^## ${version//./\\.} /{found=1; next} /^## [0-9]/{if(found) exit} found{print}" CHANGELOG.md)
+ {
+ echo "body<<CHANGELOG_EOF"
+ echo "$body"
+ echo "CHANGELOG_EOF"
+ } >> "$GITHUB_OUTPUT"
+
+ - name: Create GitHub Release
+ uses: softprops/action-gh-release@v2
+ with:
+ name: "Mayros ${{ github.ref_name }}"
+ body: ${{ steps.changelog.outputs.body }}
+ draft: false
+ prerelease: ${{ contains(github.ref_name, '-') }}
+ generate_release_notes: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a12e90bf..4c05c624 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,189 @@ Product: https://apilium.com/us/products/mayros
Download: https://mayros.apilium.com
Docs: https://apilium.com/us/doc/mayros
+## 0.1.14 (2026-03-11)
+
+Intelligent routing, multi-agent consensus, and execution safety.
+
+### Eruberu — Adaptive Model Routing
+
+- Q-Learning model selector: learns optimal provider/model per task type, budget level, and time slot
+- Budget-driven fallback: auto-switches to cheaper models when budget exceeds configurable thresholds
+- Task classifier: keyword-based prompt classification (code, chat, analysis, creative)
+- Cortex persistence: Q-table stored as RDF triples with JSON file fallback
+- Integrates via `before_model_resolve` hook — zero changes to core execution path
+- New tools: `routing_status`, `routing_set_strategy`
+- New CLI: `mayros routing status|strategy|reset`
+
+### Miteru — Intelligent Task-to-Agent Routing
+
+- Q-Learning agent selector: learns which agent handles each task type best
+- Task classification by type, complexity, and language domain
+- Performance tracker: EMA-based agent scoring with Cortex persistence
+- Integrated into workflow orchestrator as optional routing layer
+- New tool: `mesh_route_task`
+
+### Kimeru — Multi-Agent Consensus
+
+- Three consensus strategies: majority vote, weighted (by EMA score), LLM-arbitrated
+- Automatic conflict resolution when parallel agents produce conflicting results
+- Confidence scoring and detailed vote breakdown
+- New tools: `mesh_agent_performance`, `mesh_consensus`
+
+### Tomeru — Rate Limiting & Loop Breaking
+
+- Sliding window rate limiter: per-tool call limits with configurable windows
+- Global token bucket: burst protection across all tools
+- Loop breaker: SHA256-based identical-call sequence detection
+- Velocity circuit breaker: hard block on runaway execution
+- Configurable modes: enforce, warn, off
+- New tools: `rate_limit_status`, `rate_limit_adjust`
+- New CLI: `mayros ratelimit status|adjust|reset`
+
+### Token Economy Enhancements
+
+- Response cache (Oboeru): LRU cache with TTL for observational response deduplication
+- Budget bridge: Symbol-based cross-plugin bridge exposes BudgetTracker to routing subsystems
+- Cache savings tracking in budget summaries
+
+### Model Router
+
+- `buildFromPricingCatalog()`: construct router from token-economy pricing catalog
+- `routeWithBudget()`: budget-aware routing that filters by remaining spend
+
+### Infrastructure
+
+- 55 extensions synced at v0.1.14
+- 55 new tests across 7 test files (Q-Learning, task classification, routing, performance tracking, consensus, rate limiting, loop breaking)
+- Auto-release workflow: GitHub Releases created automatically on version tags
+
+## 0.1.13 (2026-03-08)
+
+Fix plugin loading, headless mode, and postinstall reliability.
+
+- Fix gateway health check in headless mode
+- Add postinstall retry logic for flaky network environments
+- Include `src/` in npm package for extension runtime imports
+
+## 0.1.12 (2026-03-07)
+
+Auto-install gateway daemon on first run.
+
+- Auto-install gateway daemon service on first run
+- Fix duplicate `resolveGatewayPort` call in ensure-services
+
+## 0.1.11 (2026-03-06)
+
+Auto-update outdated Cortex binary.
+
+- Auto-update outdated Cortex binary on sidecar start
+- Require Cortex >= 0.4.1
+
+## 0.1.10 (2026-03-05)
+
+Persistent Cortex storage and sidecar hardening.
+
+- Persistent Cortex storage via Sled backend (`~/.mayros/cortex-data/`)
+- Lifecycle callback registry for flush-before-update flow
+- Graceful sidecar restart with binary update
+- Lock file reclaim on sidecar auto-restart
+- Drain timeout and external Cortex detection fixes
+- Complete sidecar lifecycle hardening (10 gaps)
+- Hide internal instructions from slash command display
+
+## 0.1.9 (2026-03-04)
+
+Ineru rename and Cortex 0.4.0.
+
+- Rename Titans memory client to Ineru across all modules
+- Require Cortex >= 0.4.0
+
+## 0.1.8 (2026-03-03)
+
+P2P sync and enhanced Cortex networking.
+
+- Native P2P sync mode with pairing, gossip, and status CLI
+- Dual sync mode bridge: native P2P with polled fallback
+- P2P config, CortexClient P2P methods, and sidecar flag forwarding
+- Require Cortex >= 0.3.8
+
+## 0.1.7 (2026-03-02)
+
+Scoped package and update runner fix.
+
+- Fix update-runner for scoped package name (`@apilium/mayros`)
+
+## 0.1.6 (2026-03-01)
+
+Cortex auto-start, resilience, and TUI improvements.
+
+- Auto-start gateway and Cortex before TUI
+- Cortex CLI commands, gateway methods, and TUI view
+- Cortex auto-restart with resilience monitor
+- Change default Cortex port from 8080 to 19090
+- Enable semantic ecosystem plugins by default
+- Zero-config semantic plugin startup
+- `/kg` handler with tool fallback and diagnostic hints
+- `/mouse` toggle for native text selection
+- Dynamic VERSION in TUI welcome screen
+- Pixel avatar art banner
+- Adapt sidecar for Cortex 0.3.7
+
+## 0.1.5 (2026-02-28)
+
+Stability, resource cleanup, and IDE plugin hardening.
+
+- Clear feedNext timer chain on PTY exit
+- Replace eval with array-based command execution in mayroslog.sh
+- Nullable TeamManager parameters with runtime guards
+- Headless CLI timeout cleanup and prototype pollution guard
+- Cap trace events at 5000 entries
+- EventEmitter dispose in tree providers
+- WebView message listener lifecycle management
+- JetBrains plugin: daemon threads, error logging, panel disposal
+- Thread-safe stream buffering (StringBuilder → StringBuffer)
+- Synchronized disconnect to prevent race conditions
+- Migrate gateway token to IntelliJ PasswordSafe
+- Per-request cache key map for token-economy concurrency
+
+## 0.1.4 (2026-02-27)
+
+IDE extensions, CLI evolution, and security updates.
+
+- VSCode extension: context menu actions, gutter markers, protocol v3
+- JetBrains plugin: unified tabbed panel, Skills/Plan/KG views, protocol v3
+- Welcome screen, image paste, and onboarding UX
+- Heartbeat filtering, interactive selectors, and command cleanup
+- Bump hono, @hono/node-server, and dompurify for security fixes
+- Fix timer leak in sync timeout, stats filter, and panel disposal
+
+## 0.1.3 (2026-02-26)
+
+CI fixes and plugin loading.
+
+- Fix CI: skip Android playstore without keystore
+- Strip `mayros-` prefix from plugin entry hints to match manifest IDs
+
+## 0.1.2 (2026-02-26)
+
+Post-launch fixes and dependency updates.
+
+- Fix 15 test files for vitest 4.x mock hoisting compatibility
+- Fix 15 broken internal links in docs
+- Fix README links
+- Update plugin SDK exports and rename legacy plist references
+- Update extensions: zod v4, observability route API, esbuild import
+- Update CI workflow and build scripts
+- Platform app updates: macOS, iOS, Android
+
+## 0.1.1 (2026-02-25)
+
+Skills Hub launch.
+
+- Add 8 official Apilium skills with Ed25519 signatures
+- Platinum skill structure and documentation
+- Add markdownlint configuration
+
## 0.1.0 (2026-02-25)
First public release of Mayros — personal AI assistant platform.
diff --git a/README.md b/README.md
index a2710196..7f992861 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,14 @@
+
+
+
+
+
+
+
+
Product · Download · Docs · Getting Started · Vision · Discord
diff --git a/extensions/agent-mesh/byzantine-validator.test.ts b/extensions/agent-mesh/byzantine-validator.test.ts
new file mode 100644
index 00000000..0c2447eb
--- /dev/null
+++ b/extensions/agent-mesh/byzantine-validator.test.ts
@@ -0,0 +1,133 @@
+import { describe, it, expect } from "vitest";
+import { ByzantineValidator } from "./byzantine-validator.js";
+
+describe("ByzantineValidator", () => {
+ it("requires at least 4 agents for byzantine consensus", () => {
+ const bv = new ByzantineValidator();
+ expect(bv.canRunByzantine(3)).toBe(false);
+ expect(bv.canRunByzantine(4)).toBe(true);
+ expect(bv.canRunByzantine(7)).toBe(true);
+ });
+
+ it("generates session keys and signs votes", () => {
+ const bv = new ByzantineValidator();
+ const key = bv.generateSessionKey("agent-a");
+ expect(key.agentId).toBe("agent-a");
+ expect(key.key.length).toBe(32);
+
+ const vote = bv.signVote("agent-a", "value-x");
+ expect(vote.agentId).toBe("agent-a");
+ expect(vote.value).toBe("value-x");
+ expect(vote.signature).toBeTruthy();
+ });
+
+ it("verifies valid votes", () => {
+ const bv = new ByzantineValidator();
+ bv.generateSessionKey("agent-a");
+ const vote = bv.signVote("agent-a", "value-x");
+ expect(bv.verifyVote(vote)).toBe(true);
+ });
+
+ it("rejects tampered votes", () => {
+ const bv = new ByzantineValidator();
+ bv.generateSessionKey("agent-a");
+ const vote = bv.signVote("agent-a", "value-x");
+ vote.value = "tampered";
+ expect(bv.verifyVote(vote)).toBe(false);
+ });
+
+ it("rejects votes from unknown agents", () => {
+ const bv = new ByzantineValidator();
+ const vote = {
+ agentId: "unknown",
+ value: "x",
+ timestamp: Date.now(),
+ signature: "fake",
+ };
+ expect(bv.verifyVote(vote)).toBe(false);
+ });
+
+ it("computes quorum correctly", () => {
+ const bv = new ByzantineValidator();
+ // n=4: f=1, need 2f+1=3
+ const q4 = bv.checkQuorum(4, 3);
+ expect(q4.reached).toBe(true);
+ expect(q4.faultTolerance).toBe(1);
+ expect(q4.requiredCount).toBe(3);
+
+ // n=4, only 2 agree
+ const q4low = bv.checkQuorum(4, 2);
+ expect(q4low.reached).toBe(false);
+
+ // n=7: f=2, need 2f+1=5
+ const q7 = bv.checkQuorum(7, 5);
+ expect(q7.reached).toBe(true);
+ expect(q7.faultTolerance).toBe(2);
+ });
+
+ it("runs PBFT successfully with 4 agents agreeing", async () => {
+ const bv = new ByzantineValidator();
+ const result = await bv.runPBFT({
+ agentIds: ["a", "b", "c", "d"],
+ values: ["yes", "yes", "yes", "no"],
+ agentValues: { a: "yes", b: "yes", c: "yes", d: "no" },
+ });
+
+ expect(result.success).toBe(true);
+ expect(result.resolvedValue).toBe("yes");
+ expect(result.phase).toBe("complete");
+ });
+
+ it("fails PBFT when insufficient agents", async () => {
+ const bv = new ByzantineValidator();
+ const result = await bv.runPBFT({
+ agentIds: ["a", "b", "c"],
+ values: ["yes", "no", "maybe"],
+ agentValues: { a: "yes", b: "no", c: "maybe" },
+ });
+
+ expect(result.success).toBe(false);
+ });
+
+ it("clears session keys", () => {
+ const bv = new ByzantineValidator();
+ bv.generateSessionKey("agent-a");
+ const vote = bv.signVote("agent-a", "x");
+ expect(bv.verifyVote(vote)).toBe(true);
+
+ bv.clearKeys();
+ expect(bv.verifyVote(vote)).toBe(false);
+ });
+
+ // --- Timing-safe HMAC comparison tests ---
+
+ it("rejects a tampered signature of the same length", () => {
+ const bv = new ByzantineValidator();
+ bv.generateSessionKey("agent-a");
+ const vote = bv.signVote("agent-a", "value-x");
+
+ // Flip last hex char to produce a same-length but different signature
+ const lastChar = vote.signature[vote.signature.length - 1];
+ const flipped = lastChar === "0" ? "1" : "0";
+ vote.signature = vote.signature.slice(0, vote.signature.length - 1) + flipped;
+
+ expect(bv.verifyVote(vote)).toBe(false);
+ });
+
+ it("rejects a signature of different length", () => {
+ const bv = new ByzantineValidator();
+ bv.generateSessionKey("agent-a");
+ const vote = bv.signVote("agent-a", "value-x");
+
+ // Truncate signature to make it shorter
+ vote.signature = vote.signature.slice(0, 8);
+ expect(bv.verifyVote(vote)).toBe(false);
+ });
+
+ it("still accepts valid signatures after timing-safe fix", () => {
+ const bv = new ByzantineValidator();
+ bv.generateSessionKey("agent-b");
+ const vote = bv.signVote("agent-b", "some-value");
+ expect(bv.verifyVote(vote)).toBe(true);
+ });
+});
diff --git a/extensions/agent-mesh/byzantine-validator.ts b/extensions/agent-mesh/byzantine-validator.ts
new file mode 100644
index 00000000..0e0ebeeb
--- /dev/null
+++ b/extensions/agent-mesh/byzantine-validator.ts
@@ -0,0 +1,216 @@
+/**
+ * PBFT-Local Validator (Kimeru extension)
+ *
+ * Local simulation of PBFT for single-process multi-agent consensus.
+ * NOT a real distributed BFT protocol. All agents run in the same process;
+ * HMAC-signed votes prevent accidental value corruption but do not defend
+ * against network-level Byzantine faults.
+ *
+ * Requires >= 4 agents (3f+1 where f >= 1).
+ * Falls back to weighted consensus if insufficient agents.
+ */
+
+import { createHmac, randomBytes, timingSafeEqual } from "node:crypto";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export type SignedVote = {
+ agentId: string;
+ value: string;
+ timestamp: number;
+ signature: string;
+};
+
+export type QuorumResult = {
+ reached: boolean;
+ agreementCount: number;
+ requiredCount: number;
+ faultTolerance: number;
+ totalAgents: number;
+};
+
+export type ByzantinePhaseResult = {
+ phase: "pre-prepare" | "prepare" | "commit" | "complete";
+ success: boolean;
+ resolvedValue: string;
+ votes: SignedVote[];
+ quorum: QuorumResult;
+};
+
+export type SessionKey = {
+ agentId: string;
+ key: Buffer;
+ createdAt: number;
+};
+
+// ============================================================================
+// ByzantineValidator
+// ============================================================================
+
+export class ByzantineValidator {
+ private sessionKeys = new Map<string, SessionKey>();
+
+ generateSessionKey(agentId: string): SessionKey {
+ const key: SessionKey = {
+ agentId,
+ key: randomBytes(32),
+ createdAt: Date.now(),
+ };
+ this.sessionKeys.set(agentId, key);
+ return key;
+ }
+
+ signVote(agentId: string, value: string): SignedVote {
+ let sessionKey = this.sessionKeys.get(agentId);
+ if (!sessionKey) {
+ sessionKey = this.generateSessionKey(agentId);
+ }
+
+ const timestamp = Date.now();
+ const data = `${agentId}:${value}:${timestamp}`;
+ const signature = createHmac("sha256", sessionKey.key).update(data).digest("hex");
+
+ return { agentId, value, timestamp, signature };
+ }
+
+ verifyVote(vote: SignedVote): boolean {
+ const sessionKey = this.sessionKeys.get(vote.agentId);
+ if (!sessionKey) return false;
+
+ const data = `${vote.agentId}:${vote.value}:${vote.timestamp}`;
+ const expected = createHmac("sha256", sessionKey.key).update(data).digest("hex");
+
+ const sigBuf = Buffer.from(vote.signature, "hex");
+ const expBuf = Buffer.from(expected, "hex");
+ if (sigBuf.length !== expBuf.length) return false;
+ return timingSafeEqual(sigBuf, expBuf);
+ }
+
+ /**
+ * Check if quorum is reached: need 2f+1 agreeing agents.
+ * f = floor((n-1)/3)
+ */
+ checkQuorum(totalAgents: number, agreementCount: number): QuorumResult {
+ const f = Math.floor((totalAgents - 1) / 3);
+ const required = 2 * f + 1;
+
+ return {
+ reached: agreementCount >= required,
+ agreementCount,
+ requiredCount: required,
+ faultTolerance: f,
+ totalAgents,
+ };
+ }
+
+ /**
+ * Byzantine consensus requires at least 4 agents (3f+1 where f >= 1).
+ */
+ canRunByzantine(totalAgents: number): boolean {
+ return totalAgents >= 4;
+ }
+
+ /**
+ * Run practical BFT (PBFT) consensus.
+ *
+ * Phases:
+ * 1. Pre-prepare: primary proposes a value
+ * 2. Prepare: agents sign votes, need 2f+1
+ * 3. Commit: agents confirm, need 2f+1
+ */
+ async runPBFT(params: {
+ agentIds: string[];
+ values: string[];
+ agentValues: Record<string, string>;
+ }): Promise<ByzantinePhaseResult> {
+ const { agentIds, values, agentValues } = params;
+ const n = agentIds.length;
+
+ if (!this.canRunByzantine(n)) {
+ return {
+ phase: "pre-prepare",
+ success: false,
+ resolvedValue: "",
+ votes: [],
+ quorum: this.checkQuorum(n, 0),
+ };
+ }
+
+ // Ensure all agents have session keys
+ for (const id of agentIds) {
+ if (!this.sessionKeys.has(id)) {
+ this.generateSessionKey(id);
+ }
+ }
+
+ // Phase 1: Pre-prepare — primary proposes the most common value
+ const valueCounts = new Map<string, number>();
+ for (const v of Object.values(agentValues)) {
+ valueCounts.set(v, (valueCounts.get(v) ?? 0) + 1);
+ }
+ let proposedValue = values[0] ?? "";
+ let maxCount = 0;
+ for (const [v, c] of valueCounts) {
+ if (c > maxCount) {
+ maxCount = c;
+ proposedValue = v;
+ }
+ }
+
+ // Phase 2: Prepare — agents vote
+ const prepareVotes: SignedVote[] = [];
+ for (const agentId of agentIds) {
+ const agentValue = agentValues[agentId] ?? proposedValue;
+ const vote = this.signVote(agentId, agentValue);
+ if (this.verifyVote(vote)) {
+ prepareVotes.push(vote);
+ }
+ }
+
+ // Count agreements with proposed value
+ const prepareAgreements = prepareVotes.filter((v) => v.value === proposedValue).length;
+ const prepareQuorum = this.checkQuorum(n, prepareAgreements);
+
+ if (!prepareQuorum.reached) {
+ return {
+ phase: "prepare",
+ success: false,
+ resolvedValue: proposedValue,
+ votes: prepareVotes,
+ quorum: prepareQuorum,
+ };
+ }
+
+ // Phase 3: Commit — agents confirm
+ const commitVotes: SignedVote[] = [];
+ for (const agentId of agentIds) {
+ const agentValue = agentValues[agentId] ?? proposedValue;
+ if (agentValue === proposedValue) {
+ const vote = this.signVote(agentId, `commit:${proposedValue}`);
+ if (this.verifyVote(vote)) {
+ commitVotes.push(vote);
+ }
+ }
+ }
+
+ const commitAgreements = commitVotes.length;
+ const commitQuorum = this.checkQuorum(n, commitAgreements);
+
+ return {
+ phase: commitQuorum.reached ? "complete" : "commit",
+ success: commitQuorum.reached,
+ resolvedValue: proposedValue,
+ votes: [...prepareVotes, ...commitVotes],
+ quorum: commitQuorum,
+ };
+ }
+
+ /**
+ * Clear session keys.
+ */
+ clearKeys(): void {
+ this.sessionKeys.clear();
+ }
+}
diff --git a/extensions/agent-mesh/config.ts b/extensions/agent-mesh/config.ts
index 29d7fc8c..5461d414 100644
--- a/extensions/agent-mesh/config.ts
+++ b/extensions/agent-mesh/config.ts
@@ -34,6 +34,19 @@ export type BackgroundConfig = {
taskTimeoutSeconds: number;
};
+export type MiteruConfig = {
+ enabled: boolean;
+ qLearning: { alpha: number; gamma: number; epsilon: number };
+};
+
+export type KimeruConfig = {
+ enabled: boolean;
+ defaultStrategy: "majority" | "weighted" | "arbitrate" | "pbft-local" | "leader-score";
+ autoResolve: boolean;
+ byzantine: { enabled: boolean; minAgents: number; commitTimeoutMs: number };
+ raft: { enabled: boolean; leaderTimeoutMs: number; maxReElections: number };
+};
+
export type AgentMeshConfig = {
cortex: CortexConfig;
agentNamespace: string;
@@ -42,6 +55,8 @@ export type AgentMeshConfig = {
worktree: WorktreeConfig;
mailbox: MailboxConfig;
background: BackgroundConfig;
+ miteru: MiteruConfig;
+ kimeru: KimeruConfig;
};
const DEFAULT_NAMESPACE = "mayros";
@@ -59,6 +74,9 @@ const DEFAULT_MAILBOX_MAX_MESSAGES = 1000;
const DEFAULT_MAILBOX_RETENTION_DAYS = 30;
const DEFAULT_BG_MAX_CONCURRENT = 5;
const DEFAULT_BG_TASK_TIMEOUT = 3600;
+const DEFAULT_MITERU_ENABLED = true;
+const DEFAULT_KIMERU_ENABLED = true;
+const DEFAULT_KIMERU_STRATEGY = "weighted" as const;
const VALID_STRATEGIES: MergeStrategy[] = [
"additive",
@@ -190,6 +208,74 @@ export function parseBackgroundConfig(raw: unknown): BackgroundConfig {
return { maxConcurrentTasks, taskTimeoutSeconds };
}
+export function parseMiteruConfig(raw: unknown): MiteruConfig {
+ const m = (raw ?? {}) as Record<string, unknown>;
+ if (typeof raw === "object" && raw !== null && !Array.isArray(raw)) {
+ assertAllowedKeys(m, ["enabled", "qLearning"], "miteru config");
+ }
+
+ const qRaw = (m.qLearning ?? {}) as Record<string, unknown>;
+ return {
+ enabled: m.enabled !== false ? DEFAULT_MITERU_ENABLED : false,
+ qLearning: {
+ alpha: typeof qRaw.alpha === "number" && qRaw.alpha > 0 && qRaw.alpha <= 1 ? qRaw.alpha : 0.1,
+ gamma:
+ typeof qRaw.gamma === "number" && qRaw.gamma >= 0 && qRaw.gamma <= 1 ? qRaw.gamma : 0.9,
+ epsilon:
+ typeof qRaw.epsilon === "number" && qRaw.epsilon >= 0 && qRaw.epsilon <= 1
+ ? qRaw.epsilon
+ : 0.15,
+ },
+ };
+}
+
+export function parseKimeruConfig(raw: unknown): KimeruConfig {
+ const k = (raw ?? {}) as Record<string, unknown>;
+ if (typeof raw === "object" && raw !== null && !Array.isArray(raw)) {
+ assertAllowedKeys(
+ k,
+ ["enabled", "defaultStrategy", "autoResolve", "byzantine", "raft"],
+ "kimeru config",
+ );
+ }
+
+ const validStrategies = ["majority", "weighted", "arbitrate", "pbft-local", "leader-score"];
+ const defaultStrategy =
+ typeof k.defaultStrategy === "string" && validStrategies.includes(k.defaultStrategy)
+ ? (k.defaultStrategy as KimeruConfig["defaultStrategy"])
+ : DEFAULT_KIMERU_STRATEGY;
+
+ let byzantine = { enabled: false, minAgents: 4, commitTimeoutMs: 10_000 };
+ if (k.byzantine && typeof k.byzantine === "object" && !Array.isArray(k.byzantine)) {
+ const b = k.byzantine as Record<string, unknown>;
+ byzantine = {
+ enabled: b.enabled === true,
+ minAgents: typeof b.minAgents === "number" && b.minAgents >= 4 ? Math.floor(b.minAgents) : 4,
+ commitTimeoutMs:
+ typeof b.commitTimeoutMs === "number" ? Math.floor(b.commitTimeoutMs) : 10_000,
+ };
+ }
+
+ let raft = { enabled: false, leaderTimeoutMs: 30_000, maxReElections: 3 };
+ if (k.raft && typeof k.raft === "object" && !Array.isArray(k.raft)) {
+ const r = k.raft as Record<string, unknown>;
+ raft = {
+ enabled: r.enabled === true,
+ leaderTimeoutMs:
+ typeof r.leaderTimeoutMs === "number" ? Math.floor(r.leaderTimeoutMs) : 30_000,
+ maxReElections: typeof r.maxReElections === "number" ? Math.floor(r.maxReElections) : 3,
+ };
+ }
+
+ return {
+ enabled: k.enabled !== false ? DEFAULT_KIMERU_ENABLED : false,
+ defaultStrategy,
+ autoResolve: k.autoResolve !== false,
+ byzantine,
+ raft,
+ };
+}
+
export const agentMeshConfigSchema = {
parse(value: unknown): AgentMeshConfig {
if (value === null || value === undefined) {
@@ -201,7 +287,17 @@ export const agentMeshConfigSchema = {
const cfg = value as Record<string, unknown>;
assertAllowedKeys(
cfg,
- ["cortex", "agentNamespace", "mesh", "teams", "worktree", "mailbox", "background"],
+ [
+ "cortex",
+ "agentNamespace",
+ "mesh",
+ "teams",
+ "worktree",
+ "mailbox",
+ "background",
+ "miteru",
+ "kimeru",
+ ],
"agent mesh config",
);
@@ -211,6 +307,8 @@ export const agentMeshConfigSchema = {
const worktree = parseWorktreeConfig(cfg.worktree);
const mailbox = parseMailboxConfig(cfg.mailbox);
const background = parseBackgroundConfig(cfg.background);
+ const miteru = parseMiteruConfig(cfg.miteru);
+ const kimeru = parseKimeruConfig(cfg.kimeru);
const agentNamespace =
typeof cfg.agentNamespace === "string" ? cfg.agentNamespace : DEFAULT_NAMESPACE;
@@ -220,7 +318,7 @@ export const agentMeshConfigSchema = {
);
}
- return { cortex, agentNamespace, mesh, teams, worktree, mailbox, background };
+ return { cortex, agentNamespace, mesh, teams, worktree, mailbox, background, miteru, kimeru };
},
uiHints: {
"cortex.host": {
@@ -315,5 +413,33 @@ export const agentMeshConfigSchema = {
advanced: true,
help: "Timeout in seconds before a background task is considered stale",
},
+ "miteru.enabled": {
+ label: "Enable Miteru (Task Routing)",
+ help: "Enable Q-Learning based task-to-agent routing in workflows",
+ },
+ "miteru.qLearning.alpha": {
+ label: "Miteru Learning Rate (α)",
+ placeholder: "0.1",
+ advanced: true,
+ },
+ "miteru.qLearning.epsilon": {
+ label: "Miteru Exploration Rate (ε)",
+ placeholder: "0.15",
+ advanced: true,
+ },
+ "kimeru.enabled": {
+ label: "Enable Kimeru (Consensus)",
+ help: "Enable multi-agent consensus resolution for conflicting results",
+ },
+ "kimeru.defaultStrategy": {
+ label: "Kimeru Default Strategy",
+ placeholder: "weighted",
+ advanced: true,
+ help: "Default consensus strategy: majority, weighted, arbitrate, pbft-local, or leader-score",
+ },
+ "kimeru.autoResolve": {
+ label: "Kimeru Auto-Resolve",
+ help: "Automatically resolve conflicts after each workflow phase",
+ },
},
};
diff --git a/extensions/agent-mesh/consensus-engine.test.ts b/extensions/agent-mesh/consensus-engine.test.ts
new file mode 100644
index 00000000..4b8a2be1
--- /dev/null
+++ b/extensions/agent-mesh/consensus-engine.test.ts
@@ -0,0 +1,95 @@
+import { describe, it, expect } from "vitest";
+import { ConsensusEngine } from "./consensus-engine.js";
+import { PerformanceTracker } from "./performance-tracker.js";
+import type { Conflict } from "./mesh-protocol.js";
+
+const perfTracker = new PerformanceTracker(null, "test");
+
+describe("ConsensusEngine", () => {
+ const conflicts: Conflict[] = [
+ {
+ subject: "test:entity",
+ predicate: "test:status",
+ values: ["active", "inactive", "active"],
+ namespaces: ["ns-a", "ns-b", "ns-c"],
+ },
+ ];
+
+ it("majority vote picks most common value", async () => {
+ const engine = new ConsensusEngine(null, "test", perfTracker);
+
+ const result = await engine.resolve({
+ id: "test-1",
+ conflicts,
+ agentIds: ["agent-a", "agent-b", "agent-c"],
+ strategy: "majority",
+ });
+
+ expect(result.resolved).toBe(true);
+ expect(result.resolutions[0]!.resolvedValue).toBe("active"); // 2 vs 1
+ });
+
+ it("weighted vote uses agent scores", async () => {
+ // Set up different scores
+ await perfTracker.recordOutcome({
+ agentId: "agent-a",
+ completed: true,
+ durationMs: 1000,
+ costUsd: 0,
+ findings: 10,
+ conflicts: 0,
+ });
+
+ const engine = new ConsensusEngine(null, "test", perfTracker);
+
+ const result = await engine.resolve({
+ id: "test-2",
+ conflicts: [
+ {
+ subject: "test:entity",
+ predicate: "test:value",
+ values: ["x", "y"],
+ namespaces: ["ns-a", "ns-b"],
+ },
+ ],
+ agentIds: ["agent-a", "agent-b"],
+ strategy: "weighted",
+ });
+
+ expect(result.resolved).toBe(true);
+ expect(result.strategy).toBe("weighted");
+ expect(result.resolutions.length).toBe(1);
+ });
+
+ it("resolvePhaseConflicts returns empty for no conflicts", async () => {
+ const engine = new ConsensusEngine(null, "test", perfTracker);
+ const results = await engine.resolvePhaseConflicts([], {}, "majority");
+ expect(results.length).toBe(0);
+ });
+
+ it("resolvePhaseConflicts processes conflicts", async () => {
+ const engine = new ConsensusEngine(null, "test", perfTracker);
+ const results = await engine.resolvePhaseConflicts(
+ conflicts,
+ { "ns-a": "agent-a", "ns-b": "agent-b" },
+ "weighted",
+ );
+
+ expect(results.length).toBe(1);
+ expect(results[0]!.breakdown.totalConflicts).toBe(1);
+ });
+
+ it("arbitrate falls back to weighted when no LLM", async () => {
+ const engine = new ConsensusEngine(null, "test", perfTracker);
+
+ const result = await engine.resolve({
+ id: "test-3",
+ conflicts,
+ agentIds: ["agent-a", "agent-b", "agent-c"],
+ strategy: "arbitrate",
+ });
+
+ // Without LLM, should still resolve via weighted fallback
+ expect(result.resolved).toBe(true);
+ });
+});
diff --git a/extensions/agent-mesh/consensus-engine.ts b/extensions/agent-mesh/consensus-engine.ts
new file mode 100644
index 00000000..f1c21e0f
--- /dev/null
+++ b/extensions/agent-mesh/consensus-engine.ts
@@ -0,0 +1,392 @@
+/**
+ * Consensus Engine (Kimeru)
+ *
+ * Resolves conflicts when parallel agents produce divergent results.
+ * Three strategies: majority vote, weighted vote (by EMA score), and
+ * LLM-arbitrated decision.
+ */
+
+import type { CortexClient } from "../shared/cortex-client.js";
+import type { PerformanceTracker } from "./performance-tracker.js";
+import type { Conflict } from "./mesh-protocol.js";
+import type { ByzantineValidator } from "./byzantine-validator.js";
+import type { RaftLeader } from "./raft-leader.js";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export type ConsensusStrategy =
+ | "majority"
+ | "weighted"
+ | "arbitrate"
+ | "pbft-local"
+ | "leader-score";
+
+export type ConsensusRequest = {
+ id: string;
+ conflicts: Conflict[];
+ agentIds: string[];
+ strategy: ConsensusStrategy;
+};
+
+export type ConsensusResult = {
+ id: string;
+ resolved: boolean;
+ strategy: ConsensusStrategy;
+ confidence: number;
+ resolutions: ConsensusResolution[];
+ breakdown: ConsensusBreakdown;
+};
+
+export type ConsensusResolution = {
+ subject: string;
+ predicate: string;
+ resolvedValue: string;
+ discardedValues: string[];
+ votes: Record<string, number>;
+};
+
+export type ConsensusBreakdown = {
+ totalConflicts: number;
+ resolvedCount: number;
+ unresolvedCount: number;
+ averageConfidence: number;
+};
+
+// ============================================================================
+// Constants
+// ============================================================================
+
+const ARBITRATE_MARGIN = 0.15;
+const CORTEX_PREFIX = "kimeru:consensus:";
+
+// ============================================================================
+// ConsensusEngine
+// ============================================================================
+
+export class ConsensusEngine {
+ private byzantineValidator?: ByzantineValidator;
+ private raftLeader?: RaftLeader;
+
+ constructor(
+ private readonly client: CortexClient | null,
+ private readonly ns: string,
+ private readonly perfTracker: PerformanceTracker,
+ private readonly callLlm?: (prompt: string, opts?: { maxTokens?: number }) => Promise<string>,
+ byzantineValidator?: ByzantineValidator,
+ raftLeader?: RaftLeader,
+ ) {
+ this.byzantineValidator = byzantineValidator;
+ this.raftLeader = raftLeader;
+ }
+
+ /**
+ * Resolve a set of conflicts between agents.
+ */
+ async resolve(request: ConsensusRequest): Promise<ConsensusResult> {
+ const resolutions: ConsensusResolution[] = [];
+ let totalConfidence = 0;
+
+ for (const conflict of request.conflicts) {
+ const resolution = await this.resolveConflict(conflict, request.agentIds, request.strategy);
+ resolutions.push(resolution);
+ totalConfidence += resolution.votes
+ ? Math.max(...Object.values(resolution.votes)) /
+ Object.values(resolution.votes).reduce((a, b) => a + b, 0)
+ : 0.5;
+ }
+
+ const resolvedCount = resolutions.filter((r) => r.resolvedValue !== "").length;
+
+ const result: ConsensusResult = {
+ id: request.id,
+ resolved: resolvedCount === request.conflicts.length,
+ strategy: request.strategy,
+ confidence: resolutions.length > 0 ? totalConfidence / resolutions.length : 1.0,
+ resolutions,
+ breakdown: {
+ totalConflicts: request.conflicts.length,
+ resolvedCount,
+ unresolvedCount: request.conflicts.length - resolvedCount,
+ averageConfidence: resolutions.length > 0 ? totalConfidence / resolutions.length : 1.0,
+ },
+ };
+
+ // Persist result
+ await this.persistResult(result);
+
+ return result;
+ }
+
+ /**
+ * Resolve conflicts from a workflow phase.
+ * Convenience method that maps phase conflicts into ConsensusRequests.
+ */
+ async resolvePhaseConflicts(
+ conflicts: Conflict[],
+ agentIdByNs: Record<string, string>,
+ strategy: ConsensusStrategy,
+ ): Promise<ConsensusResult[]> {
+ if (conflicts.length === 0) return [];
+
+ const agentIds = Object.values(agentIdByNs);
+ const results: ConsensusResult[] = [];
+
+ // Group conflicts by subject for batched resolution
+ const id = `phase-${Date.now()}`;
+ const request: ConsensusRequest = {
+ id,
+ conflicts,
+ agentIds,
+ strategy,
+ };
+
+ results.push(await this.resolve(request));
+ return results;
+ }
+
+ // ---------- internal ----------
+
+ private async resolveConflict(
+ conflict: Conflict,
+ agentIds: string[],
+ strategy: ConsensusStrategy,
+ ): Promise<ConsensusResolution> {
+ const votes: Record<string, number> = {};
+
+ // Initialize votes from conflict values
+ for (const value of conflict.values) {
+ votes[value] = 0;
+ }
+
+ switch (strategy) {
+ case "majority":
+ return this.majorityVote(conflict, agentIds, votes);
+
+ case "weighted":
+ return this.weightedVote(conflict, agentIds, votes);
+
+ case "arbitrate": {
+ // First try weighted; if margin too small, escalate to LLM
+ const weighted = await this.weightedVote(conflict, agentIds, votes);
+ const totalVotes = Object.values(weighted.votes).reduce((a, b) => a + b, 0);
+ const maxVote = Math.max(...Object.values(weighted.votes));
+
+ if (totalVotes > 0 && maxVote / totalVotes - ARBITRATE_MARGIN > 0) {
+ return weighted;
+ }
+
+ // LLM arbitration
+ return this.llmArbitrate(conflict, weighted);
+ }
+
+ case "pbft-local": {
+ if (!this.byzantineValidator || !this.byzantineValidator.canRunByzantine(agentIds.length)) {
+ // Fallback to weighted if insufficient agents or no validator
+ return this.weightedVote(conflict, agentIds, votes);
+ }
+
+ // Build agent→value map
+ const agentValues: Record<string, string> = {};
+ for (let i = 0; i < conflict.namespaces.length; i++) {
+ const agentId = agentIds[i % agentIds.length] ?? agentIds[0]!;
+ const value = conflict.values[i % conflict.values.length] ?? conflict.values[0] ?? "";
+ agentValues[agentId] = value;
+ }
+
+ const pbftResult = await this.byzantineValidator.runPBFT({
+ agentIds,
+ values: conflict.values,
+ agentValues,
+ });
+
+ if (pbftResult.success) {
+ // Convert PBFT votes to vote counts
+ for (const v of pbftResult.votes) {
+ const cleanValue = v.value.replace(/^commit:/, "");
+ if (conflict.values.includes(cleanValue)) {
+ votes[cleanValue] = (votes[cleanValue] ?? 0) + 1;
+ }
+ }
+ return {
+ subject: conflict.subject,
+ predicate: conflict.predicate,
+ resolvedValue: pbftResult.resolvedValue,
+ discardedValues: conflict.values.filter((v) => v !== pbftResult.resolvedValue),
+ votes,
+ };
+ }
+
+ // PBFT failed — fallback to weighted
+ return this.weightedVote(conflict, agentIds, votes);
+ }
+
+ case "leader-score": {
+ if (!this.raftLeader || agentIds.length < 2) {
+ return this.weightedVote(conflict, agentIds, votes);
+ }
+
+ try {
+ const election = await this.raftLeader.electLeader(agentIds);
+
+ // Build leader's value and follower values
+ const leaderIdx = agentIds.indexOf(election.leaderId);
+ const leaderValue =
+ conflict.values[leaderIdx % conflict.values.length] ?? conflict.values[0] ?? "";
+ const followerIds = agentIds.filter((id) => id !== election.leaderId);
+ const followerValues: Record<string, string> = {};
+ for (const fid of followerIds) {
+ const fidIdx = agentIds.indexOf(fid);
+ followerValues[fid] =
+ conflict.values[fidIdx % conflict.values.length] ?? conflict.values[0] ?? "";
+ }
+
+ const raftResult = await this.raftLeader.proposeResolution({
+ leaderId: election.leaderId,
+ value: leaderValue,
+ followerIds,
+ followerValues,
+ });
+
+ if (raftResult.success) {
+ votes[raftResult.proposedValue] = raftResult.confirmations;
+ return {
+ subject: conflict.subject,
+ predicate: conflict.predicate,
+ resolvedValue: raftResult.proposedValue,
+ discardedValues: conflict.values.filter((v) => v !== raftResult.proposedValue),
+ votes,
+ };
+ }
+ } catch {
+ // Raft failed — fallback
+ }
+
+ return this.weightedVote(conflict, agentIds, votes);
+ }
+ }
+ }
+
+ private majorityVote(
+ conflict: Conflict,
+ _agentIds: string[],
+ votes: Record<string, number>,
+ ): ConsensusResolution {
+ // Each namespace gets 1 vote for its value
+ for (let i = 0; i < conflict.values.length; i++) {
+ const value = conflict.values[i]!;
+ votes[value] = (votes[value] ?? 0) + 1;
+ }
+
+ // Find winner
+ let winner = conflict.values[0] ?? "";
+ let maxVotes = 0;
+ for (const [value, count] of Object.entries(votes)) {
+ if (count > maxVotes) {
+ maxVotes = count;
+ winner = value;
+ }
+ }
+
+ return {
+ subject: conflict.subject,
+ predicate: conflict.predicate,
+ resolvedValue: winner,
+ discardedValues: conflict.values.filter((v) => v !== winner),
+ votes,
+ };
+ }
+
+ private async weightedVote(
+ conflict: Conflict,
+ agentIds: string[],
+ votes: Record<string, number>,
+ ): Promise<ConsensusResolution> {
+ // Weight each agent's vote by their EMA score
+ for (let i = 0; i < conflict.namespaces.length; i++) {
+ const ns = conflict.namespaces[i]!;
+ const value = conflict.values[i % conflict.values.length] ?? conflict.values[0] ?? "";
+
+ // Find agent for this namespace
+ const agentId = agentIds[i % agentIds.length] ?? agentIds[0];
+ const score = agentId ? await this.perfTracker.getScore(agentId) : 0.5;
+
+ votes[value] = (votes[value] ?? 0) + score;
+ }
+
+ // Find winner
+ let winner = conflict.values[0] ?? "";
+ let maxVotes = 0;
+ for (const [value, weight] of Object.entries(votes)) {
+ if (weight > maxVotes) {
+ maxVotes = weight;
+ winner = value;
+ }
+ }
+
+ return {
+ subject: conflict.subject,
+ predicate: conflict.predicate,
+ resolvedValue: winner,
+ discardedValues: conflict.values.filter((v) => v !== winner),
+ votes,
+ };
+ }
+
+ private async llmArbitrate(
+ conflict: Conflict,
+ fallback: ConsensusResolution,
+ ): Promise<ConsensusResolution> {
+ if (!this.callLlm) {
+ return fallback;
+ }
+
+ try {
+ const prompt = [
+ "You are resolving a conflict between agents. Pick the best value.",
+ `Subject: ${conflict.subject}`,
+ `Predicate: ${conflict.predicate}`,
+ `Values: ${conflict.values.map((v, i) => `[${i + 1}] "${v}"`).join(", ")}`,
+ "Reply with ONLY the number of the best value (e.g., '1').",
+ ].join("\n");
+
+ const response = await this.callLlm(prompt, { maxTokens: 10 });
+ const choice = parseInt(response.trim(), 10);
+
+ if (choice >= 1 && choice <= conflict.values.length) {
+ const winner = conflict.values[choice - 1]!;
+ return {
+ ...fallback,
+ resolvedValue: winner,
+ discardedValues: conflict.values.filter((v) => v !== winner),
+ };
+ }
+ } catch {
+ // Fallback to weighted result
+ }
+
+ return fallback;
+ }
+
+ private async persistResult(result: ConsensusResult): Promise<void> {
+ if (!this.client) return;
+
+ try {
+ const subject = `${this.ns}:${CORTEX_PREFIX}${result.id}`;
+ await this.client.createTriple({
+ subject,
+ predicate: `${this.ns}:kimeru:result`,
+ object: JSON.stringify({
+ resolved: result.resolved,
+ strategy: result.strategy,
+ confidence: result.confidence,
+ totalConflicts: result.breakdown.totalConflicts,
+ resolvedCount: result.breakdown.resolvedCount,
+ }),
+ });
+ } catch {
+ // best-effort
+ }
+ }
+}
diff --git a/extensions/agent-mesh/index.ts b/extensions/agent-mesh/index.ts
index 148fe301..bdb7b5e9 100644
--- a/extensions/agent-mesh/index.ts
+++ b/extensions/agent-mesh/index.ts
@@ -41,6 +41,11 @@ import { TeamDashboardService } from "./team-dashboard.js";
import { TeamManager } from "./team-manager.js";
import { WorkflowOrchestrator } from "./workflow-orchestrator.js";
import { listWorkflows as listWorkflowDefs } from "./workflows/registry.js";
+import { TaskRouter } from "./task-router.js";
+import { PerformanceTracker } from "./performance-tracker.js";
+import { ConsensusEngine } from "./consensus-engine.js";
+import { ByzantineValidator } from "./byzantine-validator.js";
+import { RaftLeader } from "./raft-leader.js";
// ============================================================================
// Plugin Definition
@@ -69,6 +74,25 @@ const agentMeshPlugin = {
});
const mailbox = new AgentMailbox(client, ns);
const bgTracker = new BackgroundTracker(client, ns);
+
+ // Miteru (task routing) + Kimeru (consensus)
+ const perfTracker = new PerformanceTracker(client, ns);
+ const taskRouter = cfg.miteru.enabled ? new TaskRouter(client, ns, perfTracker) : undefined;
+ // Byzantine validator + Raft leader (Kimeru extensions)
+ const byzantineValidator =
+ cfg.kimeru.enabled && cfg.kimeru.byzantine.enabled ? new ByzantineValidator() : undefined;
+ const raftLeader =
+ cfg.kimeru.enabled && cfg.kimeru.raft.enabled
+ ? new RaftLeader(
+ perfTracker,
+ cfg.kimeru.raft.leaderTimeoutMs,
+ cfg.kimeru.raft.maxReElections,
+ )
+ : undefined;
+ const consensusEngine = cfg.kimeru.enabled
+ ? new ConsensusEngine(client, ns, perfTracker, api.callLlm, byzantineValidator, raftLeader)
+ : undefined;
+
const orchestrator = new WorkflowOrchestrator(
client,
ns,
@@ -77,6 +101,10 @@ const agentMeshPlugin = {
nsMgr,
mailbox,
bgTracker,
+ undefined, // phaseTimeoutMs (use default)
+ taskRouter,
+ consensusEngine,
+ perfTracker,
);
const dashboard = new TeamDashboardService(teamMgr, mailbox, null, ns);
let cortexAvailable = false;
@@ -951,6 +979,206 @@ const agentMeshPlugin = {
{ name: "mesh_run_workflow" },
);
+ // 12b. mesh_route_task (Miteru)
+ api.registerTool(
+ {
+ name: "mesh_route_task",
+ label: "Route Task to Agent",
+ description:
+ "Use Miteru Q-Learning to select the best agent for a task. Returns a routing decision with confidence score.",
+ parameters: Type.Object({
+ description: Type.String({ description: "Task description" }),
+ agents: Type.Array(Type.String(), { description: "Available agent IDs" }),
+ path: Type.Optional(Type.String({ description: "Target file/directory path" })),
+ }),
+ async execute(_toolCallId, params) {
+ const { description, agents, path } = params as {
+ description: string;
+ agents: string[];
+ path?: string;
+ };
+
+ if (!taskRouter) {
+ return {
+ content: [{ type: "text", text: "Miteru task routing is disabled." }],
+ details: { error: "disabled" },
+ };
+ }
+
+ try {
+ const decision = await taskRouter.selectAgent(description, agents, path);
+ const classification = taskRouter.classifyTask(description, path);
+
+ return {
+ content: [
+ {
+ type: "text",
+ text: [
+ `Routed to: ${decision.agentId}`,
+ `Task type: ${classification.taskType} (${classification.complexity}, ${classification.domain})`,
+ `Confidence: ${(decision.confidence * 100).toFixed(1)}%`,
+ `Reason: ${decision.reason}`,
+ ].join("\n"),
+ },
+ ],
+ details: { decision, classification },
+ };
+ } catch (err) {
+ return {
+ content: [{ type: "text", text: `Routing failed: ${String(err)}` }],
+ details: { error: String(err) },
+ };
+ }
+ },
+ },
+ { name: "mesh_route_task" },
+ );
+
+ // 12c. mesh_agent_performance
+ api.registerTool(
+ {
+ name: "mesh_agent_performance",
+ label: "Agent Performance",
+ description:
+ "Show performance metrics for agents in the mesh — EMA scores, task counts, cost data.",
+ parameters: Type.Object({
+ agentId: Type.Optional(Type.String({ description: "Specific agent ID (omit for all)" })),
+ }),
+ async execute(_toolCallId, params) {
+ const { agentId: targetId } = params as { agentId?: string };
+
+ if (targetId) {
+ const record = await perfTracker.getPerformance(targetId);
+ if (!record) {
+ return {
+ content: [{ type: "text", text: `No performance data for agent "${targetId}".` }],
+ details: { error: "not_found" },
+ };
+ }
+ return {
+ content: [
+ {
+ type: "text",
+ text: [
+ `Agent: ${record.agentId}`,
+ `Score (EMA): ${(record.scoreEma * 100).toFixed(1)}%`,
+ `Tasks: ${record.completedTasks}/${record.totalTasks} completed`,
+ `Avg duration: ${(record.avgDurationMs / 1000).toFixed(1)}s`,
+ `Avg cost: $${record.avgCostUsd.toFixed(4)}`,
+ ].join("\n"),
+ },
+ ],
+ details: record,
+ };
+ }
+
+ const all = perfTracker.getAllCached();
+ if (all.length === 0) {
+ return {
+ content: [{ type: "text", text: "No performance data recorded yet." }],
+ details: { agents: [] },
+ };
+ }
+
+ const lines = ["Agent Performance", "─────────────────"];
+ for (const r of all.sort((a, b) => b.scoreEma - a.scoreEma)) {
+ lines.push(
+ `${r.agentId}: score=${(r.scoreEma * 100).toFixed(1)}% tasks=${r.completedTasks}/${r.totalTasks}`,
+ );
+ }
+
+ return {
+ content: [{ type: "text", text: lines.join("\n") }],
+ details: { agents: all },
+ };
+ },
+ },
+ { name: "mesh_agent_performance" },
+ );
+
+ // 12d. mesh_consensus
+ api.registerTool(
+ {
+ name: "mesh_consensus",
+ label: "Consensus Resolve",
+ description:
+ "Use Kimeru to resolve conflicts between agents. Strategies: majority, weighted, arbitrate.",
+ parameters: Type.Object({
+ ns1: Type.String({ description: "First namespace" }),
+ ns2: Type.String({ description: "Second namespace" }),
+ strategy: Type.Optional(
+ Type.Unsafe<string>({
+ type: "string",
+ enum: ["majority", "weighted", "arbitrate"],
+ description: "Consensus strategy (default: weighted)",
+ }),
+ ),
+ }),
+ async execute(_toolCallId, params) {
+ const {
+ ns1,
+ ns2,
+ strategy: strat,
+ } = params as {
+ ns1: string;
+ ns2: string;
+ strategy?: string;
+ };
+
+ if (!consensusEngine) {
+ return {
+ content: [{ type: "text", text: "Kimeru consensus is disabled." }],
+ details: { error: "disabled" },
+ };
+ }
+
+ try {
+ const conflicts = await fusion.detectConflicts(ns1, ns2);
+ if (conflicts.length === 0) {
+ return {
+ content: [{ type: "text", text: "No conflicts detected between the namespaces." }],
+ details: { conflicts: 0 },
+ };
+ }
+
+ const validStrategies = ["majority", "weighted", "arbitrate"];
+ const strategy = (
+ strat && validStrategies.includes(strat) ? strat : cfg.kimeru.defaultStrategy
+ ) as "majority" | "weighted" | "arbitrate";
+
+ const result = await consensusEngine.resolve({
+ id: `manual-${Date.now()}`,
+ conflicts,
+ agentIds: [ns1, ns2],
+ strategy,
+ });
+
+ return {
+ content: [
+ {
+ type: "text",
+ text: [
+ `Consensus (${strategy}): ${result.breakdown.resolvedCount}/${result.breakdown.totalConflicts} resolved`,
+ `Confidence: ${(result.confidence * 100).toFixed(1)}%`,
+ ...result.resolutions.map(
+ (r) => ` ${r.subject} ${r.predicate}: "${r.resolvedValue}"`,
+ ),
+ ].join("\n"),
+ },
+ ],
+ details: result,
+ };
+ } catch (err) {
+ return {
+ content: [{ type: "text", text: `Consensus failed: ${String(err)}` }],
+ details: { error: String(err) },
+ };
+ }
+ },
+ },
+ { name: "mesh_consensus" },
+ );
+
// 13. agent_send_message
api.registerTool(
{
diff --git a/extensions/agent-mesh/mesh-protocol.ts b/extensions/agent-mesh/mesh-protocol.ts
index 7d468519..955de998 100644
--- a/extensions/agent-mesh/mesh-protocol.ts
+++ b/extensions/agent-mesh/mesh-protocol.ts
@@ -13,7 +13,15 @@ export type MeshMessageType =
| "task"
| "finding"
| "question"
- | "status-update";
+ | "status-update"
+ | "routing-decision"
+ | "performance-update"
+ | "consensus-request"
+ | "consensus-result"
+ | "vote-prepare"
+ | "vote-commit"
+ | "leader-election"
+ | "leader-proposal";
export type MeshMessage = {
type: MeshMessageType;
@@ -113,6 +121,14 @@ export function isValidMessageType(type: string): type is MeshMessageType {
"finding",
"question",
"status-update",
+ "routing-decision",
+ "performance-update",
+ "consensus-request",
+ "consensus-result",
+ "vote-prepare",
+ "vote-commit",
+ "leader-election",
+ "leader-proposal",
].includes(type);
}
diff --git a/extensions/agent-mesh/package.json b/extensions/agent-mesh/package.json
index 12e3433b..edf16b45 100644
--- a/extensions/agent-mesh/package.json
+++ b/extensions/agent-mesh/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-agent-mesh",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros multi-agent coordination mesh with shared namespaces, delegation, and knowledge fusion",
"type": "module",
diff --git a/extensions/agent-mesh/performance-tracker.test.ts b/extensions/agent-mesh/performance-tracker.test.ts
new file mode 100644
index 00000000..93f3389e
--- /dev/null
+++ b/extensions/agent-mesh/performance-tracker.test.ts
@@ -0,0 +1,97 @@
+import { describe, it, expect } from "vitest";
+import { PerformanceTracker } from "./performance-tracker.js";
+
+describe("PerformanceTracker", () => {
+ it("starts with no records", async () => {
+ const tracker = new PerformanceTracker(null, "test");
+ const record = await tracker.getPerformance("agent-a");
+ expect(record).toBeNull();
+ });
+
+ it("getScore returns 0.5 for unknown agent", async () => {
+ const tracker = new PerformanceTracker(null, "test");
+ const score = await tracker.getScore("unknown");
+ expect(score).toBe(0.5);
+ });
+
+ it("records outcome and updates EMA", async () => {
+ const tracker = new PerformanceTracker(null, "test");
+
+ // Record a successful outcome
+ await tracker.recordOutcome({
+ agentId: "agent-a",
+ completed: true,
+ durationMs: 5000,
+ costUsd: 0.01,
+ findings: 3,
+ conflicts: 0,
+ });
+
+ const record = await tracker.getPerformance("agent-a");
+ expect(record).not.toBeNull();
+ expect(record!.totalTasks).toBe(1);
+ expect(record!.completedTasks).toBe(1);
+ expect(record!.scoreEma).toBeGreaterThan(0.5); // EMA moved up from success
+ });
+
+ it("EMA decreases on failure", async () => {
+ const tracker = new PerformanceTracker(null, "test");
+
+ // Record a failed outcome
+ await tracker.recordOutcome({
+ agentId: "agent-b",
+ completed: false,
+ durationMs: 30000,
+ costUsd: 0.05,
+ findings: 0,
+ conflicts: 2,
+ });
+
+ const record = await tracker.getPerformance("agent-b");
+ expect(record!.scoreEma).toBeLessThan(0.5); // EMA moved down from failure
+ });
+
+ it("tracks multiple outcomes for same agent", async () => {
+ const tracker = new PerformanceTracker(null, "test");
+
+ for (let i = 0; i < 5; i++) {
+ await tracker.recordOutcome({
+ agentId: "agent-c",
+ completed: true,
+ durationMs: 3000 + i * 1000,
+ costUsd: 0.01,
+ findings: 2,
+ conflicts: 0,
+ });
+ }
+
+ const record = await tracker.getPerformance("agent-c");
+ expect(record!.totalTasks).toBe(5);
+ expect(record!.completedTasks).toBe(5);
+ expect(record!.avgDurationMs).toBeGreaterThan(3000);
+ });
+
+ it("getAllCached returns all tracked agents", async () => {
+ const tracker = new PerformanceTracker(null, "test");
+
+ await tracker.recordOutcome({
+ agentId: "a1",
+ completed: true,
+ durationMs: 1000,
+ costUsd: 0,
+ findings: 0,
+ conflicts: 0,
+ });
+ await tracker.recordOutcome({
+ agentId: "a2",
+ completed: true,
+ durationMs: 1000,
+ costUsd: 0,
+ findings: 0,
+ conflicts: 0,
+ });
+
+ const all = tracker.getAllCached();
+ expect(all.length).toBe(2);
+ });
+});
diff --git a/extensions/agent-mesh/performance-tracker.ts b/extensions/agent-mesh/performance-tracker.ts
new file mode 100644
index 00000000..e7d1896b
--- /dev/null
+++ b/extensions/agent-mesh/performance-tracker.ts
@@ -0,0 +1,192 @@
+/**
+ * Performance Tracker
+ *
+ * Tracks per-agent performance metrics with EMA smoothing.
+ * Stores records in Cortex triples for persistence.
+ */
+
+import type { CortexClient } from "../shared/cortex-client.js";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export type TaskOutcome = {
+  agentId: string; // agent that executed the task
+  completed: boolean; // whether the task finished successfully
+  durationMs: number; // wall-clock duration of the task
+  costUsd: number; // spend attributed to this task
+  findings: number; // count of findings produced (quality signal)
+  conflicts: number; // count of merge conflicts caused (penalty signal)
+};
+
+export type AgentPerformanceRecord = {
+  agentId: string;
+  totalTasks: number; // lifetime number of recorded outcomes
+  completedTasks: number; // subset of totalTasks with completed=true
+  avgDurationMs: number; // running average across all outcomes
+  avgCostUsd: number; // running average across all outcomes
+  scoreEma: number; // 0.0 - 1.0
+};
+
+// ============================================================================
+// Constants
+// ============================================================================
+
+const EMA_SMOOTHING = 0.3; // weight of the newest observation in the EMA
+const PREDICATE_PREFIX = "miteru:perf:"; // Cortex triple naming prefix
+
+// ============================================================================
+// PerformanceTracker
+// ============================================================================
+
+export class PerformanceTracker {
+  private cache = new Map<string, AgentPerformanceRecord>();
+
+  constructor(
+    private readonly client: CortexClient | null,
+    private readonly ns: string,
+  ) {}
+
+  /**
+   * Record a task outcome and update the agent's performance record.
+   */
+  async recordOutcome(outcome: TaskOutcome): Promise<void> {
+    let record = this.cache.get(outcome.agentId);
+    if (!record) {
+      record = (await this.loadRecord(outcome.agentId)) ?? undefined;
+    }
+
+    if (!record) {
+      record = {
+        agentId: outcome.agentId,
+        totalTasks: 0,
+        completedTasks: 0,
+        avgDurationMs: 0,
+        avgCostUsd: 0,
+        scoreEma: 0.5, // neutral start
+      };
+    }
+
+    record.totalTasks++;
+    if (outcome.completed) record.completedTasks++;
+
+    // Running average for duration and cost (incremental mean update)
+    record.avgDurationMs =
+      record.avgDurationMs + (outcome.durationMs - record.avgDurationMs) / record.totalTasks;
+    record.avgCostUsd =
+      record.avgCostUsd + (outcome.costUsd - record.avgCostUsd) / record.totalTasks;
+
+    // Compute instant score (0-1) from outcome
+    const completionScore = outcome.completed ? 1.0 : 0.0;
+    const findingsBonus = Math.min(0.2, outcome.findings * 0.02);
+    const conflictPenalty = Math.min(0.15, outcome.conflicts * 0.05);
+    const instantScore = Math.max(
+      0,
+      Math.min(1, completionScore + findingsBonus - conflictPenalty),
+    );
+
+    // EMA update
+    record.scoreEma = EMA_SMOOTHING * instantScore + (1 - EMA_SMOOTHING) * record.scoreEma;
+
+    this.cache.set(outcome.agentId, record);
+    await this.persistRecord(record);
+  }
+
+  /**
+   * Get the performance record for an agent.
+   */
+  async getPerformance(agentId: string): Promise<AgentPerformanceRecord | null> {
+    const cached = this.cache.get(agentId);
+    if (cached) return cached;
+    return this.loadRecord(agentId);
+  }
+
+  /**
+   * Get the EMA score for an agent (0.0 - 1.0). Returns 0.5 if unknown.
+   */
+  async getScore(agentId: string): Promise<number> {
+    const record = await this.getPerformance(agentId);
+    return record?.scoreEma ?? 0.5;
+  }
+
+  /**
+   * Get all cached performance records.
+   */
+  getAllCached(): AgentPerformanceRecord[] {
+    return [...this.cache.values()];
+  }
+
+  // ---------- persistence ----------
+
+  private async loadRecord(agentId: string): Promise<AgentPerformanceRecord | null> {
+    if (!this.client) return null;
+
+    try {
+      const subject = `${this.ns}:${PREDICATE_PREFIX}${agentId}`;
+      const result = await this.client.listTriples({ subject, limit: 10 });
+
+      if (result.triples.length === 0) return null;
+
+      const fields: Record<string, string> = {};
+      for (const t of result.triples) {
+        const pred = String(t.predicate);
+        const prefix = `${this.ns}:${PREDICATE_PREFIX}`;
+        if (pred.startsWith(prefix)) {
+          const field = pred.slice(prefix.length);
+          fields[field] =
+            typeof t.object === "object" && t.object !== null && "node" in t.object
+              ? String((t.object as { node: string }).node)
+              : String(t.object);
+        }
+      }
+
+      const record: AgentPerformanceRecord = {
+        agentId,
+        totalTasks: Number(fields.totalTasks) || 0,
+        completedTasks: Number(fields.completedTasks) || 0,
+        avgDurationMs: Number(fields.avgDurationMs) || 0,
+        avgCostUsd: Number(fields.avgCostUsd) || 0,
+        scoreEma: Number(fields.scoreEma) || 0.5,
+      };
+
+      this.cache.set(agentId, record);
+      return record;
+    } catch {
+      return null;
+    }
+  }
+
+  private async persistRecord(record: AgentPerformanceRecord): Promise<void> {
+    if (!this.client) return;
+
+    try {
+      const subject = `${this.ns}:${PREDICATE_PREFIX}${record.agentId}`;
+
+      // Delete existing
+      const existing = await this.client.listTriples({ subject, limit: 20 });
+      for (const t of existing.triples) {
+        if (t.id) await this.client.deleteTriple(t.id);
+      }
+
+      // Write new values
+      const fields: Array<[string, string]> = [
+        ["totalTasks", String(record.totalTasks)],
+        ["completedTasks", String(record.completedTasks)],
+        ["avgDurationMs", String(record.avgDurationMs)],
+        ["avgCostUsd", String(record.avgCostUsd)],
+        ["scoreEma", String(record.scoreEma)],
+      ];
+
+      for (const [field, value] of fields) {
+        await this.client.createTriple({
+          subject,
+          predicate: `${this.ns}:${PREDICATE_PREFIX}${field}`,
+          object: value,
+        });
+      }
+    } catch {
+      // best-effort persistence
+    }
+  }
+}
diff --git a/extensions/agent-mesh/raft-leader.test.ts b/extensions/agent-mesh/raft-leader.test.ts
new file mode 100644
index 00000000..24ba6430
--- /dev/null
+++ b/extensions/agent-mesh/raft-leader.test.ts
@@ -0,0 +1,82 @@
+import { describe, it, expect } from "vitest";
+import { RaftLeader } from "./raft-leader.js";
+import { PerformanceTracker } from "./performance-tracker.js";
+
+const perfTracker = new PerformanceTracker(null, "test");
+
+describe("RaftLeader", () => {
+  it("elects leader with highest EMA score", async () => {
+    // Give agent-a a high score; agent-b/agent-c have no history (0.5 default)
+    await perfTracker.recordOutcome({
+      agentId: "agent-a",
+      completed: true,
+      durationMs: 1000,
+      costUsd: 0,
+      findings: 10,
+      conflicts: 0,
+    });
+
+    const raft = new RaftLeader(perfTracker);
+    const result = await raft.electLeader(["agent-a", "agent-b", "agent-c"]);
+
+    expect(result.leaderId).toBe("agent-a"); // only agent with recorded successes
+    expect(result.leaderScore).toBeGreaterThan(0.5);
+    expect(result.candidates.length).toBe(3);
+    expect(result.term).toBe(1);
+  });
+
+  it("increments term on each election", async () => {
+    const raft = new RaftLeader(perfTracker);
+    await raft.electLeader(["agent-a", "agent-b"]);
+    expect(raft.getCurrentTerm()).toBe(1);
+
+    await raft.electLeader(["agent-a", "agent-b"]);
+    expect(raft.getCurrentTerm()).toBe(2);
+  });
+
+  it("excludes agent from re-election", async () => {
+    const raft = new RaftLeader(perfTracker);
+    const result = await raft.reElect(["agent-a", "agent-b", "agent-c"], "agent-a");
+
+    expect(result.leaderId).not.toBe("agent-a"); // excluded agent cannot win
+    expect(result.candidates.every((c) => c.agentId !== "agent-a")).toBe(true);
+  });
+
+  it("proposes resolution with majority confirmation", async () => {
+    const raft = new RaftLeader(perfTracker);
+    await raft.electLeader(["agent-a", "agent-b", "agent-c"]);
+
+    const result = await raft.proposeResolution({
+      leaderId: "agent-a",
+      value: "yes",
+      followerIds: ["agent-b", "agent-c"],
+      followerValues: { "agent-b": "yes", "agent-c": "no" },
+    });
+
+    // Leader (yes) + agent-b (yes) = 2 out of 3, majority
+    expect(result.success).toBe(true);
+    expect(result.confirmations).toBe(2);
+    expect(result.required).toBe(2);
+  });
+
+  it("fails resolution without majority", async () => {
+    const raft = new RaftLeader(perfTracker);
+    await raft.electLeader(["a", "b", "c", "d", "e"]);
+
+    const result = await raft.proposeResolution({
+      leaderId: "a",
+      value: "yes",
+      followerIds: ["b", "c", "d", "e"],
+      followerValues: { b: "no", c: "no", d: "no", e: "no" },
+    });
+
+    // Only leader confirms = 1 out of 5, need 3
+    expect(result.success).toBe(false);
+    expect(result.confirmations).toBe(1);
+  });
+
+  it("throws with no agents", async () => {
+    const raft = new RaftLeader(perfTracker);
+    await expect(raft.electLeader([])).rejects.toThrow("No agents");
+  });
+});
diff --git a/extensions/agent-mesh/raft-leader.ts b/extensions/agent-mesh/raft-leader.ts
new file mode 100644
index 00000000..4b257d7c
--- /dev/null
+++ b/extensions/agent-mesh/raft-leader.ts
@@ -0,0 +1,130 @@
+/**
+ * Leader-Score Election (Kimeru extension)
+ *
+ * Leader election based on EMA performance scores. Local simulation,
+ * NOT a real Raft implementation. There is no distributed log replication,
+ * no heartbeats, and no term-based leader fencing. The "leader" is simply
+ * the agent with the highest performance score at election time.
+ *
+ * Leader proposes resolution, majority of followers must confirm.
+ */
+
+import type { PerformanceTracker } from "./performance-tracker.js";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export type LeaderElectionResult = {
+  leaderId: string; // winning agent (highest EMA score)
+  leaderScore: number; // winner's score at election time
+  candidates: Array<{ agentId: string; score: number }>; // sorted by score, descending
+  term: number; // monotonically increasing election counter
+};
+
+export type RaftConsensusResult = {
+  success: boolean; // true when confirmations reached the majority threshold
+  leaderId: string;
+  proposedValue: string;
+  confirmations: number; // leader + followers whose value matched
+  required: number; // floor(total voters / 2) + 1
+  term: number; // term in which the proposal was made
+};
+
+// ============================================================================
+// RaftLeader
+// ============================================================================
+
+export class RaftLeader {
+  private currentTerm = 0;
+  private currentLeader: string | null = null;
+
+  constructor(
+    private readonly perfTracker: PerformanceTracker,
+    private readonly leaderTimeoutMs: number = 30_000, // reserved: not yet enforced here
+    private readonly maxReElections: number = 3, // reserved: not yet enforced here
+  ) {}
+
+  /**
+   * Elect a leader based on highest EMA performance score.
+   */
+  async electLeader(agentIds: string[], excludeAgent?: string): Promise<LeaderElectionResult> {
+    if (agentIds.length === 0) {
+      throw new Error("No agents available for leader election");
+    }
+
+    const candidates: Array<{ agentId: string; score: number }> = [];
+
+    for (const agentId of agentIds) {
+      if (agentId === excludeAgent) continue;
+      const score = await this.perfTracker.getScore(agentId);
+      candidates.push({ agentId, score });
+    }
+
+    if (candidates.length === 0) {
+      throw new Error("No eligible candidates after exclusion");
+    }
+
+    // Sort by score descending
+    candidates.sort((a, b) => b.score - a.score);
+
+    this.currentTerm++;
+    this.currentLeader = candidates[0]!.agentId;
+
+    return {
+      leaderId: candidates[0]!.agentId,
+      leaderScore: candidates[0]!.score,
+      candidates,
+      term: this.currentTerm,
+    };
+  }
+
+  /**
+   * Leader proposes a resolution; majority of followers must confirm.
+   */
+  async proposeResolution(params: {
+    leaderId: string;
+    value: string;
+    followerIds: string[];
+    followerValues: Record<string, string>;
+  }): Promise<RaftConsensusResult> {
+    const { leaderId, value, followerIds, followerValues } = params;
+    const totalVoters = followerIds.length + 1; // followers + leader
+    const required = Math.floor(totalVoters / 2) + 1;
+
+    // Leader always confirms its own proposal
+    let confirmations = 1;
+
+    // Followers confirm if their value matches the leader's proposal
+    for (const followerId of followerIds) {
+      const followerValue = followerValues[followerId];
+      if (followerValue === value) {
+        confirmations++;
+      }
+    }
+
+    return {
+      success: confirmations >= required,
+      leaderId,
+      proposedValue: value,
+      confirmations,
+      required,
+      term: this.currentTerm,
+    };
+  }
+
+  /**
+   * Re-elect a new leader, optionally excluding the current leader.
+   */
+  async reElect(agentIds: string[], excludeAgent?: string): Promise<LeaderElectionResult> {
+    return this.electLeader(agentIds, excludeAgent);
+  }
+
+  getCurrentTerm(): number {
+    return this.currentTerm;
+  }
+
+  getCurrentLeader(): string | null {
+    return this.currentLeader;
+  }
+}
diff --git a/extensions/agent-mesh/task-router.test.ts b/extensions/agent-mesh/task-router.test.ts
new file mode 100644
index 00000000..4537da71
--- /dev/null
+++ b/extensions/agent-mesh/task-router.test.ts
@@ -0,0 +1,82 @@
+import { describe, it, expect } from "vitest";
+import { TaskRouter } from "./task-router.js";
+import { PerformanceTracker } from "./performance-tracker.js";
+
+// Minimal stubs — no real Cortex
+const perfTracker = new PerformanceTracker(null, "test");
+
+describe("TaskRouter", () => {
+  it("classifyTask detects code-review", () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const c = router.classifyTask("review the TypeScript code for bugs");
+    expect(c.taskType).toBe("code-review");
+    expect(c.domain).toBe("typescript"); // domain name appears verbatim in description
+  });
+
+  it("classifyTask detects security-scan", () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const c = router.classifyTask("run a security audit on the API");
+    expect(c.taskType).toBe("security-scan");
+  });
+
+  it("classifyTask detects complexity", () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const short = router.classifyTask("fix bug");
+    expect(short.complexity).toBe("low");
+
+    const long = router.classifyTask(
+      "Review all the entire codebase for multiple security vulnerabilities and create a comprehensive report " +
+        "covering each module individually with recommendations for each finding",
+    );
+    expect(long.complexity).toBe("high"); // broad-scope + multi-target wording
+  });
+
+  it("classifyTask detects domain from path", () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const c = router.classifyTask("fix the issue", "src/main.rs");
+    expect(c.domain).toBe("rust"); // file extension is checked before description
+  });
+
+  it("selectAgent with single agent returns it", async () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const decision = await router.selectAgent("review code", ["agent-a"]);
+    expect(decision.agentId).toBe("agent-a");
+    expect(decision.confidence).toBe(1.0);
+  });
+
+  it("selectAgent with override returns override", async () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const decision = await router.selectAgent(
+      "review code",
+      ["agent-a", "agent-b"],
+      undefined,
+      "agent-b",
+    );
+    expect(decision.agentId).toBe("agent-b");
+  });
+
+  it("selectAgent throws with no agents", async () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    await expect(router.selectAgent("task", [])).rejects.toThrow("No available agents");
+  });
+
+  it("recordReward and computeReward", async () => {
+    const router = new TaskRouter(null, "test", perfTracker);
+    const decision = await router.selectAgent("implement feature", ["a", "b"]);
+
+    const reward = router.computeReward({
+      completed: true,
+      findings: 5,
+      conflicts: 0,
+      durationMs: 10_000,
+      costUsd: 0.01,
+    });
+
+    expect(reward.completion).toBe(1.0);
+    expect(reward.total).toBeGreaterThan(0);
+
+    // Should not throw; reward updates the Q-table for the pending decision
+    await router.recordReward(decision.routingId, reward);
+    expect(router.size()).toBeGreaterThan(0);
+  });
+});
diff --git a/extensions/agent-mesh/task-router.ts b/extensions/agent-mesh/task-router.ts
new file mode 100644
index 00000000..4038dde3
--- /dev/null
+++ b/extensions/agent-mesh/task-router.ts
@@ -0,0 +1,387 @@
+/**
+ * Task Router (Miteru)
+ *
+ * Q-Learning based routing of tasks to agents. Learns which agent
+ * handles which type of task best based on performance history.
+ */
+
+import { randomUUID } from "node:crypto";
+import type { CortexClient } from "../shared/cortex-client.js";
+import type { PerformanceTracker } from "./performance-tracker.js";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+export type TaskClassification = {
+  taskType: string; // e.g., "code-review", "security-scan", "implementation"
+  complexity: "low" | "medium" | "high";
+  domain: string; // e.g., "typescript", "python", "general"
+};
+
+export type RoutingDecision = {
+  routingId: string; // short id correlating a decision with its later reward
+  agentId: string; // agent selected for the task
+  stateKey: string; // Q-table state key ("" for overrides / single-agent cases)
+  confidence: number; // 0-1 heuristic confidence in the selection
+  reason: string; // human-readable explanation of the selection
+};
+
+export type RoutingReward = {
+  completion: number; // 0.35 weight
+  quality: number; // 0.30 weight
+  costEfficiency: number; // 0.20 weight
+  speed: number; // 0.15 weight
+  total: number; // weighted sum of the components above
+};
+
+type QTableMap = Map<string, Map<string, number>>; // state key -> (agent id -> Q-value)
+
+// ============================================================================
+// Constants
+// ============================================================================
+
+const ALPHA = 0.1; // Q-learning rate
+const GAMMA = 0.9; // discount factor
+const EPSILON_INIT = 0.15; // initial exploration rate
+const EPSILON_DECAY = 0.995; // multiplicative decay per recorded reward
+const EPSILON_MIN = 0.05; // exploration floor
+const CORTEX_PREFIX = "miteru:qtable:"; // Cortex triple naming prefix
+
+// ============================================================================
+// Task classification helpers
+// ============================================================================
+
+const TASK_TYPE_KEYWORDS: Record<string, string[]> = {
+  "code-review": ["review", "check", "lint", "inspect"],
+  "security-scan": ["security", "vulnerability", "cve", "audit", "pentest"],
+  implementation: ["implement", "build", "create", "add", "feature"],
+  refactoring: ["refactor", "clean", "simplify", "restructure"],
+  testing: ["test", "spec", "coverage", "assertion"],
+  documentation: ["document", "docs", "readme", "explain"],
+  debugging: ["debug", "fix", "bug", "error", "crash"],
+  analysis: ["analyze", "report", "benchmark", "profile"],
+};
+
+const DOMAIN_EXTENSIONS: Record<string, string[]> = {
+  typescript: [".ts", ".tsx"],
+  javascript: [".js", ".jsx", ".mjs"],
+  python: [".py"],
+  rust: [".rs"],
+  go: [".go"],
+  java: [".java"],
+};
+
+function detectTaskType(description: string): string {
+  const lower = description.toLowerCase();
+  let bestType = "general"; // fallback when no keyword matches
+  let bestScore = 0;
+
+  for (const [type, keywords] of Object.entries(TASK_TYPE_KEYWORDS)) {
+    let score = 0;
+    for (const kw of keywords) {
+      if (lower.includes(kw)) score++; // one point per matched keyword
+    }
+    if (score > bestScore) { // strict '>' keeps the earliest type on ties
+      bestScore = score;
+      bestType = type;
+    }
+  }
+
+  return bestType;
+}
+
+function detectComplexity(description: string): "low" | "medium" | "high" {
+  const words = description.split(/\s+/).length;
+  const hasScope = /\b(all|entire|full|complete|whole)\b/i.test(description); // broad-scope wording
+  const hasMultiple = /\b(multiple|several|many|each|every)\b/i.test(description); // multi-target wording
+
+  if (words > 100 || (hasScope && hasMultiple)) return "high";
+  if (words > 30 || hasScope || hasMultiple) return "medium";
+  return "low";
+}
+
+function detectDomain(description: string, path?: string): string {
+  // Check path first — a file extension is the strongest domain signal
+  if (path) {
+    for (const [domain, exts] of Object.entries(DOMAIN_EXTENSIONS)) {
+      for (const ext of exts) {
+        if (path.endsWith(ext)) return domain;
+      }
+    }
+  }
+
+  // Check description keywords (domain name mentioned verbatim)
+  const lower = description.toLowerCase();
+  for (const domain of Object.keys(DOMAIN_EXTENSIONS)) {
+    if (lower.includes(domain)) return domain;
+  }
+
+  return "general";
+}
+
+// ============================================================================
+// TaskRouter
+// ============================================================================
+
+export class TaskRouter {
+  private qTable: QTableMap = new Map();
+  private epsilon = EPSILON_INIT;
+  private pendingDecisions = new Map<string, { stateKey: string; agentId: string }>();
+
+  constructor(
+    private readonly client: CortexClient | null,
+    private readonly ns: string,
+    private readonly perfTracker: PerformanceTracker,
+  ) {}
+
+  /**
+   * Classify a task description into structured classification.
+   */
+  classifyTask(description: string, path?: string): TaskClassification {
+    return {
+      taskType: detectTaskType(description),
+      complexity: detectComplexity(description),
+      domain: detectDomain(description, path),
+    };
+  }
+
+  /**
+   * Select the best agent for a task using Q-Learning.
+   * Falls back to the first available agent if Q-table is empty.
+   */
+  async selectAgent(
+    description: string,
+    available: string[],
+    path?: string,
+    override?: string,
+  ): Promise<RoutingDecision> {
+    const routingId = randomUUID().slice(0, 8);
+
+    // Explicit override
+    if (override && available.includes(override)) {
+      return {
+        routingId,
+        agentId: override,
+        stateKey: "",
+        confidence: 1.0,
+        reason: `Explicit override: ${override}`,
+      };
+    }
+
+    if (available.length === 0) {
+      throw new Error("No available agents for routing");
+    }
+
+    if (available.length === 1) {
+      return {
+        routingId,
+        agentId: available[0]!,
+        stateKey: "",
+        confidence: 1.0,
+        reason: "Only one agent available",
+      };
+    }
+
+    const classification = this.classifyTask(description, path);
+    const stateKey = `${classification.taskType}:${classification.complexity}:${classification.domain}`;
+
+    // Epsilon-greedy
+    let agentId: string;
+    let confidence: number;
+    let reason: string;
+
+    if (Math.random() < this.epsilon) {
+      // Exploration
+      agentId = available[Math.floor(Math.random() * available.length)]!;
+      confidence = 0.5;
+      reason = `Exploration (ε=${this.epsilon.toFixed(3)})`;
+    } else {
+      // Exploitation
+      const stateActions = this.qTable.get(stateKey);
+      if (!stateActions || stateActions.size === 0) {
+        // No Q-values: prefer agent with best EMA score
+        let bestAgent = available[0]!;
+        let bestScore = -Infinity;
+        for (const a of available) {
+          const score = await this.perfTracker.getScore(a);
+          if (score > bestScore) {
+            bestScore = score;
+            bestAgent = a;
+          }
+        }
+        agentId = bestAgent;
+        confidence = 0.6;
+        reason = `Performance-based (no Q-data for ${stateKey})`;
+      } else {
+        let bestAgent = available[0]!;
+        let bestQ = -Infinity;
+        for (const a of available) {
+          const q = stateActions.get(a) ?? 0;
+          if (q > bestQ) {
+            bestQ = q;
+            bestAgent = a;
+          }
+        }
+        agentId = bestAgent;
+        confidence = Math.min(1.0, 0.7 + bestQ * 0.1);
+        reason = `Q-value ${bestQ.toFixed(3)} for state ${stateKey}`;
+      }
+    }
+
+    this.pendingDecisions.set(routingId, { stateKey, agentId });
+
+    return { routingId, agentId, stateKey, confidence, reason };
+  }
+
+  /**
+   * Record reward for a completed routing decision.
+   */
+  async recordReward(routingId: string, reward: RoutingReward): Promise<void> {
+    const decision = this.pendingDecisions.get(routingId);
+    if (!decision) return;
+    this.pendingDecisions.delete(routingId);
+
+    const { stateKey, agentId } = decision;
+    if (!stateKey) return; // was an override
+
+    // Q-Learning update
+    const currentQ = this.getQ(stateKey, agentId);
+    const maxNextQ = this.maxQ(stateKey);
+    const newQ = currentQ + ALPHA * (reward.total + GAMMA * maxNextQ - currentQ);
+    this.setQ(stateKey, agentId, newQ);
+
+    // Decay epsilon
+    this.epsilon = Math.max(EPSILON_MIN, this.epsilon * EPSILON_DECAY);
+
+    // Persist to Cortex
+    await this.persistQValue(stateKey, agentId, newQ);
+  }
+
+  /**
+   * Compute a reward signal from task outcome.
+   */
+  computeReward(outcome: {
+    completed: boolean;
+    findings: number;
+    conflicts: number;
+    durationMs: number;
+    costUsd: number;
+  }): RoutingReward {
+    const completion = outcome.completed ? 1.0 : 0.0;
+    const quality = Math.min(1.0, outcome.findings * 0.1) - Math.min(0.5, outcome.conflicts * 0.1);
+    const costEfficiency = Math.max(0, 1.0 - outcome.costUsd * 2);
+    const speed = Math.max(0, 1.0 - outcome.durationMs / 300_000); // 5 min baseline
+
+    const total = completion * 0.35 + quality * 0.3 + costEfficiency * 0.2 + speed * 0.15;
+
+    return { completion, quality, costEfficiency, speed, total };
+  }
+
+  /**
+   * Load Q-table from Cortex.
+   */
+  async loadFromCortex(): Promise<void> {
+    if (!this.client) return;
+
+    try {
+      const result = await this.client.patternQuery({
+        predicate: `${this.ns}:${CORTEX_PREFIX}`,
+        limit: 5000,
+      });
+
+      // Also try listing triples with subject prefix
+      const listResult = await this.client.listTriples({
+        subject: `${this.ns}:${CORTEX_PREFIX}`,
+        limit: 5000,
+      });
+
+      for (const triple of [...(result.matches ?? []), ...listResult.triples]) {
+        const subject = String(triple.subject);
+        const prefix = `${this.ns}:${CORTEX_PREFIX}`;
+        if (!subject.startsWith(prefix)) continue;
+
+        const stateKey = subject.slice(prefix.length);
+        const pred = String(triple.predicate);
+        const agentPrefix = `${this.ns}:miteru:q:`;
+        if (!pred.startsWith(agentPrefix)) continue;
+
+        const agentId = pred.slice(agentPrefix.length);
+        const value =
+          typeof triple.object === "object" && triple.object !== null && "node" in triple.object
+            ? Number((triple.object as { node: string }).node)
+            : Number(triple.object);
+
+        if (!Number.isNaN(value)) {
+          this.setQ(stateKey, agentId, value);
+        }
+      }
+    } catch {
+      // best-effort
+    }
+  }
+
+  /**
+   * Get the current epsilon value.
+   */
+  getEpsilon(): number {
+    return this.epsilon;
+  }
+
+  /**
+   * Get Q-table size.
+   */
+  size(): number {
+    let count = 0;
+    for (const actions of this.qTable.values()) {
+      count += actions.size;
+    }
+    return count;
+  }
+
+  // ---------- Q-table operations ----------
+
+  private getQ(state: string, action: string): number {
+    return this.qTable.get(state)?.get(action) ?? 0;
+  }
+
+  private setQ(state: string, action: string, value: number): void {
+    if (!this.qTable.has(state)) {
+      this.qTable.set(state, new Map());
+    }
+    this.qTable.get(state)!.set(action, value);
+  }
+
+  private maxQ(state: string): number {
+    const actions = this.qTable.get(state);
+    if (!actions || actions.size === 0) return 0;
+    let max = -Infinity;
+    for (const v of actions.values()) {
+      if (v > max) max = v;
+    }
+    return max;
+  }
+
+  private async persistQValue(stateKey: string, agentId: string, value: number): Promise<void> {
+    if (!this.client) return;
+
+    try {
+      const subject = `${this.ns}:${CORTEX_PREFIX}${stateKey}`;
+      const predicate = `${this.ns}:miteru:q:${agentId}`;
+
+      // Delete existing
+      const existing = await this.client.listTriples({ subject, predicate, limit: 1 });
+      for (const t of existing.triples) {
+        if (t.id) await this.client.deleteTriple(t.id);
+      }
+
+      await this.client.createTriple({
+        subject,
+        predicate,
+        object: String(value),
+      });
+    } catch {
+      // best-effort
+    }
+  }
+}
diff --git a/extensions/agent-mesh/workflow-orchestrator.ts b/extensions/agent-mesh/workflow-orchestrator.ts
index b33009ad..9e03eb75 100644
--- a/extensions/agent-mesh/workflow-orchestrator.ts
+++ b/extensions/agent-mesh/workflow-orchestrator.ts
@@ -15,8 +15,13 @@ import type { MergeStrategy } from "./mesh-protocol.js";
import type { NamespaceManager } from "./namespace-manager.js";
import { TeamManager, type TeamManagerConfig } from "./team-manager.js";
import { getWorkflow, listWorkflows as listDefs } from "./workflows/registry.js";
+import type { TaskRouter } from "./task-router.js";
+import type { ConsensusEngine } from "./consensus-engine.js";
+import type { PerformanceTracker } from "./performance-tracker.js";
import type {
PhaseResult,
+ RoutingDecisionEntry,
+ ConsensusResultEntry,
WorkflowDefinition,
WorkflowEntry,
WorkflowResult,
@@ -59,6 +64,9 @@ export class WorkflowOrchestrator {
private readonly mailbox?: AgentMailbox,
private readonly bgTracker?: BackgroundTracker,
private readonly phaseTimeoutMs: number = DEFAULT_PHASE_TIMEOUT_MS,
+ private readonly taskRouter?: TaskRouter,
+ private readonly consensusEngine?: ConsensusEngine,
+ private readonly perfTracker?: PerformanceTracker,
) {
this.teamMgr = teamMgr;
}
@@ -84,14 +92,39 @@ export class WorkflowOrchestrator {
const targetPath = opts.path ?? ".";
const config = opts.config ?? {};
- // Interpolate ${path} in agent task templates
- const phases = def.phases.map((phase) => ({
- ...phase,
- agents: phase.agents.map((agent) => ({
- ...agent,
- task: agent.task.replace(/\$\{path\}/g, targetPath),
- })),
- }));
+ // Interpolate ${path} in agent task templates and apply Miteru routing
+ const phases = await Promise.all(
+ def.phases.map(async (phase) => {
+ const agents = await Promise.all(
+ phase.agents.map(async (agent) => {
+ const interpolatedTask = agent.task.replace(/\$\{path\}/g, targetPath);
+
+ // Miteru routing: select best agent for this task
+ if (this.taskRouter) {
+ const available = phase.agents.map((a) => a.agentId);
+ try {
+ const decision = await this.taskRouter.selectAgent(
+ interpolatedTask,
+ available,
+ targetPath,
+ );
+ return {
+ ...agent,
+ task: interpolatedTask,
+ routingId: decision.routingId,
+ routedAgentId: decision.agentId,
+ };
+ } catch {
+ // Fallback to hardcoded agent
+ }
+ }
+
+ return { ...agent, task: interpolatedTask };
+ }),
+ );
+ return { ...phase, agents };
+ }),
+ );
const firstPhase = phases[0]?.name ?? "done";
@@ -402,6 +435,78 @@ export class WorkflowOrchestrator {
await this.updateField(workflowId, "state", "merging");
const mergeResult = await this.teamMgr.mergeTeamResults(workflow.teamId);
+ // Record routing rewards and performance outcomes
+ const routingDecisions: RoutingDecisionEntry[] = [];
+ for (const agent of phase.agents) {
+ const memberResult = mergeResult.memberResults.find((m) => m.agentId === agent.agentId);
+
+ // Record performance outcome
+ if (this.perfTracker && memberResult) {
+ await this.perfTracker.recordOutcome({
+ agentId: agent.agentId,
+ completed: true,
+ durationMs: Date.now() - startTime,
+ costUsd: 0, // cost tracking happens in token-economy
+ findings: memberResult.findings,
+ conflicts: mergeResult.conflicts,
+ });
+ }
+
+ // Record routing reward
+ if (this.taskRouter && agent.routingId) {
+ const reward = this.taskRouter.computeReward({
+ completed: true,
+ findings: memberResult?.findings ?? 0,
+ conflicts: mergeResult.conflicts,
+ durationMs: Date.now() - startTime,
+ costUsd: 0,
+ });
+ await this.taskRouter.recordReward(agent.routingId, reward);
+
+ routingDecisions.push({
+ routingId: agent.routingId,
+ originalAgentId: agent.agentId,
+ routedAgentId: agent.routedAgentId ?? agent.agentId,
+ stateKey: "",
+ confidence: 0,
+ reason: `reward=${reward.total.toFixed(3)}`,
+ });
+ }
+ }
+
+ // Kimeru consensus: resolve conflicts if engine is available
+ let consensusResults: ConsensusResultEntry[] | undefined;
+ if (this.consensusEngine && mergeResult.conflicts > 0) {
+ try {
+ // Detect conflicts between agent namespaces
+ const agentNs = phase.agents.map((a) => `${this.ns}:agent:${a.agentId}`);
+ if (agentNs.length >= 2) {
+ const conflicts = await this.fusion.detectConflicts(agentNs[0]!, agentNs[1]!);
+ if (conflicts.length > 0) {
+ const agentIdByNs: Record = {};
+ for (const a of phase.agents) {
+ agentIdByNs[`${this.ns}:agent:${a.agentId}`] = a.agentId;
+ }
+ const results = await this.consensusEngine.resolvePhaseConflicts(
+ conflicts,
+ agentIdByNs,
+ "weighted",
+ );
+ consensusResults = results.map((r) => ({
+ id: r.id,
+ resolved: r.resolved,
+ strategy: r.strategy,
+ confidence: r.confidence,
+ resolvedCount: r.breakdown.resolvedCount,
+ totalConflicts: r.breakdown.totalConflicts,
+ }));
+ }
+ }
+ } catch {
+ // Consensus failure doesn't block workflow
+ }
+ }
+
const phaseResult: PhaseResult = {
phase: phase.name,
status: "completed",
@@ -409,6 +514,8 @@ export class WorkflowOrchestrator {
conflicts: mergeResult.conflicts,
duration: Date.now() - startTime,
completedAt: new Date().toISOString(),
+ routingDecisions: routingDecisions.length > 0 ? routingDecisions : undefined,
+ consensusResults,
};
// Store phase result
diff --git a/extensions/agent-mesh/workflows/types.ts b/extensions/agent-mesh/workflows/types.ts
index 1c169599..4346f0b4 100644
--- a/extensions/agent-mesh/workflows/types.ts
+++ b/extensions/agent-mesh/workflows/types.ts
@@ -14,6 +14,8 @@ export type AgentRole = {
agentId: string;
role: string;
task: string;
+ routingId?: string;
+ routedAgentId?: string;
};
// ============================================================================
@@ -65,6 +67,24 @@ export type WorkflowEntry = {
// Phase Result
// ============================================================================
+export type RoutingDecisionEntry = {
+  routingId: string; // correlates with a TaskRouter routing decision
+  originalAgentId: string; // agent hardcoded in the workflow definition
+  routedAgentId: string; // agent chosen by routing (falls back to original)
+  stateKey: string; // Q-table state key (may be empty)
+  confidence: number; // 0-1 confidence of the routing decision
+  reason: string; // human-readable routing/reward explanation
+};
+
+export type ConsensusResultEntry = {
+  id: string; // consensus round identifier
+  resolved: boolean; // whether the conflict set was resolved
+  strategy: string; // e.g. "weighted"
+  confidence: number; // 0-1 confidence of the resolution
+  resolvedCount: number; // conflicts resolved in this round
+  totalConflicts: number; // conflicts considered in this round
+};
+
export type PhaseResult = {
phase: string;
status: "completed" | "failed";
@@ -72,6 +92,8 @@ export type PhaseResult = {
conflicts: number;
duration: number;
completedAt: string;
+ routingDecisions?: RoutingDecisionEntry[];
+ consensusResults?: ConsensusResultEntry[];
};
// ============================================================================
diff --git a/extensions/analytics/package.json b/extensions/analytics/package.json
index 1c732d13..9da66ca7 100644
--- a/extensions/analytics/package.json
+++ b/extensions/analytics/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-analytics",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"type": "module",
"main": "index.ts",
diff --git a/extensions/bash-sandbox/package.json b/extensions/bash-sandbox/package.json
index 00b80ff9..00b90b67 100644
--- a/extensions/bash-sandbox/package.json
+++ b/extensions/bash-sandbox/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-bash-sandbox",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Bash command sandbox with domain allowlist, command blocklist, and dangerous pattern detection",
"type": "module",
diff --git a/extensions/bluebubbles/package.json b/extensions/bluebubbles/package.json
index ba2a15de..f27ab618 100644
--- a/extensions/bluebubbles/package.json
+++ b/extensions/bluebubbles/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-bluebubbles",
- "version": "0.1.13",
+ "version": "0.1.14",
"description": "Mayros BlueBubbles channel plugin",
"license": "MIT",
"type": "module",
diff --git a/extensions/ci-plugin/package.json b/extensions/ci-plugin/package.json
index fa80d3fe..f6904fd4 100644
--- a/extensions/ci-plugin/package.json
+++ b/extensions/ci-plugin/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-ci-plugin",
- "version": "0.1.13",
+ "version": "0.1.14",
"description": "CI/CD pipeline integration for Mayros — GitHub Actions and GitLab CI providers",
"type": "module",
"dependencies": {
diff --git a/extensions/code-indexer/package.json b/extensions/code-indexer/package.json
index d4494c29..b42e83b6 100644
--- a/extensions/code-indexer/package.json
+++ b/extensions/code-indexer/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-code-indexer",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros code indexer plugin — regex-based codebase scanning with RDF triple storage in Cortex",
"type": "module",
diff --git a/extensions/code-tools/package.json b/extensions/code-tools/package.json
index c59ec03a..e19725aa 100644
--- a/extensions/code-tools/package.json
+++ b/extensions/code-tools/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-code-tools",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"type": "module",
"dependencies": {
diff --git a/extensions/copilot-proxy/package.json b/extensions/copilot-proxy/package.json
index 155b132f..f47e4acd 100644
--- a/extensions/copilot-proxy/package.json
+++ b/extensions/copilot-proxy/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-copilot-proxy",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros Copilot Proxy provider plugin",
"type": "module",
diff --git a/extensions/cortex-sync/package.json b/extensions/cortex-sync/package.json
index 116cbb4d..ba871593 100644
--- a/extensions/cortex-sync/package.json
+++ b/extensions/cortex-sync/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-cortex-sync",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Cortex DAG synchronization — peer discovery, delta sync, and cross-device knowledge replication",
"type": "module",
diff --git a/extensions/diagnostics-otel/package.json b/extensions/diagnostics-otel/package.json
index 0f742cb3..324907c5 100644
--- a/extensions/diagnostics-otel/package.json
+++ b/extensions/diagnostics-otel/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-diagnostics-otel",
- "version": "0.1.13",
+ "version": "0.1.14",
"description": "Mayros diagnostics OpenTelemetry exporter",
"license": "MIT",
"type": "module",
diff --git a/extensions/discord/package.json b/extensions/discord/package.json
index 3f7ae72e..97203a0a 100644
--- a/extensions/discord/package.json
+++ b/extensions/discord/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-discord",
- "version": "0.1.13",
+ "version": "0.1.14",
"description": "Mayros Discord channel plugin",
"license": "MIT",
"type": "module",
diff --git a/extensions/eruberu/config.ts b/extensions/eruberu/config.ts
new file mode 100644
index 00000000..3d3ca565
--- /dev/null
+++ b/extensions/eruberu/config.ts
@@ -0,0 +1,179 @@
+/**
+ * Eruberu Configuration
+ *
+ * Parsing and defaults for the intelligent model routing plugin.
+ */
+
+import type { ModelRoutingStrategy } from "../../src/routing/model-router.js";
+
+export type QLearningConfig = {
+ alpha: number;
+ gamma: number;
+ epsilon: number;
+ epsilonDecay: number;
+ minEpsilon: number;
+};
+
+export type EruberuConfig = {
+ enabled: boolean;
+ strategy: "auto" | ModelRoutingStrategy;
+ budgetDrivenFallback: boolean;
+ budgetWarnThreshold: number;
+ budgetCriticalThreshold: number;
+ qLearning: QLearningConfig;
+ persistPath: string;
+ cortexPersist: boolean;
+};
+
+const DEFAULT_ALPHA = 0.1;
+const DEFAULT_GAMMA = 0.9;
+const DEFAULT_EPSILON = 0.15;
+const DEFAULT_EPSILON_DECAY = 0.995;
+const DEFAULT_MIN_EPSILON = 0.05;
+const DEFAULT_PERSIST_PATH = "~/.mayros/eruberu-qtable.json";
+
+function assertAllowedKeys(value: Record<string, unknown>, allowed: string[], label: string) {
+ const unknown = Object.keys(value).filter((key) => !allowed.includes(key));
+ if (unknown.length > 0) {
+ throw new Error(`${label} has unknown keys: ${unknown.join(", ")}`);
+ }
+}
+
+function parseQLearningConfig(raw: unknown): QLearningConfig {
+ if (!raw || typeof raw !== "object" || Array.isArray(raw)) {
+ return {
+ alpha: DEFAULT_ALPHA,
+ gamma: DEFAULT_GAMMA,
+ epsilon: DEFAULT_EPSILON,
+ epsilonDecay: DEFAULT_EPSILON_DECAY,
+ minEpsilon: DEFAULT_MIN_EPSILON,
+ };
+ }
+ const c = raw as Record<string, unknown>;
+ assertAllowedKeys(
+ c,
+ ["alpha", "gamma", "epsilon", "epsilonDecay", "minEpsilon"],
+ "qLearning config",
+ );
+
+ return {
+ alpha: typeof c.alpha === "number" && c.alpha > 0 && c.alpha <= 1 ? c.alpha : DEFAULT_ALPHA,
+ gamma: typeof c.gamma === "number" && c.gamma >= 0 && c.gamma <= 1 ? c.gamma : DEFAULT_GAMMA,
+ epsilon:
+ typeof c.epsilon === "number" && c.epsilon >= 0 && c.epsilon <= 1
+ ? c.epsilon
+ : DEFAULT_EPSILON,
+ epsilonDecay:
+ typeof c.epsilonDecay === "number" && c.epsilonDecay > 0 && c.epsilonDecay <= 1
+ ? c.epsilonDecay
+ : DEFAULT_EPSILON_DECAY,
+ minEpsilon:
+ typeof c.minEpsilon === "number" && c.minEpsilon >= 0 && c.minEpsilon <= 1
+ ? c.minEpsilon
+ : DEFAULT_MIN_EPSILON,
+ };
+}
+
+export function parseEruberuConfig(raw: unknown): EruberuConfig {
+ const cfg = (raw && typeof raw === "object" && !Array.isArray(raw) ? raw : {}) as Record<
+ string,
+ unknown
+ >;
+
+ if (typeof raw === "object" && raw !== null && !Array.isArray(raw)) {
+ assertAllowedKeys(
+ cfg,
+ [
+ "enabled",
+ "strategy",
+ "budgetDrivenFallback",
+ "budgetWarnThreshold",
+ "budgetCriticalThreshold",
+ "qLearning",
+ "persistPath",
+ "cortexPersist",
+ ],
+ "eruberu config",
+ );
+ }
+
+ const validStrategies = ["auto", "default", "fallback", "cost-optimized", "capability"];
+ const strategy =
+ typeof cfg.strategy === "string" && validStrategies.includes(cfg.strategy)
+ ? (cfg.strategy as "auto" | ModelRoutingStrategy)
+ : ("auto" as const);
+
+ const budgetWarnThreshold =
+ typeof cfg.budgetWarnThreshold === "number" &&
+ cfg.budgetWarnThreshold > 0 &&
+ cfg.budgetWarnThreshold <= 1
+ ? cfg.budgetWarnThreshold
+ : 0.8;
+
+ const budgetCriticalThreshold =
+ typeof cfg.budgetCriticalThreshold === "number" &&
+ cfg.budgetCriticalThreshold > 0 &&
+ cfg.budgetCriticalThreshold <= 1
+ ? cfg.budgetCriticalThreshold
+ : 0.95;
+
+ return {
+ enabled: cfg.enabled !== false,
+ strategy,
+ budgetDrivenFallback: cfg.budgetDrivenFallback !== false,
+ budgetWarnThreshold,
+ budgetCriticalThreshold,
+ qLearning: parseQLearningConfig(cfg.qLearning),
+ persistPath:
+ typeof cfg.persistPath === "string" && cfg.persistPath.length > 0
+ ? cfg.persistPath
+ : DEFAULT_PERSIST_PATH,
+ cortexPersist: cfg.cortexPersist !== false,
+ };
+}
+
+export const eruberuConfigSchema = {
+ parse: (value: unknown) => parseEruberuConfig(value),
+ uiHints: {
+ enabled: {
+ label: "Enable Eruberu",
+ help: "Enable intelligent model routing (no-op if only 1 provider configured)",
+ },
+ strategy: {
+ label: "Routing Strategy",
+ placeholder: "auto",
+ help: "auto = Q-Learning adaptive, or fixed: default, fallback, cost-optimized, capability",
+ },
+ budgetDrivenFallback: {
+ label: "Budget-Driven Fallback",
+ help: "Automatically switch to cheaper models when budget is running low",
+ },
+ budgetWarnThreshold: {
+ label: "Budget Warn Threshold",
+ placeholder: "0.8",
+ advanced: true,
+ help: "Budget usage fraction (0-1) that triggers cost-optimized routing",
+ },
+ budgetCriticalThreshold: {
+ label: "Budget Critical Threshold",
+ placeholder: "0.95",
+ advanced: true,
+ help: "Budget usage fraction (0-1) that forces cheapest model",
+ },
+ "qLearning.alpha": {
+ label: "Learning Rate (α)",
+ placeholder: "0.1",
+ advanced: true,
+ },
+ "qLearning.gamma": {
+ label: "Discount Factor (γ)",
+ placeholder: "0.9",
+ advanced: true,
+ },
+ "qLearning.epsilon": {
+ label: "Exploration Rate (ε)",
+ placeholder: "0.15",
+ advanced: true,
+ },
+ },
+};
diff --git a/extensions/eruberu/cortex-persistence.ts b/extensions/eruberu/cortex-persistence.ts
new file mode 100644
index 00000000..14588972
--- /dev/null
+++ b/extensions/eruberu/cortex-persistence.ts
@@ -0,0 +1,125 @@
+/**
+ * Cortex Persistence for Eruberu Q-Table
+ *
+ * Stores Q-table values as RDF triples in Cortex when available,
+ * with fallback to local JSON file.
+ */
+
+import { readFile, writeFile, mkdir } from "node:fs/promises";
+import { dirname } from "node:path";
+import { homedir } from "node:os";
+import type { QTableData } from "./q-learning.js";
+
+// ============================================================================
+// Cortex persistence (primary)
+// ============================================================================
+
+export type CortexPersistenceClient = {
+ createTriple(params: { subject: string; predicate: string; object: string }): Promise<void>;
+ listTriples(params: {
+ subject?: string;
+ predicate?: string;
+ limit?: number;
+ }): Promise<{
+ triples: Array<{ id?: string; subject: string; predicate: string; object: unknown }>;
+ }>;
+ deleteTriple(id: string): Promise<void>;
+};
+
+const SUBJECT = "eruberu:qtable";
+const PREDICATE_PREFIX = "eruberu:qvalue:";
+
+/**
+ * Save Q-table to Cortex as triples.
+ * Each state:action pair is stored as a separate triple.
+ */
+export async function saveToCortex(
+ client: CortexPersistenceClient,
+ data: QTableData,
+): Promise<void> {
+ // Delete existing entries
+ const existing = await client.listTriples({ subject: SUBJECT, limit: 10000 });
+ for (const triple of existing.triples) {
+ if (triple.id) {
+ await client.deleteTriple(triple.id);
+ }
+ }
+
+ // Write new entries
+ for (const [state, actions] of Object.entries(data)) {
+ for (const [action, value] of Object.entries(actions)) {
+ await client.createTriple({
+ subject: SUBJECT,
+ predicate: `${PREDICATE_PREFIX}${state}:${action}`,
+ object: String(value),
+ });
+ }
+ }
+}
+
+/**
+ * Load Q-table from Cortex triples.
+ */
+export async function loadFromCortex(client: CortexPersistenceClient): Promise<QTableData> {
+ const result = await client.listTriples({ subject: SUBJECT, limit: 10000 });
+ const data: QTableData = {};
+
+ for (const triple of result.triples) {
+ const pred = String(triple.predicate);
+ if (!pred.startsWith(PREDICATE_PREFIX)) continue;
+
+ const rest = pred.slice(PREDICATE_PREFIX.length);
+ // Parse "taskType:budgetLevel:timeSlot:strategy:provider?" pattern
+ // State is first 3 segments, action is the remainder
+ const segments = rest.split(":");
+ if (segments.length < 4) continue;
+
+ const stateKey = segments.slice(0, 3).join(":");
+ const actionKey = segments.slice(3).join(":");
+
+ const value =
+ typeof triple.object === "object" && triple.object !== null && "node" in triple.object
+ ? Number((triple.object as { node: string }).node)
+ : Number(triple.object);
+
+ if (isNaN(value)) continue;
+
+ if (!data[stateKey]) data[stateKey] = {};
+ data[stateKey]![actionKey] = value;
+ }
+
+ return data;
+}
+
+// ============================================================================
+// File persistence (fallback)
+// ============================================================================
+
+function resolvePath(path: string): string {
+ if (path.startsWith("~")) {
+ return path.replace("~", homedir());
+ }
+ return path;
+}
+
+/**
+ * Save Q-table to a JSON file.
+ */
+export async function saveToFile(path: string, data: QTableData): Promise<void> {
+ const resolved = resolvePath(path);
+ await mkdir(dirname(resolved), { recursive: true });
+ await writeFile(resolved, JSON.stringify(data, null, 2), "utf-8");
+}
+
+/**
+ * Load Q-table from a JSON file. Returns empty data if file doesn't exist.
+ */
+export async function loadFromFile(path: string): Promise<QTableData> {
+ const resolved = resolvePath(path);
+ try {
+ const content = await readFile(resolved, "utf-8");
+ return JSON.parse(content) as QTableData;
+ } catch {
+ return {};
+ }
+}
diff --git a/extensions/eruberu/index.ts b/extensions/eruberu/index.ts
new file mode 100644
index 00000000..26eb6119
--- /dev/null
+++ b/extensions/eruberu/index.ts
@@ -0,0 +1,487 @@
+/**
+ * Eruberu — Intelligent Model Routing Plugin
+ *
+ * Activates ModelRouter via the before_model_resolve hook and uses
+ * Q-Learning to adaptively select the best provider/model for each task.
+ *
+ * Hooks: before_model_resolve, llm_output, agent_end, session_start, session_end
+ * Tools: routing_status, routing_set_strategy
+ */
+
+import { Type } from "@sinclair/typebox";
+import type { MayrosPluginApi } from "mayros/plugin-sdk";
+import type { ModelRoutingStrategy } from "../../src/routing/model-router.js";
+import { getBudgetBridge } from "../shared/budget-bridge.js";
+import { parseEruberuConfig, type EruberuConfig } from "./config.js";
+import {
+ saveToCortex,
+ loadFromCortex,
+ saveToFile,
+ loadFromFile,
+ type CortexPersistenceClient,
+} from "./cortex-persistence.js";
+import { QTable, stateKey, computeReward, type QState, type RewardSignal } from "./q-learning.js";
+import {
+ classifyTask,
+ classifyBudgetLevel,
+ classifyTimeSlot,
+ type TaskType,
+ type BudgetLevel,
+} from "./task-classifier.js";
+
+// ============================================================================
+// Plugin
+// ============================================================================
+
+const eruberuPlugin = {
+ id: "eruberu",
+ name: "Eruberu",
+ description:
+ "Intelligent model routing with Q-Learning — adapts provider/model selection based on task type, budget, and performance history",
+ kind: "routing" as const,
+
+ async register(api: MayrosPluginApi) {
+ const cfg = parseEruberuConfig(api.pluginConfig);
+
+ if (!cfg.enabled) {
+ api.logger.info("eruberu: disabled via config");
+ return;
+ }
+
+ let qTable: QTable | undefined;
+ let cortexClient: CortexPersistenceClient | null = null;
+ let flushInterval: ReturnType<typeof setInterval> | undefined;
+
+ // Track pending routing decisions for reward computation
+ const pendingDecisions = new Map<
+ string,
+ { state: QState; action: string; startTime: number }
+ >();
+
+ // Available routing strategies (actions for Q-learning)
+ function buildAvailableActions(): string[] {
+ return ["default:", "fallback:", "cost-optimized:", "capability:"];
+ }
+
+ // ========================================================================
+ // Hooks
+ // ========================================================================
+
+ // session_start — load Q-table
+ api.on("session_start", async () => {
+ qTable = new QTable(cfg.qLearning);
+
+ // Try Cortex first, then file fallback
+ try {
+ if (cfg.cortexPersist) {
+ const { CortexClient } = await import("../shared/cortex-client.js");
+ const cortexCfg = (api.pluginConfig as Record<string, unknown> | undefined)?.cortex as
+ | Record<string, unknown>
+ | undefined;
+ const host = (cortexCfg?.host as string | undefined) ?? "127.0.0.1";
+ const port = (cortexCfg?.port as number | undefined) ?? 19090;
+ cortexClient = new CortexClient(host, port) as unknown as CortexPersistenceClient;
+ const data = await loadFromCortex(cortexClient);
+ if (Object.keys(data).length > 0) {
+ qTable.import(data);
+ api.logger.info(`eruberu: loaded ${qTable.size()} Q-values from Cortex`);
+ }
+ }
+ } catch {
+ cortexClient = null;
+ }
+
+ // Fallback to file
+ if (qTable.size() === 0) {
+ try {
+ const data = await loadFromFile(cfg.persistPath);
+ if (Object.keys(data).length > 0) {
+ qTable.import(data);
+ api.logger.info(`eruberu: loaded ${qTable.size()} Q-values from file`);
+ }
+ } catch {
+ // Start fresh
+ }
+ }
+
+ api.logger.info(
+ `eruberu: session started (strategy=${cfg.strategy}, ε=${qTable.getEpsilon().toFixed(3)}, entries=${qTable.size()})`,
+ );
+
+ // Periodic persist every 60s
+ flushInterval = setInterval(async () => {
+ if (!qTable) return;
+ try {
+ await persistQTable(qTable, cortexClient, cfg);
+ } catch {
+ // best-effort
+ }
+ }, 60_000);
+ });
+
+ // before_model_resolve — main routing logic
+ api.on(
+ "before_model_resolve",
+ async (event) => {
+ if (!qTable || !cfg.enabled) return;
+
+ // Skip if agent has explicit model override
+ if (event.modelOverride) return;
+
+ // Determine task type from prompt
+ const prompt = event.prompt ?? event.systemPrompt ?? "";
+ const taskType = classifyTask(prompt);
+
+ // Get budget status
+ const tracker = getBudgetBridge();
+ let budgetLevel: BudgetLevel = "low";
+ let budgetFraction: number | undefined;
+
+ if (tracker) {
+ const status = tracker.getOverallStatus();
+ budgetFraction = status.percent;
+ budgetLevel = classifyBudgetLevel(budgetFraction);
+ }
+
+ const timeSlot = classifyTimeSlot();
+ const state: QState = { taskType, budgetLevel, timeSlot };
+ const sk = stateKey(state);
+
+ // Budget-driven override
+ if (cfg.budgetDrivenFallback && budgetFraction !== undefined) {
+ if (budgetFraction >= cfg.budgetCriticalThreshold) {
+ // Force cheapest model
+ pendingDecisions.set(event.runId, {
+ state,
+ action: "cost-optimized:",
+ startTime: Date.now(),
+ });
+ return {
+ strategyOverride: "cost-optimized" as ModelRoutingStrategy,
+ };
+ }
+ if (budgetFraction >= cfg.budgetWarnThreshold) {
+ pendingDecisions.set(event.runId, {
+ state,
+ action: "cost-optimized:",
+ startTime: Date.now(),
+ });
+ return {
+ strategyOverride: "cost-optimized" as ModelRoutingStrategy,
+ };
+ }
+ }
+
+ // Fixed strategy
+ if (cfg.strategy !== "auto") {
+ return {
+ strategyOverride: cfg.strategy as ModelRoutingStrategy,
+ };
+ }
+
+ // Q-Learning selection
+ const availableActions = buildAvailableActions();
+ const chosenAction = qTable.selectAction(sk, availableActions);
+
+ if (!chosenAction) return;
+
+ // Parse action: "strategy:provider?"
+ const [strategyPart, providerPart] = chosenAction.split(":");
+ const strategy = (strategyPart || "default") as ModelRoutingStrategy;
+
+ pendingDecisions.set(event.runId, {
+ state,
+ action: chosenAction,
+ startTime: Date.now(),
+ });
+
+ const result: Record<string, unknown> = {
+ strategyOverride: strategy,
+ };
+ if (providerPart) {
+ result.preferredProvider = providerPart;
+ }
+
+ return result;
+ },
+ { priority: 50 },
+ );
+
+ // llm_output — collect reward signals
+ api.on("llm_output", async (event) => {
+ if (!qTable) return;
+
+ const decision = pendingDecisions.get(event.runId);
+ if (!decision) return;
+ pendingDecisions.delete(event.runId);
+
+ const latencyMs = Date.now() - decision.startTime;
+ const usage = event.usage as { input?: number; output?: number; total?: number } | undefined;
+ const totalTokens = usage?.total ?? (usage?.input ?? 0) + (usage?.output ?? 0);
+
+ // Compute reward signal
+ const signal: RewardSignal = {
+ success: event.error ? -1.0 : 1.0,
+ costEfficiency: 0,
+ qualityProxy: 0,
+ latencyPenalty: 0,
+ rateLimitPenalty: 0,
+ };
+
+ // Cost efficiency: reward cheaper calls
+ if (totalTokens > 0) {
+ // Normalize: <1k tokens = max efficiency
+ const tokenK = totalTokens / 1000;
+ signal.costEfficiency = Math.max(0, Math.min(0.5, 0.5 - tokenK * 0.01));
+ }
+
+ // Quality proxy: output length relative to input suggests useful response
+ if (usage?.output && usage.output > 50) {
+ signal.qualityProxy = Math.min(0.3, usage.output / 5000);
+ }
+
+ // Latency penalty: penalize slow responses (>30s)
+ if (latencyMs > 30_000) {
+ signal.latencyPenalty = -0.2;
+ }
+
+ // Rate limit penalty
+ if (event.rateLimited) {
+ signal.rateLimitPenalty = -0.8;
+ }
+
+ const reward = computeReward(signal);
+ const currentState = stateKey(decision.state);
+
+ // Next state = same task type, possibly updated budget
+ const tracker = getBudgetBridge();
+ const nextBudgetFraction = tracker?.getOverallStatus().percent;
+ const nextState = stateKey({
+ taskType: decision.state.taskType,
+ budgetLevel: classifyBudgetLevel(nextBudgetFraction),
+ timeSlot: classifyTimeSlot(),
+ });
+
+ qTable.update(currentState, decision.action, reward, nextState);
+ });
+
+ // session_end — persist Q-table
+ api.on("session_end", async () => {
+ if (flushInterval) {
+ clearInterval(flushInterval);
+ flushInterval = undefined;
+ }
+
+ if (qTable) {
+ try {
+ await persistQTable(qTable, cortexClient, cfg);
+ } catch (err) {
+ api.logger.warn(`eruberu: failed to persist Q-table: ${String(err)}`);
+ }
+ }
+
+ pendingDecisions.clear();
+ api.logger.info("eruberu: session ended");
+ });
+
+ // ========================================================================
+ // Tools
+ // ========================================================================
+
+ api.registerTool(
+ {
+ name: "routing_status",
+ label: "Routing Status",
+ description:
+ "Show current Eruberu intelligent routing status — strategy, Q-table size, epsilon, budget level.",
+ parameters: Type.Object({}),
+ async execute() {
+ if (!qTable) {
+ return {
+ content: [{ type: "text", text: "Eruberu not initialized (no active session)." }],
+ details: { error: "not_initialized" },
+ };
+ }
+
+ const tracker = getBudgetBridge();
+ const budgetStatus = tracker?.getOverallStatus();
+ const budgetFraction = budgetStatus?.percent;
+
+ const lines = [
+ `Strategy: ${cfg.strategy}`,
+ `Q-Table entries: ${qTable.size()}`,
+ `Epsilon (ε): ${qTable.getEpsilon().toFixed(4)}`,
+ `Budget level: ${classifyBudgetLevel(budgetFraction)}`,
+ `Budget-driven fallback: ${cfg.budgetDrivenFallback ? "enabled" : "disabled"}`,
+ `Pending decisions: ${pendingDecisions.size}`,
+ ];
+
+ return {
+ content: [{ type: "text", text: lines.join("\n") }],
+ details: {
+ strategy: cfg.strategy,
+ qTableSize: qTable.size(),
+ epsilon: qTable.getEpsilon(),
+ budgetLevel: classifyBudgetLevel(budgetFraction),
+ budgetFraction,
+ pendingDecisions: pendingDecisions.size,
+ },
+ };
+ },
+ },
+ { name: "routing_status" },
+ );
+
+ api.registerTool(
+ {
+ name: "routing_set_strategy",
+ label: "Set Routing Strategy",
+ description:
+ "Change the Eruberu routing strategy at runtime. Options: auto, default, fallback, cost-optimized, capability.",
+ parameters: Type.Object({
+ strategy: Type.String({
+ description: "Routing strategy: auto, default, fallback, cost-optimized, capability",
+ }),
+ }),
+ async execute(_toolCallId, params) {
+ const { strategy } = params as { strategy: string };
+ const valid = ["auto", "default", "fallback", "cost-optimized", "capability"];
+ if (!valid.includes(strategy)) {
+ return {
+ content: [
+ {
+ type: "text",
+ text: `Invalid strategy "${strategy}". Valid: ${valid.join(", ")}`,
+ },
+ ],
+ details: { error: "invalid_strategy" },
+ };
+ }
+
+ (cfg as { strategy: string }).strategy = strategy;
+ return {
+ content: [{ type: "text", text: `Routing strategy set to "${strategy}".` }],
+ details: { strategy },
+ };
+ },
+ },
+ { name: "routing_set_strategy" },
+ );
+
+ // ========================================================================
+ // CLI
+ // ========================================================================
+
+ api.registerCli(
+ ({ program }) => {
+ const routing = program.command("routing").description("Eruberu intelligent model routing");
+
+ routing
+ .command("status")
+ .description("Show routing status and Q-table info")
+ .action(async () => {
+ if (!qTable) {
+ console.log("Eruberu not initialized (no active session).");
+ return;
+ }
+
+ const tracker = getBudgetBridge();
+ const budgetFraction = tracker?.getOverallStatus().percent;
+
+ console.log("Eruberu Routing Status");
+ console.log("─────────────────────");
+ console.log(`Strategy: ${cfg.strategy}`);
+ console.log(`Q-Table: ${qTable.size()} entries`);
+ console.log(`Epsilon (ε): ${qTable.getEpsilon().toFixed(4)}`);
+ console.log(`Budget level: ${classifyBudgetLevel(budgetFraction)}`);
+ console.log(`Budget fallback: ${cfg.budgetDrivenFallback ? "enabled" : "disabled"}`);
+ });
+
+ routing
+ .command("strategy")
+ .description("Set routing strategy")
+ .argument("", "auto, default, fallback, cost-optimized, capability")
+ .action(async (strategy: string) => {
+ const valid = ["auto", "default", "fallback", "cost-optimized", "capability"];
+ if (!valid.includes(strategy)) {
+ console.log(`Invalid strategy "${strategy}". Valid: ${valid.join(", ")}`);
+ return;
+ }
+ (cfg as { strategy: string }).strategy = strategy;
+ console.log(`Routing strategy set to "${strategy}".`);
+ });
+
+ routing
+ .command("reset")
+ .description("Clear Q-table and start fresh")
+ .action(async () => {
+ if (!qTable) {
+ console.log("Eruberu not initialized.");
+ return;
+ }
+ qTable.clear();
+ try {
+ await persistQTable(qTable, cortexClient, cfg);
+ } catch {
+ // best-effort
+ }
+ console.log("Q-table cleared.");
+ });
+ },
+ { commands: ["routing"] },
+ );
+
+ // ========================================================================
+ // Service
+ // ========================================================================
+
+ api.registerService({
+ id: "eruberu",
+ async start() {
+ api.logger.info("eruberu: service started");
+ },
+ async stop() {
+ if (flushInterval) {
+ clearInterval(flushInterval);
+ flushInterval = undefined;
+ }
+ if (qTable) {
+ try {
+ await persistQTable(qTable, cortexClient, cfg);
+ } catch {
+ // best-effort
+ }
+ }
+ qTable = undefined;
+ cortexClient = null;
+ pendingDecisions.clear();
+ api.logger.info("eruberu: service stopped");
+ },
+ });
+ },
+};
+
+// ============================================================================
+// Helpers
+// ============================================================================
+
+async function persistQTable(
+ qTable: QTable,
+ cortexClient: CortexPersistenceClient | null,
+ cfg: EruberuConfig,
+): Promise<void> {
+ const data = qTable.export();
+
+ if (cortexClient && cfg.cortexPersist) {
+ try {
+ await saveToCortex(cortexClient, data);
+ return;
+ } catch {
+ // Fall through to file
+ }
+ }
+
+ await saveToFile(cfg.persistPath, data);
+}
+
+export default eruberuPlugin;
diff --git a/extensions/eruberu/mayros.plugin.json b/extensions/eruberu/mayros.plugin.json
new file mode 100644
index 00000000..d8090d08
--- /dev/null
+++ b/extensions/eruberu/mayros.plugin.json
@@ -0,0 +1,26 @@
+{
+ "id": "eruberu",
+ "kind": "routing",
+ "configSchema": {
+ "type": "object",
+ "properties": {
+ "enabled": { "type": "boolean", "default": true },
+ "strategy": { "type": "string", "default": "auto" },
+ "budgetDrivenFallback": { "type": "boolean", "default": true },
+ "budgetWarnThreshold": { "type": "number", "default": 0.8 },
+ "budgetCriticalThreshold": { "type": "number", "default": 0.95 },
+ "qLearning": {
+ "type": "object",
+ "properties": {
+ "alpha": { "type": "number", "default": 0.1 },
+ "gamma": { "type": "number", "default": 0.9 },
+ "epsilon": { "type": "number", "default": 0.15 },
+ "epsilonDecay": { "type": "number", "default": 0.995 },
+ "minEpsilon": { "type": "number", "default": 0.05 }
+ }
+ },
+ "persistPath": { "type": "string" },
+ "cortexPersist": { "type": "boolean", "default": true }
+ }
+ }
+}
diff --git a/extensions/eruberu/package.json b/extensions/eruberu/package.json
new file mode 100644
index 00000000..84e67662
--- /dev/null
+++ b/extensions/eruberu/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "@apilium/mayros-eruberu",
+ "version": "0.1.14",
+ "private": true,
+ "description": "Mayros intelligent model routing plugin — Q-Learning adaptive provider/model selection",
+ "type": "module",
+ "dependencies": {
+ "@sinclair/typebox": "0.34.48"
+ },
+ "devDependencies": {
+ "@apilium/mayros": "workspace:*"
+ },
+ "mayros": {
+ "extensions": [
+ "./index.ts"
+ ]
+ }
+}
diff --git a/extensions/eruberu/q-learning.test.ts b/extensions/eruberu/q-learning.test.ts
new file mode 100644
index 00000000..c16b4170
--- /dev/null
+++ b/extensions/eruberu/q-learning.test.ts
@@ -0,0 +1,131 @@
+import { describe, it, expect } from "vitest";
+import {
+ QTable,
+ stateKey,
+ computeReward,
+ type QState,
+ type RewardSignal,
+ type QTableData,
+} from "./q-learning.js";
+
+describe("stateKey", () => {
+ it("builds correct key from state", () => {
+ const state: QState = { taskType: "code", budgetLevel: "mid", timeSlot: "peak" };
+ expect(stateKey(state)).toBe("code:mid:peak");
+ });
+});
+
+describe("computeReward", () => {
+ it("computes sum of all signal components", () => {
+ const signal: RewardSignal = {
+ success: 1.0,
+ costEfficiency: 0.3,
+ qualityProxy: 0.2,
+ latencyPenalty: 0,
+ rateLimitPenalty: 0,
+ };
+ expect(computeReward(signal)).toBeCloseTo(1.5);
+ });
+
+ it("handles negative penalties", () => {
+ const signal: RewardSignal = {
+ success: -1.0,
+ costEfficiency: 0,
+ qualityProxy: 0,
+ latencyPenalty: -0.2,
+ rateLimitPenalty: -0.8,
+ };
+ expect(computeReward(signal)).toBeCloseTo(-2.0);
+ });
+});
+
+describe("QTable", () => {
+ const config = {
+ alpha: 0.1,
+ gamma: 0.9,
+ epsilon: 0.15,
+ epsilonDecay: 0.995,
+ minEpsilon: 0.05,
+ };
+
+ it("get/set Q-values", () => {
+ const qt = new QTable(config);
+ expect(qt.getQ("s1", "a1")).toBe(0);
+
+ qt.setQ("s1", "a1", 1.5);
+ expect(qt.getQ("s1", "a1")).toBe(1.5);
+ });
+
+ it("maxQ returns highest value for state", () => {
+ const qt = new QTable(config);
+ qt.setQ("s1", "a1", 0.5);
+ qt.setQ("s1", "a2", 1.2);
+ qt.setQ("s1", "a3", 0.8);
+
+ expect(qt.maxQ("s1")).toBeCloseTo(1.2);
+ expect(qt.maxQ("s_unknown")).toBe(0);
+ });
+
+ it("update applies Q-learning formula", () => {
+ const qt = new QTable(config);
+ qt.setQ("s1", "a1", 0.0);
+ qt.setQ("s2", "a1", 1.0);
+
+ // Q(s,a) += α(r + γ·max Q(s',a') - Q(s,a))
+ // Q(s1,a1) += 0.1 * (0.5 + 0.9 * 1.0 - 0.0) = 0.1 * 1.4 = 0.14
+ qt.update("s1", "a1", 0.5, "s2");
+ expect(qt.getQ("s1", "a1")).toBeCloseTo(0.14);
+ });
+
+ it("epsilon decays after update", () => {
+ const qt = new QTable({ ...config, epsilon: 0.5 });
+ const before = qt.getEpsilon();
+ qt.update("s1", "a1", 1.0, "s1");
+ expect(qt.getEpsilon()).toBeLessThan(before);
+ });
+
+ it("epsilon floor is respected", () => {
+ const qt = new QTable({ ...config, epsilon: 0.05, epsilonDecay: 0.5 });
+ qt.update("s1", "a1", 1.0, "s1");
+ expect(qt.getEpsilon()).toBeGreaterThanOrEqual(config.minEpsilon);
+ });
+
+ it("selectAction returns action from available list", () => {
+ const qt = new QTable({ ...config, epsilon: 0 }); // no exploration
+ qt.setQ("s1", "a1", 0.5);
+ qt.setQ("s1", "a2", 1.5);
+
+ const action = qt.selectAction("s1", ["a1", "a2", "a3"]);
+ expect(action).toBe("a2"); // highest Q-value
+ });
+
+ it("selectAction returns null for empty actions", () => {
+ const qt = new QTable(config);
+ expect(qt.selectAction("s1", [])).toBeNull();
+ });
+
+ it("export/import roundtrip", () => {
+ const qt = new QTable(config);
+ qt.setQ("s1", "a1", 0.5);
+ qt.setQ("s1", "a2", 1.0);
+ qt.setQ("s2", "a1", 0.3);
+
+ const exported = qt.export();
+ expect(exported["s1"]?.["a1"]).toBeCloseTo(0.5);
+ expect(exported["s1"]?.["a2"]).toBeCloseTo(1.0);
+
+ const qt2 = new QTable(config);
+ qt2.import(exported);
+ expect(qt2.getQ("s1", "a1")).toBeCloseTo(0.5);
+ expect(qt2.getQ("s2", "a1")).toBeCloseTo(0.3);
+ expect(qt2.size()).toBe(3);
+ });
+
+ it("clear removes all entries", () => {
+ const qt = new QTable(config);
+ qt.setQ("s1", "a1", 1.0);
+ expect(qt.size()).toBe(1);
+ qt.clear();
+ expect(qt.size()).toBe(0);
+ });
+});
diff --git a/extensions/eruberu/q-learning.ts b/extensions/eruberu/q-learning.ts
new file mode 100644
index 00000000..7e1e5719
--- /dev/null
+++ b/extensions/eruberu/q-learning.ts
@@ -0,0 +1,201 @@
+/**
+ * Q-Learning Engine for Eruberu
+ *
+ * Tabular Q-Learning with epsilon-greedy exploration.
+ * State: (taskType, budgetLevel, timeSlot)
+ * Action: (strategy, provider?) — e.g., "cost-optimized:", "capability:anthropic"
+ */
+
+import type { QLearningConfig } from "./config.js";
+import type { TaskType, BudgetLevel, TimeSlot } from "./task-classifier.js";
+
+// ============================================================================
+// Types
+// ============================================================================
+
+/** Discrete RL state: the triple that indexes the Q-table. */
+export type QState = {
+  taskType: TaskType;
+  budgetLevel: BudgetLevel;
+  timeSlot: TimeSlot;
+};
+
+export type QAction = string; // e.g., "cost-optimized:", "capability:anthropic"
+
+/** Components summed by computeReward into a single scalar. */
+export type RewardSignal = {
+  success: number; // +1.0 or -1.0
+  costEfficiency: number; // 0 to +0.5
+  qualityProxy: number; // 0 to +0.3
+  latencyPenalty: number; // 0 or -0.2
+  rateLimitPenalty: number; // 0 or -0.8
+};
+
+/** Serialized Q-table: state key -> (action -> Q-value). */
+export type QTableData = Record<string, Record<string, number>>;
+
+// ============================================================================
+// Helpers
+// ============================================================================
+
+/** Build the canonical string key for a state triple. */
+export function stateKey(state: QState): string {
+  return `${state.taskType}:${state.budgetLevel}:${state.timeSlot}`;
+}
+
+/** Sum all reward components into one scalar reward. */
+export function computeReward(signal: RewardSignal): number {
+  return (
+    signal.success +
+    signal.costEfficiency +
+    signal.qualityProxy +
+    signal.latencyPenalty +
+    signal.rateLimitPenalty
+  );
+}
+
+// ============================================================================
+// Q-Table
+// ============================================================================
+
+export class QTable {
+  // state key -> (action -> Q-value); unseen pairs read as 0 via getQ.
+  // Generic arguments restored: the bare `Map>` in the original did not compile.
+  private table: Map<string, Map<string, number>> = new Map();
+  private epsilon: number;
+  private readonly alpha: number;
+  private readonly gamma: number;
+  private readonly epsilonDecay: number;
+  private readonly minEpsilon: number;
+
+  constructor(config: QLearningConfig) {
+    this.alpha = config.alpha;
+    this.gamma = config.gamma;
+    this.epsilon = config.epsilon;
+    this.epsilonDecay = config.epsilonDecay;
+    this.minEpsilon = config.minEpsilon;
+  }
+
+  /**
+   * Get Q-value for a state-action pair. Returns 0 if unseen.
+   */
+  getQ(state: string, action: string): number {
+    return this.table.get(state)?.get(action) ?? 0;
+  }
+
+  /**
+   * Set Q-value for a state-action pair.
+   */
+  setQ(state: string, action: string, value: number): void {
+    if (!this.table.has(state)) {
+      this.table.set(state, new Map());
+    }
+    this.table.get(state)!.set(action, value);
+  }
+
+  /**
+   * Get all Q-values for a state as an action -> Q-value map.
+   * NOTE: returns the live internal map when the state exists — callers
+   * must treat it as read-only.
+   */
+  getStateActions(state: string): Map<string, number> {
+    return this.table.get(state) ?? new Map();
+  }
+
+  /**
+   * Get the max Q-value across all actions for a state (0 for unseen states).
+   */
+  maxQ(state: string): number {
+    const actions = this.table.get(state);
+    if (!actions || actions.size === 0) return 0;
+    let max = -Infinity;
+    for (const v of actions.values()) {
+      if (v > max) max = v;
+    }
+    return max;
+  }
+
+  /**
+   * Epsilon-greedy action selection: with probability epsilon pick a random
+   * action, otherwise the action with the highest known Q-value (unseen
+   * actions score 0). Returns null if no actions are given.
+   */
+  selectAction(state: string, availableActions: string[]): string | null {
+    if (availableActions.length === 0) return null;
+
+    // Exploration: random action
+    if (Math.random() < this.epsilon) {
+      return availableActions[Math.floor(Math.random() * availableActions.length)]!;
+    }
+
+    // Exploitation: best known Q-value
+    let bestAction = availableActions[0]!;
+    let bestQ = -Infinity;
+
+    for (const action of availableActions) {
+      const q = this.getQ(state, action);
+      if (q > bestQ) {
+        bestQ = q;
+        bestAction = action;
+      }
+    }
+
+    return bestAction;
+  }
+
+  /**
+   * Q-Learning update: Q(s,a) += α(r + γ·max Q(s',a') - Q(s,a)).
+   * Also decays epsilon multiplicatively, clamped at minEpsilon.
+   */
+  update(state: string, action: string, reward: number, nextState: string): void {
+    const currentQ = this.getQ(state, action);
+    const maxNextQ = this.maxQ(nextState);
+    const newQ = currentQ + this.alpha * (reward + this.gamma * maxNextQ - currentQ);
+    this.setQ(state, action, newQ);
+
+    // Decay epsilon
+    this.epsilon = Math.max(this.minEpsilon, this.epsilon * this.epsilonDecay);
+  }
+
+  /**
+   * Get current epsilon value.
+   */
+  getEpsilon(): number {
+    return this.epsilon;
+  }
+
+  /**
+   * Get total number of state-action entries.
+   */
+  size(): number {
+    let count = 0;
+    for (const actions of this.table.values()) {
+      count += actions.size;
+    }
+    return count;
+  }
+
+  /**
+   * Export Q-table as serializable data (deep copy; safe to persist).
+   */
+  export(): QTableData {
+    const data: QTableData = {};
+    for (const [state, actions] of this.table) {
+      data[state] = {};
+      for (const [action, value] of actions) {
+        data[state]![action] = value;
+      }
+    }
+    return data;
+  }
+
+  /**
+   * Import Q-table from serialized data, replacing all current entries.
+   */
+  import(data: QTableData): void {
+    this.table.clear();
+    for (const [state, actions] of Object.entries(data)) {
+      const actionMap = new Map<string, number>();
+      for (const [action, value] of Object.entries(actions)) {
+        actionMap.set(action, value);
+      }
+      this.table.set(state, actionMap);
+    }
+  }
+
+  /**
+   * Clear all entries.
+   */
+  clear(): void {
+    this.table.clear();
+  }
+}
diff --git a/extensions/eruberu/task-classifier.test.ts b/extensions/eruberu/task-classifier.test.ts
new file mode 100644
index 00000000..eed8ea3b
--- /dev/null
+++ b/extensions/eruberu/task-classifier.test.ts
@@ -0,0 +1,53 @@
+import { describe, it, expect } from "vitest";
+import { classifyTask, classifyBudgetLevel, classifyTimeSlot } from "./task-classifier.js";
+
+// NOTE(review): these expectations encode the classifier's keyword lists and
+// its tie-breaking order (code > analysis > creative); update them together.
+describe("classifyTask", () => {
+  it("classifies code-related prompts", () => {
+    expect(classifyTask("implement a function to parse JSON")).toBe("code");
+    expect(classifyTask("fix the bug in the login module")).toBe("code");
+    expect(classifyTask("debug the runtime error in main.ts")).toBe("code");
+    expect(classifyTask("refactor the database query")).toBe("code");
+  });
+
+  it("classifies analysis prompts", () => {
+    expect(classifyTask("analyze the performance of this algorithm")).toBe("analysis");
+    expect(classifyTask("explain how the caching layer works")).toBe("analysis");
+    expect(classifyTask("review the security audit report")).toBe("analysis");
+  });
+
+  it("classifies creative prompts", () => {
+    expect(classifyTask("write a blog post about microservices")).toBe("creative");
+    expect(classifyTask("design a new user interface layout")).toBe("creative");
+    expect(classifyTask("compose an email template")).toBe("creative");
+  });
+
+  it("defaults to chat for generic prompts", () => {
+    // No keyword from any list appears, so every score stays 0.
+    expect(classifyTask("hello how are you")).toBe("chat");
+    expect(classifyTask("what time is it")).toBe("chat");
+  });
+
+  it("boosts code score for code-like patterns", () => {
+    // Backtick fences and file extensions add +2 to the code score.
+    expect(classifyTask("look at this ```code block```")).toBe("code");
+    expect(classifyTask("check the file main.ts")).toBe("code");
+  });
+});
+
+describe("classifyBudgetLevel", () => {
+  it("returns low for undefined", () => {
+    expect(classifyBudgetLevel(undefined)).toBe("low");
+  });
+
+  it("returns levels based on fraction", () => {
+    expect(classifyBudgetLevel(0.1)).toBe("low");
+    expect(classifyBudgetLevel(0.5)).toBe("mid");
+    expect(classifyBudgetLevel(0.75)).toBe("high");
+    expect(classifyBudgetLevel(0.95)).toBe("critical");
+  });
+});
+
+describe("classifyTimeSlot", () => {
+  // Result depends on the wall clock, so only membership is asserted.
+  it("returns peak or off-peak", () => {
+    const result = classifyTimeSlot();
+    expect(["peak", "off-peak"]).toContain(result);
+  });
+});
diff --git a/extensions/eruberu/task-classifier.ts b/extensions/eruberu/task-classifier.ts
new file mode 100644
index 00000000..4f6ee632
--- /dev/null
+++ b/extensions/eruberu/task-classifier.ts
@@ -0,0 +1,133 @@
+/**
+ * Task Classifier
+ *
+ * Classifies prompts into task types using keyword heuristics.
+ * No LLM calls — operates purely on string analysis.
+ */
+
+export type TaskType = "code" | "chat" | "analysis" | "creative";
+
+// Keyword vocabularies. A token must match exactly (after stripping
+// non-letters) to count toward its category's score.
+const CODE_KEYWORDS = [
+  "implement", "fix", "debug", "function", "refactor", "compile", "build",
+  "test", "import", "class", "method", "variable", "type", "interface",
+  "module", "error", "bug", "patch", "syntax", "runtime", "lint", "deploy",
+  "endpoint", "api", "database", "query", "migration",
+];
+
+const ANALYSIS_KEYWORDS = [
+  "analyze", "explain", "review", "compare", "evaluate", "assess",
+  "summarize", "describe", "investigate", "diagnose", "benchmark", "profile",
+  "audit", "inspect", "examine", "report",
+];
+
+const CREATIVE_KEYWORDS = [
+  "write", "story", "design", "create", "compose", "draft", "brainstorm",
+  "ideate", "imagine", "generate", "style", "format", "template", "layout",
+  "prose", "poem", "essay", "blog", "narrative",
+];
+
+/**
+ * Classify a prompt into a task type by counting keyword hits per category,
+ * with extra weight for code-like patterns. Ties break in the order
+ * code > analysis > creative; a zero score everywhere means "chat".
+ */
+export function classifyTask(prompt: string): TaskType {
+  const lower = prompt.toLowerCase();
+  const scores = { code: 0, analysis: 0, creative: 0 };
+
+  for (const rawToken of lower.split(/\s+/)) {
+    const token = rawToken.replace(/[^a-z]/g, "");
+    if (CODE_KEYWORDS.includes(token)) scores.code += 1;
+    if (ANALYSIS_KEYWORDS.includes(token)) scores.analysis += 1;
+    if (CREATIVE_KEYWORDS.includes(token)) scores.creative += 1;
+  }
+
+  // Code-like patterns (backtick fences, file extensions, JS/TS syntax)
+  // weigh more heavily than single keyword hits.
+  if (prompt.includes("```")) scores.code += 2;
+  if (/\.(ts|js|py|rs|go|java|cpp|c|rb|sh)\b/.test(lower)) scores.code += 2;
+  if (/\bfunction\s*\(/.test(lower) || /\bconst\s+\w+\s*=/.test(lower)) scores.code += 2;
+
+  const top = Math.max(scores.code, scores.analysis, scores.creative);
+  if (top === 0) return "chat";
+  if (scores.code === top) return "code";
+  return scores.analysis === top ? "analysis" : "creative";
+}
+
+/**
+ * Bucket a budget usage fraction (0.0 - 1.0+) into a coarse level.
+ * Missing data (`undefined`) is treated as "low".
+ */
+export type BudgetLevel = "low" | "mid" | "high" | "critical";
+
+export function classifyBudgetLevel(usageFraction: number | undefined): BudgetLevel {
+  if (usageFraction === undefined) return "low";
+  if (usageFraction < 0.3) return "low";
+  if (usageFraction < 0.7) return "mid";
+  return usageFraction < 0.9 ? "high" : "critical";
+}
+
+/**
+ * Classify the current UTC hour as "peak" (09:00-17:59 UTC, business hours)
+ * or "off-peak" (all other hours).
+ */
+export type TimeSlot = "peak" | "off-peak";
+
+export function classifyTimeSlot(): TimeSlot {
+  const utcHour = new Date().getUTCHours();
+  const isBusinessHours = utcHour >= 9 && utcHour < 18;
+  return isBusinessHours ? "peak" : "off-peak";
+}
diff --git a/extensions/feishu/package.json b/extensions/feishu/package.json
index caf60f24..c476e984 100644
--- a/extensions/feishu/package.json
+++ b/extensions/feishu/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-feishu",
- "version": "0.1.13",
+ "version": "0.1.14",
"description": "Mayros Feishu/Lark channel plugin (community maintained by @m1heng)",
"license": "MIT",
"type": "module",
diff --git a/extensions/google-antigravity-auth/package.json b/extensions/google-antigravity-auth/package.json
index 70dae68b..8dd32134 100644
--- a/extensions/google-antigravity-auth/package.json
+++ b/extensions/google-antigravity-auth/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-google-antigravity-auth",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros Google Antigravity OAuth provider plugin",
"type": "module",
diff --git a/extensions/google-gemini-cli-auth/package.json b/extensions/google-gemini-cli-auth/package.json
index 54aedf18..5e157c37 100644
--- a/extensions/google-gemini-cli-auth/package.json
+++ b/extensions/google-gemini-cli-auth/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-google-gemini-cli-auth",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros Gemini CLI OAuth provider plugin",
"type": "module",
diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json
index c1d48f56..256b637e 100644
--- a/extensions/googlechat/package.json
+++ b/extensions/googlechat/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-googlechat",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros Google Chat channel plugin",
"type": "module",
diff --git a/extensions/hayameru/atomic-write.test.ts b/extensions/hayameru/atomic-write.test.ts
new file mode 100644
index 00000000..6a0c40ae
--- /dev/null
+++ b/extensions/hayameru/atomic-write.test.ts
@@ -0,0 +1,78 @@
+import { describe, it, expect, beforeEach, afterEach } from "vitest";
+import fs from "node:fs/promises";
+import path from "node:path";
+import os from "node:os";
+
+// Exercises the backup -> tmp -> rename sequence that the hayameru plugin
+// performs; the suite re-implements the steps rather than importing them.
+describe("hayameru atomic write", () => {
+  let tmpDir: string;
+
+  beforeEach(async () => {
+    tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "hayameru-test-"));
+  });
+
+  afterEach(async () => {
+    await fs.rm(tmpDir, { recursive: true, force: true });
+  });
+
+  it("creates backup and writes atomically", async () => {
+    const filePath = path.join(tmpDir, "test.ts");
+    await fs.writeFile(filePath, "const x = 1;", "utf-8");
+
+    // Simulate what hayameru does
+    const tmpPath = filePath + ".hayameru-tmp";
+    const bakPath = filePath + ".hayameru-bak";
+    await fs.copyFile(filePath, bakPath);
+    await fs.writeFile(tmpPath, "let x = 1;", "utf-8");
+    await fs.rename(tmpPath, filePath);
+
+    // Verify backup exists with original content
+    const bakContent = await fs.readFile(bakPath, "utf-8");
+    expect(bakContent).toBe("const x = 1;");
+
+    // Verify file has new content
+    const content = await fs.readFile(filePath, "utf-8");
+    expect(content).toBe("let x = 1;");
+
+    // Verify tmp file doesn't remain
+    await expect(fs.stat(tmpPath)).rejects.toThrow();
+  });
+
+  // NOTE(review): no failure is actually injected here — this only checks
+  // that taking the backup alone leaves the original file untouched.
+  it("preserves original file if tmp write fails", async () => {
+    const filePath = path.join(tmpDir, "safe.ts");
+    const originalContent = "function hello() { return 42; }";
+    await fs.writeFile(filePath, originalContent, "utf-8");
+
+    const bakPath = filePath + ".hayameru-bak";
+    await fs.copyFile(filePath, bakPath);
+
+    // Simulate a crash after backup but before rename —
+    // the original file should still contain the original content
+    const content = await fs.readFile(filePath, "utf-8");
+    expect(content).toBe(originalContent);
+
+    // Backup should also have original content for recovery
+    const bakContent = await fs.readFile(bakPath, "utf-8");
+    expect(bakContent).toBe(originalContent);
+  });
+
+  it("handles multiple sequential atomic writes", async () => {
+    const filePath = path.join(tmpDir, "multi.ts");
+    await fs.writeFile(filePath, "v1", "utf-8");
+
+    // Each iteration overwrites the single backup with the previous version.
+    for (let i = 2; i <= 5; i++) {
+      const tmpPath = filePath + ".hayameru-tmp";
+      const bakPath = filePath + ".hayameru-bak";
+      await fs.copyFile(filePath, bakPath);
+      await fs.writeFile(tmpPath, `v${i}`, "utf-8");
+      await fs.rename(tmpPath, filePath);
+    }
+
+    // Final content should be v5
+    const content = await fs.readFile(filePath, "utf-8");
+    expect(content).toBe("v5");
+
+    // Backup should be v4 (the content before the last write)
+    const bakContent = await fs.readFile(filePath + ".hayameru-bak", "utf-8");
+    expect(bakContent).toBe("v4");
+  });
+});
diff --git a/extensions/hayameru/config.ts b/extensions/hayameru/config.ts
new file mode 100644
index 00000000..cca7a31c
--- /dev/null
+++ b/extensions/hayameru/config.ts
@@ -0,0 +1,79 @@
+import { assertAllowedKeys } from "../shared/cortex-config.js";
+
+/** Resolved Hayameru configuration. */
+export type HayameruConfig = {
+  /** Master on/off switch for the plugin. */
+  enabled: boolean;
+  /** Minimum intent-detection confidence in (0, 1] required to transform. */
+  confidenceThreshold: number;
+  /** Files larger than this many bytes are never transformed. */
+  maxFileSize: number;
+  /** Per-transform enable flags, keyed by transform kind (e.g. "var-to-const"). */
+  transforms: Record<string, boolean>;
+  /** Metrics collection toggle. */
+  metrics: { enabled: boolean };
+};
+
+/** Defaults used when a field is missing or invalid in the raw config. */
+const DEFAULTS: HayameruConfig = {
+  enabled: true,
+  confidenceThreshold: 0.85,
+  maxFileSize: 100_000,
+  transforms: {
+    "var-to-const": true,
+    "remove-console": true,
+    "sort-imports": true,
+    "add-semicolons": true,
+    "remove-comments": true,
+  },
+  metrics: { enabled: true },
+};
+
+/**
+ * Parse raw plugin config into a validated HayameruConfig, falling back to
+ * DEFAULTS for any missing or invalid field. Unknown top-level keys are
+ * rejected by assertAllowedKeys.
+ */
+export function parseHayameruConfig(raw: unknown): HayameruConfig {
+  if (!raw || typeof raw !== "object" || Array.isArray(raw)) return { ...DEFAULTS };
+  const cfg = raw as Record<string, unknown>;
+  assertAllowedKeys(
+    cfg,
+    ["enabled", "confidenceThreshold", "maxFileSize", "transforms", "metrics"],
+    "hayameru config",
+  );
+
+  const enabled = typeof cfg.enabled === "boolean" ? cfg.enabled : DEFAULTS.enabled;
+  // Threshold must lie in (0, 1]; anything else falls back to the default.
+  const confidenceThreshold =
+    typeof cfg.confidenceThreshold === "number" &&
+    cfg.confidenceThreshold > 0 &&
+    cfg.confidenceThreshold <= 1
+      ? cfg.confidenceThreshold
+      : DEFAULTS.confidenceThreshold;
+  const maxFileSize =
+    typeof cfg.maxFileSize === "number" && cfg.maxFileSize > 0
+      ? Math.floor(cfg.maxFileSize)
+      : DEFAULTS.maxFileSize;
+
+  // Merge per-transform overrides on top of defaults; only booleans count.
+  // (Renamed from `raw` — the original shadowed the function parameter.)
+  const transforms = { ...DEFAULTS.transforms };
+  if (cfg.transforms && typeof cfg.transforms === "object" && !Array.isArray(cfg.transforms)) {
+    const rawTransforms = cfg.transforms as Record<string, unknown>;
+    for (const [key, value] of Object.entries(rawTransforms)) {
+      if (typeof value === "boolean") transforms[key] = value;
+    }
+  }
+
+  let metricsEnabled = DEFAULTS.metrics.enabled;
+  if (cfg.metrics && typeof cfg.metrics === "object" && !Array.isArray(cfg.metrics)) {
+    const m = cfg.metrics as Record<string, unknown>;
+    if (typeof m.enabled === "boolean") metricsEnabled = m.enabled;
+  }
+
+  return {
+    enabled,
+    confidenceThreshold,
+    maxFileSize,
+    transforms,
+    metrics: { enabled: metricsEnabled },
+  };
+}
+
+export const hayameruConfigSchema = {
+ parse: parseHayameruConfig,
+ uiHints: {
+ enabled: {
+ label: "Enable Hayameru",
+ help: "Enable deterministic code transforms that bypass LLM",
+ },
+ confidenceThreshold: { label: "Confidence Threshold", placeholder: "0.85", advanced: true },
+ maxFileSize: { label: "Max File Size (bytes)", placeholder: "100000", advanced: true },
+ },
+};
diff --git a/extensions/hayameru/index.ts b/extensions/hayameru/index.ts
new file mode 100644
index 00000000..a6637f2a
--- /dev/null
+++ b/extensions/hayameru/index.ts
@@ -0,0 +1,198 @@
+import fs from "node:fs/promises";
+import path from "node:path";
+import { Type } from "@sinclair/typebox";
+import type { MayrosPluginApi } from "mayros/plugin-sdk";
+import { parseHayameruConfig, hayameruConfigSchema } from "./config.js";
+import { detectIntent } from "./intent-detector.js";
+import { getTransform, listTransforms } from "./transforms/index.js";
+import { HayameruMetrics } from "./metrics.js";
+
+const hayameruPlugin = {
+  id: "hayameru",
+  name: "Hayameru",
+  description:
+    "Deterministic code transforms that bypass LLM for simple edits — zero tokens, sub-millisecond",
+  kind: "optimization" as const,
+  configSchema: hayameruConfigSchema,
+
+  // Plugin entry point: wires the before_agent_run interceptor, a status
+  // tool, and a small CLI. Bails out early when disabled by config.
+  async register(api: MayrosPluginApi) {
+    const cfg = parseHayameruConfig(api.pluginConfig);
+    if (!cfg.enabled) {
+      api.logger.info("hayameru: disabled by config");
+      return;
+    }
+
+    const metrics = new HayameruMetrics();
+    const workDir = api.config?.workspaceDir ?? process.cwd();
+
+    // before_agent_run hook — intercept simple code edits
+    api.on(
+      "before_agent_run",
+      async (event) => {
+        const start = performance.now();
+        const intent = detectIntent(event.prompt);
+
+        // Only act on confidently detected intents; otherwise stay silent
+        // and let the normal agent pipeline handle the prompt.
+        if (intent.kind === "none" || intent.confidence < cfg.confidenceThreshold) {
+          return; // fall through to LLM
+        }
+
+        if (!cfg.transforms[intent.kind]) {
+          return; // this transform is disabled
+        }
+
+        const transform = getTransform(intent.kind);
+        if (!transform) return;
+
+        // Resolve file path
+        if (!intent.filePath) return; // need a target file
+
+        const rawResolved = path.isAbsolute(intent.filePath)
+          ? intent.filePath
+          : path.resolve(workDir, intent.filePath);
+
+        // Prevent path traversal — resolved path must be inside workspace
+        let resolvedPath: string;
+        try {
+          // realpath also resolves symlinks, so a link that points outside
+          // the workspace is rejected by the startsWith check below.
+          resolvedPath = await fs.realpath(rawResolved);
+          const realWorkDir = await fs.realpath(workDir);
+          if (resolvedPath !== realWorkDir && !resolvedPath.startsWith(realWorkDir + path.sep)) {
+            api.logger.warn(`hayameru: path traversal blocked: ${intent.filePath}`);
+            return;
+          }
+        } catch {
+          // File doesn't exist yet or path is invalid — try without realpath
+          const normalized = path.normalize(rawResolved);
+          const normalizedWork = path.normalize(workDir);
+          if (normalized !== normalizedWork && !normalized.startsWith(normalizedWork + path.sep)) {
+            api.logger.warn(`hayameru: path traversal blocked: ${intent.filePath}`);
+            return;
+          }
+          resolvedPath = normalized;
+        }
+
+        if (cfg.metrics.enabled) metrics.recordAttempt();
+
+        try {
+          const stat = await fs.stat(resolvedPath);
+          if (stat.size > cfg.maxFileSize) {
+            api.logger.warn(`hayameru: file too large (${stat.size} bytes > ${cfg.maxFileSize})`);
+            return;
+          }
+
+          const source = await fs.readFile(resolvedPath, "utf-8");
+          const result = transform(source, resolvedPath);
+
+          if (!result.changed) {
+            return; // nothing to do, fall through to LLM
+          }
+
+          // Atomic write: backup → tmp → rename
+          // NOTE(review): the .hayameru-bak file is not cleaned up in this
+          // hook — presumably kept for manual recovery; confirm lifecycle.
+          const tmpPath = resolvedPath + ".hayameru-tmp";
+          const bakPath = resolvedPath + ".hayameru-bak";
+          await fs.copyFile(resolvedPath, bakPath);
+          await fs.writeFile(tmpPath, result.output, "utf-8");
+          await fs.rename(tmpPath, resolvedPath);
+
+          const durationMs = performance.now() - start;
+          if (cfg.metrics.enabled) metrics.recordSuccess(intent.kind, durationMs, stat.size);
+
+          // Rough heuristic: ~4 bytes per token.
+          const estimatedTokens = Math.ceil(stat.size / 4);
+          const summary = [
+            `**Hayameru** — ${result.description}`,
+            `File: \`${path.relative(workDir, resolvedPath)}\``,
+            `Edits: ${result.edits} | Time: ${durationMs.toFixed(1)}ms | Est. tokens saved: ~${estimatedTokens}`,
+          ].join("\n");
+
+          // Short-circuit the agent run: the transform already did the work.
+          return {
+            shortCircuit: true,
+            response: summary,
+            metadata: {
+              hayameru: true,
+              transform: intent.kind,
+              edits: result.edits,
+              durationMs,
+            },
+          };
+        } catch (err) {
+          if (cfg.metrics.enabled) metrics.recordFailure();
+          api.logger.warn(`hayameru: transform failed: ${String(err)}`);
+          return; // fall through to LLM
+        }
+      },
+      // NOTE(review): priority 100 — presumably runs ahead of other
+      // before_agent_run handlers; confirm the host's ordering semantics.
+      { priority: 100 },
+    );
+
+    // Tool: hayameru_status
+    api.registerTool({
+      name: "hayameru_status",
+      description: "Show Hayameru code transform metrics and available transforms",
+      parameters: Type.Object({}),
+      execute: async () => {
+        const m = metrics.getMetrics();
+        const transforms = listTransforms();
+        return {
+          content: [
+            {
+              type: "text" as const,
+              text: JSON.stringify(
+                {
+                  metrics: m,
+                  transforms: transforms.map((t) => ({
+                    kind: t.kind,
+                    available: t.available,
+                    enabled: cfg.transforms[t.kind] !== false,
+                  })),
+                  config: {
+                    confidenceThreshold: cfg.confidenceThreshold,
+                    maxFileSize: cfg.maxFileSize,
+                  },
+                },
+                null,
+                2,
+              ),
+            },
+          ],
+        };
+      },
+    });
+
+    // CLI
+    api.registerCli(
+      (ctx) => {
+        const cmd = ctx.program
+          .command("hayameru")
+          .description("Hayameru code transform accelerator");
+
+        cmd
+          .command("status")
+          .description("Show metrics and config")
+          .action(() => {
+            const m = metrics.getMetrics();
+            console.log("Hayameru Status:");
+            console.log(`  Total attempts: ${m.totalAttempts}`);
+            console.log(`  Boost successes: ${m.boostSuccesses}`);
+            console.log(`  Boost failures: ${m.boostFailures}`);
+            console.log(`  Est. tokens saved: ${m.estimatedTokensSaved}`);
+            console.log(`  Avg transform time: ${m.avgTransformMs.toFixed(1)}ms`);
+          });
+
+        cmd
+          .command("transforms")
+          .description("List available transforms")
+          .action(() => {
+            const transforms = listTransforms();
+            console.log("Available transforms:");
+            for (const t of transforms) {
+              const status = cfg.transforms[t.kind] !== false ? "enabled" : "disabled";
+              console.log(`  ${t.kind}: ${status}`);
+            }
+          });
+      },
+      { commands: ["hayameru"] },
+    );
+
+    api.logger.info(`hayameru: initialized with ${Object.keys(cfg.transforms).length} transforms`);
+  },
+};
+
+export default hayameruPlugin;
diff --git a/extensions/hayameru/intent-detector.test.ts b/extensions/hayameru/intent-detector.test.ts
new file mode 100644
index 00000000..af0d2bdc
--- /dev/null
+++ b/extensions/hayameru/intent-detector.test.ts
@@ -0,0 +1,41 @@
+import { describe, it, expect } from "vitest";
+import { detectIntent } from "./intent-detector.js";
+
+// NOTE(review): these expectations pin the keyword tables and the +0.15
+// file-path confidence boost in intent-detector.ts; update them together.
+describe("detectIntent", () => {
+  it("detects var-to-const intent", () => {
+    const r = detectIntent("convert var to const in `src/app.ts`");
+    expect(r.kind).toBe("var-to-const");
+    expect(r.confidence).toBeGreaterThan(0.5);
+    expect(r.filePath).toBe("src/app.ts");
+  });
+
+  it("detects remove-console intent", () => {
+    const r = detectIntent("remove all console.log statements from `utils/logger.ts`");
+    expect(r.kind).toBe("remove-console");
+    expect(r.confidence).toBeGreaterThan(0.5);
+    expect(r.filePath).toBe("utils/logger.ts");
+  });
+
+  it("detects sort-imports intent", () => {
+    // File path extraction also works without backticks.
+    const r = detectIntent("sort the imports in src/index.ts");
+    expect(r.kind).toBe("sort-imports");
+    expect(r.filePath).toBe("src/index.ts");
+  });
+
+  it("returns none for unrecognized prompts", () => {
+    const r = detectIntent("explain how the auth system works");
+    expect(r.kind).toBe("none");
+    expect(r.confidence).toBe(0);
+  });
+
+  it("extracts file path from backticks", () => {
+    const r = detectIntent("change var to const in `src/utils/helpers.ts`");
+    expect(r.filePath).toBe("src/utils/helpers.ts");
+  });
+
+  it("boosts confidence when file path present", () => {
+    const withFile = detectIntent("remove console in `app.ts`");
+    const withoutFile = detectIntent("remove console statements");
+    expect(withFile.confidence).toBeGreaterThan(withoutFile.confidence);
+  });
+});
diff --git a/extensions/hayameru/intent-detector.ts b/extensions/hayameru/intent-detector.ts
new file mode 100644
index 00000000..b7f7a05b
--- /dev/null
+++ b/extensions/hayameru/intent-detector.ts
@@ -0,0 +1,144 @@
+export type IntentKind =
+  | "var-to-const"
+  | "remove-console"
+  | "sort-imports"
+  | "add-semicolons"
+  | "remove-comments"
+  | "none";
+
+// Result of intent detection: the matched transform kind, a 0..1 confidence
+// score, and (when found in the prompt) the target file path.
+export type DetectedIntent = {
+  kind: IntentKind;
+  confidence: number;
+  filePath?: string;
+  targetPattern?: string;
+};
+
+// Keyword sets per transform kind. Within a set, ALL keywords must appear
+// (AND); any matching set contributes to the score (OR across sets).
+// Multi-word entries (containing a space) are matched as substrings of the
+// lowercased prompt; single words match per-token with naive singular/plural
+// tolerance. `boost` rewards prompts matching more than one set.
+const INTENT_PATTERNS: Array<{ kind: IntentKind; keywords: string[][]; boost: number }> = [
+  {
+    kind: "var-to-const",
+    keywords: [
+      ["var", "const"],
+      ["var", "let"],
+      ["convert", "var"],
+      ["replace", "var"],
+      ["change", "var", "const"],
+      ["var to const"],
+      ["var to let"],
+    ],
+    boost: 0.1,
+  },
+  {
+    kind: "remove-console",
+    keywords: [
+      ["remove", "console"],
+      ["delete", "console"],
+      ["strip", "console"],
+      ["clean", "console"],
+      ["remove", "log"],
+      ["strip", "debug"],
+    ],
+    boost: 0.1,
+  },
+  {
+    kind: "sort-imports",
+    keywords: [
+      ["sort", "import"],
+      ["organize", "import"],
+      ["order", "import"],
+      ["alphabetize", "import"],
+      ["clean", "import"],
+    ],
+    boost: 0.1,
+  },
+  {
+    kind: "add-semicolons",
+    keywords: [
+      ["add", "semicolon"],
+      ["missing", "semicolon"],
+      ["insert", "semicolon"],
+      ["fix", "semicolon"],
+    ],
+    boost: 0.05,
+  },
+  {
+    kind: "remove-comments",
+    keywords: [
+      ["remove", "comment"],
+      ["delete", "comment"],
+      ["strip", "comment"],
+      ["clean", "comment"],
+      ["remove all comments"],
+    ],
+    boost: 0.1,
+  },
+];
+
+// File path extraction patterns
+// Ordered most-specific first: backticked paths win over bare tokens.
+const FILE_PATH_PATTERNS = [
+  /`([^`]+\.[a-zA-Z]{1,10})`/,
+  /(?:in|file|from|of)\s+(\S+\.[a-zA-Z]{1,10})/i,
+  /(\S+\.[tj]sx?)/,
+  /(\S+\.(?:js|ts|jsx|tsx|mjs|cjs|mts|cts))/,
+];
+
+// Lowercase, replace every non-alphanumeric character with a space, and
+// split into non-empty word tokens.
+function tokenize(text: string): string[] {
+  const normalized = text.toLowerCase().replace(/[^a-z0-9\s]/g, " ");
+  return normalized.split(/\s+/).filter((token) => token.length > 0);
+}
+
+// Return the first file-path-looking capture, trying the most specific
+// patterns (backticked paths) before the generic extension matchers.
+function extractFilePath(prompt: string): string | undefined {
+  for (const re of FILE_PATH_PATTERNS) {
+    const hit = re.exec(prompt);
+    if (hit?.[1]) return hit[1];
+  }
+  return undefined;
+}
+
+/**
+ * Detect a transform intent in a free-form prompt. Scores each pattern's
+ * keyword sets (0.2 per keyword in a matched set, plus a boost when more
+ * than one set matches), keeps the best-scoring kind, and adds +0.15 when
+ * an explicit file path is present. Confidence is clamped to [0, 1].
+ */
+export function detectIntent(prompt: string): DetectedIntent {
+  const tokens = tokenize(prompt);
+  const promptLower = prompt.toLowerCase();
+
+  // Token comparison tolerates a naive singular/plural mismatch.
+  const hasToken = (kw: string): boolean =>
+    tokens.some((t) => t === kw || t === kw + "s" || t + "s" === kw);
+  const setMatches = (keywordSet: string[]): boolean =>
+    keywordSet.every((kw) => (kw.includes(" ") ? promptLower.includes(kw) : hasToken(kw)));
+
+  let bestKind: IntentKind = "none";
+  let bestScore = 0;
+
+  for (const { kind, keywords, boost } of INTENT_PATTERNS) {
+    const matched = keywords.filter(setMatches);
+    if (matched.length === 0) continue;
+
+    let score = 0;
+    for (const set of matched) score += set.length * 0.2;
+    if (matched.length > 1) score += boost * matched.length;
+    score = Math.min(1.0, score);
+
+    if (score > bestScore) {
+      bestScore = score;
+      bestKind = kind;
+    }
+  }
+
+  const filePath = extractFilePath(prompt);
+  // Boost confidence when a file path is explicitly mentioned.
+  if (filePath && bestKind !== "none") {
+    bestScore = Math.min(1.0, bestScore + 0.15);
+  }
+
+  return {
+    kind: bestKind,
+    confidence: bestScore,
+    filePath,
+  };
+}
diff --git a/extensions/hayameru/mayros.plugin.json b/extensions/hayameru/mayros.plugin.json
new file mode 100644
index 00000000..667ba228
--- /dev/null
+++ b/extensions/hayameru/mayros.plugin.json
@@ -0,0 +1,28 @@
+{
+ "id": "hayameru",
+ "kind": "optimization",
+ "configSchema": {
+ "type": "object",
+ "properties": {
+ "enabled": { "type": "boolean", "default": true },
+ "confidenceThreshold": { "type": "number", "default": 0.85 },
+ "maxFileSize": { "type": "integer", "default": 100000 },
+ "transforms": {
+ "type": "object",
+ "properties": {
+ "var-to-const": { "type": "boolean", "default": true },
+ "remove-console": { "type": "boolean", "default": true },
+ "sort-imports": { "type": "boolean", "default": true },
+ "add-semicolons": { "type": "boolean", "default": true },
+ "remove-comments": { "type": "boolean", "default": true }
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "properties": {
+ "enabled": { "type": "boolean", "default": true }
+ }
+ }
+ }
+ }
+}
diff --git a/extensions/hayameru/metrics.ts b/extensions/hayameru/metrics.ts
new file mode 100644
index 00000000..fb56a2fa
--- /dev/null
+++ b/extensions/hayameru/metrics.ts
@@ -0,0 +1,65 @@
+/** Aggregated Hayameru transform metrics. */
+export type BoostMetrics = {
+  totalAttempts: number;
+  boostSuccesses: number;
+  boostFailures: number;
+  estimatedTokensSaved: number;
+  avgTransformMs: number;
+  // Per-transform-kind counters (type arguments restored — the original
+  // bare `Record;` did not compile).
+  byTransform: Record<string, { count: number; avgMs: number; totalMs: number }>;
+};
+
+export class HayameruMetrics {
+  private data: BoostMetrics = {
+    totalAttempts: 0,
+    boostSuccesses: 0,
+    boostFailures: 0,
+    estimatedTokensSaved: 0,
+    avgTransformMs: 0,
+    byTransform: {},
+  };
+
+  // Running sum backing avgTransformMs.
+  private totalTransformMs = 0;
+
+  recordAttempt(): void {
+    this.data.totalAttempts++;
+  }
+
+  /**
+   * Record a successful transform.
+   * @param transformKind - the type of transform applied
+   * @param durationMs - actual measured wall-clock time for the transform
+   * @param fileSizeBytes - size of the file in bytes; used to estimate tokens via Math.ceil(bytes/4)
+   */
+  recordSuccess(transformKind: string, durationMs: number, fileSizeBytes: number): void {
+    this.data.boostSuccesses++;
+    this.data.estimatedTokensSaved += Math.ceil(fileSizeBytes / 4);
+    this.totalTransformMs += durationMs;
+    this.data.avgTransformMs =
+      this.data.boostSuccesses > 0 ? this.totalTransformMs / this.data.boostSuccesses : 0;
+
+    const entry = this.data.byTransform[transformKind] ?? { count: 0, avgMs: 0, totalMs: 0 };
+    entry.count++;
+    entry.totalMs += durationMs;
+    entry.avgMs = entry.totalMs / entry.count;
+    this.data.byTransform[transformKind] = entry;
+  }
+
+  recordFailure(): void {
+    this.data.boostFailures++;
+  }
+
+  /**
+   * Snapshot of current metrics. Returns a copy (including per-transform
+   * entries) so callers cannot mutate internal state — the original shallow
+   * spread leaked live byTransform objects.
+   */
+  getMetrics(): BoostMetrics {
+    const byTransform: BoostMetrics["byTransform"] = {};
+    for (const [kind, entry] of Object.entries(this.data.byTransform)) {
+      byTransform[kind] = { ...entry };
+    }
+    return { ...this.data, byTransform };
+  }
+
+  reset(): void {
+    this.data = {
+      totalAttempts: 0,
+      boostSuccesses: 0,
+      boostFailures: 0,
+      estimatedTokensSaved: 0,
+      avgTransformMs: 0,
+      byTransform: {},
+    };
+    this.totalTransformMs = 0;
+  }
+}
diff --git a/extensions/hayameru/package.json b/extensions/hayameru/package.json
new file mode 100644
index 00000000..0f4c661e
--- /dev/null
+++ b/extensions/hayameru/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "@apilium/mayros-hayameru",
+ "version": "0.1.14",
+ "private": true,
+ "description": "Mayros deterministic code transforms — bypass LLM for simple edits",
+ "type": "module",
+ "dependencies": {
+ "@sinclair/typebox": "0.34.48"
+ },
+ "devDependencies": {
+ "@apilium/mayros": "workspace:*"
+ },
+ "mayros": {
+ "extensions": [
+ "./index.ts"
+ ]
+ }
+}
diff --git a/extensions/hayameru/path-safety.test.ts b/extensions/hayameru/path-safety.test.ts
new file mode 100644
index 00000000..ff90112d
--- /dev/null
+++ b/extensions/hayameru/path-safety.test.ts
@@ -0,0 +1,41 @@
+import { describe, it, expect } from "vitest";
+import path from "node:path";
+
+// We test the path validation logic indirectly through the plugin hook.
+// Mock the plugin API to capture the hook handler, then invoke it with various paths.
+
+describe("hayameru path safety", () => {
+ const workDir = "/workspace/project";
+
+ // Helper: validate path like hayameru does
+ function isPathSafe(filePath: string, baseDir: string): boolean {
+ const rawResolved = path.isAbsolute(filePath) ? filePath : path.resolve(baseDir, filePath);
+ const normalized = path.normalize(rawResolved);
+ const normalizedWork = path.normalize(baseDir);
+ return normalized === normalizedWork || normalized.startsWith(normalizedWork + path.sep);
+ }
+
+ it("allows normal relative paths", () => {
+ expect(isPathSafe("src/foo.ts", workDir)).toBe(true);
+ expect(isPathSafe("./src/foo.ts", workDir)).toBe(true);
+ });
+
+ it("blocks path traversal with ../", () => {
+ expect(isPathSafe("../../../etc/passwd", workDir)).toBe(false);
+ expect(isPathSafe("src/../../etc/passwd", workDir)).toBe(false);
+ });
+
+ it("blocks absolute paths outside workspace", () => {
+ expect(isPathSafe("/etc/passwd", workDir)).toBe(false);
+ expect(isPathSafe("/tmp/evil.ts", workDir)).toBe(false);
+ });
+
+ it("allows absolute paths inside workspace", () => {
+ expect(isPathSafe("/workspace/project/src/foo.ts", workDir)).toBe(true);
+ });
+
+ it("blocks paths that are prefix but not child", () => {
+ // /workspace/project-evil/foo.ts starts with /workspace/project but is NOT a child
+ expect(isPathSafe("/workspace/project-evil/foo.ts", workDir)).toBe(false);
+ });
+});
diff --git a/extensions/hayameru/transforms/add-semicolons.test.ts b/extensions/hayameru/transforms/add-semicolons.test.ts
new file mode 100644
index 00000000..7eb28f76
--- /dev/null
+++ b/extensions/hayameru/transforms/add-semicolons.test.ts
@@ -0,0 +1,111 @@
+import { describe, it, expect } from "vitest";
+import { addSemicolons } from "./add-semicolons.js";
+
+describe("addSemicolons", () => {
+ it("adds semicolons to statements missing them", () => {
+ const source = "const x = 1\nconst y = 2";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(2);
+ expect(r.output).toBe("const x = 1;\nconst y = 2;");
+ });
+
+ it("does not double-add semicolons", () => {
+ const source = "const x = 1;";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ it("skips lines ending with brackets", () => {
+ const source = "if (x) {\n return 1\n}";
+ const r = addSemicolons(source, "test.ts");
+ // Only "return 1" gets a semicolon
+ expect(r.output).toContain("return 1;");
+ expect(r.output).toContain("if (x) {");
+ });
+
+ // --- H8 fixes ---
+
+ it("does not add semicolon to object property lines ending with :", () => {
+ const source = "const obj = {\n key:\n value\n}";
+ const r = addSemicolons(source, "test.ts");
+ const lines = r.output.split("\n");
+ // " key:" should NOT get a semicolon
+ expect(lines[1]).toBe(" key:");
+ });
+
+ it("does not add semicolon to standalone return", () => {
+ const source = " return";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(" return");
+ });
+
+ it("does not add semicolon to standalone throw", () => {
+ const source = " throw";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(" throw");
+ });
+
+ it("does not add semicolon to standalone break", () => {
+ const source = " break";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ it("does not add semicolon to standalone continue", () => {
+ const source = " continue";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ it("does not add semicolon to standalone yield", () => {
+ const source = " yield";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ it("does not add semicolon to case label", () => {
+ const source = ' case "foo":';
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(' case "foo":');
+ });
+
+ it("does not add semicolon to default label", () => {
+ const source = " default:";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ it("does not add semicolon to decorators", () => {
+ const source = "@Component\nclass Foo {}";
+ const r = addSemicolons(source, "test.ts");
+ const lines = r.output.split("\n");
+ expect(lines[0]).toBe("@Component");
+ });
+
+ it("does not add semicolon to decorator with args", () => {
+ const source = "@Injectable()";
+ const r = addSemicolons(source, "test.ts");
+ // Ends with `)`, the pattern `[{}\[\](,]\s*$` includes `)`
+ // Actually `)` is matched by the bracket pattern already
+ expect(r.output).toBe("@Injectable()");
+ });
+
+ it("does not add semicolon to chained method lines starting with .", () => {
+ const source = "promise\n .then(fn)\n .catch(err)";
+ const r = addSemicolons(source, "test.ts");
+ const lines = r.output.split("\n");
+ // Lines starting with `.` should be skipped
+ expect(lines[1]).toBe(" .then(fn)");
+ expect(lines[2]).toBe(" .catch(err)");
+ });
+
+ it("still adds semicolons to regular statements", () => {
+ const source = "const x = 1\nlet y = 'hello'\nreturn x + y";
+ const r = addSemicolons(source, "test.ts");
+ expect(r.output).toBe("const x = 1;\nlet y = 'hello';\nreturn x + y;");
+ });
+});
diff --git a/extensions/hayameru/transforms/add-semicolons.ts b/extensions/hayameru/transforms/add-semicolons.ts
new file mode 100644
index 00000000..81a5eebc
--- /dev/null
+++ b/extensions/hayameru/transforms/add-semicolons.ts
@@ -0,0 +1,55 @@
+import type { TransformResult } from "./var-to-const.js";
+
+const SKIP_PATTERNS = [
+ /^\s*$/, // empty line
+ /^\s*\/\//, // single-line comment
+ /^\s*\/?\*/, // block comment
+ /[{}[\](,]\s*$/, // ends with opening bracket/comma
+ /^\s*(?:if|else|for|while|do|switch|try|catch|finally|class|function|interface|type|enum|namespace)\b/,
+ /=>\s*\{?\s*$/, // arrow function
+ /^\s*(?:import|export)\b/, // import/export (handled separately)
+ /:\s*$/, // lines ending with `:` (object properties, switch cases)
+ /^\s*(?:return|throw|yield|break|continue)\s*$/, // standalone keywords with no expression
+ /^\s*case\s+.+:\s*$/, // case ...:
+ /^\s*default\s*:\s*$/, // default:
+ /^\s*@\S/, // decorators (@Component, @Injectable, etc.)
+ /^\s*\./, // chained method calls (line starts with `.`)
+];
+
+export function addSemicolons(source: string, _filePath: string): TransformResult {
+ const lines = source.split("\n");
+ const result: string[] = [];
+ let edits = 0;
+
+ for (const line of lines) {
+ const trimmed = line.trimEnd();
+
+ if (
+ trimmed === "" ||
+ trimmed.endsWith(";") ||
+ trimmed.endsWith(",") ||
+ trimmed.endsWith("{") ||
+ trimmed.endsWith("}")
+ ) {
+ result.push(line);
+ continue;
+ }
+
+ const shouldSkip = SKIP_PATTERNS.some((p) => p.test(trimmed));
+ if (shouldSkip) {
+ result.push(line);
+ continue;
+ }
+
+ // Likely a statement that needs a semicolon
+ result.push(trimmed + ";");
+ edits++;
+ }
+
+ return {
+ output: result.join("\n"),
+ changed: edits > 0,
+ edits,
+ description: edits > 0 ? `Added ${edits} semicolon(s)` : "No missing semicolons found",
+ };
+}
diff --git a/extensions/hayameru/transforms/index.ts b/extensions/hayameru/transforms/index.ts
new file mode 100644
index 00000000..c1f98b6a
--- /dev/null
+++ b/extensions/hayameru/transforms/index.ts
@@ -0,0 +1,34 @@
+import type { IntentKind } from "../intent-detector.js";
+import type { TransformResult } from "./var-to-const.js";
+import { varToConst } from "./var-to-const.js";
+import { removeConsole } from "./remove-console.js";
+import { sortImports } from "./sort-imports.js";
+import { addSemicolons } from "./add-semicolons.js";
+import { removeComments } from "./remove-comments.js";
+
+export type { TransformResult };
+
+export type TransformFn = (source: string, filePath: string) => TransformResult;
+
+const REGISTRY: Partial> = {
+ "var-to-const": varToConst,
+ "remove-console": removeConsole,
+ "sort-imports": sortImports,
+ "add-semicolons": addSemicolons,
+ "remove-comments": removeComments,
+};
+
+export function getTransform(kind: IntentKind): TransformFn | undefined {
+ return REGISTRY[kind];
+}
+
+export function listTransforms(): Array<{ kind: IntentKind; available: boolean }> {
+ const all: IntentKind[] = [
+ "var-to-const",
+ "remove-console",
+ "sort-imports",
+ "add-semicolons",
+ "remove-comments",
+ ];
+ return all.map((kind) => ({ kind, available: kind in REGISTRY }));
+}
diff --git a/extensions/hayameru/transforms/remove-comments.test.ts b/extensions/hayameru/transforms/remove-comments.test.ts
new file mode 100644
index 00000000..15404582
--- /dev/null
+++ b/extensions/hayameru/transforms/remove-comments.test.ts
@@ -0,0 +1,74 @@
+import { describe, it, expect } from "vitest";
+import { removeComments } from "./remove-comments.js";
+
+describe("removeComments", () => {
+ it("removes single-line comments", () => {
+ const r = removeComments("const x = 1; // inline\n// full line", "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(2);
+ expect(r.output).toBe("const x = 1;");
+ });
+
+ it("removes block comments", () => {
+ const r = removeComments("/* block */\nconst x = 1;", "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.output.trim()).toBe("const x = 1;");
+ });
+
+ it("preserves JSDoc comments", () => {
+ const source = "/** @param x */\nfunction f(x) {}";
+ const r = removeComments(source, "test.ts");
+ expect(r.output).toContain("/** @param x */");
+ });
+
+ // --- H4 fixes ---
+
+ it("preserves URL in double-quoted string (// is not a comment)", () => {
+ const source = 'const url = "http://example.com";';
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(source);
+ });
+
+ it("preserves URL in single-quoted string", () => {
+ const source = "const url = 'http://example.com';";
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(source);
+ });
+
+ it("handles escaped single quote inside string", () => {
+ const source = "const s = 'it\\'s fine';";
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(source);
+ });
+
+ it("handles escaped double quote inside string", () => {
+ const source = 'const s = "say \\"hello\\"";';
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(source);
+ });
+
+ it("preserves template literal with // inside", () => {
+ const source = "const s = `http://example.com`;";
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(source);
+ });
+
+ it("preserves template literal with // and removes trailing comment", () => {
+ const source = "const s = `http://example.com`; // real comment";
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.output).toBe("const s = `http://example.com`;");
+ });
+
+ it("handles string with /* inside (not a block comment)", () => {
+ const source = 'const s = "/* not a comment */";';
+ const r = removeComments(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.output).toBe(source);
+ });
+});
diff --git a/extensions/hayameru/transforms/remove-comments.ts b/extensions/hayameru/transforms/remove-comments.ts
new file mode 100644
index 00000000..e562bffa
--- /dev/null
+++ b/extensions/hayameru/transforms/remove-comments.ts
@@ -0,0 +1,118 @@
+import type { TransformResult } from "./var-to-const.js";
+
+/**
+ * Checks if a given index in a line falls inside a string literal.
+ * Handles single-quoted, double-quoted, and template literal (backtick) strings,
+ * including escaped characters within those strings.
+ */
+function isInString(line: string, targetIdx: number): boolean {
+ let inStr: "'" | '"' | "`" | null = null;
+ for (let i = 0; i < targetIdx; i++) {
+ const ch = line[i]!;
+ if (ch === "\\" && inStr !== null) {
+ // Skip the next character (escaped)
+ i++;
+ continue;
+ }
+ if (inStr === null) {
+ if (ch === "'" || ch === '"' || ch === "`") {
+ inStr = ch;
+ }
+ } else if (ch === inStr) {
+ inStr = null;
+ }
+ }
+ return inStr !== null;
+}
+
/**
 * Strip single-line and block comments from `source`, line by line, while
 * preserving JSDoc blocks (those opened with a double-star marker).
 *
 * String awareness is delegated to isInString, so comment markers inside
 * quoted strings or single-line template literals are kept. Limitations of
 * the line scanner: regex literals are not recognized, and template literals
 * spanning multiple lines are not tracked across lines.
 *
 * NOTE(review): `edits` counts one edit per affected *line* — each skipped
 * line of a multi-line block comment increments it — not per comment.
 */
export function removeComments(source: string, _filePath: string): TransformResult {
  const lines = source.split("\n");
  const result: string[] = [];
  let edits = 0;
  let inBlockComment = false; // inside a non-JSDoc block comment (dropped)
  let inJsDoc = false; // inside a JSDoc block (preserved verbatim)

  for (const line of lines) {
    // Inside JSDoc — preserve
    if (inJsDoc) {
      result.push(line);
      if (line.includes("*/")) inJsDoc = false;
      continue;
    }

    // Inside block comment — skip
    if (inBlockComment) {
      edits++;
      if (line.includes("*/")) {
        inBlockComment = false;
        // Keep any code that follows the terminator on the same line.
        const after = line.slice(line.indexOf("*/") + 2).trim();
        if (after) result.push(after);
      }
      continue;
    }

    const trimmed = line.trimStart();

    // JSDoc start — preserve
    if (trimmed.startsWith("/**")) {
      result.push(line);
      if (!line.includes("*/")) inJsDoc = true;
      continue;
    }

    // Scan for block comment start (/*) outside of strings
    let foundBlock = false;
    for (let i = 0; i < line.length - 1; i++) {
      const ch = line[i]!;
      // Skip escaped characters inside strings
      if (ch === "\\" && isInString(line, i)) {
        i++;
        continue;
      }
      if (ch === "/" && line[i + 1] === "*" && !isInString(line, i)) {
        const blockIdx = i;
        const before = line.slice(0, blockIdx).trimEnd();
        const endIdx = line.indexOf("*/", blockIdx + 2);
        if (endIdx !== -1) {
          // Comment closes on this line: splice it out, keep surrounding code.
          const after = line.slice(endIdx + 2);
          const combined = (before + after).trimEnd();
          if (combined) result.push(combined);
        } else {
          // Comment continues onto following lines.
          inBlockComment = true;
          if (before) result.push(before);
        }
        edits++;
        foundBlock = true;
        break;
      }
    }
    if (foundBlock) continue;

    // Scan for single-line comment (//) outside of strings
    let foundSingle = false;
    for (let i = 0; i < line.length - 1; i++) {
      const ch = line[i]!;
      if (ch === "\\" && isInString(line, i)) {
        i++;
        continue;
      }
      if (ch === "/" && line[i + 1] === "/" && !isInString(line, i)) {
        const trimBefore = line.slice(0, i).trimEnd();
        if (trimBefore) result.push(trimBefore);
        edits++;
        foundSingle = true;
        break;
      }
    }
    if (foundSingle) continue;

    result.push(line);
  }

  return {
    output: result.join("\n"),
    changed: edits > 0,
    edits,
    description: edits > 0 ? `Removed ${edits} comment(s) (preserved JSDoc)` : "No comments found",
  };
}
diff --git a/extensions/hayameru/transforms/remove-console.test.ts b/extensions/hayameru/transforms/remove-console.test.ts
new file mode 100644
index 00000000..132c11e0
--- /dev/null
+++ b/extensions/hayameru/transforms/remove-console.test.ts
@@ -0,0 +1,71 @@
+import { describe, it, expect } from "vitest";
+import { removeConsole } from "./remove-console.js";
+
+describe("removeConsole", () => {
+ it("removes console.log", () => {
+ const r = removeConsole("console.log('hello');\nconst x = 1;", "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.output).toBe("const x = 1;");
+ });
+
+ it("removes multiple console methods", () => {
+ const source = "console.log('a');\nconsole.warn('b');\nconsole.debug('c');";
+ const r = removeConsole(source, "test.ts");
+ expect(r.edits).toBe(3);
+ expect(r.output.trim()).toBe("");
+ });
+
+ it("handles multi-line console calls", () => {
+ const source = "console.log(\n 'hello',\n 'world'\n);\nconst x = 1;";
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.output.trim()).toBe("const x = 1;");
+ });
+
+ it("leaves non-console code untouched", () => {
+ const source = "const x = 1;\nreturn x;";
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ // --- H7 fixes ---
+
+ it("ignores parentheses inside double-quoted strings", () => {
+ const source = 'console.log(")()(");\nconst x = 1;';
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(1);
+ expect(r.output).toBe("const x = 1;");
+ });
+
+ it("ignores parentheses inside single-quoted strings", () => {
+ const source = "console.log('())(');\nconst x = 1;";
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(1);
+ expect(r.output).toBe("const x = 1;");
+ });
+
+ it("ignores parentheses inside template literals", () => {
+ const source = "console.log(`()()`);\nconst x = 1;";
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(1);
+ expect(r.output).toBe("const x = 1;");
+ });
+
+ it("handles multi-line console with parens in string arguments", () => {
+ const source = 'console.log(\n "has ) inside",\n "and ( too"\n);\nconst y = 2;';
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.output.trim()).toBe("const y = 2;");
+ });
+
+ it("handles escaped quotes inside console string arguments", () => {
+ const source = 'console.log("say \\"(\\"");\nconst x = 1;';
+ const r = removeConsole(source, "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(1);
+ expect(r.output).toBe("const x = 1;");
+ });
+});
diff --git a/extensions/hayameru/transforms/remove-console.ts b/extensions/hayameru/transforms/remove-console.ts
new file mode 100644
index 00000000..f56ef0db
--- /dev/null
+++ b/extensions/hayameru/transforms/remove-console.ts
@@ -0,0 +1,77 @@
+import type { TransformResult } from "./var-to-const.js";
+
+/**
+ * Count parentheses depth outside of string literals.
+ * Tracks single-quoted, double-quoted, and backtick strings,
+ * properly handling escaped characters within those strings.
+ */
+function countParensOutsideStrings(line: string): number {
+ let depth = 0;
+ let inStr: "'" | '"' | "`" | null = null;
+ for (let i = 0; i < line.length; i++) {
+ const ch = line[i]!;
+ // Handle escaped characters inside strings
+ if (ch === "\\" && inStr !== null) {
+ i++; // skip next character
+ continue;
+ }
+ if (inStr === null) {
+ if (ch === "'" || ch === '"' || ch === "`") {
+ inStr = ch;
+ } else if (ch === "(") {
+ depth++;
+ } else if (ch === ")") {
+ depth--;
+ }
+ } else if (ch === inStr) {
+ inStr = null;
+ }
+ }
+ return depth;
+}
+
+export function removeConsole(source: string, _filePath: string): TransformResult {
+ const lines = source.split("\n");
+ const result: string[] = [];
+ let edits = 0;
+ let inMultiLine = false;
+ let parenDepth = 0;
+
+ for (const line of lines) {
+ if (inMultiLine) {
+ // Count parens outside strings to detect end of multi-line console call
+ parenDepth += countParensOutsideStrings(line);
+ edits++;
+ if (parenDepth <= 0) {
+ inMultiLine = false;
+ parenDepth = 0;
+ }
+ continue;
+ }
+
+ const consoleMatch = line.match(
+ /^\s*console\.(log|debug|warn|info|error|trace|dir|table|time|timeEnd)\s*\(/,
+ );
+ if (consoleMatch) {
+ // Count open/close parens on this line (outside strings)
+ const depth = countParensOutsideStrings(line);
+ edits++;
+ if (depth > 0) {
+ // Multi-line console call
+ inMultiLine = true;
+ parenDepth = depth;
+ }
+ continue;
+ }
+
+ result.push(line);
+ }
+
+ return {
+ output: result.join("\n"),
+ changed: edits > 0,
+ edits,
+ description:
+ edits > 0 ? `Removed ${edits} console statement(s)` : "No console statements found",
+ };
+}
diff --git a/extensions/hayameru/transforms/sort-imports.test.ts b/extensions/hayameru/transforms/sort-imports.test.ts
new file mode 100644
index 00000000..4b64410e
--- /dev/null
+++ b/extensions/hayameru/transforms/sort-imports.test.ts
@@ -0,0 +1,113 @@
+import { describe, it, expect } from "vitest";
+import { sortImports } from "./sort-imports.js";
+
+describe("sortImports", () => {
+ it("sorts imports alphabetically", () => {
+ const source = [
+ 'import { z } from "zod";',
+ 'import { a } from "alpha";',
+ "",
+ "const x = 1;",
+ ].join("\n");
+ const r = sortImports(source, "test.ts");
+ expect(r.changed).toBe(true);
+ const lines = r.output.split("\n");
+ expect(lines[0]).toContain("alpha");
+ });
+
+ it("groups by type: node, scoped, bare, relative", () => {
+ const source = [
+ 'import { readFile } from "node:fs";',
+ 'import { join } from "./utils.js";',
+ 'import { Type } from "@sinclair/typebox";',
+ 'import express from "express";',
+ ].join("\n");
+ const r = sortImports(source, "test.ts");
+ expect(r.changed).toBe(true);
+ const lines = r.output.split("\n").filter(Boolean);
+ // node: first, then @scope, then bare, then relative
+ expect(lines[0]).toContain("node:fs");
+ expect(lines[lines.length - 1]).toContain("./utils");
+ });
+
+ it("leaves already sorted imports unchanged", () => {
+ const source = 'import { a } from "a";\nimport { b } from "b";';
+ const r = sortImports(source, "test.ts");
+ expect(r.changed).toBe(false);
+ });
+
+ // --- H5 fixes ---
+
+ it("keeps side-effect imports in place", () => {
+ const source = [
+ 'import "polyfill";',
+ 'import { z } from "zod";',
+ 'import { a } from "alpha";',
+ "",
+ "const x = 1;",
+ ].join("\n");
+ const r = sortImports(source, "test.ts");
+ const lines = r.output.split("\n").filter(Boolean);
+ // Side-effect import should be first (before sorted imports)
+ expect(lines[0]).toBe('import "polyfill";');
+ // Sorted imports follow
+ expect(lines[1]).toContain("alpha");
+ expect(lines[2]).toContain("zod");
+ });
+
+ it("handles side-effect imports with single quotes", () => {
+ const source = [
+ "import './setup';",
+ 'import { b } from "beta";',
+ 'import { a } from "alpha";',
+ "",
+ "const x = 1;",
+ ].join("\n");
+ const r = sortImports(source, "test.ts");
+ const lines = r.output.split("\n").filter(Boolean);
+ expect(lines[0]).toBe("import './setup';");
+ });
+
+ it("handles multi-line imports", () => {
+ const source = [
+ "import {",
+ " readFile,",
+ " writeFile,",
+ '} from "node:fs";',
+ 'import { a } from "alpha";',
+ "",
+ "const x = 1;",
+ ].join("\n");
+ const r = sortImports(source, "test.ts");
+ // Multi-line import should be parsed and included in sorting
+ const outputLines = r.output.split("\n");
+ // alpha (bare, group 2) should come after node:fs (group 0)
+ const fsIdx = outputLines.findIndex((l: string) => l.includes("node:fs"));
+ const alphaIdx = outputLines.findIndex((l: string) => l.includes("alpha"));
+ expect(fsIdx).toBeLessThan(alphaIdx);
+ });
+
+ it("handles mix of side-effect, single-line, and multi-line imports", () => {
+ const source = [
+ 'import "reflect-metadata";',
+ 'import { z } from "zod";',
+ "import {",
+ " Component,",
+ " OnInit,",
+ '} from "@angular/core";',
+ 'import { a } from "alpha";',
+ "",
+ "const x = 1;",
+ ].join("\n");
+ const r = sortImports(source, "test.ts");
+ const outputLines = r.output.split("\n");
+ // Side-effect import first
+ expect(outputLines[0]).toBe('import "reflect-metadata";');
+ // Then sorted: @angular/core (group 1), alpha (group 2), zod (group 2)
+ const angularIdx = outputLines.findIndex((l: string) => l.includes("@angular/core"));
+ const alphaIdx = outputLines.findIndex((l: string) => l.includes("alpha"));
+ const zodIdx = outputLines.findIndex((l: string) => l.includes("zod"));
+ expect(angularIdx).toBeLessThan(alphaIdx);
+ expect(alphaIdx).toBeLessThan(zodIdx);
+ });
+});
diff --git a/extensions/hayameru/transforms/sort-imports.ts b/extensions/hayameru/transforms/sort-imports.ts
new file mode 100644
index 00000000..75d40a39
--- /dev/null
+++ b/extensions/hayameru/transforms/sort-imports.ts
@@ -0,0 +1,164 @@
+import type { TransformResult } from "./var-to-const.js";
+
+type ImportLine = {
+ raw: string;
+ source: string;
+ group: number; // 0=node:, 1=@scope, 2=bare, 3=relative
+};
+
+/** Side-effect imports like `import "polyfill"` or `import './setup'` */
+type SideEffectImport = {
+ raw: string;
+ originalIndex: number;
+};
+
+function classifyImport(source: string): number {
+ if (source.startsWith("node:")) return 0;
+ if (source.startsWith("@")) return 1;
+ if (source.startsWith(".")) return 3;
+ return 2;
+}
+
+const SIDE_EFFECT_RE = /^import\s+["'][^"']+["']\s*;?\s*$/;
+const IMPORT_FROM_RE = /^import\s+.*?from\s+["']([^"']+)["']/;
+
+export function sortImports(source: string, _filePath: string): TransformResult {
+ const lines = source.split("\n");
+ const result: string[] = [];
+ const importBlock: ImportLine[] = [];
+ const sideEffects: SideEffectImport[] = [];
+ let blockStart = -1;
+ let edits = 0;
+
+ // Track multi-line import state
+ let multiLineAccum: string[] = [];
+ let inMultiLineImport = false;
+
+ function flushBlock() {
+ if (importBlock.length <= 1 && sideEffects.length === 0) {
+ for (const imp of importBlock) result.push(imp.raw);
+ importBlock.length = 0;
+ sideEffects.length = 0;
+ blockStart = -1;
+ return;
+ }
+
+ if (importBlock.length <= 1 && sideEffects.length > 0) {
+ // Only side-effect imports, push them as-is
+ for (const se of sideEffects) result.push(se.raw);
+ for (const imp of importBlock) result.push(imp.raw);
+ importBlock.length = 0;
+ sideEffects.length = 0;
+ blockStart = -1;
+ return;
+ }
+
+ const original = importBlock.map((i) => i.raw).join("\n");
+
+ // Sort by group, then alphabetically within group
+ const sorted = [...importBlock].sort((a, b) => {
+ if (a.group !== b.group) return a.group - b.group;
+ return a.source.localeCompare(b.source);
+ });
+
+ // Side-effect imports go first (they stay in relative order)
+ for (const se of sideEffects) {
+ result.push(se.raw);
+ }
+
+ // Add blank lines between groups
+ let lastGroup = -1;
+ for (const imp of sorted) {
+ if (lastGroup !== -1 && imp.group !== lastGroup) {
+ result.push("");
+ }
+ result.push(imp.raw);
+ lastGroup = imp.group;
+ }
+
+ const sortedStr = sorted.map((i) => i.raw).join("\n");
+ if (original !== sortedStr) edits++;
+
+ importBlock.length = 0;
+ sideEffects.length = 0;
+ blockStart = -1;
+ }
+
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i]!;
+
+ // Multi-line import continuation
+ if (inMultiLineImport) {
+ multiLineAccum.push(line);
+ // Check if this line closes the import (contains `}` and `from`)
+ const joined = multiLineAccum.join("\n");
+ const closedMatch = joined.match(/^import\s+.*?from\s+["']([^"']+)["']/s);
+ if (closedMatch) {
+ // Multi-line import is complete
+ inMultiLineImport = false;
+ if (blockStart === -1) blockStart = i;
+ importBlock.push({
+ raw: joined,
+ source: closedMatch[1]!,
+ group: classifyImport(closedMatch[1]!),
+ });
+ multiLineAccum = [];
+ }
+ continue;
+ }
+
+ // Side-effect import
+ if (SIDE_EFFECT_RE.test(line.trim())) {
+ if (blockStart === -1) blockStart = i;
+ sideEffects.push({ raw: line, originalIndex: i });
+ continue;
+ }
+
+ // Standard single-line import
+ const importMatch = line.match(IMPORT_FROM_RE);
+ if (importMatch) {
+ if (blockStart === -1) blockStart = i;
+ importBlock.push({
+ raw: line,
+ source: importMatch[1]!,
+ group: classifyImport(importMatch[1]!),
+ });
+ continue;
+ }
+
+ // Detect start of multi-line import: `import {` or `import type {` without `from` on same line
+ if (
+ /^\s*import\s/.test(line) &&
+ line.includes("{") &&
+ !line.includes("}") &&
+ !IMPORT_FROM_RE.test(line)
+ ) {
+ inMultiLineImport = true;
+ multiLineAccum = [line];
+ if (blockStart === -1) blockStart = i;
+ continue;
+ }
+
+ if ((importBlock.length > 0 || sideEffects.length > 0) && line.trim() === "") {
+ // Empty line in import block — keep collecting
+ continue;
+ }
+
+ flushBlock();
+ result.push(line);
+ }
+
+ // Flush any remaining multi-line accumulator as raw lines
+ if (inMultiLineImport && multiLineAccum.length > 0) {
+ for (const ml of multiLineAccum) result.push(ml);
+ }
+
+ flushBlock();
+
+ return {
+ output: result.join("\n"),
+ changed: edits > 0,
+ edits,
+ description: edits > 0 ? "Sorted and grouped import statements" : "Imports already sorted",
+ };
+}
diff --git a/extensions/hayameru/transforms/var-to-const.test.ts b/extensions/hayameru/transforms/var-to-const.test.ts
new file mode 100644
index 00000000..b8ba40d6
--- /dev/null
+++ b/extensions/hayameru/transforms/var-to-const.test.ts
@@ -0,0 +1,70 @@
+import { describe, it, expect } from "vitest";
+import { varToConst } from "./var-to-const.js";
+
+describe("varToConst", () => {
+ it("converts var to const", () => {
+ const r = varToConst("var x = 1;\nvar y = 'hello';", "test.ts");
+ expect(r.changed).toBe(true);
+ expect(r.edits).toBe(2);
+ expect(r.output).toContain("const x = 1;");
+ expect(r.output).toContain("const y = 'hello';");
+ });
+
+ it("uses let for reassigned variables", () => {
+ const source = "var x = 1;\nx = 2;";
+ const r = varToConst(source, "test.ts");
+ expect(r.output).toContain("let x = 1;");
+ });
+
+ it("leaves const/let unchanged", () => {
+ const source = "const x = 1;\nlet y = 2;";
+ const r = varToConst(source, "test.ts");
+ expect(r.changed).toBe(false);
+ expect(r.edits).toBe(0);
+ });
+
+ it("preserves indentation", () => {
+ const source = " var x = 1;";
+ const r = varToConst(source, "test.ts");
+ expect(r.output).toBe(" const x = 1;");
+ });
+
+ // --- H6 fixes ---
+
+ it("detects array destructuring reassignment -> uses let", () => {
+ const source = "var a = 1;\nvar b = 2;\n[a, b] = [b, a];";
+ const r = varToConst(source, "test.ts");
+ expect(r.output).toContain("let a = 1;");
+ expect(r.output).toContain("let b = 2;");
+ });
+
+ it("detects object destructuring reassignment -> uses let", () => {
+ const source = "var x = 0;\nvar y = 0;\n({x, y} = getCoords());";
+ const r = varToConst(source, "test.ts");
+ expect(r.output).toContain("let x = 0;");
+ expect(r.output).toContain("let y = 0;");
+ });
+
+ it("non-reassigned vars still become const with destructuring elsewhere", () => {
+ const source = "var a = 1;\nvar b = 2;\nvar c = 3;\n[a, b] = [b, a];";
+ const r = varToConst(source, "test.ts");
+ // a and b are reassigned -> let
+ expect(r.output).toContain("let a = 1;");
+ expect(r.output).toContain("let b = 2;");
+ // c is not reassigned -> const
+ expect(r.output).toContain("const c = 3;");
+ });
+
+ it("detects object destructuring with renaming", () => {
+ const source = "var name = '';\n({label: name} = obj);";
+ const r = varToConst(source, "test.ts");
+ expect(r.output).toContain("let name = '';");
+ });
+
+ it("detects array destructuring with rest", () => {
+ const source = "var first = 0;\nvar rest = [];\n[first, ...rest] = arr;";
+ const r = varToConst(source, "test.ts");
+ expect(r.output).toContain("let first = 0;");
+ expect(r.output).toContain("let rest = [];");
+ });
+});
diff --git a/extensions/hayameru/transforms/var-to-const.ts b/extensions/hayameru/transforms/var-to-const.ts
new file mode 100644
index 00000000..bbc8ee8c
--- /dev/null
+++ b/extensions/hayameru/transforms/var-to-const.ts
@@ -0,0 +1,90 @@
+export type TransformResult = {
+ output: string;
+ changed: boolean;
+ edits: number;
+ description: string;
+};
+
+/**
+ * Extract variable names from a destructuring pattern (array or object).
+ * Handles simple patterns like `[a, b]` and `{x, y}`.
+ */
+function extractDestructuredNames(pattern: string): string[] {
+ // Remove outer brackets/braces
+ const inner = pattern.slice(1, -1).trim();
+ if (!inner) return [];
+ return inner
+ .split(",")
+ .map((s) => {
+ const trimmed = s.trim();
+ // Handle renaming: `{ orig: alias }` -> alias
+ if (trimmed.includes(":")) {
+ return trimmed.split(":").pop()!.trim();
+ }
+ // Handle rest: `...rest` -> rest
+ if (trimmed.startsWith("...")) {
+ return trimmed.slice(3).trim();
+ }
+ return trimmed;
+ })
+ .filter(Boolean);
+}
+
+export function varToConst(source: string, _filePath: string): TransformResult {
+ const lines = source.split("\n");
+ let edits = 0;
+ const result: string[] = [];
+
+ // Track variables that are reassigned
+ const reassigned = new Set();
+ for (const line of lines) {
+ // Standard reassignment: `identifier =`, `identifier +=`, etc.
+ const assignMatch = line.match(/^\s*(\w+)\s*(?:\+|-|\*|\/|%|\|\||&&)?=/);
+ if (assignMatch && !line.match(/^\s*(?:var|let|const)\s/)) {
+ reassigned.add(assignMatch[1]!);
+ }
+
+ // Array destructuring reassignment: `[a, b] = ...`
+ const arrayDestructMatch = line.match(/^\s*(\[[^\]]+\])\s*=/);
+ if (arrayDestructMatch && !line.match(/^\s*(?:var|let|const)\s/)) {
+ for (const name of extractDestructuredNames(arrayDestructMatch[1]!)) {
+ reassigned.add(name);
+ }
+ }
+
+ // Object destructuring reassignment: `({x, y} = ...)` or `{x, y} = ...`
+ // Note: bare `{x} = expr` is technically a syntax error without parens,
+ // but we detect both patterns for robustness.
+ const objDestructMatch = line.match(/^\s*\(?\s*(\{[^}]+\})\s*\)?\s*=/);
+ if (objDestructMatch && !line.match(/^\s*(?:var|let|const)\s/)) {
+ for (const name of extractDestructuredNames(objDestructMatch[1]!)) {
+ reassigned.add(name);
+ }
+ }
+ }
+
+ for (const line of lines) {
+ const match = line.match(/^(\s*)var\s+(\w+)/);
+ if (match) {
+ const [, indent, varName] = match;
+ if (reassigned.has(varName!)) {
+ result.push(line.replace(/^(\s*)var\s/, `${indent}let `));
+ } else {
+ result.push(line.replace(/^(\s*)var\s/, `${indent}const `));
+ }
+ edits++;
+ } else {
+ result.push(line);
+ }
+ }
+
+ return {
+ output: result.join("\n"),
+ changed: edits > 0,
+ edits,
+ description:
+ edits > 0
+ ? `Converted ${edits} var declaration(s) to const/let`
+ : "No var declarations found",
+ };
+}
diff --git a/extensions/imessage/package.json b/extensions/imessage/package.json
index d0ef0b9f..f0fc04b5 100644
--- a/extensions/imessage/package.json
+++ b/extensions/imessage/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-imessage",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Mayros iMessage channel plugin",
"type": "module",
diff --git a/extensions/interactive-permissions/package.json b/extensions/interactive-permissions/package.json
index bb3a6991..b4cd56ef 100644
--- a/extensions/interactive-permissions/package.json
+++ b/extensions/interactive-permissions/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-interactive-permissions",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "Runtime permission dialogs, bash intent classification, policy persistence, and audit trail",
"type": "module",
diff --git a/extensions/iot-bridge/package.json b/extensions/iot-bridge/package.json
index 78a0c08e..6952230f 100644
--- a/extensions/iot-bridge/package.json
+++ b/extensions/iot-bridge/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-iot-bridge",
- "version": "0.1.13",
+ "version": "0.1.14",
"private": true,
"description": "IoT Bridge — connect MAYROS agents to aingle_minimal IoT nodes via REST",
"type": "module",
diff --git a/extensions/irc/package.json b/extensions/irc/package.json
index 7e9603e4..f0b023c1 100644
--- a/extensions/irc/package.json
+++ b/extensions/irc/package.json
@@ -1,6 +1,6 @@
{
"name": "@apilium/mayros-irc",
- "version": "0.1.13",
+ "version": "0.1.14",
"description": "Mayros IRC channel plugin",
"license": "MIT",
"type": "module",
diff --git a/extensions/kakeru-bridge/bridges/claude-bridge.ts b/extensions/kakeru-bridge/bridges/claude-bridge.ts
new file mode 100644
index 00000000..21543287
--- /dev/null
+++ b/extensions/kakeru-bridge/bridges/claude-bridge.ts
@@ -0,0 +1,61 @@
+import type {
+ IPlatformBridge,
+ PlatformCapability,
+ PlatformStatus,
+ PlatformTask,
+ TaskResult,
+} from "../platform-bridge.js";
+
+export class ClaudeBridge implements IPlatformBridge {
+ readonly id = "claude";
+ readonly name = "Claude Code (Native)";
+ readonly capabilities: PlatformCapability[] = [
+ "code-edit",
+ "file-read",
+ "shell-exec",
+ "vision",
+ "long-context",
+ ];
+ private status: PlatformStatus = "idle";
+ private activeTasks = new Map();
+
+ async connect(): Promise {
+ this.status = "idle";
+ }
+
+ async disconnect(): Promise {
+ for (const [, ctrl] of this.activeTasks) ctrl.abort();
+ this.activeTasks.clear();
+ this.status = "disconnected";
+ }
+
+ getStatus(): PlatformStatus {
+ return this.status;
+ }
+
+ async executeTask(task: PlatformTask): Promise {
+ const controller = new AbortController();
+ this.activeTasks.set(task.id, controller);
+ this.status = "busy";
+
+ try {
+ return {
+ success: false,
+ output: "Claude native bridge: not yet implemented. Use direct agent execution instead.",
+ filesModified: [],
+ durationMs: 0,
+ };
+ } finally {
+ this.activeTasks.delete(task.id);
+ this.status = "idle";
+ }
+ }
+
+ async cancelTask(taskId: string): Promise {
+ const ctrl = this.activeTasks.get(taskId);
+ if (ctrl) {
+ ctrl.abort();
+ this.activeTasks.delete(taskId);
+ }
+ }
+}
diff --git a/extensions/kakeru-bridge/bridges/codex-bridge.test.ts b/extensions/kakeru-bridge/bridges/codex-bridge.test.ts
new file mode 100644
index 00000000..9fd2ae54
--- /dev/null
+++ b/extensions/kakeru-bridge/bridges/codex-bridge.test.ts
@@ -0,0 +1,48 @@
+import { describe, it, expect } from "vitest";
+import { CodexBridge } from "./codex-bridge.js";
+
+describe("CodexBridge", () => {
+ it("starts disconnected", () => {
+ const bridge = new CodexBridge({
+ binaryPath: "codex",
+ apiKeyEnv: "OPENAI_API_KEY",
+ defaultTimeout: 5000,
+ });
+ expect(bridge.getStatus()).toBe("disconnected");
+ expect(bridge.id).toBe("codex");
+ expect(bridge.name).toBe("OpenAI Codex CLI");
+ });
+
+ it("has expected capabilities", () => {
+ const bridge = new CodexBridge({
+ binaryPath: "codex",
+ apiKeyEnv: "OPENAI_API_KEY",
+ defaultTimeout: 5000,
+ });
+ expect(bridge.capabilities).toContain("code-edit");
+ expect(bridge.capabilities).toContain("shell-exec");
+ });
+
+ it("fails to connect when binary not found", async () => {
+ const bridge = new CodexBridge({
+ binaryPath: "/nonexistent/codex-binary",
+ apiKeyEnv: "OPENAI_API_KEY",
+ defaultTimeout: 5000,
+ });
+
+ await expect(bridge.connect()).rejects.toThrow();
+ expect(bridge.getStatus()).toBe("error");
+ });
+
+ it("rejects task when not connected", async () => {
+ const bridge = new CodexBridge({
+ binaryPath: "codex",
+ apiKeyEnv: "OPENAI_API_KEY",
+ defaultTimeout: 5000,
+ });
+
+ await expect(bridge.executeTask({ id: "t1", prompt: "test", workDir: "/tmp" })).rejects.toThrow(
+ "not ready",
+ );
+ });
+});
diff --git a/extensions/kakeru-bridge/bridges/codex-bridge.ts b/extensions/kakeru-bridge/bridges/codex-bridge.ts
new file mode 100644
index 00000000..5c72117c
--- /dev/null
+++ b/extensions/kakeru-bridge/bridges/codex-bridge.ts
@@ -0,0 +1,181 @@
+import { spawn } from "node:child_process";
+import type {
+ IPlatformBridge,
+ PlatformCapability,
+ PlatformStatus,
+ PlatformTask,
+ TaskResult,
+} from "../platform-bridge.js";
+
+export type CodexBridgeConfig = {
+ binaryPath: string;
+ apiKeyEnv: string;
+ defaultTimeout: number;
+};
+
+export class CodexBridge implements IPlatformBridge {
+ readonly id = "codex";
+ readonly name = "OpenAI Codex CLI";
+ readonly capabilities: PlatformCapability[] = ["code-edit", "file-read", "shell-exec"];
+ private status: PlatformStatus = "disconnected";
+ private config: CodexBridgeConfig;
+ private activeProcesses = new Map void }>();
+
+ constructor(config: CodexBridgeConfig) {
+ this.config = config;
+ }
+
+ async connect(): Promise {
+ this.status = "connecting";
+
+ // Verify binary exists
+ try {
+ await new Promise((resolve, reject) => {
+ const proc = spawn(this.config.binaryPath, ["--version"], {
+ stdio: "pipe",
+ timeout: 5000,
+ });
+ proc.on("close", (code) => {
+ if (code === 0) resolve();
+ else reject(new Error(`codex --version exited with code ${code}`));
+ });
+ proc.on("error", reject);
+ });
+ } catch (err) {
+ this.status = "error";
+ throw new Error(`Codex binary not found at ${this.config.binaryPath}: ${String(err)}`);
+ }
+
+ // Verify API key
+ if (!process.env[this.config.apiKeyEnv]) {
+ this.status = "error";
+ throw new Error(`Environment variable ${this.config.apiKeyEnv} not set`);
+ }
+
+ this.status = "idle";
+ }
+
+ async disconnect(): Promise {
+ for (const [, proc] of this.activeProcesses) proc.kill();
+ this.activeProcesses.clear();
+ this.status = "disconnected";
+ }
+
+ getStatus(): PlatformStatus {
+ return this.status;
+ }
+
+ async executeTask(task: PlatformTask): Promise {
+ if (this.status !== "idle") {
+ throw new Error(`Codex bridge not ready (status: ${this.status})`);
+ }
+
+ const start = Date.now();
+ const timeout = task.timeout ?? this.config.defaultTimeout;
+ this.status = "busy";
+
+ try {
+ const output = await new Promise((resolve, reject) => {
+ const args = ["--quiet", "--approval-mode", "full-auto", "--prompt", task.prompt];
+
+ const proc = spawn(this.config.binaryPath, args, {
+ cwd: task.workDir,
+ stdio: ["ignore", "pipe", "pipe"],
+ timeout,
+ env: { ...process.env },
+ });
+
+ this.activeProcesses.set(task.id, {
+ kill: () => proc.kill("SIGTERM"),
+ });
+
+ let stdout = "";
+ let stderr = "";
+
+ proc.stdout?.on("data", (data: Buffer) => {
+ stdout += data.toString();
+ });
+ proc.stderr?.on("data", (data: Buffer) => {
+ stderr += data.toString();
+ });
+
+ proc.on("close", (code) => {
+ this.activeProcesses.delete(task.id);
+ if (code === 0) {
+ resolve(stdout);
+ } else {
+ reject(new Error(`Codex exited with code ${code}: ${stderr.slice(0, 500)}`));
+ }
+ });
+
+ proc.on("error", (err) => {
+ this.activeProcesses.delete(task.id);
+ reject(err);
+ });
+ });
+
+ // Parse modified files from output (regex heuristic — fallback)
+ const filesModified: string[] = [];
+ const filePatterns = output.match(/(?:wrote|modified|created|updated)\s+(\S+)/gi);
+ if (filePatterns) {
+ for (const match of filePatterns) {
+ const file = match.replace(/^(?:wrote|modified|created|updated)\s+/i, "").trim();
+ if (file && !filesModified.includes(file)) filesModified.push(file);
+ }
+ }
+
+ // Primary: use git diff for accurate file detection
+ try {
+ const gitOutput = await new Promise((resolve, reject) => {
+ const git = spawn("git", ["diff", "--name-only"], {
+ cwd: task.workDir,
+ stdio: ["ignore", "pipe", "pipe"],
+ timeout: 5000,
+ });
+ let out = "";
+ git.stdout?.on("data", (d: Buffer) => {
+ out += d.toString();
+ });
+ git.on("close", (code) => {
+ if (code === 0) resolve(out);
+ else reject(new Error(`git diff exited ${code}`));
+ });
+ git.on("error", reject);
+ });
+ filesModified.push(
+ ...gitOutput
+ .split("\n")
+ .map((f) => f.trim())
+ .filter(Boolean)
+ .filter((f) => !filesModified.includes(f)),
+ );
+ } catch {
+ // git diff failed — fall through to regex fallback only
+ }
+
+ return {
+ success: true,
+ output,
+ filesModified,
+ durationMs: Date.now() - start,
+ };
+ } catch (err) {
+ return {
+ success: false,
+ output: String(err),
+ filesModified: [],
+ durationMs: Date.now() - start,
+ };
+ } finally {
+ this.status = "idle";
+ }
+ }
+
+ async cancelTask(taskId: string): Promise {
+ const proc = this.activeProcesses.get(taskId);
+ if (proc) {
+ proc.kill();
+ this.activeProcesses.delete(taskId);
+ }
+ }
+}
diff --git a/extensions/kakeru-bridge/config.ts b/extensions/kakeru-bridge/config.ts
new file mode 100644
index 00000000..36dedad6
--- /dev/null
+++ b/extensions/kakeru-bridge/config.ts
@@ -0,0 +1,61 @@
+import { assertAllowedKeys } from "../shared/cortex-config.js";
+
+export type KakeruConfig = {
+ enabled: boolean;
+ codex: {
+ enabled: boolean;
+ binaryPath: string;
+ apiKeyEnv: string;
+ defaultTimeout: number;
+ };
+ branchPrefix: string;
+ autoMerge: boolean;
+};
+
+const DEFAULTS: KakeruConfig = {
+ enabled: false,
+ codex: {
+ enabled: false,
+ binaryPath: "codex",
+ apiKeyEnv: "OPENAI_API_KEY",
+ defaultTimeout: 300_000,
+ },
+ branchPrefix: "kakeru",
+ autoMerge: false,
+};
+
+export function parseKakeruConfig(raw: unknown): KakeruConfig {
+ if (!raw || typeof raw !== "object" || Array.isArray(raw)) return { ...DEFAULTS };
+ const cfg = raw as Record;
+ assertAllowedKeys(cfg, ["enabled", "codex", "branchPrefix", "autoMerge"], "kakeru config");
+
+ const enabled = cfg.enabled === true;
+ const branchPrefix =
+ typeof cfg.branchPrefix === "string" ? cfg.branchPrefix : DEFAULTS.branchPrefix;
+ const autoMerge = cfg.autoMerge === true;
+
+ let codex = { ...DEFAULTS.codex };
+ if (cfg.codex && typeof cfg.codex === "object" && !Array.isArray(cfg.codex)) {
+ const c = cfg.codex as Record;
+ codex = {
+ enabled: c.enabled === true,
+ binaryPath: typeof c.binaryPath === "string" ? c.binaryPath : DEFAULTS.codex.binaryPath,
+ apiKeyEnv: typeof c.apiKeyEnv === "string" ? c.apiKeyEnv : DEFAULTS.codex.apiKeyEnv,
+ defaultTimeout:
+ typeof c.defaultTimeout === "number" ? c.defaultTimeout : DEFAULTS.codex.defaultTimeout,
+ };
+ }
+
+ return { enabled, codex, branchPrefix, autoMerge };
+}
+
+export const kakeruConfigSchema = {
+ parse: parseKakeruConfig,
+ uiHints: {
+ enabled: { label: "Enable Kakeru", help: "Enable dual-platform coordination (opt-in)" },
+ "codex.enabled": { label: "Enable Codex Bridge", help: "Enable OpenAI Codex CLI bridge" },
+ "codex.binaryPath": { label: "Codex Binary Path", placeholder: "codex", advanced: true },
+ branchPrefix: { label: "Branch Prefix", placeholder: "kakeru", advanced: true },
+ autoMerge: { label: "Auto-Merge", help: "Automatically merge platform results" },
+ },
+};
diff --git a/extensions/kakeru-bridge/coordinator.test.ts b/extensions/kakeru-bridge/coordinator.test.ts
new file mode 100644
index 00000000..d9c590d8
--- /dev/null
+++ b/extensions/kakeru-bridge/coordinator.test.ts
@@ -0,0 +1,62 @@
+import { describe, it, expect } from "vitest";
+import { PlatformCoordinator } from "./coordinator.js";
+import { ClaudeBridge } from "./bridges/claude-bridge.js";
+
+describe("PlatformCoordinator", () => {
+ it("registers and lists bridges", () => {
+ const coordinator = new PlatformCoordinator();
+ const bridge = new ClaudeBridge();
+ coordinator.registerBridge(bridge);
+
+ const list = coordinator.listBridges();
+ expect(list.length).toBe(1);
+ expect(list[0]!.id).toBe("claude");
+ });
+
+ it("acquires and releases file locks", () => {
+ const coordinator = new PlatformCoordinator();
+
+ expect(coordinator.acquireLock("src/app.ts", "claude")).toBe(true);
+ expect(coordinator.acquireLock("src/app.ts", "codex")).toBe(false); // conflict
+ expect(coordinator.acquireLock("src/app.ts", "claude")).toBe(true); // same owner OK
+
+ coordinator.releaseLock("src/app.ts", "claude");
+ expect(coordinator.acquireLock("src/app.ts", "codex")).toBe(true); // now available
+ });
+
+ it("releaseAllLocks clears all for a platform", () => {
+ const coordinator = new PlatformCoordinator();
+ coordinator.acquireLock("a.ts", "claude");
+ coordinator.acquireLock("b.ts", "claude");
+ coordinator.releaseAllLocks("claude");
+
+ expect(coordinator.acquireLock("a.ts", "codex")).toBe(true);
+ expect(coordinator.acquireLock("b.ts", "codex")).toBe(true);
+ });
+
+ it("executeWorkflow returns error for missing platform", async () => {
+ const coordinator = new PlatformCoordinator();
+ const results = await coordinator.executeWorkflow(
+ [{ platformId: "unknown", task: { id: "t1", prompt: "test", workDir: "/tmp" } }],
+ "kakeru",
+ );
+
+ expect(results.get("t1")!.success).toBe(false);
+ });
+
+ it("executeWorkflow runs tasks on registered bridges", async () => {
+ const coordinator = new PlatformCoordinator();
+ const bridge = new ClaudeBridge();
+ await bridge.connect();
+ coordinator.registerBridge(bridge);
+
+ const results = await coordinator.executeWorkflow(
+ [{ platformId: "claude", task: { id: "t1", prompt: "hello", workDir: "/tmp" } }],
+ "kakeru",
+ );
+
+ // Claude bridge is not yet implemented — returns honest failure
+ expect(results.get("t1")!.success).toBe(false);
+ expect(results.get("t1")!.output).toContain("not yet implemented");
+ });
+});
diff --git a/extensions/kakeru-bridge/coordinator.ts b/extensions/kakeru-bridge/coordinator.ts
new file mode 100644
index 00000000..4bdafd6c
--- /dev/null
+++ b/extensions/kakeru-bridge/coordinator.ts
@@ -0,0 +1,132 @@
+import { randomUUID } from "node:crypto";
+import type { IPlatformBridge, PlatformTask, TaskResult } from "./platform-bridge.js";
+
export type WorkflowTask = {
  /** Id of the registered bridge that should execute this task. */
  platformId: string;
  /** The task payload handed to that bridge. */
  task: PlatformTask;
};
+
+export class PlatformCoordinator {
+ private bridges = new Map();
+ private fileLocks = new Map(); // path -> platformId
+
  /** Register (or replace) a platform bridge, keyed by its id. */
  registerBridge(bridge: IPlatformBridge): void {
    this.bridges.set(bridge.id, bridge);
  }
+
  /** Remove a bridge by id; no-op when the id is unknown. */
  unregisterBridge(id: string): void {
    this.bridges.delete(id);
  }
+
  /** Look up a registered bridge by id. */
  getBridge(id: string): IPlatformBridge | undefined {
    return this.bridges.get(id);
  }
+
+ listBridges(): Array<{ id: string; name: string; status: string; capabilities: string[] }> {
+ return [...this.bridges.values()].map((b) => ({
+ id: b.id,
+ name: b.name,
+ status: b.getStatus(),
+ capabilities: [...b.capabilities],
+ }));
+ }
+
+ acquireLock(filePath: string, platformId: string): boolean {
+ const existing = this.fileLocks.get(filePath);
+ if (existing && existing !== platformId) return false;
+ this.fileLocks.set(filePath, platformId);
+ return true;
+ }
+
+ releaseLock(filePath: string, platformId: string): void {
+ if (this.fileLocks.get(filePath) === platformId) {
+ this.fileLocks.delete(filePath);
+ }
+ }
+
+ releaseAllLocks(platformId: string): void {
+ const toDelete: string[] = [];
+ for (const [path, owner] of this.fileLocks) {
+ if (owner === platformId) toDelete.push(path);
+ }
+ for (const p of toDelete) this.fileLocks.delete(p);
+ }
+
+ async executeWorkflow(
+ tasks: WorkflowTask[],
+ branchPrefix: string,
+ ): Promise