mirror of
https://github.com/instructkr/claude-code.git
synced 2026-05-16 02:46:44 +00:00
Compare commits
208 Commits
8f55870dad
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
63ce483c27 | ||
|
|
c910063161 | ||
|
|
04c2abb412 | ||
|
|
33df16b6dd | ||
|
|
17260f69f1 | ||
|
|
6f73103bf1 | ||
|
|
a92e5b2892 | ||
|
|
0fb1c2d39e | ||
|
|
0eddcca702 | ||
|
|
2e93264919 | ||
|
|
1ac8ce8882 | ||
|
|
ab27f61597 | ||
|
|
5a43d3b553 | ||
|
|
2c601ef22d | ||
|
|
4cd2bb859b | ||
|
|
62bc7b6a17 | ||
|
|
9278748038 | ||
|
|
02889d701a | ||
|
|
7b63c0a2eb | ||
|
|
b11cdf34b3 | ||
|
|
de0f1bba2e | ||
|
|
4d78e91229 | ||
|
|
cf5eb157e1 | ||
|
|
8019999ce5 | ||
|
|
21bbbb7f1f | ||
|
|
124d55f13e | ||
|
|
eb7a2088e2 | ||
|
|
11c6a6007f | ||
|
|
060603c196 | ||
|
|
4ccbd8f97c | ||
|
|
2221dd4f0f | ||
|
|
c5a18e1864 | ||
|
|
d7f1ad7139 | ||
|
|
238c0a49d1 | ||
|
|
d04a74cc97 | ||
|
|
8fd5894022 | ||
|
|
0f8717834f | ||
|
|
fb9095c611 | ||
|
|
761e50d1c6 | ||
|
|
5155225b25 | ||
|
|
deeb1efde8 | ||
|
|
c9b34a2947 | ||
|
|
5e0cf62be5 | ||
|
|
51fa5a7048 | ||
|
|
33ac5c30d3 | ||
|
|
89d1052f3a | ||
|
|
afd88088d6 | ||
|
|
673d37d86a | ||
|
|
fc35dc878c | ||
|
|
e199a392fb | ||
|
|
f27bd46759 | ||
|
|
5294648373 | ||
|
|
a3af0133e0 | ||
|
|
1bceda2063 | ||
|
|
99efb2131e | ||
|
|
7d859ae8a2 | ||
|
|
c886cbca99 | ||
|
|
f8270b34e6 | ||
|
|
0940253376 | ||
|
|
0bb145141e | ||
|
|
7b21ac12b9 | ||
|
|
2db0a5f70d | ||
|
|
8c9e41aab4 | ||
|
|
90c1d38d40 | ||
|
|
3767addd11 | ||
|
|
8c9a05e71b | ||
|
|
d5620c06b1 | ||
|
|
b63a1bf2bf | ||
|
|
dccb3e72d9 | ||
|
|
ea95bf2576 | ||
|
|
dec8efa5c8 | ||
|
|
ce02ace3a2 | ||
|
|
bc32639ce3 | ||
|
|
a212c662e5 | ||
|
|
2cac66cd38 | ||
|
|
1a110bd870 | ||
|
|
685f078204 | ||
|
|
e4ef0f7f19 | ||
|
|
76581f7239 | ||
|
|
82ec223ed4 | ||
|
|
a6ca5c489b | ||
|
|
3ff8743e79 | ||
|
|
4cf9d43e71 | ||
|
|
29029bfc14 | ||
|
|
22024102dd | ||
|
|
98204a73d4 | ||
|
|
8565b68fb1 | ||
|
|
7ed1cabc14 | ||
|
|
5de73ecf12 | ||
|
|
b655d49bd1 | ||
|
|
e05268e216 | ||
|
|
d6b4349a7d | ||
|
|
ccd99a5188 | ||
|
|
557ab8a9dd | ||
|
|
1f00771fd2 | ||
|
|
0bcab573f3 | ||
|
|
4a76632f6c | ||
|
|
9910d5805e | ||
|
|
39568feff6 | ||
|
|
686cc89a36 | ||
|
|
d3ae7beefb | ||
|
|
faa7551ac2 | ||
|
|
7ce6b78d3a | ||
|
|
2831c45f71 | ||
|
|
ace260139e | ||
|
|
db6f30fa33 | ||
|
|
983ceb939c | ||
|
|
cac73b4410 | ||
|
|
9ae6aa3f30 | ||
|
|
985c6e97f9 | ||
|
|
c522dc970f | ||
|
|
db91a235e9 | ||
|
|
f0e8896d2e | ||
|
|
2454f012b6 | ||
|
|
17b4ab45c6 | ||
|
|
80b8984b62 | ||
|
|
b01192dde7 | ||
|
|
12ca5550fa | ||
|
|
1a6e475f74 | ||
|
|
0cd1eabb5d | ||
|
|
f2ba3648d6 | ||
|
|
76920c7d6c | ||
|
|
0a14f8511e | ||
|
|
391e343220 | ||
|
|
18805b565a | ||
|
|
65a144c3f7 | ||
|
|
6d809cb278 | ||
|
|
f7235ca932 | ||
|
|
41b769fc5a | ||
|
|
7426ede2eb | ||
|
|
8f7eaffcef | ||
|
|
d2b5f5d498 | ||
|
|
607f071ca8 | ||
|
|
d3f8ff9916 | ||
|
|
204af77596 | ||
|
|
5c40d4e778 | ||
|
|
5625ba597b | ||
|
|
4f60cf70f1 | ||
|
|
6a37442ee1 | ||
|
|
0bca524c8c | ||
|
|
2ad56860df | ||
|
|
1fbde9f47f | ||
|
|
879962b826 | ||
|
|
0b0d55d7ec | ||
|
|
7214573f35 | ||
|
|
dcf11f8190 | ||
|
|
f79ca989ba | ||
|
|
e1641aa010 | ||
|
|
5cebdd999d | ||
|
|
bf533d77a7 | ||
|
|
e34209ff7f | ||
|
|
ff37d395bb | ||
|
|
f8d744bb37 | ||
|
|
c8c936ede1 | ||
|
|
57b3e3258b | ||
|
|
06e545325d | ||
|
|
ed3ccae844 | ||
|
|
f4e08d0ecf | ||
|
|
030f2ef20f | ||
|
|
16d6525de4 | ||
|
|
42c79218c9 | ||
|
|
4e0211d36c | ||
|
|
aec291caab | ||
|
|
43b182882a | ||
|
|
307b23d27f | ||
|
|
8c11dd16f4 | ||
|
|
2012718749 | ||
|
|
79d3b809f9 | ||
|
|
9ec4d8398e | ||
|
|
5f45740408 | ||
|
|
675d9ddc78 | ||
|
|
087e31d190 | ||
|
|
a6ee51baab | ||
|
|
6df60a4683 | ||
|
|
3cf0db8f79 | ||
|
|
964458ad4a | ||
|
|
d87c3e6400 | ||
|
|
ac888623a8 | ||
|
|
3a8ce83234 | ||
|
|
37b2b75287 | ||
|
|
f2dc615a8a | ||
|
|
9bc55f9946 | ||
|
|
180ebb3b02 | ||
|
|
534442b8da | ||
|
|
9c2ebb4f39 | ||
|
|
2c48400293 | ||
|
|
713ca7aee4 | ||
|
|
02b591ac64 | ||
|
|
f789525839 | ||
|
|
b1d8a66515 | ||
|
|
ad9e0234a9 | ||
|
|
145413d624 | ||
|
|
17da2964d7 | ||
|
|
9ab569e626 | ||
|
|
4af5664ff8 | ||
|
|
1864ce38ad | ||
|
|
74cc590407 | ||
|
|
a4b20ea34d | ||
|
|
8d0cee46d5 | ||
|
|
45b43b5a96 | ||
|
|
d15268e2cc | ||
|
|
424825f8cb | ||
|
|
07dad88e8c | ||
|
|
5c77896dec | ||
|
|
74bbf4b36f | ||
|
|
481585f865 | ||
|
|
c6e2a7dee4 | ||
|
|
83116555ff |
54
.github/ISSUE_TEMPLATE/anti_slop_triage.yml
vendored
Normal file
54
.github/ISSUE_TEMPLATE/anti_slop_triage.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
name: Anti-slop triage
|
||||
about: Classify low-signal, duplicate, generated, or unsafe reports before engineering work starts.
|
||||
title: "triage: "
|
||||
labels: ["needs-triage"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Use this form for issue intake that needs evidence-backed classification before anyone closes, fixes, or escalates it.
|
||||
Do not paste secrets, live tokens, private logs, or non-public customer data.
|
||||
- type: dropdown
|
||||
id: classification
|
||||
attributes:
|
||||
label: Initial classification
|
||||
description: Pick the strongest current classification. Update it if evidence changes.
|
||||
options:
|
||||
- actionable-bug
|
||||
- actionable-docs
|
||||
- actionable-feature
|
||||
- duplicate
|
||||
- spam-or-promotion
|
||||
- generated-slop-or-hallucinated
|
||||
- unsafe-or-security-sensitive
|
||||
- not-reproducible-yet
|
||||
- externally-blocked
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: evidence
|
||||
attributes:
|
||||
label: Evidence
|
||||
description: Link the PR, issue, command output, docs page, reproduction, duplicate, or policy that supports the classification.
|
||||
placeholder: "Evidence: ..."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: safe_next_action
|
||||
attributes:
|
||||
label: Safe next action
|
||||
description: State the next non-destructive action. If closure or merge is proposed, name the required owner/gate.
|
||||
placeholder: "Next action: label only / request repro / link duplicate / fix docs / defer with rationale / owner review required"
|
||||
validations:
|
||||
required: true
|
||||
- type: checkboxes
|
||||
id: guardrails
|
||||
attributes:
|
||||
label: Guardrails
|
||||
options:
|
||||
- label: I did not close, merge, or mutate remote state as part of this triage-only report.
|
||||
required: true
|
||||
- label: I checked for duplicates or related PRs/issues before recommending action.
|
||||
required: true
|
||||
- label: If this touches credentials, security, or private data, I avoided public reproduction details and routed to the appropriate private/security path.
|
||||
required: true
|
||||
17
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
17
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
## Summary
|
||||
- TBD
|
||||
|
||||
## Anti-slop triage
|
||||
- Classification: <!-- actionable-fix | docs-only | duplicate | generated-slop | unsafe | out-of-scope | needs-maintainer-decision -->
|
||||
- Evidence: <!-- issue link, repro command, failing test, docs source, or duplicate PR -->
|
||||
- Non-destructive review result: <!-- merge candidate | request changes | close/defer with rationale | needs owner gate -->
|
||||
|
||||
## Verification
|
||||
- [ ] Targeted tests/docs checks ran, or the gap is explicitly recorded.
|
||||
- [ ] `git diff --check` passes.
|
||||
- [ ] No live secrets, tokens, private logs, or unrelated generated churn are included.
|
||||
|
||||
## Resolution gate
|
||||
- [ ] If this PR resolves an issue, the issue number and fix evidence are linked.
|
||||
- [ ] If this PR should not merge, the rejection/defer rationale is evidence-backed and does not rely on vibes.
|
||||
- [ ] I did not merge/close remote PRs or issues from an automation lane without owner approval.
|
||||
169
.github/scripts/check_release_readiness.py
vendored
Normal file
169
.github/scripts/check_release_readiness.py
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Validate release-readiness docs that are easy to regress.
|
||||
|
||||
The check is intentionally dependency-free so it can run on developer machines,
|
||||
Windows CI, and minimal release jobs. It validates:
|
||||
|
||||
* required repository policy files exist;
|
||||
* local Markdown links and image targets resolve;
|
||||
* local heading anchors referenced from Markdown resolve; and
|
||||
* command examples do not present the deprecated `cargo install claw-code`
|
||||
package as an executable install path.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from urllib.parse import unquote, urlparse
|
||||
import re
|
||||
import sys
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[2]
|
||||
|
||||
REQUIRED_POLICY_FILES = [
|
||||
"LICENSE",
|
||||
"CONTRIBUTING.md",
|
||||
"SECURITY.md",
|
||||
"SUPPORT.md",
|
||||
"CODE_OF_CONDUCT.md",
|
||||
]
|
||||
|
||||
MARKDOWN_ROOTS = [
|
||||
ROOT / "README.md",
|
||||
ROOT / "USAGE.md",
|
||||
ROOT / "PARITY.md",
|
||||
ROOT / "PHILOSOPHY.md",
|
||||
ROOT / "ROADMAP.md",
|
||||
ROOT / "CONTRIBUTING.md",
|
||||
ROOT / "SECURITY.md",
|
||||
ROOT / "SUPPORT.md",
|
||||
ROOT / "CODE_OF_CONDUCT.md",
|
||||
ROOT / "docs",
|
||||
ROOT / "rust" / "README.md",
|
||||
ROOT / "rust" / "USAGE.md",
|
||||
ROOT / "rust" / "MOCK_PARITY_HARNESS.md",
|
||||
]
|
||||
|
||||
LINK_PATTERN = re.compile(r"(?<!!)\[[^\]\n]+\]\(([^)\s]+)(?:\s+\"[^\"]*\")?\)")
|
||||
HTML_LINK_PATTERN = re.compile(r"""<(?:a|img)\b[^>]*(?:href|src)=["']([^"']+)["']""", re.I)
|
||||
FENCE_PATTERN = re.compile(r"```(?P<lang>[^\n`]*)\n(?P<body>.*?)```", re.S)
|
||||
|
||||
|
||||
def iter_markdown_files() -> list[Path]:
|
||||
files: set[Path] = set()
|
||||
for entry in MARKDOWN_ROOTS:
|
||||
if entry.is_file():
|
||||
files.add(entry)
|
||||
elif entry.is_dir():
|
||||
files.update(entry.rglob("*.md"))
|
||||
return sorted(files)
|
||||
|
||||
|
||||
def github_anchor(heading: str) -> str:
|
||||
anchor = heading.strip().lower()
|
||||
anchor = re.sub(r"<[^>]+>", "", anchor)
|
||||
anchor = re.sub(r"`([^`]*)`", r"\1", anchor)
|
||||
anchor = re.sub(r"[^a-z0-9 _-]", "", anchor)
|
||||
anchor = anchor.replace(" ", "-")
|
||||
anchor = re.sub(r"-+", "-", anchor)
|
||||
return anchor.strip("-")
|
||||
|
||||
|
||||
def anchors_for(path: Path) -> set[str]:
|
||||
anchors: set[str] = set()
|
||||
for line in path.read_text(encoding="utf-8").splitlines():
|
||||
match = re.match(r"^(#{1,6})\s+(.+?)\s*#*\s*$", line)
|
||||
if match:
|
||||
anchors.add(github_anchor(match.group(2)))
|
||||
return anchors
|
||||
|
||||
|
||||
def is_external(target: str) -> bool:
|
||||
parsed = urlparse(target)
|
||||
return parsed.scheme in {"http", "https", "mailto"}
|
||||
|
||||
|
||||
def validate_policies(errors: list[str]) -> None:
|
||||
for relative in REQUIRED_POLICY_FILES:
|
||||
path = ROOT / relative
|
||||
if not path.is_file():
|
||||
errors.append(f"missing required policy file: {relative}")
|
||||
|
||||
|
||||
def validate_markdown_links(errors: list[str]) -> None:
|
||||
anchor_cache: dict[Path, set[str]] = {}
|
||||
for path in iter_markdown_files():
|
||||
text = path.read_text(encoding="utf-8")
|
||||
candidates = [m.group(1) for m in LINK_PATTERN.finditer(text)]
|
||||
candidates.extend(m.group(1) for m in HTML_LINK_PATTERN.finditer(text))
|
||||
for target in candidates:
|
||||
if (
|
||||
not target
|
||||
or is_external(target)
|
||||
or target.startswith(("mailto:", "tel:", "data:"))
|
||||
):
|
||||
continue
|
||||
link_path, _, raw_anchor = target.partition("#")
|
||||
if not link_path:
|
||||
destination = path
|
||||
else:
|
||||
destination = (path.parent / unquote(link_path)).resolve()
|
||||
try:
|
||||
destination.relative_to(ROOT)
|
||||
except ValueError:
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}: link escapes repo root: {target}"
|
||||
)
|
||||
continue
|
||||
if not destination.exists():
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}: missing local link target: {target}"
|
||||
)
|
||||
continue
|
||||
if raw_anchor and destination.suffix.lower() == ".md":
|
||||
anchor = unquote(raw_anchor).lower()
|
||||
anchor_cache.setdefault(destination, anchors_for(destination))
|
||||
if anchor not in anchor_cache[destination]:
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}: missing anchor `{raw_anchor}` in "
|
||||
f"{destination.relative_to(ROOT)}"
|
||||
)
|
||||
|
||||
|
||||
def validate_command_examples(errors: list[str]) -> None:
|
||||
for path in iter_markdown_files():
|
||||
text = path.read_text(encoding="utf-8")
|
||||
for match in FENCE_PATTERN.finditer(text):
|
||||
lang = match.group("lang").strip().lower()
|
||||
if lang not in {"bash", "sh", "shell", "zsh", "powershell", "ps1"}:
|
||||
continue
|
||||
body = match.group("body")
|
||||
for offset, line in enumerate(body.splitlines(), start=1):
|
||||
stripped = line.strip()
|
||||
if not stripped or stripped.startswith(("#", ">")):
|
||||
continue
|
||||
if re.search(r"\bcargo\s+install\s+claw-code\b", stripped):
|
||||
line_no = text.count("\n", 0, match.start()) + offset + 1
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}:{line_no}: deprecated "
|
||||
"`cargo install claw-code` appears in an executable "
|
||||
"command block; use build-from-source docs instead"
|
||||
)
|
||||
|
||||
|
||||
def main() -> int:
|
||||
errors: list[str] = []
|
||||
validate_policies(errors)
|
||||
validate_markdown_links(errors)
|
||||
validate_command_examples(errors)
|
||||
if errors:
|
||||
print("release-readiness check failed:", file=sys.stderr)
|
||||
for error in errors:
|
||||
print(f" - {error}", file=sys.stderr)
|
||||
return 1
|
||||
print("release-readiness check passed")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
15
.github/workflows/release.yml
vendored
15
.github/workflows/release.yml
vendored
@@ -32,6 +32,10 @@ jobs:
|
||||
os: macos-14
|
||||
bin: claw
|
||||
artifact_name: claw-macos-arm64
|
||||
- name: windows-x64
|
||||
os: windows-latest
|
||||
bin: claw.exe
|
||||
artifact_name: claw-windows-x64.exe
|
||||
defaults:
|
||||
run:
|
||||
working-directory: rust
|
||||
@@ -47,22 +51,27 @@ jobs:
|
||||
- name: Build release binary
|
||||
run: cargo build --release -p rusty-claude-cli
|
||||
|
||||
- name: Package artifact
|
||||
- name: Package artifact and checksum
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p dist
|
||||
cp "target/release/${{ matrix.bin }}" "dist/${{ matrix.artifact_name }}"
|
||||
chmod +x "dist/${{ matrix.artifact_name }}"
|
||||
(cd dist && sha256sum "${{ matrix.artifact_name }}" > "${{ matrix.artifact_name }}.sha256")
|
||||
|
||||
- name: Upload workflow artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.artifact_name }}
|
||||
path: rust/dist/${{ matrix.artifact_name }}
|
||||
path: |
|
||||
rust/dist/${{ matrix.artifact_name }}
|
||||
rust/dist/${{ matrix.artifact_name }}.sha256
|
||||
|
||||
- name: Upload release asset
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: rust/dist/${{ matrix.artifact_name }}
|
||||
files: |
|
||||
rust/dist/${{ matrix.artifact_name }}
|
||||
rust/dist/${{ matrix.artifact_name }}.sha256
|
||||
fail_on_unmatched_files: true
|
||||
|
||||
53
.github/workflows/rust-ci.yml
vendored
53
.github/workflows/rust-ci.yml
vendored
@@ -9,8 +9,14 @@ on:
|
||||
paths:
|
||||
- .github/workflows/rust-ci.yml
|
||||
- .github/scripts/check_doc_source_of_truth.py
|
||||
- .github/scripts/check_release_readiness.py
|
||||
- .github/FUNDING.yml
|
||||
- CODE_OF_CONDUCT.md
|
||||
- CONTRIBUTING.md
|
||||
- LICENSE
|
||||
- README.md
|
||||
- SECURITY.md
|
||||
- SUPPORT.md
|
||||
- USAGE.md
|
||||
- PARITY.md
|
||||
- PHILOSOPHY.md
|
||||
@@ -23,8 +29,14 @@ on:
|
||||
paths:
|
||||
- .github/workflows/rust-ci.yml
|
||||
- .github/scripts/check_doc_source_of_truth.py
|
||||
- .github/scripts/check_release_readiness.py
|
||||
- .github/FUNDING.yml
|
||||
- CODE_OF_CONDUCT.md
|
||||
- CONTRIBUTING.md
|
||||
- LICENSE
|
||||
- README.md
|
||||
- SECURITY.md
|
||||
- SUPPORT.md
|
||||
- USAGE.md
|
||||
- PARITY.md
|
||||
- PHILOSOPHY.md
|
||||
@@ -58,6 +70,8 @@ jobs:
|
||||
python-version: "3.x"
|
||||
- name: Check docs and metadata for stale branding
|
||||
run: python .github/scripts/check_doc_source_of_truth.py
|
||||
- name: Check release policy docs and local links
|
||||
run: python .github/scripts/check_release_readiness.py
|
||||
|
||||
fmt:
|
||||
name: cargo fmt
|
||||
@@ -98,3 +112,42 @@ jobs:
|
||||
workspaces: rust -> target
|
||||
- name: Run workspace clippy
|
||||
run: cargo clippy --workspace
|
||||
|
||||
windows-smoke:
|
||||
name: windows PowerShell smoke
|
||||
runs-on: windows-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: rust
|
||||
shell: pwsh
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: rust -> target
|
||||
- name: Build CLI for Windows smoke
|
||||
run: cargo build -p rusty-claude-cli
|
||||
- name: Smoke local commands without live credentials
|
||||
env:
|
||||
ANTHROPIC_API_KEY: ""
|
||||
ANTHROPIC_AUTH_TOKEN: ""
|
||||
OPENAI_API_KEY: ""
|
||||
XAI_API_KEY: ""
|
||||
DASHSCOPE_API_KEY: ""
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
$env:CLAW_CONFIG_HOME = Join-Path $env:RUNNER_TEMP "claw config home"
|
||||
New-Item -ItemType Directory -Force -Path $env:CLAW_CONFIG_HOME | Out-Null
|
||||
$workspace = Join-Path $env:RUNNER_TEMP "claw path smoke"
|
||||
New-Item -ItemType Directory -Force -Path $workspace | Out-Null
|
||||
$claw = Join-Path $env:GITHUB_WORKSPACE "rust\target\debug\claw.exe"
|
||||
Push-Location $workspace
|
||||
try {
|
||||
& $claw help
|
||||
& $claw status
|
||||
& $claw config env
|
||||
& $claw doctor
|
||||
} finally {
|
||||
Pop-Location
|
||||
}
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -10,3 +10,5 @@ archive/
|
||||
.claw/sessions/
|
||||
.clawhip/
|
||||
status-help.txt
|
||||
# Legacy Python port session scratch artifacts
|
||||
.port_sessions/
|
||||
|
||||
14886
.omx/cc2/board.json
Normal file
14886
.omx/cc2/board.json
Normal file
File diff suppressed because one or more lines are too long
842
.omx/cc2/board.md
Normal file
842
.omx/cc2/board.md
Normal file
File diff suppressed because one or more lines are too long
429
.omx/cc2/issue-parity-intake.json
Normal file
429
.omx/cc2/issue-parity-intake.json
Normal file
@@ -0,0 +1,429 @@
|
||||
{
|
||||
"schema_version": "cc2.issue_parity_intake.v1",
|
||||
"generated_at": "2026-05-14T08:02:00Z",
|
||||
"task_id": "3",
|
||||
"owner": "worker-2",
|
||||
"goal": "G001-stream0-board",
|
||||
"notes": [
|
||||
"Leader owns Ultragoal; this artifact does not mutate .omx/ultragoal.",
|
||||
"Rows are scoped intake/classification evidence for Worker 1/Task 2 board integration."
|
||||
],
|
||||
"source_manifest": {
|
||||
"claw_open_latest": {
|
||||
"path": ".omx/research/claw-open-latest.json",
|
||||
"sha256_prefix_from_plan": "89e3e027fa735f38",
|
||||
"covered_issue_numbers": [3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038]
|
||||
},
|
||||
"claw_issues": {
|
||||
"path": ".omx/research/claw-issues.json",
|
||||
"sha256_prefix_from_plan": "e64fdba7df3b78ed",
|
||||
"covered_issue_numbers": [2997, 3003, 3004, 3005, 3006, 3007, 3020, 3023]
|
||||
},
|
||||
"opencode": {
|
||||
"repo_path": ".omx/research/repos/opencode",
|
||||
"metadata_path": ".omx/research/opencode-repo.json",
|
||||
"issues_path": ".omx/research/opencode-issues.json",
|
||||
"head_from_plan": "27ac53aaacc677b1401c4e75ca7a7dadf8b2c349"
|
||||
},
|
||||
"codex": {
|
||||
"repo_path": ".omx/research/repos/codex",
|
||||
"metadata_path": ".omx/research/codex-repo.json",
|
||||
"issues_path": ".omx/research/codex-issues.json",
|
||||
"head_from_plan": "6a225e4005209f2325ab3c681c7c6beba2907d4d"
|
||||
}
|
||||
},
|
||||
"issue_clusters": [
|
||||
{
|
||||
"id": "CC2-ISSUE-3007",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3007",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3007,
|
||||
"title": "Permission modes do not enforce path scope on file tools or shell expansion in bash",
|
||||
"theme": "security/path-scope",
|
||||
"release_bucket": "alpha_blocker",
|
||||
"lifecycle_status": "active",
|
||||
"roadmap_anchor": "ROADMAP.md#11-policy-engine-for-autonomous-coding; ROADMAP.md#9-green-ness-contract",
|
||||
"dependencies": ["permission path canonicalization", "file tool target validation", "bash command/path validation reachability", "policy regression fixtures"],
|
||||
"verification_required": ["workspace-write cannot read/write/delete outside workspace", "shell expansion and symlink traversal are rejected or policy-blocked", "file tools and bash use the same target-scope decision record"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Security/sandbox escape class; plan names #3007 as alpha blocker."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3020",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3020",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3020,
|
||||
"title": "OpenAI-compatible model IDs with slashes are stripped before request",
|
||||
"theme": "provider/model-routing",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#provider-routing-model-name-prefix-must-win-over-env-var-presence-fixed-2026-04-08-0530c50",
|
||||
"dependencies": ["provider profile contract", "wire model-id preservation option", "routing-prefix source reporting"],
|
||||
"verification_required": ["OpenAI-compatible endpoint receives exact model id when preservation is enabled", "status JSON reports raw model input, route, and wire model id"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Core provider correctness but below alpha state/security contracts unless it blocks the selected alpha model path."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3006",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3006",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3006,
|
||||
"title": "Not Working in windows",
|
||||
"theme": "windows/install",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["Windows support policy", "PowerShell install path", "dependency/version matrix", "diagnostic setup output"],
|
||||
"verification_required": ["fresh Windows/PowerShell setup smoke documented", "unsupported native paths fail with actionable WSL2/native guidance"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Real adoption blocker; plan places Windows/install in beta adoption overlay."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3005",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3005",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3005,
|
||||
"title": "DeepSeek V4-flash/pro fails with 400 Bad Request (missing reasoning_content) while deepseek-reasoner works",
|
||||
"theme": "provider/response-shape",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#5-failure-taxonomy; ROADMAP.md#provider-routing-model-name-prefix-must-win-over-env-var-presence-fixed-2026-04-08-0530c50",
|
||||
"dependencies": ["OpenAI-compatible diagnostics playbook", "provider error taxonomy", "reasoning/thinking field compatibility tests"],
|
||||
"verification_required": ["provider 400 response classified with actionable remediation", "DeepSeek-compatible response-shape fixture does not hide assistant output"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Provider compatibility issue that shares the #3032 diagnostics lane."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3004",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3004",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3004,
|
||||
"title": "When can we adapt to zed?",
|
||||
"theme": "ide/acp",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#phase-5-plugin-and-mcp-lifecycle-maturity",
|
||||
"dependencies": ["stable session/control API", "plugin/MCP lifecycle", "engine API or ACP bridge decision"],
|
||||
"verification_required": ["Zed/ACP smoke once core state/control contracts exist"],
|
||||
"deferral_rationale": "IDE integration is valuable but should wait until boot/session/event/control truth surfaces are stable.",
|
||||
"classification_rationale": "Matches plan's GA ecosystem lane for Zed/ACP."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3003",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3003",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3003,
|
||||
"title": ".claude/sessions should not be submitted to repo",
|
||||
"theme": "session-hygiene/gitignore",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#9-green-ness-contract; ROADMAP.md#8-recovery-recipes-for-common-failures",
|
||||
"dependencies": ["artifact ignore policy", "session storage boundary docs", "repo hygiene check"],
|
||||
"verification_required": ["session directories are ignored", "status/doctor warns about tracked session artifacts"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Small but user-visible session hygiene and data-leak prevention item."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-2997",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/2997",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 2997,
|
||||
"title": "License?",
|
||||
"theme": "docs/license",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["maintainer license decision", "LICENSE file", "README/USAGE attribution wording"],
|
||||
"verification_required": ["repository license file exists", "package metadata and docs reference the same license"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Adoption/readiness documentation gap; requires maintainer decision before implementation."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3023",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3023",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3023,
|
||||
"title": "Protect claw-code from AI slop PRs",
|
||||
"theme": "repo-hygiene/anti-slop",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["contributor policy", "PR quality gate selection", "false-positive review escape hatch"],
|
||||
"verification_required": ["selected PR quality gate runs on sample good/bad PR fixtures", "maintainers can override false positives"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Protects project throughput but should not precede alpha core safety contracts."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3028",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3028",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3028,
|
||||
"title": "docs: add navigation and file-context usage guide",
|
||||
"theme": "docs/navigation-context",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#7-human-ux-still-leaks-into-claw-workflows",
|
||||
"dependencies": ["current TUI/shell key behavior inventory", "file context syntax docs", "secret-handling guidance"],
|
||||
"verification_required": ["docs include terminal history, scrollback, @file context, attach/external file caveats", "examples work against current CLI"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Documentation support item from latest open issue refresh."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3029",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3029",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3029,
|
||||
"title": "build: add cross-platform installer path and release artifact quickstart",
|
||||
"theme": "install/distribution",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["release artifact policy", "install.sh/install.ps1 contract", "PATH/update/uninstall instructions"],
|
||||
"verification_required": ["install quickstart smoke on supported OS/arch", "failed install prints actionable diagnostics"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Distribution friction belongs in adoption overlay."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3030",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3030",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3030,
|
||||
"title": "feat: make provider/model setup less env-var-driven",
|
||||
"theme": "provider/setup-profiles",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#3-structured-session-control-api; ROADMAP.md#145-boot-preflight-doctor-contract",
|
||||
"dependencies": ["provider profiles", "setup wizard or dry-run", "secret redaction", "base-url/model smoke test"],
|
||||
"verification_required": ["setup validates provider route without echoing keys", "session-only versus persisted profile behavior is explicit"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Directly reduces current provider setup support churn."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3031",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3031",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3031,
|
||||
"title": "feat: auto-compact or clearly recover from context-window provider errors",
|
||||
"theme": "session-recovery/context-window",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#8-recovery-recipes-for-common-failures; ROADMAP.md#158-compact_messages_if_needed-drops-turns-silently-no-structured-compaction-event-emitted",
|
||||
"dependencies": ["provider error classifier", "safe compact retry policy", "compaction event/audit trail", "retry loop cap"],
|
||||
"verification_required": ["context-window error either compacts+retries once safely or emits exact recovery command", "compaction event is machine-visible"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Recovery reliability item; promoted only if selected alpha provider path hits it."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3032",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3032",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3032,
|
||||
"title": "docs: add OpenAI-compatible/local provider diagnostics playbook",
|
||||
"theme": "provider/diagnostics-docs",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#5-failure-taxonomy",
|
||||
"dependencies": ["raw chat-completions smoke tests", "tool-call response-shape examples", "provider failure taxonomy"],
|
||||
"verification_required": ["playbook distinguishes Claw bugs from wrapper/tool-call-shape bugs", "curl examples cover non-streaming and streaming tool calls"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Shared diagnostic lane for #3005/#3020/local model reports."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3033",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3033",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3033,
|
||||
"title": "feat: add minimal claw serve JSON-RPC engine API",
|
||||
"theme": "engine-api/control-plane",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#3-structured-session-control-api; ROADMAP.md#phase-4-claws-first-task-execution",
|
||||
"dependencies": ["stable session state API", "event schema v1", "permission policy contract", "cancel/prompt stream semantics"],
|
||||
"verification_required": ["protocol conformance fixtures for session/create prompt/stream cancel error", "capability negotiation backwards compatibility"],
|
||||
"deferral_rationale": "Engine API should expose, not invent, stable core control-plane semantics after alpha contracts land.",
|
||||
"classification_rationale": "Useful integration surface but too broad for alpha unless narrowed to existing session control API."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3034",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3034",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3034,
|
||||
"title": "docs: define evidence-gated Hermes handoff loop for Claw Code execution",
|
||||
"theme": "sdlc/evidence-handoff",
|
||||
"release_bucket": "post_2_0_research",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#4-canonical-lane-event-schema; ROADMAP.md#10-typed-task-packet-format",
|
||||
"dependencies": ["typed task packet", "evidence bundle schema", "report gate status vocabulary"],
|
||||
"verification_required": ["handoff packet fixture validates scope/success/test evidence fields", "post-flight gate consumes evidence instead of free-text summary"],
|
||||
"deferral_rationale": "Can inform event/report/task contracts, but Hermes-specific loop should stay research/docs until core schemas are stable.",
|
||||
"classification_rationale": "Only the generic evidence-gated contract is Claw 2.0; Hermes branding is not core."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3035",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3035",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3035,
|
||||
"title": "fix: improve compacted session resume discoverability",
|
||||
"theme": "session-resume/discoverability",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#8-recovery-recipes-for-common-failures; ROADMAP.md#160-session_store-has-no-list_sessions-delete_session-or-session_exists",
|
||||
"dependencies": ["session enumeration", "latest-session workspace search boundary", "compacted session marker"],
|
||||
"verification_required": ["/resume latest finds newest eligible compacted session", "/session or status lists resumable compacted sessions with path/id"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Session recovery/adoption item."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3036",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3036",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3036,
|
||||
"title": "docs: add official Ollama/llama.cpp/vLLM local model examples",
|
||||
"theme": "provider/local-docs",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#145-boot-preflight-doctor-contract; ROADMAP.md#5-failure-taxonomy",
|
||||
"dependencies": ["known-good local provider examples", "raw /v1 smoke test", "tool-call limitation warning"],
|
||||
"verification_required": ["docs include Ollama/llama.cpp/vLLM examples and HELLO smoke", "tool-call caveats are explicit"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Local provider adoption support."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3037",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3037",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3037,
|
||||
"title": "docs: clarify Claw Code positioning as multi-provider Claude-Code-shaped runtime",
|
||||
"theme": "docs/product-positioning",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#goal; ROADMAP.md#definition-of-clawable",
|
||||
"dependencies": ["README positioning copy", "provider support truth table", "identity leak bug policy"],
|
||||
"verification_required": ["README/docs answer Claude-only question directly", "provider support wording matches implemented routes"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Clarifies product identity for adoption without broad implementation."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3038",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3038",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3038,
|
||||
"title": "roadmap: track skills/plugins/marketplace ecosystem gap after core UX stabilizes",
|
||||
"theme": "plugin-marketplace/ecosystem",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#13-first-class-pluginmcp-lifecycle-contract; ROADMAP.md#14-mcp-end-to-end-lifecycle-parity",
|
||||
"dependencies": ["plugin/MCP lifecycle contract", "extension point inventory", "discovery/install/update flow design"],
|
||||
"verification_required": ["extension point inventory exists", "marketplace work explicitly depends on core UX stabilization"],
|
||||
"deferral_rationale": "Marketplace breadth should wait until core setup/auth/provider/session UX and plugin lifecycle are reliable.",
|
||||
"classification_rationale": "Matches plan's ga_ecosystem/post-2.0 caution for marketplace parity."
|
||||
}
|
||||
],
|
||||
"parity_rows": [
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-PLUGIN-ECOSYSTEM",
|
||||
"source_anchor": "anomalyco/opencode@27ac53aa packages/app/web/desktop/plugin/sdk/extensions/zed/slack/containers plus issue #3038",
|
||||
"source_type": "repo_clone_and_local_issue",
|
||||
"title": "Plugin/skills/marketplace ecosystem inventory",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"dependencies": ["Claw plugin/MCP lifecycle contract", "current extension-point inventory"],
|
||||
"verification_required": ["inventory maps current Claw plugin/skill/MCP extension points before marketplace implementation"],
|
||||
"deferral_rationale": "Adapt ecosystem discovery only after core setup/provider/session reliability is stable."
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-PERMISSION-PRESETS",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27464 and ROADMAP.md#11-policy-engine-for-autonomous-coding",
|
||||
"source_type": "external_issue_and_roadmap",
|
||||
"title": "Quick permission preset switching mapped onto Claw policy profiles",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["policy profile model", "approval-token audit trail"],
|
||||
"verification_required": ["preset switch is visible in status/report output and cannot bypass path-scope enforcement"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-CUSTOM-PROVIDER-PARAMS",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27462 and #3030/#3032",
|
||||
"source_type": "external_issue_and_local_issue",
|
||||
"title": "Custom API parameter passthrough for provider profiles",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["provider profile schema", "secret redaction", "request audit surface"],
|
||||
"verification_required": ["custom params are schema-validated, redacted, and visible as provenance without leaking secrets"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-TODOWRITE-AUTOCOMPLETE",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27453 and ROADMAP.md#10-typed-task-packet-format",
|
||||
"source_type": "external_issue_and_roadmap",
|
||||
"title": "Task/Todo completion assistance via typed task lifecycle",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"dependencies": ["typed task packet", "task lifecycle events", "evidence-gated completion"],
|
||||
"verification_required": ["auto-complete suggestions cannot mark work complete without evidence bundle or explicit user approval"],
|
||||
"deferral_rationale": "Useful UX should follow, not precede, typed task lifecycle and evidence contract."
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-WINDOWS-DISTRIBUTION",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27476 https://github.com/anomalyco/opencode/issues/27459 https://github.com/anomalyco/opencode/issues/27470 and #3006/#3029",
|
||||
"source_type": "external_issues_and_local_issues",
|
||||
"title": "Windows/GLIBC/distribution reliability parity lessons",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["install artifact matrix", "Windows encoding guidance", "minimum Linux/GLIBC support statement"],
|
||||
"verification_required": ["release quickstart documents supported OS matrix and known terminal/encoding caveats"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-GRANULAR-PERMISSIONS",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22595 and Codex docs permissions/app/plugin concepts",
|
||||
"source_type": "external_issue_and_docs",
|
||||
"title": "Granular app/plugin permissions adapted to Claw policy engine",
|
||||
"release_bucket": "alpha_blocker",
|
||||
"lifecycle_status": "active",
|
||||
"dependencies": ["permission enforcer path-scope fix", "plugin/MCP capability model", "approval-token replay protection"],
|
||||
"verification_required": ["granular permission grants do not widen workspace path scope implicitly"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-SESSION-RECOVERY",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22619 https://github.com/openai/codex/issues/22597 https://github.com/openai/codex/issues/22593 and #3035",
|
||||
"source_type": "external_issues_and_local_issue",
|
||||
"title": "Safe local session/thread recovery without storage amplification",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["session enumeration", "resume latest boundary", "JSONL/storage compaction policy"],
|
||||
"verification_required": ["recoverable sessions are discoverable and session forks avoid unbounded duplicate history"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-PROXY-NETWORK",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22623 and #3032",
|
||||
"source_type": "external_issue_and_local_issue",
|
||||
"title": "Provider/network diagnostics include proxy behavior",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["HTTP client proxy detection", "provider diagnostics playbook"],
|
||||
"verification_required": ["diagnostics report whether proxy env/config is honored for provider calls"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-CLI-AGENT-FLAG",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22615 and ROADMAP.md#10-typed-task-packet-format",
|
||||
"source_type": "external_issue_and_roadmap",
|
||||
"title": "CLI flag for agent/subagent mode mapped to Claw typed task packets",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"dependencies": ["typed task packet", "session control API", "policy-scoped worker launch"],
|
||||
"verification_required": ["CLI agent mode cannot bypass task policy or evidence requirements"],
|
||||
"deferral_rationale": "Implement only after core task/session control contracts are stable."
|
||||
}
|
||||
],
|
||||
"coverage": {
|
||||
"required_latest_open_range_3028_3038": [3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038],
|
||||
"required_existing_issue_numbers": [3007, 3006, 3020, 3005, 3003, 2997, 3023, 3004],
|
||||
"issue_rows_expected": 19,
|
||||
"parity_rows_expected_minimum": 6
|
||||
}
|
||||
}
|
||||
47
.omx/cc2/issue-parity-intake.md
Normal file
47
.omx/cc2/issue-parity-intake.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# CC2 Issue / Parity Intake Mapping
|
||||
|
||||
Generated by `worker-2` for team task 3 (`G001 issue/parity intake mapping`). This is a board-integration fragment for Stream 0; it intentionally does **not** mutate `.omx/ultragoal`.
|
||||
|
||||
## Covered local issue clusters
|
||||
|
||||
| Issue | Theme | Bucket | Lifecycle | Board anchor |
|
||||
|---:|---|---|---|---|
|
||||
| #3007 | security/path-scope | `alpha_blocker` | `active` | Policy engine + green-ness contract |
|
||||
| #3020 | provider/model-routing | `beta_adoption` | `open` | Provider routing/model source status |
|
||||
| #3006 | windows/install | `beta_adoption` | `open` | Immediate backlog / install readiness |
|
||||
| #3005 | provider/response-shape | `beta_adoption` | `open` | Failure taxonomy / provider diagnostics |
|
||||
| #3004 | ide/acp | `ga_ecosystem` | `deferred_with_rationale` | Plugin/MCP lifecycle maturity |
|
||||
| #3003 | session-hygiene/gitignore | `beta_adoption` | `open` | Green-ness / recovery hygiene |
|
||||
| #2997 | docs/license | `beta_adoption` | `open` | Adoption docs/license readiness |
|
||||
| #3023 | repo-hygiene/anti-slop | `beta_adoption` | `open` | Immediate backlog / PR quality gate |
|
||||
| #3028 | docs/navigation-context | `beta_adoption` | `open` | Human UX leaks into claw workflows |
|
||||
| #3029 | install/distribution | `beta_adoption` | `open` | Cross-platform release quickstart |
|
||||
| #3030 | provider/setup-profiles | `beta_adoption` | `open` | Boot preflight / structured session control |
|
||||
| #3031 | session-recovery/context-window | `beta_adoption` | `open` | Recovery recipes / compaction event |
|
||||
| #3032 | provider/diagnostics-docs | `beta_adoption` | `open` | Failure taxonomy |
|
||||
| #3033 | engine-api/control-plane | `ga_ecosystem` | `deferred_with_rationale` | Structured session control API |
|
||||
| #3034 | sdlc/evidence-handoff | `post_2_0_research` | `deferred_with_rationale` | Event/report/task contract input |
|
||||
| #3035 | session-resume/discoverability | `beta_adoption` | `open` | Recovery recipes / session enumeration |
|
||||
| #3036 | provider/local-docs | `beta_adoption` | `open` | Provider setup and diagnostics docs |
|
||||
| #3037 | docs/product-positioning | `beta_adoption` | `open` | Goal / definition of clawable |
|
||||
| #3038 | plugin-marketplace/ecosystem | `ga_ecosystem` | `deferred_with_rationale` | Plugin/MCP lifecycle maturity |
|
||||
|
||||
## Parity intake rows
|
||||
|
||||
| Row | Source | Bucket | Lifecycle | Adaptation rule |
|
||||
|---|---|---|---|---|
|
||||
| `CC2-PARITY-OPENCODE-PLUGIN-ECOSYSTEM` | opencode repo + #3038 | `ga_ecosystem` | `deferred_with_rationale` | Inventory Claw extension points before marketplace work. |
|
||||
| `CC2-PARITY-OPENCODE-PERMISSION-PRESETS` | opencode #27464 | `beta_adoption` | `open` | Permission preset UX must not bypass Claw path-scope policy. |
|
||||
| `CC2-PARITY-OPENCODE-CUSTOM-PROVIDER-PARAMS` | opencode #27462 + #3030/#3032 | `beta_adoption` | `open` | Custom provider params need schema validation, redaction, and provenance. |
|
||||
| `CC2-PARITY-OPENCODE-TODOWRITE-AUTOCOMPLETE` | opencode #27453 | `ga_ecosystem` | `deferred_with_rationale` | Auto-complete task UX follows typed task lifecycle/evidence gates. |
|
||||
| `CC2-PARITY-OPENCODE-WINDOWS-DISTRIBUTION` | opencode #27476/#27459/#27470 + #3006/#3029 | `beta_adoption` | `open` | Use external pain as release-matrix and diagnostics evidence. |
|
||||
| `CC2-PARITY-CODEX-GRANULAR-PERMISSIONS` | Codex #22595 + docs | `alpha_blocker` | `active` | Adapt granular permissions only through Claw policy engine and approval tokens. |
|
||||
| `CC2-PARITY-CODEX-SESSION-RECOVERY` | Codex #22619/#22597/#22593 + #3035 | `beta_adoption` | `open` | Session discovery/recovery must avoid storage amplification. |
|
||||
| `CC2-PARITY-CODEX-PROXY-NETWORK` | Codex #22623 + #3032 | `beta_adoption` | `open` | Provider diagnostics should expose proxy behavior. |
|
||||
| `CC2-PARITY-CODEX-CLI-AGENT-FLAG` | Codex #22615 | `ga_ecosystem` | `deferred_with_rationale` | CLI agent mode waits for typed task/session control contracts. |
|
||||
|
||||
Validation command:
|
||||
|
||||
```bash
|
||||
python3 .omx/cc2/validate_issue_parity_intake.py
|
||||
```
|
||||
250
.omx/cc2/render_board_md.py
Executable file
250
.omx/cc2/render_board_md.py
Executable file
@@ -0,0 +1,250 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Render the Claw Code 2.0 canonical board JSON as a human-readable Markdown board."""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from collections import Counter, defaultdict
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
# Human-readable meaning for each lifecycle_status enum value; consumed by the
# "Lifecycle Enum Reference" table in the rendered Markdown board.
STATUS_DESCRIPTIONS = {
    "context": "Context-only heading or evidence anchor; not an implementation work item.",
    "active": "Current Claw Code 2.0 implementation surface that should remain visible on the board.",
    "open": "Actionable unresolved work that needs implementation or acceptance evidence.",
    "done_verify": "Marked as done upstream but retained for verification against current CC2 behavior.",
    "stale_done": "Historically completed or merged work that may be stale and needs freshness checks before relying on it.",
    "superseded": "Replaced by a newer item; keep as traceability context only.",
    "deferred_with_rationale": "Intentionally deferred; rationale must be present in the board item.",
    "rejected_not_claw": "Excluded because it is not Claw Code product work.",
}

# Human-readable meaning for each release_bucket enum value; consumed by the
# "Release Bucket Reference" table.
BUCKET_DESCRIPTIONS = {
    "alpha_blocker": "Must be resolved before alpha-quality autonomous coding lanes are dependable.",
    "beta_adoption": "Important for broader dogfood/adoption once alpha blockers are controlled.",
    "ga_ecosystem": "Required for mature plugin/MCP/provider ecosystem behavior.",
    "2.x_intake": "Post-2.0 intake or follow-up candidate retained for sequencing.",
    "post_2_0_research": "Research-oriented item not required for the CC2 board cut.",
    "context": "Non-actionable roadmap context.",
    "rejected_not_claw": "Explicit non-Claw rejection bucket.",
}

# Display titles for owner_lane keys. Unknown lanes fall back to the raw key
# at render time (see LANE_TITLES.get(lane, lane) in render()).
LANE_TITLES = {
    "stream_0_governance": "Stream 0 — Governance, intake, and cross-cutting roadmap triage",
    "stream_1_worker_boot_session_control": "Stream 1 — Worker boot and session control",
    "stream_2_event_reporting_contracts": "Stream 2 — Event/reporting contracts",
    "stream_3_branch_test_recovery": "Stream 3 — Branch/test recovery",
    "stream_4_claws_first_execution": "Stream 4 — Claws-first task execution",
    "stream_5_plugin_mcp_lifecycle": "Stream 5 — Plugin/MCP lifecycle",
    "adoption_overlay": "Adoption overlay — user-visible parity and release polish",
    "parity_overlay": "Parity overlay — opencode/codex comparison context",
}

# Fields every board item must carry; validate_board() reports any that are
# absent. Order is only used for stable error reporting.
REQUIRED_ITEM_FIELDS = [
    "id",
    "title",
    "source_anchor",
    "source_type",
    "release_bucket",
    "lifecycle_status",
    "dependencies",
    "verification_required",
    "deferral_rationale",
]
|
||||
|
||||
|
||||
def load_board(path: Path) -> dict[str, Any]:
    """Load the canonical board JSON and validate its top-level shape.

    Args:
        path: Filesystem path to the board JSON document.

    Returns:
        The parsed board object (guaranteed to be a dict with an ``items`` list).

    Raises:
        ValueError: If the JSON root is not an object or lacks an ``items`` array.
    """
    # JSON interchange is defined as UTF-8; pin the encoding instead of relying
    # on the platform locale default, which may misdecode non-ASCII content.
    with path.open(encoding="utf-8") as f:
        board = json.load(f)
    if not isinstance(board, dict):
        raise ValueError("board JSON root must be an object")
    items = board.get("items")
    if not isinstance(items, list):
        raise ValueError("board JSON must contain an items array")
    return board
|
||||
|
||||
|
||||
def validate_board(board: dict[str, Any]) -> list[str]:
    """Check the parsed board for structural problems.

    Verifies roadmap coverage counters, required per-item fields, unique item
    ids, enum membership for lifecycle/bucket values, and the presence of a
    rationale on deferred items.

    Args:
        board: Parsed board object (root of the board JSON).

    Returns:
        Human-readable error messages; empty when the board is valid.
    """
    problems: list[str] = []

    coverage = board.get("coverage", {})
    unmapped = coverage.get("unmapped_roadmap_heading_lines")
    if unmapped:
        problems.append(f"unmapped roadmap heading lines: {unmapped}")
    if coverage.get("roadmap_headings_mapped") != coverage.get("roadmap_headings_total"):
        problems.append("roadmap heading coverage is incomplete")
    if coverage.get("roadmap_actions_mapped") != coverage.get("roadmap_actions_total"):
        problems.append("roadmap ordered-action coverage is incomplete")

    policy = board.get("generation_policy", {})
    legal_statuses = set(policy.get("status_values", []))
    legal_buckets = set(policy.get("release_buckets", []))
    known_ids: set[str] = set()

    for position, item in enumerate(board["items"], 1):
        for field in (f for f in REQUIRED_ITEM_FIELDS if f not in item):
            problems.append(f"item {position} missing required field {field}")

        item_id = item.get("id")
        if item_id in known_ids:
            problems.append(f"duplicate item id {item_id}")
        known_ids.add(item_id)

        status = item.get("lifecycle_status")
        bucket = item.get("release_bucket")
        # An empty allow-list means the policy section is absent; enum
        # membership checks are skipped in that case.
        if legal_statuses and status not in legal_statuses:
            problems.append(f"{item_id} has unknown lifecycle_status {status!r}")
        if legal_buckets and bucket not in legal_buckets:
            problems.append(f"{item_id} has unknown release_bucket {bucket!r}")
        if status == "deferred_with_rationale" and not str(item.get("deferral_rationale", "")).strip():
            problems.append(f"{item_id} is deferred without deferral_rationale")

    return problems
|
||||
|
||||
|
||||
def table(headers: list[str], rows: list[list[Any]]) -> list[str]:
    """Render a GitHub-flavored Markdown table as a list of text lines.

    Args:
        headers: Column header labels.
        rows: Row data; each cell is stringified with ``str``.

    Returns:
        Lines for the header, the separator, then one line per row.
    """
    def fmt(cells: list[str]) -> str:
        return "| " + " | ".join(cells) + " |"

    rendered = [fmt(headers), fmt(["---"] * len(headers))]
    rendered.extend(fmt([str(cell) for cell in row]) for row in rows)
    return rendered
|
||||
|
||||
|
||||
def fmt_list(value: Any) -> str:
    """Format a value for a Markdown table cell.

    Falsy values (``None``, ``""``, ``[]``, ``0``) render as ``none``; lists
    render as a comma-separated sequence of backtick-quoted entries; any
    other value is backtick-quoted directly.

    Args:
        value: Cell content of any type.

    Returns:
        The formatted cell text.
    """
    if not value:
        return "none"
    if isinstance(value, list):
        # The empty-list case is already covered by the falsy guard above,
        # so the original trailing `if value else "none"` was dead code.
        return ", ".join(f"`{v}`" for v in value)
    return f"`{value}`"
|
||||
|
||||
|
||||
def render(board: dict[str, Any]) -> str:
    """Render the validated board object as the canonical Markdown document.

    Sections, in order: header, evidence freeze, roadmap coverage summary,
    lifecycle enum reference, release bucket reference, per-stream summaries,
    source-type mix, and the full per-stream item tables.
    """
    items: list[dict[str, Any]] = board["items"]
    # NOTE(review): `summary` is read but never used below — confirm whether a
    # summary section was planned or this local can be dropped.
    summary = board.get("summary", {})
    coverage = board.get("coverage", {})
    sources = board.get("sources", {})
    policy = board.get("generation_policy", {})
    # Tally items per lane/status/bucket/source-type; missing keys fall back to
    # sentinel labels so counts always sum to len(items).
    by_lane = Counter(item.get("owner_lane", "unassigned") for item in items)
    by_status = Counter(item.get("lifecycle_status", "unknown") for item in items)
    by_bucket = Counter(item.get("release_bucket", "unknown") for item in items)
    by_source = Counter(item.get("source_type", "unknown") for item in items)

    lines: list[str] = []
    lines.append("# Claw Code 2.0 Canonical Board")
    lines.append("")
    lines.append(f"Generated from board schema: `{board.get('generated_at', 'unknown')}`")
    lines.append(f"Schema version: `{board.get('schema_version', 'unknown')}`")
    lines.append("Ultragoal mutation policy: `.omx/ultragoal` is leader-owned and was not modified by this rendering task.")
    lines.append("")

    # Evidence freeze: pinned hashes/counts of the inputs the board was cut from.
    lines.append("## Evidence Freeze")
    lines.append("")
    roadmap = sources.get("roadmap", {})
    research = sources.get("research", {})
    plan = sources.get("approved_plan", {})
    lines.extend(table(["Source", "Frozen evidence"], [
        ["Roadmap", f"`{roadmap.get('path', 'ROADMAP.md')}` sha256 prefix `{roadmap.get('sha256_prefix', 'unknown')}`; {roadmap.get('heading_count', '?')} headings; {roadmap.get('ordered_action_count', '?')} ordered actions"],
        ["Approved plan", f"`{plan.get('path', '.omx/plans/claw-code-2-0-adaptive-plan.md')}` sha256 prefix `{plan.get('sha256_prefix', 'unknown')}`"],
        ["Research bundle", f"root `{research.get('root', '.omx/research')}`; latest open issues {research.get('claw_open_latest_count', '?')}; issue corpus {research.get('claw_issues_count', '?')}; codex/opencode clone metadata included"],
    ]))
    lines.append("")

    # Roadmap coverage gates mirror the checks in validate_board(); PASS/FAIL
    # here is informational since validation already ran before rendering.
    lines.append("## Roadmap Coverage Summary")
    lines.append("")
    heading_total = coverage.get("roadmap_headings_total", 0)
    heading_mapped = coverage.get("roadmap_headings_mapped", 0)
    action_total = coverage.get("roadmap_actions_total", 0)
    action_mapped = coverage.get("roadmap_actions_mapped", 0)
    lines.extend(table(["Coverage gate", "Mapped", "Total", "Status"], [
        ["ROADMAP headings", heading_mapped, heading_total, "PASS" if heading_mapped == heading_total and not coverage.get("unmapped_roadmap_heading_lines") else "FAIL"],
        ["ROADMAP ordered actions", action_mapped, action_total, "PASS" if action_mapped == action_total else "FAIL"],
        ["Duplicate heading lines", len(coverage.get("duplicate_roadmap_heading_lines", [])), 0, "PASS" if not coverage.get("duplicate_roadmap_heading_lines") else "WARN"],
    ]))
    lines.append("")
    lines.append(f"Total canonical board items: **{len(items)}**")
    lines.append("")

    # Lifecycle enum reference: iterate in policy order when the policy block
    # declares status_values, otherwise sorted observed statuses.
    lines.append("## Lifecycle Enum Reference")
    lines.append("")
    status_rows = []
    for status in policy.get("status_values", sorted(by_status)):
        status_rows.append([f"`{status}`", by_status.get(status, 0), STATUS_DESCRIPTIONS.get(status, "Board-defined lifecycle status.")])
    lines.extend(table(["Lifecycle", "Count", "Meaning"], status_rows))
    lines.append("")

    # Release bucket reference, same policy-order-first strategy as above.
    lines.append("## Release Bucket Reference")
    lines.append("")
    bucket_rows = []
    for bucket in policy.get("release_buckets", sorted(by_bucket)):
        bucket_rows.append([f"`{bucket}`", by_bucket.get(bucket, 0), BUCKET_DESCRIPTIONS.get(bucket, "Board-defined release bucket.")])
    lines.extend(table(["Bucket", "Count", "Meaning"], bucket_rows))
    lines.append("")

    # Per-lane rollup: "active + open + done_verify" approximates the lane's
    # outstanding workload.
    lines.append("## Stream Summaries")
    lines.append("")
    lane_rows = []
    for lane, count in sorted(by_lane.items()):
        lane_items = [item for item in items if item.get("owner_lane") == lane]
        lane_status = Counter(item.get("lifecycle_status") for item in lane_items)
        open_like = lane_status.get("active", 0) + lane_status.get("open", 0) + lane_status.get("done_verify", 0)
        lane_rows.append([
            LANE_TITLES.get(lane, lane),
            count,
            open_like,
            ", ".join(f"`{k}` {v}" for k, v in sorted(lane_status.items())),
        ])
    lines.extend(table(["Stream / lane", "Items", "Active+open+verify", "Lifecycle mix"], lane_rows))
    lines.append("")

    lines.append("## Source-Type Mix")
    lines.append("")
    lines.extend(table(["Source type", "Items"], [[f"`{k}`", v] for k, v in sorted(by_source.items())]))
    lines.append("")

    # Full item detail, grouped by lane. Titles and rationales are pipe-escaped
    # so free text cannot break the Markdown table structure.
    lines.append("## Board Items by Stream")
    lines.append("")
    for lane in sorted(by_lane):
        lane_items = [item for item in items if item.get("owner_lane") == lane]
        lines.append(f"### {LANE_TITLES.get(lane, lane)}")
        lines.append("")
        lines.extend(table(
            ["ID", "Title", "Source", "Bucket", "Lifecycle", "Verification", "Dependencies", "Deferral"],
            [[
                f"`{item.get('id')}`",
                str(item.get("title", "")).replace("|", "\\|"),
                f"`{item.get('source_anchor')}` / `{item.get('source_type')}`",
                f"`{item.get('release_bucket')}`",
                f"`{item.get('lifecycle_status')}`",
                f"`{item.get('verification_required')}`",
                fmt_list(item.get("dependencies")),
                str(item.get("deferral_rationale") or "—").replace("|", "\\|"),
            ] for item in lane_items]
        ))
        lines.append("")

    # Normalize trailing whitespace to exactly one final newline so --check
    # comparisons are byte-stable.
    return "\n".join(lines).rstrip() + "\n"
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: validate the board JSON, then render or check the Markdown.

    Usage: ``render_board_md.py BOARD_JSON BOARD_MD [--check]``. Without
    ``--check`` the Markdown is (re)written; with it the on-disk file is
    compared against the fresh rendering.

    Returns:
        Process exit code: 0 on success, 1 on validation failure or a stale
        ``--check`` target.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("board_json", type=Path)
    parser.add_argument("board_md", type=Path)
    parser.add_argument("--check", action="store_true", help="fail if board_md is not up to date")
    args = parser.parse_args()

    board = load_board(args.board_json)
    errors = validate_board(board)
    if errors:
        for error in errors:
            print(f"ERROR: {error}", file=sys.stderr)
        return 1
    rendered = render(board)
    if args.check:
        # A missing target file compares as empty, i.e. always stale.
        # Pin UTF-8: the rendered board contains non-ASCII punctuation (em
        # dashes in lane titles), so locale-default decoding could differ.
        existing = args.board_md.read_text(encoding="utf-8") if args.board_md.exists() else ""
        if existing != rendered:
            print(f"ERROR: {args.board_md} is not up to date", file=sys.stderr)
            return 1
        print(f"PASS: {args.board_md} is up to date and roadmap coverage is complete")
        return 0
    args.board_md.parent.mkdir(parents=True, exist_ok=True)
    # Write with explicit UTF-8 for the same reason as the read above.
    args.board_md.write_text(rendered, encoding="utf-8")
    print(f"wrote {args.board_md}")
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s integer return value as the process exit status.
    raise SystemExit(main())
|
||||
58
.omx/cc2/validate_issue_parity_intake.py
Executable file
58
.omx/cc2/validate_issue_parity_intake.py
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Validate the worker-2 CC2 issue/parity intake fragment."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
# Repository root, assuming this script lives at <root>/.omx/cc2/.
ROOT = Path(__file__).resolve().parents[2]
# The intake fragment this validator checks.
INTAKE = ROOT / ".omx" / "cc2" / "issue-parity-intake.json"
# Exact issue-number coverage: the contiguous 3028–3038 range plus the
# previously-triaged issue numbers that must also have rows.
REQUIRED_ISSUES = set(range(3028, 3039)) | {3007, 3006, 3020, 3005, 3003, 2997, 3023, 3004}
# Legal lifecycle_status enum values for intake rows.
ALLOWED_STATUS = {
    "context",
    "active",
    "open",
    "done_verify",
    "stale_done",
    "superseded",
    "deferred_with_rationale",
    "rejected_not_claw",
}
# Legal release_bucket enum values for intake rows.
ALLOWED_BUCKETS = {"alpha_blocker", "beta_adoption", "ga_ecosystem", "post_2_0_research"}
|
||||
|
||||
|
||||
def require(condition: bool, message: str) -> None:
    """Abort the validation run unless *condition* holds.

    Args:
        condition: Predicate that must be truthy for validation to continue.
        message: Failure description, reported as ``FAIL: <message>``.

    Raises:
        SystemExit: When the condition is falsy.
    """
    if condition:
        return
    raise SystemExit(f"FAIL: {message}")
|
||||
|
||||
|
||||
def main() -> None:
    """Validate the intake fragment: issue coverage, unique ids, enum values.

    Reads the JSON at ``INTAKE`` and exits via ``require`` (SystemExit with a
    ``FAIL:`` message) on the first violation; prints a PASS summary otherwise.
    """
    # JSON is UTF-8 by specification; do not depend on the locale encoding.
    data = json.loads(INTAKE.read_text(encoding="utf-8"))
    issue_rows = data.get("issue_clusters", [])
    parity_rows = data.get("parity_rows", [])

    # Issue coverage must match REQUIRED_ISSUES exactly — nothing missing,
    # nothing extra, and no duplicate source_number rows.
    seen = {row.get("source_number") for row in issue_rows}
    missing = sorted(REQUIRED_ISSUES - seen)
    extra = sorted(seen - REQUIRED_ISSUES)
    require(not missing, f"missing required issue rows: {missing}")
    require(not extra, f"unexpected issue rows in scoped intake: {extra}")
    require(len(issue_rows) == len(REQUIRED_ISSUES), "duplicate or missing issue row count")

    # Ids must be unique across issue rows and parity rows combined.
    ids = [row.get("id") for row in issue_rows + parity_rows]
    require(len(ids) == len(set(ids)), "duplicate ids present")

    for row in issue_rows + parity_rows:
        row_id = row.get("id")
        # The required-field check runs first, so the direct indexing below
        # cannot raise KeyError for release_bucket/lifecycle_status.
        for field in ["source_anchor", "source_type", "release_bucket", "lifecycle_status", "dependencies", "verification_required"]:
            require(row.get(field) not in (None, "", []), f"{row_id} missing {field}")
        require(row["release_bucket"] in ALLOWED_BUCKETS, f"{row_id} invalid release_bucket {row['release_bucket']}")
        require(row["lifecycle_status"] in ALLOWED_STATUS, f"{row_id} invalid lifecycle_status {row['lifecycle_status']}")
        if row["lifecycle_status"] == "deferred_with_rationale":
            require(row.get("deferral_rationale"), f"{row_id} deferred without rationale")

    # Guard the coverage lookup so a malformed file fails with a clear FAIL
    # message instead of a bare KeyError traceback.
    coverage = data.get("coverage", {})
    require("parity_rows_expected_minimum" in coverage, "coverage.parity_rows_expected_minimum missing")
    require(len(parity_rows) >= coverage["parity_rows_expected_minimum"], "not enough parity rows")
    print(f"PASS issue/parity intake: {len(issue_rows)} issue rows, {len(parity_rows)} parity rows")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # main() exits via SystemExit on failure; normal completion returns None.
    main()
|
||||
583
.omx/ultragoal/g010-final-quality-gate-rerun.log
Normal file
583
.omx/ultragoal/g010-final-quality-gate-rerun.log
Normal file
@@ -0,0 +1,583 @@
|
||||
G010 final leader verification rerun started 2026-05-15T02:19:36Z
|
||||
== artifact checklist ==
|
||||
PASS docs/g010-clone-disambiguation-metadata.md exists
|
||||
PASS docs/g010-session-hygiene-verification-map.md exists
|
||||
.claw/sessions/example.jsonl
|
||||
rust/.claw/sessions/example.jsonl
|
||||
.claude/sessions/example.json
|
||||
== fmt ==
|
||||
== runtime session_control retry ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.13s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 15 tests
|
||||
test session_control::tests::latest_session_prefers_semantic_updated_at_over_file_mtime ... ok
|
||||
test session_control::tests::session_store_from_cwd_canonicalizes_equivalent_paths ... ok
|
||||
test session_control::tests::session_store_fork_stays_in_same_namespace ... ok
|
||||
test session_control::tests::session_exists_and_delete_are_scoped_to_workspace_store ... ok
|
||||
test session_control::tests::resolves_latest_alias_and_loads_session_from_workspace_root ... ok
|
||||
test session_control::tests::forks_session_into_managed_storage_with_lineage ... ok
|
||||
test session_control::tests::workspace_fingerprint_is_deterministic_and_differs_per_path ... ok
|
||||
test session_control::tests::session_store_create_and_load_round_trip ... ok
|
||||
test session_control::tests::session_store_from_cwd_isolates_sessions_by_workspace ... ok
|
||||
test session_control::tests::creates_and_lists_managed_sessions ... ok
|
||||
test session_control::tests::session_store_from_data_dir_namespaces_by_workspace ... ok
|
||||
test session_control::tests::session_store_latest_and_resolve_reference ... ok
|
||||
test session_control::tests::session_store_loads_safe_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_rejects_legacy_session_from_other_workspace ... ok
|
||||
test session_control::tests::session_store_loads_unbound_legacy_session_from_same_workspace ... ok
|
||||
|
||||
test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 542 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime jsonl safeguards ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 1 test
|
||||
test session::tests::jsonl_persistence_redacts_and_truncates_oversized_payload_fields ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 556 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime compact ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 17 tests
|
||||
test compact::tests::compaction_does_not_split_tool_use_tool_result_pair ... ok
|
||||
test compact::tests::formats_compact_summary_like_upstream ... ok
|
||||
test compact::tests::ignores_existing_compacted_summary_when_deciding_to_recompact ... ok
|
||||
test compact::tests::infers_pending_work_from_recent_messages ... ok
|
||||
test compact::tests::extracts_key_files_from_message_content ... ok
|
||||
test compact::tests::leaves_small_sessions_unchanged ... ok
|
||||
test compact::tests::truncates_long_blocks_in_summary ... ok
|
||||
test conversation::tests::auto_compaction_threshold_defaults_and_parses_values ... ok
|
||||
test compact::tests::compacts_older_messages_into_a_system_summary ... ok
|
||||
test conversation::tests::compaction_health_probe_skips_empty_compacted_session ... ok
|
||||
test conversation::tests::compaction_health_probe_blocks_turn_when_tool_executor_is_broken ... ok
|
||||
test conversation::tests::auto_compacts_when_cumulative_input_threshold_is_crossed ... ok
|
||||
test conversation::tests::skips_auto_compaction_below_threshold ... ok
|
||||
test prompt::tests::displays_context_paths_compactly ... ok
|
||||
test conversation::tests::compacts_session_after_turns ... ok
|
||||
test compact::tests::keeps_previous_compacted_context_when_compacting_again ... ok
|
||||
test session::tests::persists_compaction_metadata ... ok
|
||||
|
||||
test result: ok. 17 passed; 0 failed; 0 ignored; 0 measured; 540 filtered out; finished in 0.01s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== commands parses_supported_slash_commands ==
|
||||
Compiling commands v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/commands)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 2.34s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/commands-0104b50ff2e54ccc)
|
||||
|
||||
running 1 test
|
||||
test tests::parses_supported_slash_commands ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 41 filtered out; finished in 0.00s
|
||||
|
||||
== commands compacts_sessions_via_slash_command ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/commands-0104b50ff2e54ccc)
|
||||
|
||||
running 1 test
|
||||
test tests::compacts_sessions_via_slash_command ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 41 filtered out; finished in 0.00s
|
||||
|
||||
== cli session json contracts ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 3.47s
|
||||
Running unittests src/main.rs (rust/target/debug/deps/claw-f425f0b21e915b27)
|
||||
|
||||
running 1 test
|
||||
test tests::session_exists_resume_command_reports_json_contract ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 193 filtered out; finished in 0.00s
|
||||
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 2.76s
|
||||
Running unittests src/main.rs (rust/target/debug/deps/claw-f425f0b21e915b27)
|
||||
|
||||
running 1 test
|
||||
test tests::resumed_session_exists_and_delete_have_json_contracts ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 193 filtered out; finished in 0.01s
|
||||
|
||||
== cli resume slash commands ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 4.23s
|
||||
Running tests/resume_slash_commands.rs (rust/target/debug/deps/resume_slash_commands-6c1fb347be3842ef)
|
||||
|
||||
running 12 tests
|
||||
test resumed_stub_command_emits_not_implemented_json ... ok
|
||||
test resumed_help_command_emits_structured_json ... ok
|
||||
test resumed_no_command_emits_restored_json ... ok
|
||||
test resumed_sandbox_command_emits_structured_json_when_requested ... ok
|
||||
test resumed_export_command_emits_structured_json ... ok
|
||||
test resumed_config_command_loads_settings_files_end_to_end ... ok
|
||||
test resumed_binary_accepts_slash_commands_with_arguments ... ok
|
||||
test resumed_version_command_emits_structured_json ... ok
|
||||
test resumed_status_surfaces_persisted_model ... ok
|
||||
test resume_latest_restores_the_most_recent_managed_session ... ok
|
||||
test status_command_applies_cli_flags_end_to_end ... ok
|
||||
test resumed_status_command_emits_structured_json_when_requested ... ok
|
||||
|
||||
test result: ok. 12 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 2.25s
|
||||
|
||||
== cli compact output ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 2.72s
|
||||
Running tests/compact_output.rs (rust/target/debug/deps/compact_output-988ab05f11fedc49)
|
||||
|
||||
running 4 tests
|
||||
test compact_flag_with_json_output_emits_structured_json ... ok
|
||||
test compact_flag_streaming_text_only_emits_final_message_text ... ok
|
||||
test compact_flag_prints_only_final_assistant_text_without_tool_call_details ... ok
|
||||
test text_prompt_mode_prints_final_assistant_text_after_spinner ... ok
|
||||
|
||||
test result: ok. 4 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 2.14s
|
||||
|
||||
== workspace check ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.49s
|
||||
== diff check ==
|
||||
G010 final leader verification rerun completed 2026-05-15T02:20:06Z
|
||||
644
.omx/ultragoal/g010-final-quality-gate.log
Normal file
644
.omx/ultragoal/g010-final-quality-gate.log
Normal file
@@ -0,0 +1,644 @@
|
||||
G010 final leader verification started 2026-05-15T02:17:45Z
|
||||
== artifact checklist ==
|
||||
PASS docs/g010-clone-disambiguation-metadata.md exists
|
||||
PASS docs/g010-session-hygiene-verification-map.md exists
|
||||
.gitignore:.claw/sessions/
|
||||
rust/.gitignore:.claw/sessions/
|
||||
.claw/sessions/example.jsonl
|
||||
rust/.claw/sessions/example.jsonl
|
||||
.claude/sessions/example.json
|
||||
== fmt ==
|
||||
== runtime session_control ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.14s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 15 tests
|
||||
test session_control::tests::latest_session_prefers_semantic_updated_at_over_file_mtime ... ok
|
||||
test session_control::tests::session_store_from_cwd_canonicalizes_equivalent_paths ... ok
|
||||
|
||||
thread 'session_control::tests::session_store_fork_stays_in_same_namespace' (403821665) panicked at crates/runtime/src/session_control.rs:775:14:
|
||||
session should persist: Io(Os { code: 2, kind: NotFound, message: "No such file or directory" })
|
||||
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
|
||||
test session_control::tests::session_exists_and_delete_are_scoped_to_workspace_store ... ok
|
||||
test session_control::tests::forks_session_into_managed_storage_with_lineage ... ok
|
||||
test session_control::tests::creates_and_lists_managed_sessions ... ok
|
||||
test session_control::tests::session_store_from_cwd_isolates_sessions_by_workspace ... ok
|
||||
test session_control::tests::session_store_latest_and_resolve_reference ... ok
|
||||
test session_control::tests::session_store_loads_safe_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::workspace_fingerprint_is_deterministic_and_differs_per_path ... ok
|
||||
test session_control::tests::session_store_create_and_load_round_trip ... ok
|
||||
test session_control::tests::session_store_fork_stays_in_same_namespace ... FAILED
|
||||
test session_control::tests::session_store_loads_unbound_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_from_data_dir_namespaces_by_workspace ... ok
|
||||
test session_control::tests::session_store_rejects_legacy_session_from_other_workspace ... ok
|
||||
test session_control::tests::resolves_latest_alias_and_loads_session_from_workspace_root ... ok
|
||||
|
||||
failures:
|
||||
|
||||
failures:
|
||||
session_control::tests::session_store_fork_stays_in_same_namespace
|
||||
|
||||
test result: FAILED. 14 passed; 1 failed; 0 ignored; 0 measured; 542 filtered out; finished in 0.03s
|
||||
|
||||
error: test failed, to rerun pass `-p runtime --lib`
|
||||
== runtime jsonl safeguards ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.13s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 1 test
|
||||
test session::tests::jsonl_persistence_redacts_and_truncates_oversized_payload_fields ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 556 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime compact ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.13s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 17 tests
|
||||
test compact::tests::formats_compact_summary_like_upstream ... ok
|
||||
test compact::tests::truncates_long_blocks_in_summary ... ok
|
||||
test compact::tests::ignores_existing_compacted_summary_when_deciding_to_recompact ... ok
|
||||
test compact::tests::infers_pending_work_from_recent_messages ... ok
|
||||
test compact::tests::compaction_does_not_split_tool_use_tool_result_pair ... ok
|
||||
test compact::tests::leaves_small_sessions_unchanged ... ok
|
||||
test conversation::tests::auto_compaction_threshold_defaults_and_parses_values ... ok
|
||||
test compact::tests::extracts_key_files_from_message_content ... ok
|
||||
test compact::tests::compacts_older_messages_into_a_system_summary ... ok
|
||||
test prompt::tests::displays_context_paths_compactly ... ok
|
||||
test conversation::tests::skips_auto_compaction_below_threshold ... ok
|
||||
test conversation::tests::compaction_health_probe_skips_empty_compacted_session ... ok
|
||||
test conversation::tests::compacts_session_after_turns ... ok
|
||||
test conversation::tests::compaction_health_probe_blocks_turn_when_tool_executor_is_broken ... ok
|
||||
test conversation::tests::auto_compacts_when_cumulative_input_threshold_is_crossed ... ok
|
||||
test compact::tests::keeps_previous_compacted_context_when_compacting_again ... ok
|
||||
test session::tests::persists_compaction_metadata ... ok
|
||||
|
||||
test result: ok. 17 passed; 0 failed; 0 ignored; 0 measured; 540 filtered out; finished in 0.01s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== commands session/compact slash ==
|
||||
error: unexpected argument 'compacts_sessions_via_slash_command' found
|
||||
|
||||
Usage: cargo test [OPTIONS] [TESTNAME] [-- [ARGS]...]
|
||||
|
||||
For more information, try '--help'.
|
||||
== cli session json contracts ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw" test) due to 1 previous error
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw" test) due to 1 previous error
|
||||
== cli resume slash commands ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== cli compact output ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== workspace check ==
|
||||
Checking runtime v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/runtime)
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Checking api v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/api)
|
||||
Checking commands v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/commands)
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Checking tools v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/tools)
|
||||
Checking mock-anthropic-service v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/mock-anthropic-service)
|
||||
Checking compat-harness v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/compat-harness)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== diff check ==
|
||||
G010 final leader verification completed 2026-05-15T02:18:11Z
|
||||
321
.omx/ultragoal/g010-leader-verify.log
Normal file
321
.omx/ultragoal/g010-leader-verify.log
Normal file
@@ -0,0 +1,321 @@
|
||||
== fmt ==
|
||||
== runtime session_control ==
|
||||
Compiling runtime v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/runtime)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 10.29s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 15 tests
|
||||
test session_control::tests::latest_session_prefers_semantic_updated_at_over_file_mtime ... ok
|
||||
test session_control::tests::session_store_from_cwd_canonicalizes_equivalent_paths ... ok
|
||||
test session_control::tests::session_store_create_and_load_round_trip ... ok
|
||||
test session_control::tests::session_exists_and_delete_are_scoped_to_workspace_store ... ok
|
||||
test session_control::tests::forks_session_into_managed_storage_with_lineage ... ok
|
||||
test session_control::tests::workspace_fingerprint_is_deterministic_and_differs_per_path ... ok
|
||||
test session_control::tests::session_store_from_cwd_isolates_sessions_by_workspace ... ok
|
||||
test session_control::tests::creates_and_lists_managed_sessions ... ok
|
||||
test session_control::tests::session_store_fork_stays_in_same_namespace ... ok
|
||||
test session_control::tests::session_store_from_data_dir_namespaces_by_workspace ... ok
|
||||
test session_control::tests::session_store_latest_and_resolve_reference ... ok
|
||||
test session_control::tests::session_store_loads_safe_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_loads_unbound_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_rejects_legacy_session_from_other_workspace ... ok
|
||||
test session_control::tests::resolves_latest_alias_and_loads_session_from_workspace_root ... ok
|
||||
|
||||
test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 542 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime session jsonl/bloat ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.18s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 8 tests
|
||||
test session::tests::rejects_jsonl_record_with_unknown_type ... ok
|
||||
test session::tests::rejects_jsonl_message_record_without_message_payload ... ok
|
||||
test session::tests::rejects_jsonl_record_without_type ... ok
|
||||
test session::tests::persists_assistant_thinking_block_round_trip_through_jsonl ... ok
|
||||
test session::tests::persists_and_restores_session_jsonl ... ok
|
||||
test conversation::tests::persists_conversation_turn_messages_to_jsonl_session ... ok
|
||||
test session::tests::appends_messages_to_persisted_jsonl_session ... ok
|
||||
test session::tests::jsonl_persistence_redacts_and_truncates_oversized_payload_fields ... ok
|
||||
|
||||
test result: ok. 8 passed; 0 failed; 0 ignored; 0 measured; 549 filtered out; finished in 0.04s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime compact ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 17 tests
|
||||
test compact::tests::formats_compact_summary_like_upstream ... ok
|
||||
test compact::tests::ignores_existing_compacted_summary_when_deciding_to_recompact ... ok
|
||||
test compact::tests::compaction_does_not_split_tool_use_tool_result_pair ... ok
|
||||
test compact::tests::leaves_small_sessions_unchanged ... ok
|
||||
test compact::tests::infers_pending_work_from_recent_messages ... ok
|
||||
test compact::tests::truncates_long_blocks_in_summary ... ok
|
||||
test conversation::tests::auto_compaction_threshold_defaults_and_parses_values ... ok
|
||||
test compact::tests::extracts_key_files_from_message_content ... ok
|
||||
test compact::tests::compacts_older_messages_into_a_system_summary ... ok
|
||||
test conversation::tests::compaction_health_probe_blocks_turn_when_tool_executor_is_broken ... ok
|
||||
test conversation::tests::skips_auto_compaction_below_threshold ... ok
|
||||
test conversation::tests::auto_compacts_when_cumulative_input_threshold_is_crossed ... ok
|
||||
test conversation::tests::compaction_health_probe_skips_empty_compacted_session ... ok
|
||||
test conversation::tests::compacts_session_after_turns ... ok
|
||||
test prompt::tests::displays_context_paths_compactly ... ok
|
||||
test compact::tests::keeps_previous_compacted_context_when_compacting_again ... ok
|
||||
test session::tests::persists_compaction_metadata ... ok
|
||||
|
||||
test result: ok. 17 passed; 0 failed; 0 ignored; 0 measured; 540 filtered out; finished in 0.01s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== cli resume_slash_commands ==
|
||||
Compiling runtime v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/runtime)
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Compiling api v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/api)
|
||||
Compiling commands v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/commands)
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Compiling tools v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/tools)
|
||||
Compiling mock-anthropic-service v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/mock-anthropic-service)
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling compat-harness v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/compat-harness)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: None, .. }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3794:11
|
||||
|
|
||||
3794 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: None, .. }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4197 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4198 ~ &SlashCommand::Session { action: None, .. } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== cli compact_output ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: None, .. }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3794:11
|
||||
|
|
||||
3794 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: None, .. }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4197 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4198 ~ &SlashCommand::Session { action: None, .. } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== diff check ==
|
||||
@@ -0,0 +1 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4536320,"timeUsedSeconds":13975,"createdAt":1778745278,"updatedAt":1778810208},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -0,0 +1 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4747486,"timeUsedSeconds":14669,"createdAt":1778745278,"updatedAt":1778810902},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -0,0 +1 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4771357,"timeUsedSeconds":14733,"createdAt":1778745278,"updatedAt":1778810966},"remainingTokens":null,"completionBudgetReport":null}
|
||||
1
.omx/ultragoal/get-goal-G010-session-hygiene.active.json
Normal file
1
.omx/ultragoal/get-goal-G010-session-hygiene.active.json
Normal file
@@ -0,0 +1 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4726793,"timeUsedSeconds":14653,"createdAt":1778745278,"updatedAt":1778810885},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -0,0 +1 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":5024990,"timeUsedSeconds":15387,"createdAt":1778745278,"updatedAt":1778811620},"remainingTokens":null,"completionBudgetReport":null}
|
||||
154
.omx/ultragoal/goals.json
Normal file
154
.omx/ultragoal/goals.json
Normal file
@@ -0,0 +1,154 @@
|
||||
{
|
||||
"version": 1,
|
||||
"createdAt": "2026-05-14T07:53:46.061Z",
|
||||
"updatedAt": "2026-05-15T04:38:54.887Z",
|
||||
"briefPath": ".omx/ultragoal/brief.md",
|
||||
"goalsPath": ".omx/ultragoal/goals.json",
|
||||
"ledgerPath": ".omx/ultragoal/ledger.jsonl",
|
||||
"codexGoalMode": "aggregate",
|
||||
"goals": [
|
||||
{
|
||||
"id": "G001-stream0-board",
|
||||
"title": "Stream 0: Generate canonical CC2 board",
|
||||
"objective": "Generate the canonical Claw Code 2.0 board from frozen ROADMAP.md, latest issue snapshot, parity evidence, and approved plan. Classify every actionable roadmap item and context heading with source_anchor, source_type, release_bucket, lifecycle status, dependencies, verification_required, and deferral rationale. Emit machine JSON plus human markdown.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T08:14:23.206Z",
|
||||
"startedAt": "2026-05-14T07:54:26.032Z",
|
||||
"completedAt": "2026-05-14T08:14:23.206Z",
|
||||
"evidence": "G001-stream0-board complete via team ultragoal-g001-stream-e61d2271: team status phase=team-verify, tasks 5/5 completed; worker-2 produced issue/parity intake, worker-3 produced board Markdown/rendering, worker-4 recorded validation evidence, worker-1 completed initial board artifacts. Leader reconciliation commit 45b43b5 aligned scripts/generate_cc2_board.py, scripts/validate_cc2_board.py, scripts/cc2_board.py, .omx/cc2/render_board_md.py. Evidence artifacts: .omx/cc2/board.json, .omx/cc2/board.md, .omx/cc2/issue-parity-intake.json, .omx/cc2/issue-parity-intake.md; .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl remain leader-owned. Verification passed: python3 scripts/generate_cc2_board.py; python3 scripts/validate_cc2_board.py; python3 scripts/cc2_board.py validate; python3 .omx/cc2/validate_issue_parity_intake.py; python3 .omx/cc2/render_board_md.py .omx/cc2/board.json .omx/cc2/board.md --check; python3 -m py_compile scripts/generate_cc2_board.py scripts/validate_cc2_board.py scripts/cc2_board.py .omx/cc2/validate_issue_parity_intake.py .omx/cc2/render_board_md.py; cargo check --manifest-path rust/Cargo.toml --workspace."
|
||||
},
|
||||
{
|
||||
"id": "G002-alpha-security",
|
||||
"title": "Stream 6: Day-one security and permissions gate",
|
||||
"objective": "Implement/verify alpha-blocking security scope: file tools and shell enforce workspace/path scope across direct paths, symlinks, globbing, shell expansion, worktrees, and Windows path cases. Add regression fixtures for #3007 class behavior and permission-mode event/status visibility.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T08:34:04.243Z",
|
||||
"startedAt": "2026-05-14T08:14:46.422Z",
|
||||
"completedAt": "2026-05-14T08:34:04.243Z",
|
||||
"evidence": "G002-alpha-security team ultragoal-g002-alpha-e61d2271 reached phase=complete with 5/5 tasks completed and no worker .omx/ultragoal mutation. Integrated commits through 37b2b75 on main: workspace/path enforcement in rust/crates/runtime/src/file_ops.rs, rust/crates/runtime/src/lib.rs, rust/crates/tools/src/lib.rs, regressions in rust/crates/tools/tests/path_scope_enforcement.rs and rust/crates/rusty-claude-cli/tests/output_format_contract.rs, verification map docs/g002-security-verification-map.md. Fresh leader validation passed: git diff --check; cargo fmt --manifest-path rust/Cargo.toml --all -- --check; cargo test --manifest-path rust/Cargo.toml -p tools path_scope -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p tools --test path_scope_enforcement -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime workspace_ -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract -- --nocapture; python3 -m pytest tests/test_security_scope.py -q; cargo check --manifest-path rust/Cargo.toml --workspace. .omx/ultragoal artifacts retained as leader-owned durable audit trail; fresh get_goal JSON captured at .omx/ultragoal/get-goal-G002-alpha-security.json. Known unrelated non-gating gaps from worker verification: full cargo test --workspace has pre-existing session_lifecycle_prefers_running_process_over_idle_shell failure; clippy all-targets has pre-existing runtime lint warnings."
|
||||
},
|
||||
{
|
||||
"id": "G003-boot-session",
|
||||
"title": "Stream 1: Reliable worker boot/session control",
|
||||
"objective": "Implement/verify worker lifecycle, first prompt acceptance SLA, startup-no-evidence classifier, trust resolver/default trusted roots, structured session control API, and boot preflight/doctor JSON contracts.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T08:54:40.729Z",
|
||||
"startedAt": "2026-05-14T08:34:19.605Z",
|
||||
"completedAt": "2026-05-14T08:54:40.729Z",
|
||||
"evidence": "G003-boot-session team g003-boot-session-ult-e61d2271 reached phase=complete with 5/5 tasks completed and no worker .omx/ultragoal mutation. Implemented/verified Stream 1 reliable worker boot/session control: worker lifecycle/prompt SLA and path guardrails, default trusted roots merge via runtime config and WorkerCreate, startup-no-evidence evidence/classifier timestamp coverage, structured boot preflight/status/doctor JSON, and docs/g003-boot-session-verification-map.md. Integrated/pushed through origin/main aec291c. Final leader validation passed: git diff --check; cargo fmt --manifest-path rust/Cargo.toml --all -- --check; cargo test --manifest-path rust/Cargo.toml -p runtime trusted_roots -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime trust_resolver -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime startup -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime worker_boot -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p tools worker_create_merges_config_trusted_roots -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p tools path_scope -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli boot_preflight -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli branch_freshness -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli status_json_surfaces_session_lifecycle_for_clawhip -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract -- --nocapture; cargo check --manifest-path rust/Cargo.toml --workspace; python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json; python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json. Fresh get_goal JSON captured at .omx/ultragoal/get-goal-G003-boot-session.complete.json and .omx/ultragoal goals/ledger remain leader-owned audit artifacts. 
Known non-gating gaps from worker clippy attempts are pre-existing unrelated runtime clippy warnings and full workspace tests remain deferred to final gates."
|
||||
},
|
||||
{
|
||||
"id": "G004-events-reports",
|
||||
"title": "Stream 2: Event/report contract families",
|
||||
"objective": "Implement/verify canonical lane events, ordering/provenance/identity/dedupe/ownership, report schema/projection/redaction/capability negotiation, approval-token chain, and pinpoint closure batches with golden fixtures.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T09:15:44.223Z",
|
||||
"startedAt": "2026-05-14T08:54:55.093Z",
|
||||
"completedAt": "2026-05-14T09:15:44.223Z",
|
||||
"evidence": "G004-events-reports complete: team g004-events-reports-u-e61d2271 phase complete with 7/7 tasks completed; pushed main through 879962b; leader verification passed cargo fmt --manifest-path rust/Cargo.toml --all -- --check, cargo check --manifest-path rust/Cargo.toml -p runtime, cargo test --manifest-path rust/Cargo.toml -p runtime -- --nocapture (535 unit + g004_conformance 2 + integration 12 + doctests), python3 .github/scripts/check_doc_source_of_truth.py; evidence recorded against .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl"
|
||||
},
|
||||
{
|
||||
"id": "G005-branch-recovery",
|
||||
"title": "Stream 3: Branch/test awareness and recovery",
|
||||
"objective": "Implement/verify stale branch detection before broad tests, recovery recipes and ledger, green-ness contract, test provenance, hung-test classification, and recovery/status reporting.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T12:41:48.997Z",
|
||||
"startedAt": "2026-05-14T09:16:01.781Z",
|
||||
"completedAt": "2026-05-14T12:41:48.997Z",
|
||||
"evidence": "G005-branch-recovery complete and pushed at 7426ede; team g005-branch-recovery-e61d2271 has 5/5 tasks completed; leader verification passed for branch freshness before broad tests, recovery ledger/status reporting, green-ness contract/test provenance, stale-base doctor/status consistency, hung-test classification, and docs/g005-branch-recovery-verification-map.md. Evidence recorded against .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G006-task-policy-board",
|
||||
"title": "Stream 4: Task packets, policy engine, lane board",
|
||||
"objective": "Implement/verify typed task packet schema, executable policy engine, active lane board/dashboard, running-state liveness heartbeat, and task/lane status JSON.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T00:42:05.094Z",
|
||||
"startedAt": "2026-05-14T12:41:57.815Z",
|
||||
"completedAt": "2026-05-15T00:42:05.094Z",
|
||||
"evidence": "G006-task-policy-board complete in pushed origin/main commit 65a144c; team g006-task-policy-boar-e61d2271 terminal with 5 completed/0 failed after leader reconciliation; verification map docs/g006-task-policy-board-verification-map.md plus quality gate JSON record cargo fmt/check/tests/diff/push; .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl preserved; workers did not mutate .omx/ultragoal."
|
||||
},
|
||||
{
|
||||
"id": "G007-plugin-mcp",
|
||||
"title": "Stream 5: Plugin/MCP lifecycle maturity",
|
||||
"objective": "Implement/verify plugin/MCP lifecycle states, healthy/degraded/failed startup, required vs optional behavior, malformed config consistency across status/doctor/mcp/plugins, and mock MCP/plugin tests.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T01:16:43.414Z",
|
||||
"startedAt": "2026-05-15T00:42:16.309Z",
|
||||
"completedAt": "2026-05-15T01:16:43.414Z",
|
||||
"evidence": "G007-plugin-mcp complete: team g007-plugin-mcp-ultra-e61d2271 phase complete with 13/13 tasks completed, verification passed, pushed head 2202410, and durable ultragoal artifacts updated in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G008-provider-compat",
|
||||
"title": "Stream 7: Provider/model compatibility",
|
||||
"objective": "Implement/verify OpenAI-compatible slash-containing model IDs, provider prefix routing over env sniffing, DeepSeek/reasoning diagnostics, web search/fetch behavior, proxy/custom parameter passthrough, token/cost accounting, and provider diagnostics.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T01:38:22.717Z",
|
||||
"startedAt": "2026-05-15T01:17:53.783Z",
|
||||
"completedAt": "2026-05-15T01:38:22.717Z",
|
||||
"evidence": "G008-provider-compat complete: team g008-provider-compat-e61d2271 phase complete with 5/5 tasks terminal; provider/model compatibility implemented and verified; pushed origin/main 2cac66c..8c9a05e; evidence recorded in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl plus quality gate .omx/ultragoal/quality-gate-G008-provider-compat.json."
|
||||
},
|
||||
{
|
||||
"id": "G009-windows-docs-release",
|
||||
"title": "Stream 8: Windows/install/docs/license readiness",
|
||||
"objective": "Implement/verify PowerShell-first docs, safe provider switching examples, Windows smoke CI, release artifact quickstart, license/contribution/security/support policies, and command/link validation.",
|
||||
"status": "complete",
|
||||
"attempt": 0,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T01:57:41.565Z",
|
||||
"completedAt": "2026-05-15T01:57:41.565Z",
|
||||
"evidence": "G009-windows-docs-release complete at commit 5294648 with team g009-windows-docs-rel-e61d2271 phase complete, 5/5 tasks completed; evidence in .omx/ultragoal/quality-gate-G009-windows-docs-release.json, .omx/ultragoal/get-goal-G009-windows-docs-release.complete.json, .omx/ultragoal/goals.json, and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G010-session-hygiene",
|
||||
"title": "Stream 9: Session hygiene/local state/recovery UX",
|
||||
"objective": "Implement/verify session file hygiene, .gitignore state paths, per-worktree session isolation, list/delete/exists/compact/resume, compact/provider-context recovery, JSONL payload bloat safeguards, interrupt recovery, and clone disambiguation metadata.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T02:20:46.558Z",
|
||||
"startedAt": "2026-05-15T01:59:22.219Z",
|
||||
"completedAt": "2026-05-15T02:20:46.558Z",
|
||||
"evidence": "G010-session-hygiene complete: team g010-session-hygiene-e61d2271 phase complete with 7/7 tasks completed; final verification passed in .omx/ultragoal/g010-final-quality-gate-rerun.log; durable state recorded in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G011-ecosystem-ops-ux",
|
||||
"title": "Streams 10–12: Ecosystem, issue ops, and UX laterals",
|
||||
"objective": "Implement/verify gated ACP/Zed/JSON-RPC serve plan/status, anti-slop issue/PR triage, issue templates, navigation/file-context docs, TUI/rendering/copy/paste/clickable path improvements, and defer desktop/marketplace features until contracts are stable.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T02:55:26.988Z",
|
||||
"startedAt": "2026-05-15T02:21:31.360Z",
|
||||
"completedAt": "2026-05-15T02:55:26.988Z",
|
||||
"evidence": "G011-ecosystem-ops-ux complete: team g011-ecosystem-ops-ux-e61d2271 phase=complete with 7/7 tasks completed; final pushed HEAD 1ac8ce8; verification evidence in .omx/ultragoal/g011-final-quality-gate.log and .omx/ultragoal/quality-gate-G011-ecosystem-ops-ux.json; ultragoal artifacts tracked in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G012-final-gate",
|
||||
"title": "Final release gate: Verify Claw Code 2.0 delivery",
|
||||
"objective": "Run final cross-stream quality gate: roadmap board has no unmapped actionable items, fmt/clippy/tests and focused contract suites pass, ai-slop-cleaner on changed files passes/no-ops, code-review approves, and final alpha/beta/GA readiness report is written. Final completion is blocked until docs/pr-issue-resolution-gate.md has fresh evidence showing every open PR and issue was triaged, with correct PRs merged and resolvable correct issues fixed or closed.",
|
||||
"status": "complete",
|
||||
"attempt": 0,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T04:38:54.887Z",
|
||||
"evidence": "G012-final-gate complete: team g012-final-gate-ultra-e61d2271 8/8 tasks complete; final gate log /tmp/g012-final-quality-gate-pass4.log; commit 04c2abb pushed; docs/pr-triage-g012-final-gate.json docs/pr-issue-resolution-gate.md docs/g012-final-release-readiness-report.md; .omx/ultragoal/goals.json and ledger.jsonl updated; aiSlopCleaner and codeReview evidence included in quality gate JSON.",
|
||||
"completedAt": "2026-05-15T04:38:54.887Z"
|
||||
}
|
||||
],
|
||||
"codexObjective": "Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan."
|
||||
}
|
||||
23
.omx/ultragoal/ledger.jsonl
Normal file
23
.omx/ultragoal/ledger.jsonl
Normal file
File diff suppressed because one or more lines are too long
42
.omx/ultragoal/quality-gate-G009-windows-docs-release.json
Normal file
42
.omx/ultragoal/quality-gate-G009-windows-docs-release.json
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
"goal_id": "G009-windows-docs-release",
|
||||
"timestamp_utc": "2026-05-15T01:57:16Z",
|
||||
"commit": "a3af0133e0cf8d529465950ada88623e3cf3b3f2",
|
||||
"team": "g009-windows-docs-rel-e61d2271",
|
||||
"team_phase": "complete",
|
||||
"tasks": "5/5 completed",
|
||||
"verification": {
|
||||
"release_readiness": "passed",
|
||||
"doc_source_of_truth": "passed",
|
||||
"cargo_fmt": "passed",
|
||||
"targeted_windows_no_credentials_smoke_test": "passed",
|
||||
"cargo_check_workspace": "passed with existing api dead_code warnings",
|
||||
"git_diff_check": "passed",
|
||||
"coverage_check": "passed"
|
||||
},
|
||||
"known_gaps": [
|
||||
{
|
||||
"scope": "actual GitHub windows-latest execution",
|
||||
"status": "not run locally"
|
||||
},
|
||||
{
|
||||
"scope": "full cargo test --workspace",
|
||||
"status": "known pre-existing unrelated CLI failures reported by workers; targeted changed-surface tests pass"
|
||||
}
|
||||
],
|
||||
"artifacts": [
|
||||
".github/workflows/rust-ci.yml",
|
||||
".github/workflows/release.yml",
|
||||
"docs/windows-install-release.md",
|
||||
"docs/g009-windows-docs-release-verification-map.md",
|
||||
"LICENSE",
|
||||
"CONTRIBUTING.md",
|
||||
"SECURITY.md",
|
||||
"SUPPORT.md",
|
||||
"CODE_OF_CONDUCT.md",
|
||||
".github/scripts/check_release_readiness.py",
|
||||
"/tmp/g009-final-verify.log"
|
||||
],
|
||||
"git_status": "## main...origin/main [ahead 13]",
|
||||
"log_tail": " | ^^^^^^^^^^^^^^^^^^^^\n |\n = note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default\n\nwarning: enum `ProviderFeatureSupport` is never used\n --> crates/api/src/providers/mod.rs:61:10\n |\n61 | pub enum ProviderFeatureSupport {\n | ^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: struct `ProviderCapabilityReport` is never constructed\n --> crates/api/src/providers/mod.rs:68:12\n |\n68 | pub struct ProviderCapabilityReport {\n | ^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: enum `ProviderDiagnosticSeverity` is never used\n --> crates/api/src/providers/mod.rs:88:10\n |\n88 | pub enum ProviderDiagnosticSeverity {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: struct `ProviderDiagnostic` is never constructed\n --> crates/api/src/providers/mod.rs:94:12\n |\n94 | pub struct ProviderDiagnostic {\n | ^^^^^^^^^^^^^^^^^^\n\nwarning: function `provider_capabilities_for_model` is never used\n --> crates/api/src/providers/mod.rs:384:8\n |\n384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `provider_diagnostics_for_request` is never used\n --> crates/api/src/providers/mod.rs:452:8\n |\n452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `metadata_for_provider_kind` is never used\n --> crates/api/src/providers/mod.rs:517:4\n |\n517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `provider_label` is never used\n --> crates/api/src/providers/mod.rs:541:10\n |\n541 | const fn provider_label(provider: ProviderKind) -> &'static str {\n | ^^^^^^^^^^^^^^\n\nwarning: function `has_openai_tuning_parameters` is never used\n --> crates/api/src/providers/mod.rs:550:4\n |\n550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function 
`declares_tool` is never used\n --> crates/api/src/providers/mod.rs:558:4\n |\n558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {\n | ^^^^^^^^^^^^^\n\nwarning: function `web_passthrough_diagnostic` is never used\n --> crates/api/src/providers/mod.rs:567:4\n |\n567 | fn web_passthrough_diagnostic(\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `strip_routing_prefix` is never used\n --> crates/api/src/providers/openai_compat.rs:901:4\n |\n901 | fn strip_routing_prefix(model: &str) -> &str {\n | ^^^^^^^^^^^^^^^^^^^^\n\nwarning: `api` (lib) generated 13 warnings\n Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)\n Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.35s\nG009 coverage check passed"
|
||||
}
|
||||
32
.omx/ultragoal/quality-gate-G010-session-hygiene.json
Normal file
32
.omx/ultragoal/quality-gate-G010-session-hygiene.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"goal_id": "G010-session-hygiene",
|
||||
"status": "passed",
|
||||
"team": "g010-session-hygiene-e61d2271",
|
||||
"team_phase": "complete",
|
||||
"tasks": {"completed": 7, "failed": 0, "blocked": 0, "pending": 0, "in_progress": 0},
|
||||
"evidence": [
|
||||
".omx/ultragoal/g010-final-quality-gate-rerun.log",
|
||||
"docs/g010-clone-disambiguation-metadata.md",
|
||||
"docs/g010-session-hygiene-verification-map.md",
|
||||
".omx/ultragoal/goals.json",
|
||||
".omx/ultragoal/ledger.jsonl"
|
||||
],
|
||||
"verification_passed": [
|
||||
"cargo fmt --manifest-path rust/Cargo.toml --all -- --check",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p runtime session_control -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p runtime jsonl_persistence_redacts_and_truncates_oversized_payload_fields -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p runtime compact -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p commands parses_supported_slash_commands -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p commands compacts_sessions_via_slash_command -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --bin claw session_exists_resume_command_reports_json_contract -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --bin claw resumed_session_exists_and_delete_have_json_contracts -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test resume_slash_commands -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test compact_output -- --nocapture",
|
||||
"cargo check --manifest-path rust/Cargo.toml --workspace",
|
||||
"git diff --check"
|
||||
],
|
||||
"known_gaps": [
|
||||
"full cargo test --workspace not run for G010",
|
||||
"clippy -D warnings remains blocked by pre-existing unrelated lint debt noted in task 5/task 7 results"
|
||||
]
|
||||
}
|
||||
8
.port_sessions/b035f648d5b549aa836ea01f6727ec62.json
Normal file
8
.port_sessions/b035f648d5b549aa836ea01f6727ec62.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"session_id": "b035f648d5b549aa836ea01f6727ec62",
|
||||
"messages": [
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 3,
|
||||
"output_tokens": 13
|
||||
}
|
||||
9
.port_sessions/b234acb1eb8c486e80544ddc7e13e6d8.json
Normal file
9
.port_sessions/b234acb1eb8c486e80544ddc7e13e6d8.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"session_id": "b234acb1eb8c486e80544ddc7e13e6d8",
|
||||
"messages": [
|
||||
"review MCP tool",
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 6,
|
||||
"output_tokens": 32
|
||||
}
|
||||
9
.port_sessions/b67e062748f04e10ac5770df9285e4bd.json
Normal file
9
.port_sessions/b67e062748f04e10ac5770df9285e4bd.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"session_id": "b67e062748f04e10ac5770df9285e4bd",
|
||||
"messages": [
|
||||
"review MCP tool",
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 6,
|
||||
"output_tokens": 32
|
||||
}
|
||||
9
.port_sessions/bb88fd20433840a8b19237e3f306c6e3.json
Normal file
9
.port_sessions/bb88fd20433840a8b19237e3f306c6e3.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"session_id": "bb88fd20433840a8b19237e3f306c6e3",
|
||||
"messages": [
|
||||
"review MCP tool",
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 6,
|
||||
"output_tokens": 32
|
||||
}
|
||||
32
CODE_OF_CONDUCT.md
Normal file
32
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Code of Conduct
|
||||
|
||||
## Our pledge
|
||||
|
||||
We aim to make Claw Code a practical, respectful, and evidence-oriented
|
||||
community. Contributors and maintainers are expected to communicate with
|
||||
patience, assume good intent, and focus critique on the work rather than the
|
||||
person.
|
||||
|
||||
## Expected behavior
|
||||
|
||||
- Be respectful and direct.
|
||||
- Welcome newcomers and explain project-specific context when it matters.
|
||||
- Give actionable feedback with evidence, commands, logs, or links.
|
||||
- Respect privacy and do not pressure others to disclose credentials, private
|
||||
prompts, employer information, or personal details.
|
||||
|
||||
## Unacceptable behavior
|
||||
|
||||
- Harassment, threats, insults, or discriminatory language.
|
||||
- Publishing another person's private information without permission.
|
||||
- Sharing secrets, exploit payloads, or private vulnerability details in public
|
||||
channels.
|
||||
- Repeated off-topic disruption after maintainers ask for a thread to stop or
|
||||
move.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Maintainers may remove comments, close threads, restrict participation, or ban
|
||||
accounts that violate this code of conduct. Report concerns through the support
|
||||
or security paths described in [SUPPORT.md](./SUPPORT.md) and
|
||||
[SECURITY.md](./SECURITY.md).
|
||||
66
CONTRIBUTING.md
Normal file
66
CONTRIBUTING.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Contributing to Claw Code
|
||||
|
||||
Thanks for helping improve Claw Code. This repository is a Rust-first CLI
|
||||
workspace with supporting docs and compatibility fixtures.
|
||||
|
||||
## Ground rules
|
||||
|
||||
- Keep changes small, reviewable, and tied to a concrete issue or behavior.
|
||||
- Do not commit secrets, API keys, session transcripts with credentials, or
|
||||
generated build output.
|
||||
- Prefer existing crate boundaries and utilities before adding dependencies.
|
||||
- Update documentation when a user-facing command, config key, or provider
|
||||
behavior changes.
|
||||
- Keep examples copy/paste safe. Use placeholder keys such as `sk-ant-...` and
|
||||
avoid commands that require live credentials unless the text explicitly says
|
||||
so.
|
||||
|
||||
## Local setup
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ultraworkers/claw-code
|
||||
cd claw-code/rust
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
On Windows PowerShell, build from the same `rust` workspace and run the binary
|
||||
with the `.exe` suffix:
|
||||
|
||||
```powershell
|
||||
cd claw-code\rust
|
||||
cargo build --workspace
|
||||
.\target\debug\claw.exe --help
|
||||
```
|
||||
|
||||
## Checks before opening a pull request
|
||||
|
||||
Run the smallest relevant tests for your change, then the broader checks when
|
||||
you touch shared runtime, CLI, or docs surfaces:
|
||||
|
||||
```bash
|
||||
cd rust
|
||||
cargo fmt --all --check
|
||||
cargo test --workspace
|
||||
cargo clippy --workspace
|
||||
```
|
||||
|
||||
For documentation and release-readiness changes, also run:
|
||||
|
||||
```bash
|
||||
python .github/scripts/check_doc_source_of_truth.py
|
||||
python .github/scripts/check_release_readiness.py
|
||||
```
|
||||
|
||||
## Pull request guidance
|
||||
|
||||
- Describe the user-visible reason for the change.
|
||||
- List the commands you ran and any known gaps.
|
||||
- Call out compatibility risks for CLI output, JSON schemas, plugin contracts,
|
||||
provider behavior, or Windows/PowerShell examples.
|
||||
- Keep unrelated cleanup out of feature or fix pull requests.
|
||||
|
||||
## License
|
||||
|
||||
By contributing, you agree that your contributions are licensed under the
|
||||
project's [MIT License](./LICENSE).
|
||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2026 UltraWorkers and Claw Code contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -8,7 +8,7 @@ Last updated: 2026-04-03
|
||||
- Requested 9-lane checkpoint: **All 9 lanes merged on `main`.**
|
||||
- Current `main` HEAD: `ee31e00` (stub implementations replaced with real AskUserQuestion + RemoteTrigger).
|
||||
- Repository stats at this checkpoint: **292 commits on `main` / 293 across all branches**, **9 crates**, **48,599 tracked Rust LOC**, **2,568 test LOC**, **3 authors**, date range **2026-03-31 → 2026-04-03**.
|
||||
- Mock parity harness stats: **10 scripted scenarios**, **19 captured `/v1/messages` requests** in `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs`.
|
||||
- Mock parity harness stats: **12 scripted scenarios**, **21 captured `/v1/messages` requests** in `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs`.
|
||||
|
||||
## Mock parity harness — milestone 1
|
||||
|
||||
@@ -23,6 +23,8 @@ Last updated: 2026-04-03
|
||||
- [x] Scripted permission prompt coverage: `bash_permission_prompt_approved`, `bash_permission_prompt_denied`
|
||||
- [x] Scripted plugin-path coverage: `plugin_tool_roundtrip`
|
||||
- [x] Behavioral diff/checklist runner: `rust/scripts/run_mock_parity_diff.py`
|
||||
- [x] Scripted session-compaction metadata coverage: `auto_compact_triggered`
|
||||
- [x] Scripted token/cost JSON coverage: `token_cost_reporting`
|
||||
|
||||
## Harness v2 behavioral checklist
|
||||
|
||||
@@ -172,8 +174,9 @@ Canonical scenario map: `rust/mock_parity_scenarios.json`
|
||||
|
||||
- [ ] End-to-end MCP runtime lifecycle beyond the registry bridge now on `main`
|
||||
- [x] Output truncation (large stdout/file content)
|
||||
- [ ] Session compaction behavior matching
|
||||
- [ ] Token counting / cost tracking accuracy
|
||||
- [x] Session compaction behavior matching
|
||||
- auto_compaction threshold from env
|
||||
- [x] Token counting / cost tracking accuracy
|
||||
- [x] Bash validation lane merged onto `main`
|
||||
- [ ] CI green on every commit
|
||||
|
||||
|
||||
28
README.md
28
README.md
@@ -11,6 +11,10 @@
|
||||
·
|
||||
<a href="./ROADMAP.md">Roadmap</a>
|
||||
·
|
||||
<a href="./CONTRIBUTING.md">Contributing</a>
|
||||
·
|
||||
<a href="./SECURITY.md">Security</a>
|
||||
·
|
||||
<a href="https://discord.gg/5TUQKqFWd">UltraWorkers Discord</a>
|
||||
</p>
|
||||
|
||||
@@ -32,9 +36,9 @@ Claw Code is the public Rust implementation of the `claw` CLI agent harness.
|
||||
The canonical implementation lives in [`rust/`](./rust), and the current source of truth for this repository is **ultraworkers/claw-code**.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Start with [`USAGE.md`](./USAGE.md) for build, auth, CLI, session, and parity-harness workflows. Make `claw doctor` your first health check after building, use [`rust/README.md`](./rust/README.md) for crate-level details, read [`PARITY.md`](./PARITY.md) for the current Rust-port checkpoint, and see [`docs/container.md`](./docs/container.md) for the container-first workflow.
|
||||
> Start with [`USAGE.md`](./USAGE.md) for build, auth, CLI, session, and parity-harness workflows. For file submission/navigation questions, see [Navigation and file context](./docs/navigation-file-context.md). For local OpenAI-compatible models and offline skill installs, see [Local OpenAI-compatible providers and skills setup](./docs/local-openai-compatible-providers.md). Windows users can jump to the PowerShell-first [Windows install and release quickstart](./docs/windows-install-release.md). Make `claw doctor` your first health check after building, use [`rust/README.md`](./rust/README.md) for crate-level details, read [`PARITY.md`](./PARITY.md) for the current Rust-port checkpoint, and see [`docs/container.md`](./docs/container.md) for the container-first workflow.
|
||||
>
|
||||
> **ACP / Zed status:** `claw-code` does not ship an ACP/Zed daemon entrypoint yet. Run `claw acp` (or `claw --acp`) for the current status instead of guessing from source layout; `claw acp serve` is currently a discoverability alias only, and real ACP support remains tracked separately in `ROADMAP.md`.
|
||||
> **ACP / Zed status:** `claw-code` does not ship an ACP/Zed daemon or JSON-RPC entrypoint yet. Run `claw acp` (or `claw --acp`) for the current status instead of guessing from source layout; `claw acp serve` is currently a discoverability alias only, returns status with exit code 0, and real ACP support remains tracked separately in `ROADMAP.md`. For the public JSON contract, see [`docs/g011-acp-json-rpc-status-contract.md`](./docs/g011-acp-json-rpc-status-contract.md).
|
||||
|
||||
## Current repository shape
|
||||
|
||||
@@ -96,6 +100,8 @@ export ANTHROPIC_API_KEY="sk-ant-..."
|
||||
.\target\debug\claw.exe prompt "say hello"
|
||||
```
|
||||
|
||||
For release ZIPs, PATH setup, provider switching, and notification smoke checks, see [`docs/windows-install-release.md`](./docs/windows-install-release.md).
|
||||
|
||||
**Git Bash / WSL** are optional alternatives, not requirements. If you prefer bash-style paths (`/c/Users/you/...` instead of `C:\Users\you\...`), Git Bash (ships with Git for Windows) works well. In Git Bash, the `MINGW64` prompt is expected and normal — not a broken install.
|
||||
|
||||
## Post-build: locate the binary and verify
|
||||
@@ -130,6 +136,18 @@ Test the binary directly using its path:
|
||||
.\rust\target\debug\claw.exe doctor
|
||||
```
|
||||
|
||||
PowerShell smoke commands that do not require live credentials:
|
||||
|
||||
```powershell
|
||||
$env:CLAW_CONFIG_HOME = Join-Path $env:TEMP "claw config home"
|
||||
New-Item -ItemType Directory -Force -Path $env:CLAW_CONFIG_HOME | Out-Null
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY, Env:\ANTHROPIC_AUTH_TOKEN, Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
.\rust\target\debug\claw.exe help
|
||||
.\rust\target\debug\claw.exe status
|
||||
.\rust\target\debug\claw.exe config env
|
||||
.\rust\target\debug\claw.exe doctor
|
||||
```
|
||||
|
||||
If these commands succeed, the build is working. `claw doctor` is your first health check — it validates your API key, model access, and tool configuration.
|
||||
|
||||
### Optional: Add to PATH
|
||||
@@ -188,11 +206,17 @@ cargo test --workspace
|
||||
## Documentation map
|
||||
|
||||
- [`USAGE.md`](./USAGE.md) — quick commands, auth, sessions, config, parity harness
|
||||
- [`docs/navigation-file-context.md`](./docs/navigation-file-context.md) — terminal navigation, scrollback, `@path` file context, attachments, and secret-safety guidance
|
||||
- [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md) — Ollama/llama.cpp/vLLM setup, Claw multi-provider positioning, and local skills install checks
|
||||
- [`docs/windows-install-release.md`](./docs/windows-install-release.md) — PowerShell-first install, release artifact, provider switching, and Windows/WSL notification smoke paths
|
||||
- [`rust/README.md`](./rust/README.md) — crate map, CLI surface, features, workspace layout
|
||||
- [`PARITY.md`](./PARITY.md) — parity status for the Rust port
|
||||
- [`rust/MOCK_PARITY_HARNESS.md`](./rust/MOCK_PARITY_HARNESS.md) — deterministic mock-service harness details
|
||||
- [`ROADMAP.md`](./ROADMAP.md) — active roadmap and open cleanup work
|
||||
- [`docs/g004-events-reports-contract.md`](./docs/g004-events-reports-contract.md) — Stream 2 lane event/report contract guidance for consumers
|
||||
- [`PHILOSOPHY.md`](./PHILOSOPHY.md) — why the project exists and how it is operated
|
||||
- [`CONTRIBUTING.md`](./CONTRIBUTING.md), [`SECURITY.md`](./SECURITY.md), [`SUPPORT.md`](./SUPPORT.md), and [`CODE_OF_CONDUCT.md`](./CODE_OF_CONDUCT.md) — contribution, vulnerability-reporting, support, and community policies
|
||||
- [`LICENSE`](./LICENSE) — MIT license for this repository
|
||||
|
||||
## Ecosystem
|
||||
|
||||
|
||||
49
SECURITY.md
Normal file
49
SECURITY.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported versions
|
||||
|
||||
Security fixes target the current `main` branch and the latest published
|
||||
release artifacts when available. Older experimental branches are not supported
|
||||
unless a maintainer explicitly marks them as supported.
|
||||
|
||||
## Reporting a vulnerability
|
||||
|
||||
Please do **not** open a public issue for a suspected vulnerability. Use GitHub
|
||||
private vulnerability reporting for `ultraworkers/claw-code` when available, or
|
||||
contact a maintainer through the repository's published support channel with a
|
||||
minimal, non-destructive reproduction.
|
||||
|
||||
Include:
|
||||
|
||||
- affected command, crate, or workflow;
|
||||
- operating system and shell, especially for Windows/PowerShell path issues;
|
||||
- whether live credentials, MCP servers, plugins, or workspace filesystem
|
||||
access are involved;
|
||||
- expected impact and any safe proof-of-concept steps.
|
||||
|
||||
Do not include real API keys, private prompts, session transcripts with secrets,
|
||||
or exploit payloads that modify third-party systems.
|
||||
|
||||
## Scope
|
||||
|
||||
In scope:
|
||||
|
||||
- workspace path traversal or symlink escapes;
|
||||
- permission bypasses, sandbox misreporting, or unsafe tool execution;
|
||||
- credential disclosure in logs, JSON output, telemetry, docs, or examples;
|
||||
- plugin, hook, MCP, provider, or config behavior that can unexpectedly execute
|
||||
code or leak secrets.
|
||||
|
||||
Out of scope:
|
||||
|
||||
- social engineering;
|
||||
- denial-of-service without a practical security impact;
|
||||
- issues that require already-compromised local developer credentials;
|
||||
- reports against third-party providers or upstream tools without a Claw Code
|
||||
integration issue.
|
||||
|
||||
## Handling expectations
|
||||
|
||||
Maintainers will acknowledge valid private reports as soon as practical, keep
|
||||
discussion private until a fix or mitigation is available, and credit reporters
|
||||
when requested and appropriate.
|
||||
24
SUPPORT.md
Normal file
24
SUPPORT.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Support
|
||||
|
||||
Use the lightest support path that fits the request:
|
||||
|
||||
- **Usage questions:** start with [USAGE.md](./USAGE.md) and
|
||||
[rust/README.md](./rust/README.md).
|
||||
- **Bugs or regressions:** open a GitHub issue with the command, OS/shell,
|
||||
expected behavior, actual behavior, and relevant non-secret output.
|
||||
- **Security issues:** follow [SECURITY.md](./SECURITY.md) instead of opening a
|
||||
public issue.
|
||||
- **Community discussion:** use the UltraWorkers Discord linked from
|
||||
[README.md](./README.md).
|
||||
|
||||
When asking for help, include:
|
||||
|
||||
```text
|
||||
claw --version
|
||||
claw doctor
|
||||
operating system and shell
|
||||
command you ran
|
||||
```
|
||||
|
||||
Redact API keys, bearer tokens, private prompts, session transcripts, and local
|
||||
paths that reveal sensitive information before sharing output.
|
||||
73
USAGE.md
73
USAGE.md
@@ -31,7 +31,7 @@ cd rust
|
||||
cargo build --workspace
|
||||
```
|
||||
|
||||
The CLI binary is available at `rust/target/debug/claw` after a debug build. Make the doctor check above your first post-build step.
|
||||
The CLI binary is available at `rust/target/debug/claw` after a debug build (`rust\target\debug\claw.exe` on Windows). Make the doctor check above your first post-build step. For PowerShell-first install, release ZIP, PATH, provider-switching, and Windows/WSL notification examples, see [`docs/windows-install-release.md`](./docs/windows-install-release.md).
|
||||
|
||||
## Quick start
|
||||
|
||||
@@ -230,9 +230,37 @@ export ANTHROPIC_AUTH_TOKEN="anthropic-oauth-or-proxy-bearer-token"
|
||||
|
||||
**If you meant a different provider:** if `claw` reports missing Anthropic credentials but you already have `OPENAI_API_KEY`, `XAI_API_KEY`, or `DASHSCOPE_API_KEY` exported, you most likely forgot to prefix the model name with the provider's routing prefix. Use `--model openai/gpt-4.1-mini` (OpenAI-compat / OpenRouter / Ollama), `--model grok` (xAI), or `--model qwen-plus` (DashScope) and the prefix router will select the right backend regardless of the ambient credentials. The error message now includes a hint that names the detected env var.
|
||||
|
||||
|
||||
### Windows PowerShell provider switching
|
||||
|
||||
The same provider rules work in PowerShell. Use placeholder values in docs and tests; put real keys only in your private environment. Remove unrelated provider env vars when validating a switch so failures are easy to diagnose.
|
||||
|
||||
`CLAUDE_CODE_PROVIDER` is not required for normal Claw routing; prefer explicit model prefixes such as `openai/` and provider-specific env vars so PowerShell examples stay portable.
|
||||
|
||||
```powershell
|
||||
# Anthropic direct
|
||||
$env:ANTHROPIC_API_KEY = "sk-ant-REPLACE_ME"
|
||||
Remove-Item Env:\OPENAI_BASE_URL -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
.\target\debug\claw.exe --model "sonnet" prompt "reply with ready"
|
||||
|
||||
# OpenAI-compatible gateway / OpenRouter
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:OPENAI_BASE_URL = "https://openrouter.ai/api/v1"
|
||||
$env:OPENAI_API_KEY = "sk-or-v1-REPLACE_ME"
|
||||
.\target\debug\claw.exe --model "openai/gpt-4.1-mini" prompt "reply with ready"
|
||||
|
||||
# Local OpenAI-compatible server
|
||||
$env:OPENAI_BASE_URL = "http://127.0.0.1:11434/v1"
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
.\target\debug\claw.exe --model "llama3.2" prompt "reply with ready"
|
||||
```
|
||||
|
||||
See the full [Windows install and release quickstart](./docs/windows-install-release.md) for release artifact setup, persistent `setx` usage, and WSL notes.
|
||||
|
||||
## Local Models
|
||||
|
||||
`claw` can talk to local servers and provider gateways through either Anthropic-compatible or OpenAI-compatible endpoints. Use `ANTHROPIC_BASE_URL` with `ANTHROPIC_AUTH_TOKEN` for Anthropic-compatible services, or `OPENAI_BASE_URL` with `OPENAI_API_KEY` for OpenAI-compatible services.
|
||||
`claw` can talk to local servers and provider gateways through either Anthropic-compatible or OpenAI-compatible endpoints. Use `ANTHROPIC_BASE_URL` with `ANTHROPIC_AUTH_TOKEN` for Anthropic-compatible services, or `OPENAI_BASE_URL` with `OPENAI_API_KEY` for OpenAI-compatible services. For copyable Ollama, llama.cpp, vLLM, raw `/v1/chat/completions`, and local skills install examples, see [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md).
|
||||
|
||||
### Anthropic-compatible endpoint
|
||||
|
||||
@@ -306,7 +334,7 @@ Reasoning variants (`qwen-qwq-*`, `qwq-*`, `*-thinking`) automatically strip `te
|
||||
|
||||
The OpenAI-compatible backend also serves as the gateway for **OpenRouter**, **Ollama**, and any other service that speaks the OpenAI `/v1/chat/completions` wire format — just point `OPENAI_BASE_URL` at the service.
|
||||
|
||||
**Model-name prefix routing:** If a model name starts with `openai/`, `gpt-`, `qwen/`, or `qwen-`, the provider is selected by the prefix regardless of which env vars are set. This prevents accidental misrouting to Anthropic when multiple credentials exist in the environment.
|
||||
**Model-name prefix routing:** If a model name starts with `openai/`, `gpt-`, `qwen/`, `qwen-`, `kimi/`, or `kimi-`, the provider is selected by the prefix regardless of which env vars are set. This prevents accidental misrouting to Anthropic when multiple credentials exist in the environment. For the default OpenAI API, `openai/` is a routing prefix and is stripped before the request hits the wire. For a custom `OPENAI_BASE_URL`, slash-containing OpenAI-compatible slugs (for example OpenRouter-style `openai/gpt-4.1-mini`) are preserved so the gateway receives the model ID it expects.
|
||||
|
||||
### Tested models and aliases
|
||||
|
||||
@@ -320,8 +348,11 @@ These are the models registered in the built-in alias table with known token lim
|
||||
| `grok` / `grok-3` | `grok-3` | xAI | 64 000 | 131 072 |
|
||||
| `grok-mini` / `grok-3-mini` | `grok-3-mini` | xAI | 64 000 | 131 072 |
|
||||
| `grok-2` | `grok-2` | xAI | — | — |
|
||||
| `kimi` | `kimi-k2.5` | DashScope | 16 384 | 256 000 |
|
||||
| `gpt-4.1` / `gpt-4.1-mini` / `gpt-4.1-nano` | same | OpenAI-compatible | 32 768 | 1 047 576 |
|
||||
| `gpt-5.4` / `gpt-5.4-mini` / `gpt-5.4-nano` | same | OpenAI-compatible | 128 000 | 1 000 000 / 400 000 |
|
||||
|
||||
Any model name that does not match an alias is passed through verbatim. This is how you use OpenRouter model slugs (`openai/gpt-4.1-mini`), Ollama tags (`llama3.2`), or full Anthropic model IDs (`claude-sonnet-4-20250514`).
|
||||
Any model name that does not match an alias is passed through verbatim after provider routing is resolved. This is how you use OpenRouter model slugs (`openai/gpt-4.1-mini` with a custom `OPENAI_BASE_URL`), Ollama tags (`llama3.2`), or full Anthropic model IDs (`claude-sonnet-4-20250514`).
|
||||
|
||||
### User-defined aliases
|
||||
|
||||
@@ -343,11 +374,29 @@ Local project settings override user-level settings. Aliases resolve through the
|
||||
|
||||
1. If the resolved model name starts with `claude` → Anthropic.
|
||||
2. If it starts with `grok` → xAI.
|
||||
3. Otherwise, `claw` checks which credential is set: `ANTHROPIC_API_KEY`/`ANTHROPIC_AUTH_TOKEN` first, then `OPENAI_API_KEY`, then `XAI_API_KEY`.
|
||||
4. If nothing matches, it defaults to Anthropic.
|
||||
3. If it starts with `openai/` or `gpt-` → OpenAI-compatible.
|
||||
4. If it starts with `qwen/`, `qwen-`, `kimi/`, or `kimi-` → DashScope-compatible OpenAI wire format.
|
||||
5. If `OPENAI_BASE_URL` and `OPENAI_API_KEY` are set, unknown model names route to the OpenAI-compatible client for local/gateway servers.
|
||||
6. Otherwise, `claw` checks which credential is set: Anthropic first, then OpenAI, then xAI. If only `OPENAI_BASE_URL` is set, it still routes to OpenAI-compatible for authless local servers.
|
||||
7. If nothing matches, it defaults to Anthropic.
|
||||
|
||||
|
||||
### Provider diagnostics and custom OpenAI-compatible parameters
|
||||
|
||||
The API layer exposes a provider diagnostics snapshot via `api::provider_diagnostics_for_model(model)`. It reports the resolved provider, auth/base-url environment variables, default base URL, whether the provider uses the OpenAI-compatible wire format, whether reasoning tuning parameters are stripped, whether DeepSeek V4 reasoning history is preserved, proxy support, extra-body support, and whether slash-containing model IDs are preserved for custom OpenAI-compatible gateways.
|
||||
|
||||
For gateway features that are not first-class request fields yet, `MessageRequest::extra_body` passes through provider-specific JSON parameters such as `web_search_options` or `parallel_tool_calls`. Core protocol fields (`model`, `messages`, `stream`, `tools`, `tool_choice`, `max_tokens`, and `max_completion_tokens`) are protected and cannot be overridden through `extra_body`.
|
||||
|
||||
## File context and navigation
|
||||
|
||||
Use `@path/to/file` in prompts to submit repository files as context, for example `Read @src/app.ts and explain the bug`, `Compare @old.md and @new.md`, or `Use @logs/error.txt as context and suggest a fix`. Prompt history, `Ctrl-r`, and long-output scrolling come from your shell, terminal, or tmux rather than from Claw itself. See [`docs/navigation-file-context.md`](./docs/navigation-file-context.md) for scrollback, attachment, and secret-redaction guidance.
|
||||
|
||||
## FAQ
|
||||
|
||||
### Is Claw Code Claude-only?
|
||||
|
||||
No. Claw Code is a Claude-Code-shaped workflow/runtime, not a Claude-only product. It can target Anthropic and OpenAI-compatible/provider-routed/local models depending on config. Non-Claude providers may require stricter response-shape and tool-call compatibility, so some workflows can be rougher than first-party Anthropic/OpenAI paths; provider-specific identity leaks are bugs, not product intent. See [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md) for local provider examples.
|
||||
|
||||
### What about Codex?
|
||||
|
||||
The name "codex" appears in the Claw Code ecosystem but it does **not** refer to OpenAI Codex (the code-generation model). Here is what it means in this project:
|
||||
@@ -401,6 +450,18 @@ let client = build_http_client_with(&config).expect("proxy client");
|
||||
- Empty values are treated as unset, so leaving `HTTPS_PROXY=""` in your shell will not enable a proxy.
|
||||
- If a proxy URL cannot be parsed, `claw` falls back to a direct (no-proxy) client so existing workflows keep working; double-check the URL if you expected the request to be tunnelled.
|
||||
|
||||
## Skills
|
||||
|
||||
Use `/skills list` in the interactive REPL or `claw skills --output-format json` from the direct CLI to inspect installed skills. For offline/local installs, install the directory that contains `SKILL.md`, then verify the discovered name before invoking it:
|
||||
|
||||
```text
|
||||
/skills install /absolute/path/to/my-skill
|
||||
/skills list
|
||||
/skills my-skill
|
||||
```
|
||||
|
||||
If install succeeds but invocation fails with a provider HTTP error, treat provider setup separately: run `claw doctor` and a one-shot prompt smoke test before reinstalling the skill. See [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md#local-skills-install-from-disk) for the full checklist.
|
||||
|
||||
## Common operational commands
|
||||
|
||||
```bash
|
||||
|
||||
@@ -9,7 +9,8 @@ This document describes model-specific handling in the OpenAI-compatible provide
|
||||
- [Kimi Models (is_error Exclusion)](#kimi-models-is_error-exclusion)
|
||||
- [Reasoning Models (Tuning Parameter Stripping)](#reasoning-models-tuning-parameter-stripping)
|
||||
- [GPT-5 (max_completion_tokens)](#gpt-5-max_completion_tokens)
|
||||
- [Qwen Models (DashScope Routing)](#qwen-models-dashscope-routing)
|
||||
- [Qwen and Kimi Models (DashScope Routing)](#qwen-and-kimi-models-dashscope-routing)
|
||||
- [Custom Gateway Slugs and Extra Body Parameters](#custom-gateway-slugs-and-extra-body-parameters)
|
||||
- [Implementation Details](#implementation-details)
|
||||
- [Adding New Models](#adding-new-models)
|
||||
- [Testing](#testing)
|
||||
@@ -22,6 +23,8 @@ The `openai_compat.rs` provider translates Claude Code's internal message format
|
||||
- Sampling parameters (temperature, top_p, etc.)
|
||||
- Token limit fields (`max_tokens` vs `max_completion_tokens`)
|
||||
- Base URL routing
|
||||
- Provider-specific extra body parameters (`web_search_options`, `parallel_tool_calls`, local-server switches, etc.)
|
||||
- Provider diagnostics for status/doctor-style surfaces
|
||||
|
||||
## Model-Specific Handling
|
||||
|
||||
@@ -46,7 +49,7 @@ The `openai_compat.rs` provider translates Claude Code's internal message format
|
||||
fn model_rejects_is_error_field(model: &str) -> bool {
|
||||
let lowered = model.to_ascii_lowercase();
|
||||
let canonical = lowered.rsplit('/').next().unwrap_or(lowered.as_str());
|
||||
canonical.starts_with("kimi-")
|
||||
canonical.starts_with("kimi")
|
||||
}
|
||||
```
|
||||
|
||||
@@ -120,13 +123,13 @@ let max_tokens_key = if wire_model.starts_with("gpt-5") {
|
||||
|
||||
---
|
||||
|
||||
### Qwen Models (DashScope Routing)
|
||||
### Qwen and Kimi Models (DashScope Routing)
|
||||
|
||||
**Affected models:** All models with `qwen` prefix
|
||||
**Affected models:** All models with `qwen` or `kimi` prefixes, including `qwen/`, `qwen-`, `kimi/`, and `kimi-` forms.
|
||||
|
||||
**Behavior:** Routed to DashScope (`https://dashscope.aliyuncs.com/compatible-mode/v1`) rather than default providers.
|
||||
**Behavior:** Routed to DashScope (`https://dashscope.aliyuncs.com/compatible-mode/v1`) rather than ambient-credential fallback providers. Known routing prefixes are stripped before sending the wire model.
|
||||
|
||||
**Rationale:** Qwen models are hosted by Alibaba Cloud's DashScope service, not OpenAI or Anthropic.
|
||||
**Rationale:** Qwen and Kimi compatible-mode models are hosted through Alibaba Cloud's DashScope service, not OpenAI or Anthropic.
|
||||
|
||||
**Configuration:**
|
||||
```rust
|
||||
@@ -137,6 +140,21 @@ pub const DEFAULT_DASHSCOPE_BASE_URL: &str = "https://dashscope.aliyuncs.com/com
|
||||
|
||||
**Note:** Some Qwen models are also reasoning models (see [Reasoning Models](#reasoning-models-tuning-parameter-stripping) above) and receive both treatments.
|
||||
|
||||
|
||||
---
|
||||
|
||||
### Custom Gateway Slugs and Extra Body Parameters
|
||||
|
||||
**Affected models:** Slash-containing model IDs routed through the OpenAI-compatible provider, especially custom gateways configured with `OPENAI_BASE_URL` such as OpenRouter, local routers, or other `/v1/chat/completions` services.
|
||||
|
||||
**Behavior:**
|
||||
- The default OpenAI API treats `openai/` as a routing prefix and sends the bare model name on the wire.
|
||||
- Custom OpenAI-compatible base URLs preserve slash-containing slugs such as `openai/gpt-4.1-mini` so the gateway receives the exact model ID it expects.
|
||||
- `MessageRequest::extra_body` passes through custom request JSON after core fields are populated. This supports provider-specific options such as `web_search_options` and `parallel_tool_calls`.
|
||||
- Protected core fields (`model`, `messages`, `stream`, `tools`, `tool_choice`, `max_tokens`, `max_completion_tokens`) cannot be overridden through `extra_body`.
|
||||
|
||||
**Testing:** See `custom_openai_gateway_preserves_slash_model_ids_and_extra_body_params` in `openai_compat_integration.rs` and `extra_body_params_are_passed_through_without_overriding_core_fields` in `openai_compat.rs`.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### File Location
|
||||
@@ -152,7 +170,8 @@ rust/crates/api/src/providers/openai_compat.rs
|
||||
| `model_rejects_is_error_field()` | Detects models that don't support `is_error` in tool results |
|
||||
| `is_reasoning_model()` | Detects reasoning models that need tuning param stripping |
|
||||
| `translate_message()` | Converts internal messages to OpenAI format (applies `is_error` logic) |
|
||||
| `build_chat_completion_request()` | Constructs full request payload (applies all model-specific logic) |
|
||||
| `build_chat_completion_request()` | Constructs full request payload (applies all model-specific logic and safe `extra_body` passthrough) |
|
||||
| `provider_diagnostics_for_model()` | Produces provider/status diagnostics including auth/base-url vars, reasoning behavior, proxy support, extra-body support, and slash-model preservation |
|
||||
|
||||
### Provider Prefix Handling
|
||||
|
||||
@@ -165,7 +184,7 @@ let canonical = model.to_ascii_lowercase()
|
||||
.unwrap_or(model);
|
||||
```
|
||||
|
||||
This ensures consistent detection regardless of whether models are referenced with or without provider prefixes.
|
||||
This ensures consistent detection regardless of whether models are referenced with or without provider prefixes. Wire-model handling is more specific: known routing prefixes are stripped for provider-native defaults, while custom OpenAI-compatible base URLs preserve slash-containing gateway slugs.
|
||||
|
||||
## Adding New Models
|
||||
|
||||
@@ -183,11 +202,15 @@ When adding support for new models:
|
||||
- Does it require `max_completion_tokens` instead of `max_tokens`?
|
||||
- Update the `max_tokens_key` logic
|
||||
|
||||
4. **Add tests**
|
||||
4. **Check custom gateway behavior**
|
||||
- Should slash-containing IDs be preserved for custom `OPENAI_BASE_URL` gateways?
|
||||
- Does the feature belong in a typed request field or `extra_body` passthrough?
|
||||
|
||||
5. **Add tests**
|
||||
- Unit test for detection function
|
||||
- Integration test in `build_chat_completion_request`
|
||||
|
||||
5. **Update this documentation**
|
||||
6. **Update this documentation**
|
||||
- Add the model to the affected lists
|
||||
- Document any special behavior
|
||||
|
||||
@@ -204,6 +227,8 @@ cargo test --package api model_rejects_is_error_field
|
||||
cargo test --package api reasoning_model
|
||||
cargo test --package api gpt5
|
||||
cargo test --package api qwen
|
||||
cargo test --package api custom_openai_gateway_preserves_slash_model_ids_and_extra_body_params
|
||||
cargo test --package api provider_diagnostics_explain_openai_compatible_capabilities
|
||||
```
|
||||
|
||||
### Test Files
|
||||
@@ -231,6 +256,6 @@ fn my_new_model_is_detected() {
|
||||
|
||||
---
|
||||
|
||||
*Last updated: 2026-04-16*
|
||||
*Last updated: 2026-05-15*
|
||||
|
||||
For questions or updates, see the implementation in `rust/crates/api/src/providers/openai_compat.rs`.
|
||||
|
||||
44
docs/anti-slop-triage.md
Normal file
44
docs/anti-slop-triage.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# Anti-slop issue and PR triage
|
||||
|
||||
Use this checklist before spending engineering time on low-signal issues, generated PRs, duplicate fixes, or broad unsolicited changes. The goal is not to reject community work by default; it is to make each merge, defer, or close recommendation evidence-backed and safe.
|
||||
|
||||
## Classifications
|
||||
|
||||
| Classification | Use when | Required evidence | Safe action |
|
||||
| --- | --- | --- | --- |
|
||||
| `actionable-bug` | The report has a reproducible product failure. | Repro steps, failing test, logs with secrets removed, or matching roadmap item. | Fix, assign, or link to an existing fix. |
|
||||
| `actionable-docs` | The report identifies missing, stale, or confusing documentation. | Current doc path plus desired corrected source of truth. | Patch docs or link to the owning docs lane. |
|
||||
| `actionable-feature` | The request matches Claw Code direction and has a concrete acceptance shape. | Issue/PR link plus roadmap or maintainer rationale. | Defer to planning or implement if already scoped. |
|
||||
| `duplicate` | Another issue/PR already covers the same user-visible outcome. | Link the canonical issue/PR and note any extra evidence worth preserving. | Cross-link; close only with maintainer/owner policy. |
|
||||
| `spam-or-promotion` | The content is promotional, irrelevant, or abusive. | URL/title/body excerpt summary, not a full repost. | Label/close per repository policy. |
|
||||
| `generated-slop-or-hallucinated` | The change is broad, mechanically generated, unreviewable, or names APIs/files that do not exist. | Diff/path examples, missing symbols, or unverifiable claims. | Request a narrow repro or reject/defer with rationale. |
|
||||
| `unsafe-or-security-sensitive` | The report includes secrets, exploit detail, or risky operational instructions. | Redacted summary and security policy link. | Move to the private/security path; do not expand public details. |
|
||||
| `not-reproducible-yet` | The claim might be valid but lacks enough evidence to act. | Missing command, environment, expected/actual behavior, or version. | Ask for repro details; do not implement speculative fixes. |
|
||||
| `externally-blocked` | Progress depends on upstream services, credentials, policy, or unavailable owner approval. | Blocking dependency and owner/gate. | Defer with a concrete unblock condition. |
|
||||
|
||||
## PR review gate
|
||||
|
||||
Every PR triage note should answer:
|
||||
|
||||
1. Is the PR a merge candidate, a request-changes candidate, a duplicate, unsafe, out-of-scope, or generated slop?
|
||||
2. What exact evidence supports that classification?
|
||||
3. Which tests/docs checks were run or intentionally skipped?
|
||||
4. Which issue, roadmap row, or user problem does it resolve?
|
||||
5. If it should not merge now, what is the minimal non-destructive next action?
|
||||
|
||||
Automation lanes must not merge or close remote PRs/issues. They may produce a ledger row, add local documentation/templates, and report recommended actions for a maintainer-owned final gate.
|
||||
|
||||
## Issue intake gate
|
||||
|
||||
Every issue triage note should answer:
|
||||
|
||||
1. Is the issue correct, duplicate, spam, invalid, externally blocked, or not reproducible yet?
|
||||
2. If correct and resolvable, what fix path or already-merged commit resolves it?
|
||||
3. If not currently resolvable, what evidence would change the classification?
|
||||
4. Are secrets, private data, or security details present that require a private path?
|
||||
|
||||
## Template locations
|
||||
|
||||
- Issue intake form: `.github/ISSUE_TEMPLATE/anti_slop_triage.yml`
|
||||
- PR review checklist: `.github/PULL_REQUEST_TEMPLATE.md`
|
||||
- Final aggregate gate: `docs/pr-issue-resolution-gate.md`
|
||||
185
docs/g002-security-verification-map.md
Normal file
185
docs/g002-security-verification-map.md
Normal file
@@ -0,0 +1,185 @@
|
||||
# G002 alpha security map and verification plan
|
||||
|
||||
Generated by `worker-4` for OMX team task 5 on 2026-05-14.
|
||||
|
||||
## Scope and coordination
|
||||
|
||||
- Active goal context: `G002-alpha-security` / Stream 6 day-one security and permissions gate.
|
||||
- Worker ownership: `worker-1` owns minimal implementation changes for workspace/path enforcement. `worker-4` owns this repository map, integration verification plan, changed-file/commit report, and exact verification evidence.
|
||||
- Boundary: this report does not mutate `.omx/ultragoal` and does not edit shared security/path tests.
|
||||
- Parallel probe status: three native subagents were spawned for repository map, test probe, and change-slice probe, but all failed before returning findings with `429 Too Many Requests`; local mapping below is based on direct repository inspection.
|
||||
|
||||
## Current permission and path enforcement map
|
||||
|
||||
### Runtime permission policy and enforcer
|
||||
|
||||
- `rust/crates/runtime/src/permissions.rs`
|
||||
- Owns the `PermissionMode` ordering and `PermissionPolicy` authorization contract.
|
||||
- Existing tests cover read-only denial, workspace-write escalation, prompt approvals/denials, danger-full-access allowance, override recording, and required-mode reporting.
|
||||
- Integration risk: any new dynamic file/path rule must preserve the existing `PermissionPolicy::authorize` semantics so prompt/override audit events remain stable.
|
||||
|
||||
- `rust/crates/runtime/src/permission_enforcer.rs`
|
||||
- `PermissionEnforcer::check`, `check_with_required_mode`, `check_file_write`, and `check_bash` convert policy outcomes into structured `EnforcementResult` payloads.
|
||||
- `check_file_write` currently has the direct write gate for workspace-write mode.
|
||||
- `is_within_workspace` is a string-prefix boundary check after simple relative-path joining; it does not canonicalize symlinks, `..`, Windows drive prefixes, or case variants.
|
||||
- Existing tests cover read-only denial, workspace-write inside/outside paths, trailing slashes, root equality, bash read-only heuristics, prompt-mode denial payloads, and structured denied fields.
|
||||
|
||||
### File tool path handling
|
||||
|
||||
- `rust/crates/runtime/src/file_ops.rs`
|
||||
- `read_file`, `write_file`, and `edit_file` normalize paths before filesystem operations but do not themselves require a workspace root.
|
||||
- `read_file_in_workspace`, `write_file_in_workspace`, and `edit_file_in_workspace` exist as boundary-enforced wrappers.
|
||||
- `validate_workspace_boundary` canonicalizes through the caller-provided resolved path and checks `starts_with(workspace_root)`.
|
||||
- `is_symlink_escape` detects direct symlink escapes by comparing canonical target to canonical workspace root.
|
||||
- Search tools (`glob_search`, `grep_search`) derive walk roots and prune heavy directories, but they are separate from the write enforcement path.
|
||||
- Existing tests cover oversized/binary reads, workspace-boundary read rejection, symlink escape detection, glob brace expansion, ignored directories, and grep/glob behavior.
|
||||
|
||||
### Bash command validation
|
||||
|
||||
- `rust/crates/runtime/src/bash_validation.rs`
|
||||
- `validate_command` runs mode validation, sed validation, destructive warning checks, then path validation.
|
||||
- `validate_read_only` blocks write-like commands, state-modifying commands, write redirects, and mutating git subcommands in read-only mode.
|
||||
- `validate_mode` warns when workspace-write commands appear to target hard-coded system paths.
|
||||
- `validate_paths` warns for `../`, `~/`, and `$HOME` references; it is intentionally heuristic and does not resolve shell expansion or canonical targets.
|
||||
- Existing tests cover read-only blockers, destructive warnings, sed in-place blocking, path traversal/home warnings, command classification, and full pipeline allow/block/warn outcomes.
|
||||
|
||||
### Sandbox and diagnostics surfaces
|
||||
|
||||
- `rust/crates/runtime/src/sandbox.rs`
|
||||
- Owns container/sandbox status detection and workspace-only sandbox command construction.
|
||||
- Relevant for day-one security because sandbox status must not overstate filesystem isolation.
|
||||
|
||||
- `rust/crates/rusty-claude-cli/src/main.rs`
|
||||
- Owns CLI permission-mode parsing, direct JSON/text diagnostic output, `/permissions`, `/status`, `/doctor`, and command dispatch paths.
|
||||
- Existing CLI integration tests under `rust/crates/rusty-claude-cli/tests/` cover permission prompt scenarios and output-format contracts.
|
||||
|
||||
- `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs`
|
||||
- End-to-end harness includes `bash_permission_prompt_approved`, `bash_permission_prompt_denied`, read/write file allow/deny, and plugin workspace-write scenarios.
|
||||
|
||||
## Existing G002-adjacent coverage
|
||||
|
||||
- Unit-level permission coverage:
|
||||
- `cargo test -p runtime permissions::tests`
|
||||
- `cargo test -p runtime permission_enforcer::tests`
|
||||
- `cargo test -p runtime bash_validation::tests`
|
||||
- `cargo test -p runtime file_ops::tests`
|
||||
|
||||
- CLI and integration coverage:
|
||||
- `cargo test -p rusty-claude-cli --test mock_parity_harness`
|
||||
- `cargo test -p rusty-claude-cli --test output_format_contract`
|
||||
- `cargo test -p rusty-claude-cli --test cli_flags_and_config_defaults`
|
||||
|
||||
- Board/report validation coverage:
|
||||
- `python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json`
|
||||
- `python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json`
|
||||
|
||||
## Recommended safe work slices
|
||||
|
||||
### Implementation lane (owned by worker-1 unless re-scoped)
|
||||
|
||||
1. Replace string-prefix workspace boundary checks with canonical path comparison in the runtime enforcement path.
|
||||
- Primary files: `rust/crates/runtime/src/permission_enforcer.rs`, possibly shared helper extraction from `rust/crates/runtime/src/file_ops.rs`.
|
||||
- Regression cases: `../` traversal, symlink escape, root prefix collision (`/workspace` vs `/workspacex`), relative paths, trailing slash root equality.
|
||||
|
||||
2. Ensure direct file tools call workspace-aware wrappers when active permission mode is `workspace-write`.
|
||||
- Primary files: likely `rust/crates/runtime/src/mcp_tool_bridge.rs` and/or the runtime tool execution bridge that calls `file_ops`.
|
||||
- Regression cases: direct read/write paths, missing parent creation, symlink parent escape, and error payload stability.
|
||||
|
||||
3. Keep bash validation as a warning/classification layer unless a real shell-expansion resolver is introduced.
|
||||
- Primary files: `rust/crates/runtime/src/bash_validation.rs`, `rust/crates/runtime/src/bash.rs`.
|
||||
- Risk: heuristic parsing cannot faithfully resolve shell expansion, globs, aliases, or platform-specific path rules; avoid claiming hard enforcement unless execution sandbox or command resolver proves it.
|
||||
|
||||
### Test lane (coordinate with worker-3/worker-1 before editing)
|
||||
|
||||
1. Add unit regressions close to each enforcement function before changing behavior.
|
||||
- `permission_enforcer.rs`: canonical path boundary and Windows-shaped path cases.
|
||||
- `file_ops.rs`: write/edit workspace wrappers with symlink parent escapes and missing file parent canonicalization.
|
||||
- `bash_validation.rs`: shell expansion/glob/path warnings remain warnings unless a resolver is introduced.
|
||||
|
||||
2. Add at least one integration test proving the runtime bridge actually routes file tools through workspace enforcement, not only helper functions.
|
||||
- Candidate: `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs` for direct write denial and no file created outside workspace.
|
||||
|
||||
3. Preserve existing prompt/event visibility tests.
|
||||
- Candidate surfaces: permission prompt scenarios in `mock_parity_harness.rs`, status/doctor JSON in `output_format_contract.rs`.
|
||||
|
||||
### Docs/reporting lane (owned by worker-4)
|
||||
|
||||
1. Keep this file as the integration handoff artifact for G002 mapping and verification.
|
||||
2. Report changed files and commits relative to `origin/main` so the leader can integrate worker branches deterministically.
|
||||
3. Include exact command evidence in the task lifecycle result.
|
||||
|
||||
## Changed files relative to `origin/main` at map time
|
||||
|
||||
The worktree currently contains these files added relative to `origin/main` before this task report:
|
||||
|
||||
- `.omx/cc2/board.json`
|
||||
- `.omx/cc2/board.md`
|
||||
- `.omx/cc2/issue-parity-intake.json`
|
||||
- `.omx/cc2/issue-parity-intake.md`
|
||||
- `.omx/cc2/render_board_md.py`
|
||||
- `.omx/cc2/validate_issue_parity_intake.py`
|
||||
- `scripts/cc2_board.py`
|
||||
- `scripts/generate_cc2_board.py`
|
||||
- `scripts/validate_cc2_board.py`
|
||||
|
||||
This task adds:
|
||||
|
||||
- `docs/g002-security-verification-map.md`
|
||||
|
||||
## Commits relative to `origin/main` at map time
|
||||
|
||||
- `8311655` — `omx(team): auto-checkpoint worker-1 [1]`
|
||||
- `c6e2a7d` — `omx(team): merge worker-1`
|
||||
- `481585f` — `omx(team): auto-checkpoint worker-1 [1]`
|
||||
- `74bbf4b` — `omx(team): auto-checkpoint worker-4 [unknown]`
|
||||
- `5c77896` — `omx(team): auto-checkpoint worker-1 [1]`
|
||||
- `07dad88` — `Classify issue and parity intake for CC2 board integration`
|
||||
- `424825f` — `task: G001 human board and docs rendering`
|
||||
- `d15268e` — `Create a canonical CC2 board so every frozen ROADMAP heading is verifiably mapped`
|
||||
- `45b43b5` — `Make the CC2 board schema executable for G001`
|
||||
|
||||
## Verification checklist for leader integration
|
||||
|
||||
Run these from the repository root unless noted:
|
||||
|
||||
1. Python board/schema validation:
|
||||
- `python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json`
|
||||
- `python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json`
|
||||
|
||||
2. Rust formatting and lint/type checks:
|
||||
- `scripts/fmt.sh --check`
|
||||
- `(cd rust && cargo check --workspace)`
|
||||
- `(cd rust && cargo clippy --workspace --all-targets -- -D warnings)`
|
||||
|
||||
3. Targeted G002 security tests:
|
||||
- `(cd rust && cargo test -p runtime permissions::tests permission_enforcer::tests bash_validation::tests file_ops::tests)`
|
||||
- `(cd rust && cargo test -p rusty-claude-cli --test mock_parity_harness)`
|
||||
|
||||
4. Full regression:
|
||||
- `(cd rust && cargo test --workspace)`
|
||||
|
||||
|
||||
## Worker-4 verification evidence (2026-05-14)
|
||||
|
||||
PASS:
|
||||
|
||||
- `python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json` → `PASS cc2 board validation`; 729 items; ROADMAP headings `124/124`; ROADMAP actions `542/542`.
|
||||
- `python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json` → `PASS issue/parity intake: 19 issue rows, 9 parity rows`.
|
||||
- `scripts/fmt.sh --check` → produced no output and exited with status zero, so the subsequent Rust checks continued.
|
||||
- `(cd rust && cargo check --workspace)` → `Finished dev profile` successfully.
|
||||
- `(cd rust && cargo test -p runtime permissions::tests)` → 9 passed.
|
||||
- `(cd rust && cargo test -p runtime permission_enforcer::tests)` → 21 passed.
|
||||
- `(cd rust && cargo test -p runtime bash_validation::tests)` → 32 passed.
|
||||
- `(cd rust && cargo test -p runtime file_ops::tests)` → 14 passed.
|
||||
- `(cd rust && cargo test -p rusty-claude-cli --test mock_parity_harness)` → 1 passed.
|
||||
|
||||
FAIL / integration blockers observed on this worktree:
|
||||
|
||||
- `(cd rust && cargo clippy --workspace --all-targets -- -D warnings)` failed in existing runtime code, not this docs-only task:
|
||||
- `rust/crates/runtime/src/compact.rs:215` / `:216`: `clippy::match_same_arms`.
|
||||
    - `rust/crates/runtime/src/policy_engine.rs:5`: `clippy::duration_suboptimal_units`.
|
||||
- `rust/crates/runtime/src/sandbox.rs:295-302`: `clippy::map_unwrap_or`.
|
||||
- `(cd rust && cargo test --workspace)` failed after broad success in API/commands/plugins/runtime tests because `rusty-claude-cli` unit test `tests::session_lifecycle_prefers_running_process_over_idle_shell` asserted `RunningProcess` but observed `IdleShell`.
|
||||
- Rerun of the specific failing test confirmed deterministic failure: `(cd rust && cargo test -p rusty-claude-cli --bin claw tests::session_lifecycle_prefers_running_process_over_idle_shell -- --exact --nocapture)` → 0 passed, 1 failed with the same `IdleShell` vs `RunningProcess` assertion.
|
||||
|
||||
Recommended owner for failures: not `worker-4` unless re-scoped. These failures are outside the docs/report artifact and touch shared runtime/CLI implementation files.
|
||||
96
docs/g003-boot-session-verification-map.md
Normal file
96
docs/g003-boot-session-verification-map.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# G003 boot/session/preflight verification map
|
||||
|
||||
Generated by `worker-1` for OMX team task 2 on 2026-05-14.
|
||||
|
||||
## Scope and coordination
|
||||
|
||||
- Active goal context: `G003-boot-session` / Stream 1 reliable worker boot and session control.
|
||||
- Boundary: this artifact is an audit/integration map only. It does not mutate `.omx/ultragoal` and it does not change shared implementation or tests.
|
||||
- Current worker split from leader mailbox:
|
||||
- `worker-1`: task 1 worker boot / prompt SLA plus this task 2 audit map.
|
||||
- `worker-2`: default trusted roots / trust resolver.
|
||||
- `worker-3`: startup-no-evidence classifier.
|
||||
- `worker-4`: session control plus preflight/doctor JSON surfaces.
|
||||
- Native subagent probes were attempted for Task 2 (`test probe` and `debug/root-cause probe`) but both failed before returning findings with `429 Too Many Requests`; the map below is based on direct repository inspection.
|
||||
|
||||
## Implementation surface map
|
||||
|
||||
### Worker boot lifecycle and prompt SLA
|
||||
|
||||
- `rust/crates/runtime/src/worker_boot.rs`
|
||||
- Core state types: `WorkerStatus`, `WorkerFailureKind`, `WorkerEventKind`, `WorkerEventPayload`, `StartupFailureClassification`, `StartupEvidenceBundle`, `WorkerTaskReceipt`, and `WorkerReadySnapshot`.
|
||||
- Control plane: `WorkerRegistry::{create,get,observe,resolve_trust,send_prompt,await_ready,restart,terminate,observe_completion,observe_startup_timeout}`.
|
||||
- Lifecycle states currently covered in code: `spawning`, `trust_required`, `tool_permission_required`, `ready_for_prompt`, `running`, `finished`, and `failed`.
|
||||
- Prompt delivery semantics currently use `Running` events and fields `prompt_in_flight`, `last_prompt`, `expected_receipt`, `replay_prompt`, and `prompt_delivery_attempts`.
|
||||
- Startup-no-evidence surface: `observe_startup_timeout` builds `StartupEvidenceBundle` and classifies trust, tool permission, prompt acceptance timeout, prompt misdelivery, transport death, worker crash, or unknown.
|
||||
- File observability surface: `emit_state_file` writes `.claw/worker-state.json` with status, readiness, trust state, prompt-in-flight flag, last event, and update age.
|
||||
|
||||
- `rust/crates/tools/src/lib.rs`
|
||||
- Tool APIs expose the worker control plane through `WorkerCreate`, `WorkerGet`, `WorkerObserve`, `WorkerResolveTrust`, `WorkerAwaitReady`, `WorkerSendPrompt`, `WorkerRestart`, `WorkerTerminate`, and `WorkerObserveCompletion`.
|
||||
- `WorkerCreate` merges `ConfigLoader::trusted_roots()` with per-call `trusted_roots` before calling `WorkerRegistry::create`.
|
||||
- Tool-level tests exercise worker create/observe/send/restart/terminate/completion and state-file transitions.
|
||||
|
||||
### Trust resolver and default trusted roots
|
||||
|
||||
- `rust/crates/runtime/src/trust_resolver.rs`
|
||||
- `TrustConfig`, `TrustAllowlistEntry`, and `TrustResolver` model trust prompts, allowlist/denylist policy, auto-trust, manual approval, and emitted trust events.
|
||||
- `path_matches_trusted_root` and internal `path_matches` canonicalize paths when possible.
|
||||
- Hazard: prefix matching must avoid accidental sibling matches such as `/tmp/work` matching `/tmp/work-evil`; worker-2 owns any changes here.
|
||||
|
||||
- `rust/crates/runtime/src/config.rs`
|
||||
- `trustedRoots` is parsed by `parse_optional_trusted_roots` and exposed through `RuntimeConfig::trusted_roots()` / feature config accessors.
|
||||
- Current default is empty when unset; any project default roots work belongs to worker-2.
|
||||
|
||||
### Session control
|
||||
|
||||
- `rust/crates/runtime/src/session_control.rs`
|
||||
- `SessionStore` namespaces sessions by canonical workspace fingerprint.
|
||||
- Key API: `from_cwd`, `from_data_dir`, `create_handle`, `resolve_reference`, `resolve_managed_path`, `list_sessions`, `latest_session`, `load_session`, and `fork_session`.
|
||||
- Guardrail: `validate_loaded_session` rejects cross-workspace sessions and allows legacy sessions only when their path remains inside the current workspace.
|
||||
- Worker-4 owns changes to this lane.
|
||||
|
||||
### CLI doctor/status/preflight and bootstrap-adjacent surfaces
|
||||
|
||||
- `rust/crates/commands/src/lib.rs`
|
||||
- Slash command definitions include `/status`, `/sandbox`, and `/doctor`.
|
||||
- JSON rendering for command surfaces exists through handler functions and tests in the same module.
|
||||
|
||||
- `rust/crates/tools/src/lib.rs`
|
||||
- Bash and PowerShell tool runners include `workspace_test_branch_preflight`, which returns structured output with `return_code_interpretation: preflight_blocked:branch_divergence` for broad workspace tests on stale branches.
|
||||
- Tests around `bash_workspace_tests_are_blocked_when_branch_is_behind_main` and targeted-test skipping protect this preflight behavior.
|
||||
|
||||
## Existing focused verification commands
|
||||
|
||||
Run from `rust/` unless noted.
|
||||
|
||||
- Worker boot runtime contract:
|
||||
- `cargo test -p runtime worker_boot -- --nocapture`
|
||||
- Worker tool API contract:
|
||||
- `cargo test -p tools worker_ -- --nocapture`
|
||||
- Session control contract:
|
||||
- `cargo test -p runtime session_control -- --nocapture`
|
||||
- Trust resolver/config trusted roots:
|
||||
- `cargo test -p runtime trust_resolver -- --nocapture`
|
||||
- `cargo test -p runtime config::tests::parses_trusted_roots_from_settings config::tests::trusted_roots_default_is_empty_when_unset -- --nocapture`
|
||||
- Preflight/tool branch guardrails:
|
||||
- `cargo test -p tools bash_workspace_tests_are_blocked_when_branch_is_behind_main bash_targeted_tests_skip_branch_preflight -- --nocapture`
|
||||
- Formatting/type/lint baseline:
|
||||
- `../scripts/fmt.sh --check`
|
||||
- `cargo check -p runtime -p tools -p commands`
|
||||
- `cargo clippy -p runtime -p tools -p commands --all-targets --no-deps -- -D warnings`
|
||||
|
||||
## Gaps and hazards for leader integration
|
||||
|
||||
- Prompt SLA event naming is partially implicit: `send_prompt` emits `WorkerEventKind::Running`; it does not expose separate `prompt.sent`, `prompt.accepted`, `prompt.acceptance_delayed`, or `prompt.acceptance_timeout` event names. The current equivalent evidence is `prompt_in_flight`, `Running`, `observe_completion`, and startup-timeout classification.
|
||||
- `StartupFailureClassification::PromptAcceptanceTimeout` is covered in `worker_boot` tests; full terminal/transport integration should still be verified by the leader or worker-3 if a real pane watcher exists outside the in-memory registry.
|
||||
- Default trusted roots are parsed and merged into `WorkerCreate`, but unset config currently means no default roots. Worker-2 owns any change to default root selection.
|
||||
- Session control protects workspace fingerprints at load/fork time; worker-4 owns CLI/doctor/preflight JSON contract changes.
|
||||
- Full-workspace clippy currently has known unrelated runtime findings observed during task 1 verification; do not block this docs-only map on those unless leader re-scopes cleanup.
|
||||
|
||||
## Recommended safe integration order
|
||||
|
||||
1. Integrate worker boot / prompt SLA changes first and run `cargo test -p runtime worker_boot -- --nocapture` plus `cargo test -p tools worker_ -- --nocapture`.
|
||||
2. Integrate trust-root changes and rerun trust/config tests plus the worker create config merge test.
|
||||
3. Integrate startup-no-evidence classifier changes and rerun `cargo test -p runtime worker_boot -- --nocapture`.
|
||||
4. Integrate session control / preflight / doctor JSON changes and rerun session-control, commands JSON, and preflight tests.
|
||||
5. Run final formatting, targeted cargo check/clippy, then broader workspace tests with known full-workspace failures documented separately.
|
||||
67
docs/g004-events-reports-contract.md
Normal file
67
docs/g004-events-reports-contract.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# G004 event and report contract guidance
|
||||
|
||||
Captured: 2026-05-14 during the Stream 2 `G004-events-reports` team run.
|
||||
|
||||
Purpose: keep the user/developer-facing contract guidance for ROADMAP Phase 2 in one tracked source that points back to the code and roadmap anchors. This document is intentionally not the implementation map for task 5; it describes the interoperability contract consumers should rely on as the lane-event, report-schema, approval-token, and capability-negotiation lanes land.
|
||||
|
||||
## Source-of-truth anchors
|
||||
|
||||
| Contract family | Roadmap anchor | Current implementation / owner-facing anchor | Consumer guidance |
|
||||
| --- | --- | --- | --- |
|
||||
| Canonical lane events | `ROADMAP.md` Phase 2 §4, §4.5, §4.6, §4.7 | `rust/crates/runtime/src/lane_events.rs` (`LaneEventName`, `LaneEventStatus`, `LaneEventMetadata`, terminal reconciliation helpers) | Consume `event`, `status`, `emittedAt`, and `metadata` fields as the canonical state stream; do not infer lane state from terminal text when a structured event is present. |
|
||||
| Report schema v1 and projections | `ROADMAP.md` §4.25-§4.34 | Stream 2 report-schema lane / fixtures as they land | Treat a report as a versioned canonical payload plus derived projections. A projection may omit or transform fields only with explicit provenance: compatibility downgrade, redaction policy, truncation, or source absence. |
|
||||
| Policy-blocked handoff and approval-token chain | `ROADMAP.md` §4.37-§4.39 | Stream 2 approval-token lane as it lands | Treat policy blocks and owner approvals as typed artifacts, not prose. Execute an exception only when the approval token matches actor, policy, action, repo/branch/commit scope, expiry, and one-time-use state. |
|
||||
| Capability negotiation | `ROADMAP.md` §4.25, §4.26, §4.32, §4.34 | Report-schema/projection fixtures and consumer conformance cases as they land | Consumers must advertise supported schema versions, optional field families, projection views, redaction semantics, and downgrade handling before relying on reduced payloads. |
|
||||
|
||||
## Lane event contract
|
||||
|
||||
The lane-event stream is the first machine-trustworthy surface for Stream 2. Consumers should expect these invariants when reading `LaneEvent` payloads:
|
||||
|
||||
- `event` is a typed event name, currently including the core lane lifecycle (`lane.started`, `lane.ready`, `lane.blocked`, `lane.red`, `lane.green`, `lane.finished`, `lane.failed`), branch health (`branch.stale_against_main`, `branch.workspace_mismatch`), reconciliation (`lane.reconciled`, `lane.superseded`, `lane.closed`), and ship provenance (`ship.prepared`, `ship.commits_selected`, `ship.merged`, `ship.pushed_main`).
|
||||
- `status` is the normalized state for the event; consumers should prefer it over freeform `detail` text for automation.
|
||||
- `metadata.seq`, `metadata.timestamp_ms`, and terminal fingerprints are the ordering/deduplication hooks. Consumers should use terminal reconciliation output rather than double-reporting contradictory terminal bursts.
|
||||
- `metadata.provenance`, `metadata.environment_label`, `metadata.emitter_identity`, and `metadata.confidence_level` tell consumers whether an event is live lane truth, test traffic, healthcheck/replay output, or transport-layer evidence.
|
||||
- `metadata.session_identity` and `metadata.ownership` bind a lane event to the session, workspace, workflow scope, owner, and watcher action. A watcher should not act on events whose ownership says `observe` or `ignore`.
|
||||
|
||||
Minimal consumer rule: if a structured event exists, pane text is supporting evidence only. Pane scraping must not override a higher-confidence typed event with matching session/workflow ownership.
|
||||
|
||||
## Report schema v1 contract
|
||||
|
||||
A Stream 2 report should be treated as a canonical fact record with optional projections. Consumers should preserve these semantics even when they receive only a downgraded view:
|
||||
|
||||
- Every report payload declares a schema version and a stable report identity/content hash for the full-fidelity canonical payload.
|
||||
- Assertions are labeled as `fact`, `hypothesis`, or another declared evidence class, with confidence and source references. Negative evidence is first-class: `not observed`, `checked and absent`, and `redacted` are distinct states.
|
||||
- Field deltas name the field, previous value/state, new value/state, attribution, and whether the delta came from source content, projection, downgrade, or redaction policy.
|
||||
- Projections carry lineage back to the canonical report id/content hash and name the projection view, capability set, schema version, redaction policy, and deterministic rendering inputs.
|
||||
- Redaction provenance is explicit. A missing field without a redaction/downgrade/source-absence reason is not enough evidence for an automated consumer to conclude the underlying fact is absent.
|
||||
|
||||
Minimal consumer rule: store the canonical identity and projection metadata together. Do not compare two projections as state changes unless their canonical content hash or declared projection inputs differ.
|
||||
|
||||
## Approval-token and policy-blocked contract
|
||||
|
||||
Policy-blocked actions and owner-approved exceptions belong in the same structured event/report family:
|
||||
|
||||
- A policy block names the typed reason, policy source, actor scope, blocked action, and safe fallback path.
|
||||
- An approval token names the approving actor, policy exception, action, repository/worktree/branch/commit scope, expiry, and allowed use count.
|
||||
- Token consumption records the exact action and scope that spent the token. Replays, scope expansion, expired tokens, and revoked tokens should surface typed policy errors.
|
||||
- Delegation traceability stays attached when another worker/lane executes the approved action; the executor must be able to prove which approval artifact authorized the exception.
|
||||
|
||||
Minimal consumer rule: prose such as "approved" is not an executable approval. Require the structured token and verify that it is unconsumed and scoped to the exact action before proceeding.
|
||||
|
||||
## Capability negotiation and conformance
|
||||
|
||||
Mixed-version consumers are expected during Stream 2 rollout. Producers and consumers should negotiate instead of silently dropping fields:
|
||||
|
||||
- Consumers advertise supported report schema versions, field families, projection views, redaction states, downgrade semantics, and fixture/conformance suite version.
|
||||
- Producers preserve one canonical full-fidelity report and emit downgraded projections only with `downgraded_for_compatibility` metadata.
|
||||
- Deterministic projection inputs include schema version, consumer capability set, projection policy version, redaction policy version, and canonical content hash.
|
||||
- Consumer conformance should distinguish syntax acceptance from semantic correctness, especially for `redacted` vs `missing`, stale vs current projections, negative evidence, and approval-token replay states.
|
||||
|
||||
Minimal consumer rule: an older consumer may accept a downgraded projection, but it must surface the downgrade as a capability limitation rather than treating omitted fields as canonical absence.
|
||||
|
||||
## Documentation maintenance rules
|
||||
|
||||
- Keep ROADMAP Phase 2 as the product requirement source and this file as the contract-reading guide.
|
||||
- Keep Rust type names and event names aligned with `rust/crates/runtime/src/lane_events.rs`; update this document in the same change when public event names or metadata semantics change.
|
||||
- Keep report-schema examples/fixtures aligned with this guide once the schema lane lands; fixture updates should explain intentional schema or projection changes.
|
||||
- Do not mutate `.omx/ultragoal` from worker lanes. Leader-owned Ultragoal checkpointing consumes commits and verification evidence from task results.
|
||||
57
docs/g004-events-reports-verification-map.md
Normal file
57
docs/g004-events-reports-verification-map.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# G004 events/reports verification map
|
||||
|
||||
Scope source: OMX team `g004-events-reports-u-e61d2271`, worker-1 tasks 1, 2, 4, 5. Workers must not mutate `.omx/ultragoal`; leader owns aggregate checkpoints.
|
||||
|
||||
## Ownership boundaries
|
||||
|
||||
- **Lane events / event identity / terminal reconciliation** — `rust/crates/runtime/src/lane_events.rs`, exported through `rust/crates/runtime/src/lib.rs`; tool-manifest consumers in `rust/crates/tools/src/lib.rs` write `LaneEvent` vectors.
|
||||
- **Report schema v1 / projection / redaction / capability negotiation** — `rust/crates/runtime/src/report_schema.rs`, exported through `rust/crates/runtime/src/lib.rs`; fixture note at `rust/crates/runtime/tests/fixtures/report_schema_v1/README.md`.
|
||||
- **Approval-token chain** — ROADMAP §§4.38-4.40; owned by worker-2 for this team split. Worker-1 did not edit it.
|
||||
- **Pinpoint closure batch** — runtime hygiene across compact/search-parser/policy/sandbox/integration-test surfaces: `rust/crates/runtime/src/compact.rs`, `rust/crates/runtime/src/file_ops.rs`, `rust/crates/runtime/src/policy_engine.rs`, `rust/crates/runtime/src/sandbox.rs`, `rust/crates/runtime/tests/integration_tests.rs`.
|
||||
- **Regression harness / docs alignment** — worker-3/worker-4 lanes per leader split. Coordinate before editing shared docs/tests.
|
||||
|
||||
## Relevant symbols and files
|
||||
|
||||
- `LaneEventName`, `LaneEventStatus`, `LaneEventMetadata`, `LaneEventBuilder`, `compute_event_fingerprint`, `dedupe_terminal_events`, `reconcile_terminal_events` in `runtime/src/lane_events.rs`.
|
||||
- `CanonicalReportV1`, `ReportClaim`, `NegativeEvidence`, `FieldDelta`, `ConsumerCapabilities`, `ReportProjectionV1`, `canonicalize_report`, `project_report`, `report_schema_v1_registry` in `runtime/src/report_schema.rs`.
|
||||
- `AgentOutput.lane_events`, `persist_agent_terminal_state`, `write_agent_manifest`, `maybe_commit_provenance` in `tools/src/lib.rs`.
|
||||
- Search/parser closure helpers: `summarize_messages` in `compact.rs`, `grep_search_impl` / `build_grep_content_output` in `file_ops.rs`.
|
||||
|
||||
## Completed worker-1 commits
|
||||
|
||||
- `f45f05e` / task 1 auto-checkpoint — terminal event fingerprints use stable SHA-256-derived canonical JSON, and production convenience terminal events attach/refresh fingerprints after payload changes.
|
||||
- `3989fc0` — report schema v1 contract, deterministic projection/redaction provenance, capability negotiation, and fixture note.
|
||||
- `7fff4c4` / task 4 auto-checkpoint — strict runtime clippy closure batch across compact/file_ops/policy/sandbox/integration tests.
|
||||
|
||||
## Current verification evidence
|
||||
|
||||
Run from `rust/` unless noted:
|
||||
|
||||
- `cargo test -p runtime lane_events -- --nocapture` — PASS, 46 lane-event tests.
|
||||
- `cargo test -p runtime report_schema -- --nocapture` — PASS, 4 report-schema tests.
|
||||
- `cargo check -p runtime` — PASS.
|
||||
- `cargo clippy -p runtime --all-targets -- -D warnings` — PASS after task 4 closure batch.
|
||||
- `cargo test -p runtime -- --nocapture` — PASS, 531 unit tests, 12 integration tests, doc-tests pass.
|
||||
- `cargo test -p tools lane_event_schema_serializes_to_canonical_names -- --nocapture` — PASS, 1 targeted tools contract test.
|
||||
|
||||
## Leader integration verification plan
|
||||
|
||||
1. Inspect worker commits: `git log --oneline --decorate --max-count=8`.
|
||||
2. Re-run focused contracts:
|
||||
- `cd rust && cargo test -p runtime lane_events -- --nocapture`
|
||||
- `cd rust && cargo test -p runtime report_schema -- --nocapture`
|
||||
- `cd rust && cargo test -p tools lane_event_schema_serializes_to_canonical_names -- --nocapture`
|
||||
3. Re-run runtime quality gate:
|
||||
- `cd rust && cargo check -p runtime`
|
||||
- `cd rust && cargo clippy -p runtime --all-targets -- -D warnings`
|
||||
- `cd rust && cargo test -p runtime -- --nocapture`
|
||||
4. If merging with worker-2 approval-token work, additionally run the worker-2 focused approval-token tests and check for export conflicts in `runtime/src/lib.rs`.
|
||||
5. If merging with worker-3/4 docs or harness work, re-run their named regression harnesses plus `git diff --check`.
|
||||
|
||||
## Integration hazards
|
||||
|
||||
- `runtime/src/lib.rs` export blocks are shared; resolve conflicts by keeping both the lane-event and report-schema exports present and consistently sorted so the export block stays readable.
|
||||
- `tools/src/lib.rs` serializes lane events into agent manifests; terminal fingerprint changes intentionally affect `metadata.event_fingerprint` for finished/failed/superseded/merged/closed events with payloads.
|
||||
- `report_schema.rs` currently defines the reusable contract and in-code deterministic fixtures; it does not yet wire report emission into CLI/status surfaces.
|
||||
- ROADMAP approval-token §§4.38-4.40 remain a separate lane; do not treat worker-1 report schema as an approval artifact.
|
||||
- Full workspace checks may include unrelated slow/provider-dependent tests; the verified local gate for this stream is runtime + targeted tools tests above.
|
||||
40
docs/g005-branch-recovery-verification-map.md
Normal file
40
docs/g005-branch-recovery-verification-map.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# G005 Branch Recovery Verification Map
|
||||
|
||||
Scope: worker-1 follow-up map for G005 branch/test awareness and recovery. This file intentionally does not mutate leader-owned `.omx/ultragoal` state.
|
||||
|
||||
## Covered ROADMAP / PRD pinpoints
|
||||
|
||||
- `ROADMAP.md:912-921` — Phase 3 §7 stale-branch detection before broad verification: broad workspace test commands are preflighted before execution, stale/diverged branches emit `branch.stale_against_main`, and targeted tests bypass the broad-test gate.
|
||||
- `ROADMAP.md:922-933` — Phase 3 §8 recovery recipes: stale-branch recovery remains represented by the `stale_branch` recipe, with one automatic attempt before escalation.
|
||||
- `ROADMAP.md:935-949` — Phase 3 §8.5 recovery attempt ledger: `RecoveryContext` now exposes ledger entries with recipe id, attempt count, state, started/finished markers, last failure summary, and escalation reason.
|
||||
- `ROADMAP.md:951-970` — Phase 3 §9 green-ness / hung-test reporting: timed-out test commands now classify as `test.hung` with structured provenance instead of generic timeout.
|
||||
- `prd.json:37-44` — US-003 stale-branch detection before broad verification: verified through the `workspace_test_branch_preflight` broad-test block and targeted-test bypass tests.
|
||||
- `prd.json:50-57` — US-004 recovery recipes with ledger: verified through recovery ledger unit coverage and serialization-compatible recovery structs.
|
||||
|
||||
## Implementation anchors
|
||||
|
||||
- `rust/crates/runtime/src/stale_branch.rs` — existing branch freshness model and policy actions for fresh, stale, and diverged branches.
|
||||
- `rust/crates/tools/src/lib.rs` — `workspace_test_branch_preflight`, `branch_divergence_output`, Bash/PowerShell broad-test gating, and `test.hung` structured timeout provenance on tool-shell timeouts.
|
||||
- `rust/crates/runtime/src/recovery_recipes.rs` — recovery recipes plus `RecoveryLedgerEntry` / `RecoveryAttemptState` ledger surface.
|
||||
- `rust/crates/runtime/src/bash.rs` — runtime Bash timeout classification and structured provenance for hung test commands.
|
||||
- `rust/crates/runtime/src/lib.rs` — public exports for the recovery ledger types.
|
||||
|
||||
## Verification evidence
|
||||
|
||||
- `cargo test -p runtime` → PASS: 538 unit tests, 2 G004 conformance tests, 12 integration tests, and doctests passed.
|
||||
- `cargo test -p tools bash_tool_classifies_test_timeout_as_hung_with_provenance -- --nocapture` → PASS.
|
||||
- `cargo test -p tools bash_workspace_tests_are_blocked_when_branch_is_behind_main -- --nocapture` → PASS.
|
||||
- `cargo test -p tools bash_targeted_tests_skip_branch_preflight -- --nocapture` → PASS.
|
||||
- `cargo check -p runtime -p tools` → PASS.
|
||||
- `cargo clippy -p runtime --all-targets -- -D warnings` → PASS.
|
||||
- `cargo clippy -p tools --lib --no-deps -- -D warnings` → PASS.
|
||||
|
||||
## Known unresolved / out-of-scope items
|
||||
|
||||
- Full `cargo test -p tools` is still red on six permission-enforcer expectation tests unrelated to G005 branch freshness, recovery ledger, or hung-test classification. The failing tests assert old permission wording/read-only behavior and predate this follow-up's scope.
|
||||
- ROADMAP stale-base JSON/doctor/status pinpoints remain broader CLI diagnostic-surface work, especially `ROADMAP.md:2425-2489`, `ROADMAP.md:4346-4431`, and `ROADMAP.md:5061-5086`. They are related to branch freshness, but task 1 only required the broad-test freshness gate and narrow reporting surfaces.
|
||||
- No `.omx/ultragoal` files were changed; leader-owned Ultragoal checkpointing remains outside worker scope.
|
||||
|
||||
## Delegation evidence
|
||||
|
||||
Subagent spawn evidence: one repository map probe (`019e25d5-9be9-7193-8a33-f21450beb62c`), spawned before further serial task-2 mapping per contract; it errored with 429 Too Many Requests, so direct repository evidence was integrated instead.
|
||||
34
docs/g006-task-policy-board-verification-map.md
Normal file
34
docs/g006-task-policy-board-verification-map.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# G006 Task Policy Board Verification Map
|
||||
|
||||
Goal: `G006-task-policy-board` — Stream 4 task packets, executable policy engine, lane board/status JSON, and running-state liveness heartbeat.
|
||||
|
||||
## Prompt-to-artifact checklist
|
||||
|
||||
| Requirement | Artifact/evidence |
|
||||
| --- | --- |
|
||||
| Typed task packet schema with objective, scope, files/resources, acceptance criteria, model/provider, permission profile, recovery policy, verification plan, reporting targets | `rust/crates/runtime/src/task_packet.rs` extends `TaskPacket` with `acceptance_criteria`, `resources`, `model`, `provider`, `permission_profile`, `recovery_policy`, `verification_plan`, and `reporting_targets`; tests cover legacy defaulted JSON and rich CC2 roundtrip. |
|
||||
| Backwards compatibility for existing task packets and tool callers | `serde(default)`/optional fields in `task_packet.rs`; `rust/crates/tools/src/lib.rs` `run_task_packet_creates_packet_backed_task` updated for rich schema; legacy packet test keeps old JSON accepted. |
|
||||
| Executable policy decisions for retry/rebase/merge/escalate/stale cleanup/approval token | `rust/crates/runtime/src/policy_engine.rs` adds `RetryAvailable`, `RebaseRequired`, `StaleCleanupRequired`, approval-token conditions/actions, `PolicyEvaluation`, `PolicyDecisionEvent`, and decision-table tests. |
|
||||
| Policy decisions explainable and typed-event logged/emittable | `PolicyDecisionEvent` serializable typed event with `rule_name`, `priority`, `kind`, `explanation`, `approval_token_id`; `evaluate_with_events` emits event per flattened action. |
|
||||
| Active lane board/dashboard/status JSON over canonical state | `rust/crates/runtime/src/task_registry.rs` adds `LaneBoard`, `LaneBoardEntry`, `LaneFreshness`, `lane_board_at`, and `lane_status_json_at`; CLI status JSON advertises lane board contract in `rust/crates/rusty-claude-cli/src/main.rs`. |
|
||||
| Heartbeats independent of terminal rendering with healthy/stalled/transport-dead cases | `rust/crates/runtime/src/session.rs` adds `SessionHeartbeat`/`SessionLiveness` from persisted session health state; `task_registry.rs` heartbeat freshness is computed from canonical heartbeat timestamps and transport state. |
|
||||
| Task/lane status JSON shows active/blocked/finished lanes with heartbeat freshness | `task_registry::tests::lane_board_groups_active_blocked_finished_and_reports_freshness`; `status_json_surfaces_session_lifecycle_for_clawhip`/status JSON surfaces lane board metadata. |
|
||||
| Leader-owned ultragoal audit remains separate from workers | No worker changed `.omx/ultragoal`; leader will checkpoint with fresh `get_goal` only after terminal verification. |
|
||||
|
||||
## Verification run
|
||||
|
||||
- `git diff --check` — PASS
|
||||
- `cargo fmt --manifest-path rust/Cargo.toml --all -- --check` — PASS
|
||||
- `cargo check --manifest-path rust/Cargo.toml -p runtime -p tools -p rusty-claude-cli` — PASS
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime task_packet -- --nocapture` — PASS (5 task packet tests)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime policy_engine -- --nocapture` — PASS (12 unit + 1 integration match)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime task_registry -- --nocapture` — PASS (17 task registry tests)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime session_heartbeat -- --nocapture` — PASS (1 heartbeat test)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p tools run_task_packet_creates_packet_backed_task -- --nocapture` — PASS
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p tools lane_completion -- --nocapture` — PASS (6 tests)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli status_json_surfaces -- --nocapture` — PASS
|
||||
|
||||
## Remaining gates
|
||||
|
||||
- G006 can be checkpointed after team lifecycle is reconciled terminal and this commit is pushed.
|
||||
- Open PR/issue reconciliation remains explicitly deferred to G011/G012 via `docs/pr-issue-resolution-gate.md`.
|
||||
55
docs/g007-mcp-lifecycle-mapping.md
Normal file
55
docs/g007-mcp-lifecycle-mapping.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# G007 MCP Lifecycle Mapping
|
||||
|
||||
This map captures the current MCP/plugin lifecycle implementation surfaces for the
|
||||
G007 plugin/MCP maturity lane. It is intentionally evidence-oriented: each row
|
||||
names the runtime surface, the code owner boundary, and the current gap when the
|
||||
surface is metadata-only.
|
||||
|
||||
## Degraded MCP startup
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Best-effort discovery | `rust/crates/runtime/src/mcp_stdio.rs` (`McpServerManager::discover_tools_best_effort`) | Discovers every configured stdio server, keeps tools from working servers, and records per-server failures without aborting the whole startup. |
|
||||
| Failure payload | `rust/crates/runtime/src/mcp_stdio.rs` (`McpDiscoveryFailure`, `UnsupportedMcpServer`) | Failure records include `server_name`, lifecycle `phase`, `required`, `error`, `recoverable`, and structured `context`. Unsupported non-stdio servers keep `transport`, `required`, and `reason`. |
|
||||
| Degraded report model | `rust/crates/runtime/src/mcp_lifecycle_hardened.rs` (`McpDegradedReport`, `McpFailedServer`, `McpErrorSurface`) | Normalizes degraded startup into working servers, failed servers, available tools, and missing tools. `McpErrorSurface` carries phase, server, message, context, and recoverability. |
|
||||
| CLI runtime handoff | `rust/crates/rusty-claude-cli/src/main.rs` (`RuntimeMcpState::new`) | Converts discovery failures and unsupported servers into a runtime degraded report, including `required` in the error context. |
|
||||
|
||||
## Required vs. optional MCP servers
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Config contract | `rust/crates/runtime/src/config.rs` (`ScopedMcpServerConfig.required`) | `mcpServers.<name>.required` parses as a boolean and defaults to `false`; invalid non-boolean values are rejected by the shared optional-bool parser. |
|
||||
| Scope merge | `rust/crates/runtime/src/config.rs` (`merge_mcp_servers`) | Requiredness is stored beside the scope and transport-specific config after normal user/project/local merging. |
|
||||
| Inventory/reporting | `rust/crates/commands/src/lib.rs` (`mcp_server_json`, `render_mcp_server_report`) | JSON reports expose `server.required`; text `show` reports include `Required`. |
|
||||
| Discovery propagation | `rust/crates/runtime/src/mcp_stdio.rs` | Requiredness is copied into managed stdio servers, unsupported server records, discovery failures, and degraded startup context. |
|
||||
| Cache/signature identity | `rust/crates/runtime/src/mcp.rs` (`scoped_mcp_config_hash`) | The hash includes `required:<bool>` so required/optional changes affect MCP config identity. |
|
||||
| Remaining policy gap | runtime behavior | The flag is currently surfaced and propagated as lifecycle metadata. It does not yet fail the whole runtime/session solely because a required server failed; consumers must inspect the degraded report context today. |
|
||||
|
||||
## Config interpolation and redaction surfaces
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Raw config parsing | `rust/crates/runtime/src/config.rs` (`parse_mcp_server_config`, `parse_mcp_remote_server_config`) | `command`, `args`, `url`, `headers`, and `headersHelper` are loaded as literal strings. No dedicated environment, tilde, or workspace-root interpolation pass is present in this parser. |
|
||||
| Redacted key reporting | `rust/crates/commands/src/lib.rs` (`mcp_server_details_json`, `render_mcp_server_report`) | Stdio env and remote/websocket header values are not printed; only `env_keys` / `Header keys` are surfaced. |
|
||||
| Unredacted reporting risk | `rust/crates/commands/src/lib.rs` (`mcp_server_summary`, `mcp_server_details_json`, text `show`) | Command, args, URL, `headers_helper`, OAuth metadata URL/client id, and managed proxy URL/id are currently emitted verbatim. Treat these fields as not-redacted unless a future policy layer classifies them safe. |
|
||||
| OAuth exposure | `rust/crates/commands/src/lib.rs` (`mcp_oauth_json`, `format_mcp_oauth`) | OAuth secret-like values are mostly absent from the current config model, but client id and metadata URL are still reported directly. |
|
||||
|
||||
## Plugin lifecycle contract adjacency
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Manifest lifecycle | `rust/crates/plugins/src/lib.rs` (`PluginLifecycle`) | Plugin manifests support `lifecycle.Init` and `lifecycle.Shutdown` command arrays. |
|
||||
| Registry summary | `rust/crates/plugins/src/lib.rs` (`PluginSummary::lifecycle_state`) | Installed summaries include enabled state, lifecycle commands, and derived lifecycle state (`ready` or `disabled`). Load failures remain first-class in registry reports. |
|
||||
| CLI JSON output | `rust/crates/rusty-claude-cli/src/main.rs` (`plugin_command_json`) | Plugin command JSON emits top-level `status`, per-plugin `lifecycle_state` and lifecycle command counts, plus `load_failures` with `lifecycle_state: load_failed`. |
|
||||
|
||||
## Verification anchors
|
||||
|
||||
The current regression anchors for this map are:
|
||||
|
||||
- `cargo test -p runtime parses_typed_mcp_and_oauth_config -- --nocapture`
|
||||
- `cargo test -p runtime manager_discovery_report_keeps_healthy_servers_when_one_server_fails -- --nocapture`
|
||||
- `cargo test -p runtime manager_records_unsupported_non_stdio_servers_without_panicking -- --nocapture`
|
||||
- `cargo test -p commands renders_mcp_reports -- --nocapture`
|
||||
- `cargo test -p plugins installed_plugin_registry_report_collects_load_failures_from_install_root -- --nocapture`
|
||||
- `cargo test -p rusty-claude-cli --test output_format_contract plugins_json_surfaces_lifecycle_contract_when_plugin_is_installed -- --nocapture`
|
||||
|
||||
54
docs/g007-plugin-mcp-verification-map.md
Normal file
54
docs/g007-plugin-mcp-verification-map.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# G007 Plugin/MCP Lifecycle Verification Map
|
||||
|
||||
Goal: `G007-plugin-mcp` — Stream 5 plugin/MCP lifecycle maturity from ROADMAP Phase 5.
|
||||
|
||||
Scope: worker-2 follow-up map for W4 mock integration and regression verification. This file intentionally does not mutate leader-owned `.omx/ultragoal` state.
|
||||
|
||||
## Covered ROADMAP / CC2 anchors
|
||||
|
||||
- `ROADMAP.md:55-57` — Current pain point §6: plugin/MCP startup failures, handshake failures, config errors, partial startup, and degraded mode need clean classification.
|
||||
- `ROADMAP.md:67` — Product principle §5: MCP partial success must be first-class and structurally report successful and failed servers.
|
||||
- `ROADMAP.md:1033-1059` — Phase 5: first-class plugin/MCP lifecycle contract and MCP end-to-end lifecycle parity.
|
||||
- `.omx/cc2/board.md` Stream 5 active headings: `CC2-RM-H0010`, `CC2-RM-H0080`, `CC2-RM-H0081`, and `CC2-RM-H0082` remain the goal-level source-of-truth anchors for plugin/MCP lifecycle maturity.
|
||||
- `PARITY.md` harness checklist: mock parity scenarios are the executable regression surface for streamed model turns, plugin tool roundtrips, permissions, compaction metadata, and token/cost output.
|
||||
|
||||
## Mock integration anchors
|
||||
|
||||
| Area | Artifact/evidence |
|
||||
| --- | --- |
|
||||
| Deterministic model server | `rust/crates/mock-anthropic-service/src/lib.rs` implements the Anthropic-compatible mock server and scenario router used by CLI parity tests. |
|
||||
| End-to-end CLI mock harness | `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs` starts the mock server, runs clean-environment `claw` commands, asserts JSON output, and optionally writes a machine-readable report via `MOCK_PARITY_REPORT_PATH`. |
|
||||
| Scenario manifest / docs parity guard | `rust/mock_parity_scenarios.json` is required to stay ordered with harness cases; `rust/scripts/run_mock_parity_diff.py --no-run` verifies every manifest `parity_refs[]` string exists in `PARITY.md`. |
|
||||
| Convenience runner | `rust/scripts/run_mock_parity_harness.sh` runs `cargo test -p rusty-claude-cli --test mock_parity_harness -- --nocapture`. |
|
||||
| Plugin-path regression | `plugin_tool_roundtrip` loads an external plugin fixture from isolated settings and executes `plugin_echo` through the runtime tool registry. |
|
||||
| Lifecycle-adjacent regression | `auto_compact_triggered` and `token_cost_reporting` prove runtime JSON keeps compaction and usage/cost fields parseable under mock responses, preventing parity drift in machine-readable output. |
|
||||
| MCP degraded-startup regression | `rust/crates/runtime/src/mcp_stdio.rs::manager_discovery_report_keeps_healthy_servers_when_one_server_fails` proves a healthy MCP server remains callable while a broken peer is surfaced in a structured degraded report. |
|
||||
| Plugin lifecycle state regression | `rust/crates/runtime/src/plugin_lifecycle.rs` unit tests cover healthy, degraded, failed, and shutdown states plus startup-event mapping. |
|
||||
|
||||
## Regression verification commands
|
||||
|
||||
Use the smallest command that proves the changed or audited surface, then broaden only when integration risk requires it.
|
||||
|
||||
- Mock scenario/docs map only:
|
||||
- `cd rust && python3 scripts/run_mock_parity_diff.py --no-run`
|
||||
- Full mock integration:
|
||||
- `cd rust && cargo test -p rusty-claude-cli --test mock_parity_harness -- --nocapture`
|
||||
- `cd rust && python3 scripts/run_mock_parity_diff.py`
|
||||
- Plugin/MCP lifecycle contract:
|
||||
- `cd rust && cargo test -p runtime plugin_lifecycle -- --nocapture`
|
||||
- `cd rust && cargo test -p runtime mcp_stdio::tests::manager_discovery_report_keeps_healthy_servers_when_one_server_fails -- --exact --nocapture`
|
||||
- Standard Rust gates for implementation changes touching these surfaces:
|
||||
- `cd rust && cargo fmt --all -- --check`
|
||||
- `cd rust && cargo check -p runtime -p rusty-claude-cli -p mock-anthropic-service`
|
||||
- `cd rust && cargo clippy -p runtime --all-targets -- -D warnings`
|
||||
|
||||
## Known gaps / follow-ups
|
||||
|
||||
- The mock parity harness validates plugin tool execution but does not yet spin up a real MCP stdio server through the CLI prompt path; MCP degraded-startup remains covered by runtime manager tests.
|
||||
- Worker-4 owns the plugin command fallthrough regression implementation lane (`task-10`); this map records the verification/docs boundary and should not duplicate that parser work.
|
||||
- Full `cargo clippy -p runtime --all-targets -- -D warnings` can be blocked by unrelated `policy_engine.rs` clippy violations in this worktree; when that happens, report the exact pre-existing diagnostics and keep focused lifecycle tests green.
|
||||
- No `.omx/ultragoal` files were changed; leader-owned Ultragoal checkpointing remains outside worker scope.
|
||||
|
||||
## Delegation evidence
|
||||
|
||||
Subagent spawn evidence: Task 9 spawned repository map probe `019e291d-e700-7171-b7bc-27ec0f6c850f`, debug/root-cause probe `019e291d-e86f-78d0-a137-214ede03285c`, and test/docs probe `019e291e-135c-79e1-80d0-9fd82866bd6e` before deeper local inspection. The repository-map probe errored with 429 Too Many Requests; the remaining probes had not returned by the time the local verification map was grounded in repository evidence, so the direct findings above were integrated instead.
|
||||
89
docs/g009-windows-docs-release-verification-map.md
Normal file
89
docs/g009-windows-docs-release-verification-map.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# G009 Windows docs/release readiness verification map
|
||||
|
||||
## Scope and source
|
||||
|
||||
This map ties the Stream 8 acceptance target from `.omx/plans/claw-code-2-0-adaptive-plan.md` to repository artifacts and local verification. It is the worker-1 integration lane artifact; it does not mutate `.omx/ultragoal` and avoids duplicating peer implementation lanes for Windows CI, install/provider docs, and policy/link work.
|
||||
|
||||
Stream 8 source requirement summary:
|
||||
|
||||
- PowerShell-first docs and CLI examples.
|
||||
- Safe provider switching examples.
|
||||
- Staged packaging path: source-only alpha first, binary release matrix next, package managers later.
|
||||
- Windows smoke CI for help/doctor/config/status without live credentials.
|
||||
- License, contribution, security, and support policies.
|
||||
- Command/link validation for adoption docs.
|
||||
|
||||
## Acceptance-to-evidence matrix
|
||||
|
||||
| Acceptance area | Repository artifact(s) | Verification command(s) | Notes |
|
||||
|---|---|---|---|
|
||||
| PowerShell-first Windows install/run path | `README.md` (`Windows setup`, post-build binary location, PowerShell `.exe` examples); `install.sh` (Unix/WSL installer guard) | `python3 .github/scripts/check_doc_source_of_truth.py`; `cargo run -p rusty-claude-cli -- --help` | Current docs explicitly present Windows as a supported PowerShell path for source builds and `claw.exe`; `install.sh` is Linux/macOS/WSL-oriented, so native PowerShell binary usage and WSL installer usage must stay clearly separated. |
|
||||
| Safe provider switching examples | `USAGE.md` (`Auth`, `Local Models`, `Supported Providers & Models`); `docs/MODEL_COMPATIBILITY.md` | `cargo test -p api providers::`; `cargo test -p rusty-claude-cli --test output_format_contract provider_diagnostics_explain_openai_compatible_capabilities -- --nocapture` | Provider docs cover Anthropic API-key vs bearer-token shape, OpenAI-compatible routing, Ollama/OpenRouter/DashScope examples, and prefix routing to avoid ambient credential misrouting. |
|
||||
| Release artifact quickstart and staged packaging path | `README.md` (`Quick start`, `Post-build: locate the binary and verify`); `.github/workflows/release.yml`; `docs/windows-install-release.md` | `cargo build --release -p rusty-claude-cli`; `cargo run -p rusty-claude-cli -- version --output-format json`; `python3 .github/scripts/check_release_readiness.py` (release-readiness gate) | Release workflow packages Linux, macOS, and `claw-windows-x64.exe` assets with `.sha256` checksum files. README remains source-build-first, and the Windows quickstart names the checksum verification path. |
|
||||
| Windows smoke CI without live credentials | `.github/workflows/rust-ci.yml`; CLI local-only surfaces in `rust/crates/rusty-claude-cli/src/main.rs` (`help`, `doctor`, resumed `/config`, `status`) | `cargo run -p rusty-claude-cli -- --help`; `cargo run -p rusty-claude-cli -- doctor --output-format json`; `cargo run -p rusty-claude-cli -- status --output-format json`; `cargo run -p rusty-claude-cli -- config --output-format json` | The smoke target is local-only command execution with isolated config and no real provider credentials. If the Windows CI lane is not present in a branch, this map is the integration checklist for that lane. |
|
||||
| License metadata | `rust/Cargo.toml` (`workspace.package.license = "MIT"`) | `grep -n '^license = "MIT"' rust/Cargo.toml` | Cargo metadata declares MIT. A root `LICENSE` file remains the user-facing policy artifact to add if not already present in the policy lane. |
|
||||
| Contribution/security/support policies | Expected root policy docs: `CONTRIBUTING.md`, `SECURITY.md`, `SUPPORT.md`; existing support links in `README.md` | `test -f CONTRIBUTING.md`; `test -f SECURITY.md`; `test -f SUPPORT.md`; `python3 .github/scripts/check_doc_source_of_truth.py` | These files are policy-lane outputs. This map records the exact release gate so missing files fail visibly instead of being inferred from README links. |
|
||||
| Command/link validation | `.github/scripts/check_doc_source_of_truth.py`; `README.md`; `USAGE.md`; `docs/**` | `python3 .github/scripts/check_doc_source_of_truth.py`; `python3 - <<'PY' ...` link/reference check listed below | Existing validation catches stale branding/assets/invites across adoption docs. The lightweight reference check below catches broken relative Markdown links without network access. |
|
||||
|
||||
## Windows/local smoke command contract
|
||||
|
||||
Use isolated config and no live credentials. These commands must not require `ANTHROPIC_API_KEY`, `OPENAI_API_KEY`, `XAI_API_KEY`, or `DASHSCOPE_API_KEY`:
|
||||
|
||||
```powershell
|
||||
# From repository root on Windows PowerShell
|
||||
$env:CLAW_CONFIG_HOME = Join-Path $env:TEMP "claw-smoke-config"
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\ANTHROPIC_AUTH_TOKEN -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\XAI_API_KEY -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\DASHSCOPE_API_KEY -ErrorAction SilentlyContinue
|
||||
cd rust
|
||||
cargo run -p rusty-claude-cli -- --help
|
||||
cargo run -p rusty-claude-cli -- doctor --output-format json
|
||||
cargo run -p rusty-claude-cli -- status --output-format json
|
||||
cargo run -p rusty-claude-cli -- config --output-format json
|
||||
```
|
||||
|
||||
Equivalent Unix smoke used by this worker:
|
||||
|
||||
```bash
|
||||
env -u ANTHROPIC_API_KEY -u ANTHROPIC_AUTH_TOKEN -u OPENAI_API_KEY -u XAI_API_KEY -u DASHSCOPE_API_KEY \
|
||||
CLAW_CONFIG_HOME="$(mktemp -d)" cargo run -p rusty-claude-cli -- --help
|
||||
```
|
||||
|
||||
## Offline Markdown reference check
|
||||
|
||||
```bash
|
||||
python3 - <<'PY'
|
||||
from pathlib import Path
|
||||
import re, sys
|
||||
root = Path.cwd()
|
||||
errors = []
|
||||
for path in [Path('README.md'), Path('USAGE.md'), Path('PARITY.md'), Path('PHILOSOPHY.md'), *Path('docs').glob('*.md')]:
|
||||
if not path.exists():
|
||||
continue
|
||||
text = path.read_text(encoding='utf-8')
|
||||
for match in re.finditer(r'\[[^\]]+\]\(([^)]+)\)', text):
|
||||
target = match.group(1).split('#', 1)[0]
|
||||
if not target or '://' in target or target.startswith('mailto:'):
|
||||
continue
|
||||
if not (root / path.parent / target).resolve().exists():
|
||||
line = text.count('\n', 0, match.start()) + 1
|
||||
errors.append(f'{path}:{line}: missing relative link target {match.group(1)}')
|
||||
if errors:
|
||||
print('\n'.join(errors))
|
||||
sys.exit(1)
|
||||
print('offline markdown reference check passed')
|
||||
PY
|
||||
```
|
||||
|
||||
## Release gate
|
||||
|
||||
A Stream 8 release candidate is ready when all of the following are true:
|
||||
|
||||
1. PowerShell examples in `README.md` build and run `claw.exe` from a clean Windows checkout.
|
||||
2. Provider examples in `USAGE.md` show session-local/shell-local switching, include cleanup for conflicting ambient credentials (`unset` / `Remove-Item Env:`), and never instruct users to paste secrets into persistent config by default.
|
||||
3. Windows smoke CI runs help/doctor/config/status without live credentials, separates native PowerShell `claw.exe` smoke from WSL `install.sh` smoke, and archives JSON output on failure.
|
||||
4. Release artifacts include the documented platform matrix or the docs clearly state source-only alpha status.
|
||||
5. `LICENSE`, `CONTRIBUTING.md`, `SECURITY.md`, and `SUPPORT.md` exist or the policy lane records an explicit release-blocking exception.
|
||||
6. Doc source-of-truth and offline relative-link validation pass.
|
||||
62
docs/g010-clone-disambiguation-metadata.md
Normal file
62
docs/g010-clone-disambiguation-metadata.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# G010 clone disambiguation metadata and verification map
|
||||
|
||||
Scope: worker-2 task 5 for `G010-session-hygiene` / Stream 9 session hygiene, local state, and recovery UX. This artifact maps the clone/worktree disambiguation contract and the focused verification surface without mutating leader-owned `.omx/ultragoal` state.
|
||||
|
||||
## Contract summary
|
||||
|
||||
Claw session state is intentionally scoped to the current workspace clone/worktree. Operators and automation should treat the **session partition**, not a bare session id or the flat `.claw/sessions/` directory, as the identity boundary.
|
||||
|
||||
Required metadata and behaviors:
|
||||
|
||||
- **Workspace-bound partition**: managed sessions live under `.claw/sessions/<workspace_fingerprint>/`, where the fingerprint is a stable 16-character FNV-1a digest of the canonical workspace path.
|
||||
- **Canonical path input**: `SessionStore::from_cwd` and `SessionStore::from_data_dir` canonicalize their workspace path before computing the partition, preventing `/tmp/foo` vs `/private/tmp/foo` and relative-vs-absolute spelling from creating two stores for the same clone.
|
||||
- **Clone/worktree isolation**: two distinct clones or worktrees must get different session partitions, even if session ids collide.
|
||||
- **Legacy safety**: flat legacy sessions under `.claw/sessions/` remain readable only when they are bound to the same workspace or are unbound but physically inside the current workspace; sessions whose persisted `workspace_root` points at another clone are rejected as `WorkspaceMismatch`.
|
||||
- **Fork lineage stays local**: `/session fork` / managed session forking keeps the forked session in the same workspace partition and records parent id plus optional branch name.
|
||||
- **User-facing disambiguation**: empty-session copy names the actual fingerprint directory and explains that sessions from other CWDs are intentionally invisible.
|
||||
|
||||
## Implementation anchors
|
||||
|
||||
| Contract area | Repo anchor | Evidence role |
|
||||
| --- | --- | --- |
|
||||
| Partition layout and canonical workspace root | `rust/crates/runtime/src/session_control.rs:10-18`, `:32-47`, `:54-71` | Documents and implements `.claw/sessions/<workspace_hash>/` for `from_cwd` and explicit data-dir stores. |
|
||||
| Fingerprint algorithm | `rust/crates/runtime/src/session_control.rs:300-312` | Defines the 16-character FNV-1a workspace fingerprint used as the clone disambiguator. |
|
||||
| Managed create/resolve/list/load/fork APIs | `rust/crates/runtime/src/session_control.rs:86-204` | Ensures handles, `latest`, load, and fork resolve inside the active partition. |
|
||||
| Legacy/cross-workspace guard | `rust/crates/runtime/src/session_control.rs:213-233`, `:557-567` | Rejects mismatched persisted `workspace_root` and allows only same-workspace legacy files. |
|
||||
| Empty partition copy | `rust/crates/runtime/src/session_control.rs:535-543` | Reports `.claw/sessions/<fingerprint>/` plus the workspace-partition note. |
|
||||
| CLI wrapper | `rust/crates/rusty-claude-cli/src/main.rs:5952-6040` | Routes session CLI helpers through `current_session_store()`, so CLI list/latest/load uses the same partition. |
|
||||
| CLI session-list lifecycle context | `rust/crates/rusty-claude-cli/src/main.rs:5991-6027`, `:12960-12990` | Renders saved-only/dirty/abandoned lifecycle context for the current partition. |
|
||||
| CLI session resolution regression | `rust/crates/rusty-claude-cli/src/main.rs:13470-13579` | Covers JSONL default, legacy flat resolution, latest selection, and workspace mismatch rejection from CLI wrappers. |
|
||||
|
||||
## Covered roadmap and dogfood anchors
|
||||
|
||||
- `ROADMAP.md:1125-1129` — session files are namespaced by workspace fingerprint, and wrong-workspace session access is rejected.
|
||||
- `ROADMAP.md:1419-1441` — empty/missing session messages must expose the fingerprint directory instead of implying a flat `.claw/sessions/` search.
|
||||
- `ROADMAP.md:1453-1476` — the session partition boundary must be visible or shared deliberately; current contract is visible CWD/workspace partitioning.
|
||||
- `ROADMAP.md:5797-5902` — canonicalization closes the symlink/path-equivalence split in workspace fingerprints.
|
||||
- `ROADMAP.md:6342-6366` and `ROADMAP.md:6384-6411` — remaining Stream 9 risks around reported CWD form, failed-resume filesystem side effects, and broad-CWD resume guards are related UX/recovery lanes, not clone identity itself.
|
||||
|
||||
## Focused verification map
|
||||
|
||||
| Claim | Focused check |
|
||||
| --- | --- |
|
||||
| Same canonical workspace spellings share one partition | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_from_cwd_canonicalizes_equivalent_paths -- --nocapture` |
|
||||
| Distinct clones/worktrees do not see each other's sessions | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_from_cwd_isolates_sessions_by_workspace -- --nocapture` |
|
||||
| Explicit data-dir stores still namespace by workspace | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_from_data_dir_namespaces_by_workspace -- --nocapture` |
|
||||
| Same-workspace legacy sessions are readable; cross-workspace ones are rejected | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_rejects_legacy_session_from_other_workspace session_store_loads_safe_legacy_session_from_same_workspace session_store_loads_unbound_legacy_session_from_same_workspace -- --nocapture` |
|
||||
| `latest` and managed reference resolution stay inside the active partition | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_latest_and_resolve_reference -- --nocapture` and `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli latest_session_alias_resolves_most_recent_managed_session -- --nocapture` |
|
||||
| Forks retain partition and lineage metadata | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_fork_stays_in_same_namespace -- --nocapture` |
|
||||
| CLI wrapper rejects wrong-workspace files | `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli load_session_reference_rejects_workspace_mismatch -- --nocapture` |
|
||||
| Docs-only map is syntactically clean | `git diff --check` |
|
||||
| Broader type/test gate for the touched domain | `cargo check --manifest-path rust/Cargo.toml -p runtime -p rusty-claude-cli` plus `cargo test --manifest-path rust/Cargo.toml -p runtime session_control -- --nocapture` |
|
||||
|
||||
## Known boundaries and integration notes
|
||||
|
||||
- This worker intentionally did **not** edit `docs/g010-session-hygiene-verification-map.md` because worker-4 task 7 also names that final aggregate map. This file is the worker-2 clone-disambiguation map that worker-4/leader can link or merge into the aggregate map.
|
||||
- The current `SessionStore::from_cwd` contract keys on the canonical current directory, not necessarily the git top-level. That is acceptable only if status/help surfaces keep the partition boundary visible; `ROADMAP.md:1453-1476` remains the product tradeoff record.
|
||||
- Failed-resume directory creation and broad-CWD guards are related session hygiene hazards but are owned by the Stream 9 CLI/recovery lanes, not this docs-only clone-disambiguation task.
|
||||
- No `.omx/ultragoal` files were changed; leader-owned aggregate checkpointing consumes this commit and task lifecycle evidence.
|
||||
|
||||
## Delegation evidence
|
||||
|
||||
Subagent spawn evidence: one repository map probe `019e295d-a3dc-7041-bc96-30ee52b95698`, spawned before deeper serial mapping per the task contract; it errored with `429 Too Many Requests`, so the direct repo evidence above was integrated instead.
|
||||
21
docs/g010-session-hygiene-verification-map.md
Normal file
21
docs/g010-session-hygiene-verification-map.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# G010 Session Hygiene Verification Map
|
||||
|
||||
Stream 9 session hygiene is implemented in the Rust runtime/CLI as workspace-scoped session storage plus resume-safe recovery commands.
|
||||
|
||||
## Acceptance mapping
|
||||
|
||||
| Acceptance area | Code surface | Evidence |
|
||||
| --- | --- | --- |
|
||||
| Generated session files are not accidentally committed | `.gitignore`, `rust/.gitignore` ignore `.claw/sessions/` and `.claude/sessions/` | `git check-ignore .claw/sessions/example.jsonl rust/.claw/sessions/example.jsonl .claude/sessions/example.json` |
|
||||
| Per-worktree session isolation | `rust/crates/runtime/src/session_control.rs` (`SessionStore`, `workspace_fingerprint`, workspace validation) | `cargo test -p runtime session_store_from_cwd_isolates_sessions_by_workspace` |
|
||||
| List/resume/delete/exists contracts | `rust/crates/commands/src/lib.rs` parses `/session list`, `/session exists`, `/session delete`, `/resume`; `rust/crates/rusty-claude-cli/src/main.rs` renders text/JSON resume-safe session commands | `cargo test -p rusty-claude-cli session_exists_resume_command_reports_json_contract`; `cargo test -p rusty-claude-cli resume_report_uses_sectioned_layout` |
|
||||
| Compact and provider context-window recovery | `rust/crates/runtime/src/compact.rs`; `rust/crates/rusty-claude-cli/src/main.rs` context-window error recovery guidance and resumed `/compact` | `cargo test -p rusty-claude-cli provider_context_window_errors_are_reframed_with_same_guidance`; `cargo test -p commands compacts_sessions_via_slash_command` |
|
||||
| JSONL bloat safeguards | `rust/crates/runtime/src/session.rs` rotates oversized JSONL session files and keeps bounded rotated logs | `cargo test -p runtime rotates_and_cleans_up_large_session_logs` |
|
||||
| Interrupt/recovery path | `rust/crates/rusty-claude-cli/src/main.rs` keeps `/clear --confirm`, `/compact`, `/status`, and `/resume latest` resume-safe for unusable threads | `cargo test -p rusty-claude-cli context_window_preflight_errors_render_recovery_steps`; `cargo test -p rusty-claude-cli parses_resume_flag_with_multiple_slash_commands` |
|
||||
| Clone/session disambiguation | `Session` persists `workspace_root`; forks persist parent/branch metadata; session list shows lineage and lifecycle | `cargo test -p runtime persists_workspace_root_round_trip_and_forks_inherit_it`; `cargo test -p runtime forks_sessions_with_branch_metadata_and_persists_it` |
|
||||
|
||||
## Notes for leader audit
|
||||
|
||||
- Workers did not mutate `.omx/ultragoal`; this file is a repo-local verification map for team evidence only.
|
||||
- Runtime-owned session state remains under ignored `.claw/sessions/<workspace-fingerprint>/` paths.
|
||||
- Resume-safe JSON output uses stable `kind` fields (`restored`, `compact`, `session_list`, `session_exists`, etc.) so claws can route without scraping text.
|
||||
68
docs/g011-acp-json-rpc-status-contract.md
Normal file
68
docs/g011-acp-json-rpc-status-contract.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# G011 ACP/Zed and JSON-RPC status contract
|
||||
|
||||
Claw Code 2.0 keeps ACP/Zed and JSON-RPC serving behind the stable task,
|
||||
session-control, and event/report contracts from the roadmap. The current public
|
||||
surface is therefore a **truthful unsupported status**, not a hidden daemon.
|
||||
|
||||
## Supported status queries
|
||||
|
||||
The following commands are status queries and exit with code `0`:
|
||||
|
||||
```bash
|
||||
claw acp
|
||||
claw acp serve
|
||||
claw --acp
|
||||
claw -acp
|
||||
claw acp --output-format json
|
||||
claw acp serve --output-format json
|
||||
```
|
||||
|
||||
`serve` is deliberately an alias for status today. It does not bind a socket,
|
||||
start a daemon, or expose a JSON-RPC endpoint.
|
||||
|
||||
## JSON envelope
|
||||
|
||||
`claw acp --output-format json` returns a stable envelope for editor probes and
|
||||
CI checks:
|
||||
|
||||
```json
|
||||
{
|
||||
"schema_version": "1.0",
|
||||
"kind": "acp",
|
||||
"status": "unsupported",
|
||||
"phase": "discoverability_only",
|
||||
"supported": false,
|
||||
"exit_code": 0,
|
||||
"serve_alias_only": true,
|
||||
"protocol": {
|
||||
"name": "ACP/Zed",
|
||||
"json_rpc": false,
|
||||
"daemon": false,
|
||||
"endpoint": null,
|
||||
"serve_starts_daemon": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Consumers should check `kind == "acp"`, `supported == false`, and
|
||||
`protocol.json_rpc == false` instead of inferring support from command presence.
|
||||
|
||||
## Unsupported invocations
|
||||
|
||||
Malformed ACP invocations, such as `claw acp start`, exit with code `1`. With
|
||||
`--output-format json`, stderr uses the normal CLI error envelope and sets:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "error",
|
||||
"kind": "unsupported_acp_invocation",
|
||||
"exit_code": 1
|
||||
}
|
||||
```
|
||||
|
||||
## Deferral gate
|
||||
|
||||
Real ACP/Zed or JSON-RPC serve work remains deferred until the roadmap contracts
|
||||
for task packets, session control, and event/report schemas are stable. This
|
||||
keeps desktop, marketplace, and editor integrations from becoming alternate
|
||||
sources of truth before the CLI/file/API contracts are ready.
|
||||
62
docs/g011-ecosystem-ops-ux-verification-map.md
Normal file
62
docs/g011-ecosystem-ops-ux-verification-map.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# G011 Ecosystem/Ops/UX Verification Map
|
||||
|
||||
G011 closes the laterals that were intentionally deferred from the earlier safety,
|
||||
session, MCP, Windows, and docs streams. This map is the cross-lane gate for the
|
||||
team run: it names the surfaces that can be verified locally, the exact checks to
|
||||
rerun after worker integrations, and the UX deferrals that must remain explicit
|
||||
until their product contracts are stable.
|
||||
|
||||
## Cross-lane acceptance matrix
|
||||
|
||||
| Lane | Owned surface | Regression evidence | Gate / gap |
|
||||
| --- | --- | --- | --- |
|
||||
| ACP/Zed status and JSON contracts | `rust/crates/rusty-claude-cli/src/main.rs` parses `claw acp`, `claw acp serve`, `--acp`, and `-acp`; `README.md` and `rust/README.md` document discoverability-only status | `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract acp_guidance_emits_json_when_requested -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli local_command_help_flags_stay_on_the_local_parser_path -- --nocapture` | Real ACP/Zed daemon support remains deferred; status output must not imply a running protocol endpoint. |
|
||||
| Plugin/marketplace local routing | `rust/crates/rusty-claude-cli/src/main.rs` routes `claw plugins`, `claw plugin`, and `claw marketplace` to local plugin handling; `rust/crates/commands/src/lib.rs` keeps `/plugin` aliases in shared slash-command help | `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli removed_login_and_logout_subcommands_error_helpfully -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli direct_slash_commands_surface_shared_validation_errors -- --nocapture`; `python3 -m unittest tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_filter_excludes_plugin_sources tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_aliases_execute_as_local_commands tests.test_porting_workspace.PortingWorkspaceTests.test_route_plugin_slash_commands_match_commands tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_stream_emits_command_match tests.test_porting_workspace.PortingWorkspaceTests.test_turn_loop_plugin_commands_are_not_prompt_only` | Marketplace is an alias to local plugin management only; no remote marketplace browsing/install contract is claimed. |
|
||||
| TUI/copy/paste/clickable path UX | `rust/crates/commands/src/lib.rs` advertises `/copy`, `/paste`, `/desktop`, and path-oriented commands; `rust/crates/rusty-claude-cli/src/main.rs` renders compact file/tool paths for terminal readability | `cargo test --manifest-path rust/Cargo.toml -p commands renders_help_with_grouped_categories_and_keyboard_shortcuts -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_helpers_compact_output -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_truncates_large_read_output_for_display_only -- --nocapture` | Clipboard integration, full-screen TUI mode, and clickable terminal hyperlinks are not stable product contracts yet; keep them as roadmap/UX follow-ups unless a targeted implementation lands. |
|
||||
| Desktop integration deferral | `rust/crates/commands/src/lib.rs` includes `/desktop`; `rust/crates/rusty-claude-cli/src/main.rs` treats it as not implemented in the current build | `cargo test --manifest-path rust/Cargo.toml -p commands renders_help_from_shared_specs -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p commands renders_per_command_help_detail -- --nocapture` | `/desktop` must stay discoverable but non-committal until a desktop launch/API contract exists. |
|
||||
| Navigation/file-context/local-provider docs | `README.md`, `USAGE.md`, `rust/README.md`, `docs/MODEL_COMPATIBILITY.md`, and worker-2 docs updates | `python3 .github/scripts/check_doc_source_of_truth.py`; `python3 .github/scripts/check_release_readiness.py`; `git diff --check` | Re-run after docs integrations; this lane should not alter Rust behavior unless docs expose a code contract gap. |
|
||||
| Issue/PR ops gate | `docs/pr-issue-resolution-gate.md`, `docs/roadmap-pr-goals.md`, and issue/PR triage templates if present | `python3 .github/scripts/check_release_readiness.py`; `git diff --check`; optional `python3 scripts/validate_cc2_board.py` only when `.omx/cc2/board.md` changes | Worker lanes must not merge/close remote PRs or issues; final reconciliation remains leader-owned. |
|
||||
|
||||
## Task 5 UX/deferral support notes
|
||||
|
||||
- `/copy`, `/paste`, and `/desktop` are parsed slash-command names, but current
|
||||
runtime handling still reports unimplemented commands rather than performing
|
||||
clipboard or desktop side effects. That is safer than pretending support exists.
|
||||
- `/marketplace` is intentionally a plugin alias; it should not be described as
|
||||
a remote marketplace until install/search/update semantics and trust policy are
|
||||
specified.
|
||||
- Path readability is covered by terminal rendering helpers that compact long
|
||||
tool outputs and preserve paths in read/write/edit summaries. Clickable OSC-8
|
||||
links, if added later, need separate tests because terminal support varies.
|
||||
- Full-screen TUI mode remains aspirational (`rust/TUI-ENHANCEMENT-PLAN.md`);
|
||||
current verification should focus on the inline REPL/help/status surfaces.
|
||||
|
||||
## Final verification sequence
|
||||
|
||||
Run these after all G011 worker commits are integrated into the leader branch:
|
||||
|
||||
```bash
|
||||
git diff --check
|
||||
python3 .github/scripts/check_doc_source_of_truth.py
|
||||
python3 .github/scripts/check_release_readiness.py
|
||||
cargo check --manifest-path rust/Cargo.toml -p commands -p rusty-claude-cli
|
||||
cargo test --manifest-path rust/Cargo.toml -p commands renders_help_from_shared_specs -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p commands renders_help_with_grouped_categories_and_keyboard_shortcuts -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p commands renders_per_command_help_detail -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli removed_login_and_logout_subcommands_error_helpfully -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli direct_slash_commands_surface_shared_validation_errors -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli local_command_help_flags_stay_on_the_local_parser_path -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_helpers_compact_output -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_truncates_large_read_output_for_display_only -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract acp_guidance_emits_json_when_requested -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract plugins_json_surfaces_lifecycle_contract_when_plugin_is_installed -- --nocapture
|
||||
python3 -m unittest tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_filter_excludes_plugin_sources tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_aliases_execute_as_local_commands tests.test_porting_workspace.PortingWorkspaceTests.test_route_plugin_slash_commands_match_commands tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_stream_emits_command_match tests.test_porting_workspace.PortingWorkspaceTests.test_turn_loop_plugin_commands_are_not_prompt_only
|
||||
```
|
||||
|
||||
## Leader audit notes
|
||||
|
||||
- This map is repo-local evidence only; workers must not mutate `.omx/ultragoal`.
|
||||
- If a check fails because another lane is still in progress, record the failing
|
||||
command and rerun after that lane is integrated instead of weakening the gate.
|
||||
- The minimum terminal condition is: docs checks pass, Rust targeted tests pass,
|
||||
and any still-deferred UX surface is explicitly named above.
|
||||
73
docs/g012-final-release-readiness-report.md
Normal file
73
docs/g012-final-release-readiness-report.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# G012 Final Release Readiness Report
|
||||
|
||||
Snapshot: 2026-05-15T02:59:29Z on `origin/main` / `HEAD` `2e93264919f38835410668ff6ca588606bc629f0`.
|
||||
|
||||
This is the worker-1 roadmap/board audit and release-readiness evidence map for the
|
||||
Claw Code 2.0 final gate. It is intentionally repo-local and non-destructive: it
|
||||
references `.omx/ultragoal` evidence without modifying leader-owned ultragoal
|
||||
state, and it does not merge PRs or close issues owned by the W3/W4 lanes.
|
||||
|
||||
## Release readiness summary
|
||||
|
||||
| Gate | Evidence | Result |
|
||||
| --- | --- | --- |
|
||||
| Ultragoal stream completion | `.omx/ultragoal/goals.json` shows G001-G011 complete and G012 pending at this snapshot. | PASS for pre-final stream completion; G012 remains the active final gate. |
|
||||
| Roadmap board coverage | `python3 scripts/validate_cc2_board.py` -> `PASS cc2 board validation`; 729 board items; 124/124 ROADMAP headings mapped; 542/542 ROADMAP actions mapped. | PASS |
|
||||
| Issue/parity intake coverage | `python3 .omx/cc2/validate_issue_parity_intake.py` -> `PASS issue/parity intake: 19 issue rows, 9 parity rows`. | PASS |
|
||||
| Release docs/readiness script | `python3 .github/scripts/check_release_readiness.py` -> `release-readiness check passed`. | PASS |
|
||||
| Documentation source-of-truth | `python3 .github/scripts/check_doc_source_of_truth.py` -> `doc source-of-truth check passed`. | PASS |
|
||||
| Fresh open PR snapshot | `gh pr list --state open --limit 1000 --json number,title,state,updatedAt,url,isDraft,mergeable` -> 51 open PR records; newest #3040. | PASS for snapshot capture; W3 owns reconciliation/action. |
|
||||
| Fresh open issue snapshot | `gh issue list --state open --limit 1000 --json number,title,state,updatedAt,url,labels` -> 1000 open issue records; newest returned #3036. | PASS for snapshot capture with limit caveat; W4 owns reconciliation/action. |
|
||||
|
||||
## Stream evidence index
|
||||
|
||||
| Goal | Status in local ultragoal state | Primary tracked evidence |
|
||||
| --- | --- | --- |
|
||||
| G001 Stream 0 board | complete | `.omx/cc2/board.json`, `.omx/cc2/board.md`, `scripts/validate_cc2_board.py` |
|
||||
| G002 security | complete | `docs/g002-security-verification-map.md` |
|
||||
| G003 boot/session | complete | `docs/g003-boot-session-verification-map.md` |
|
||||
| G004 events/reports | complete | `docs/g004-events-reports-verification-map.md`, `docs/g004-events-reports-contract.md` |
|
||||
| G005 branch/recovery | complete | `docs/g005-branch-recovery-verification-map.md` |
|
||||
| G006 task/policy/board | complete | `docs/g006-task-policy-board-verification-map.md` |
|
||||
| G007 plugin/MCP | complete | `docs/g007-plugin-mcp-verification-map.md`, `docs/g007-mcp-lifecycle-mapping.md` |
|
||||
| G008 provider compatibility | complete | `docs/local-openai-compatible-providers.md` plus ultragoal quality-gate artifact |
|
||||
| G009 Windows/docs/release | complete | `docs/g009-windows-docs-release-verification-map.md`, `docs/windows-install-release.md` |
|
||||
| G010 session hygiene | complete | `docs/g010-session-hygiene-verification-map.md`, `docs/g010-clone-disambiguation-metadata.md` |
|
||||
| G011 ecosystem/ops/UX | complete | `docs/g011-ecosystem-ops-ux-verification-map.md`, `docs/g011-acp-json-rpc-status-contract.md`, `docs/pr-issue-resolution-gate.md` |
|
||||
| G012 final gate | pending | This report plus W2/W3/W4 final gate reports. |
|
||||
|
||||
## Roadmap PR audit snapshot
|
||||
|
||||
`docs/roadmap-pr-goals.md` lists 17 roadmap/product-fit PRs that must be merged
|
||||
only when correct, resolvable, and safe. The fresh GitHub snapshot shows all 17
|
||||
remain open. Sixteen roadmap-doc PRs are currently `CONFLICTING`, so they are not
|
||||
safe direct-merge candidates from this worker lane. PR #2824 is `MERGEABLE`, but
|
||||
it is explicitly product-fit review rather than a direct roadmap merge candidate.
|
||||
|
||||
| PR | Title | Mergeable | Draft | Updated | Worker-1 final-gate disposition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2824 | docs: personal assistant roadmap | MERGEABLE | false | 2026-04-28T13:05:03Z | Defer to product-fit/leader decision; do not auto-merge as CC2 release gate evidence. |
|
||||
| #2839 | docs(roadmap): add #330 — resume mode stats/cost always zero | CONFLICTING | false | 2026-04-29T12:36:19Z | Not mergeable without conflict resolution; mapped into completed session/status streams. |
|
||||
| #2841 | docs(roadmap): add #332 — doctor json missing top-level status field | CONFLICTING | false | 2026-04-29T13:04:12Z | Not mergeable without conflict resolution; mapped into completed boot/doctor streams. |
|
||||
| #2842 | docs(roadmap): add #334 — version json omits build_date and uses short sha only | CONFLICTING | false | 2026-04-29T13:35:01Z | Not mergeable without conflict resolution; release-readiness docs/scripts pass at HEAD. |
|
||||
| #2844 | docs(roadmap): add #336 — session subcommand resume inconsistency and type/kind error mismatch | CONFLICTING | false | 2026-04-29T14:03:19Z | Not mergeable without conflict resolution; mapped into completed session hygiene streams. |
|
||||
| #2846 | docs(roadmap): add #331 — export silently overwrites on repeated invocations | CONFLICTING | false | 2026-04-29T13:02:02Z | Not mergeable without conflict resolution; action remains W3/leader triage if still desired. |
|
||||
| #2848 | docs(roadmap): add #333 — no in-session settings inspect command | CONFLICTING | false | 2026-04-29T13:32:01Z | Not mergeable without conflict resolution; action remains W3/leader triage if still desired. |
|
||||
| #2850 | docs(roadmap): add #335 — session list omits created_at_ms field | CONFLICTING | false | 2026-04-29T14:01:29Z | Not mergeable without conflict resolution; mapped into completed session metadata streams. |
|
||||
| #2858 | docs(roadmap): add #343 — session subcommand resume-safety inconsistently enforced | CONFLICTING | false | 2026-04-29T16:02:45Z | Not mergeable without conflict resolution; mapped into completed session/recovery streams. |
|
||||
| #2862 | docs(roadmap): add #342 — status json omits active session ID, workspace counters ambiguous | CONFLICTING | false | 2026-04-29T19:04:31Z | Not mergeable without conflict resolution; mapped into completed status/session streams. |
|
||||
| #2864 | docs(roadmap): add #364 — /cost returns no cost_usd; identical to /stats | CONFLICTING | false | 2026-04-29T22:32:52Z | Not mergeable without conflict resolution; mapped into completed UX/status contract review. |
|
||||
| #2865 | docs(roadmap): add #362 — doctor auth false-positive: misses CLI session tokens | CONFLICTING | false | 2026-04-29T22:06:28Z | Not mergeable without conflict resolution; mapped into completed doctor/auth stream work. |
|
||||
| #2867 | docs(roadmap): add #368 — export always appends .txt; response.file reflects mangled path | CONFLICTING | false | 2026-04-29T23:35:35Z | Not mergeable without conflict resolution; action remains W3/leader triage if still desired. |
|
||||
| #2868 | docs(roadmap): add #356 — session list title always null; no rename command | CONFLICTING | false | 2026-04-29T20:36:43Z | Not mergeable without conflict resolution; mapped into completed session identity streams. |
|
||||
| #2869 | docs(roadmap): add #358 — history entries missing role field, no pagination | CONFLICTING | false | 2026-04-29T21:02:55Z | Not mergeable without conflict resolution; mapped into completed session/history review. |
|
||||
| #2872 | docs(roadmap): add #360 — /tokens, /stats, /cost identical output; no context-window or cost_usd | CONFLICTING | false | 2026-04-29T21:32:57Z | Not mergeable without conflict resolution; mapped into completed UX/status contract review. |
|
||||
| #2876 | docs(roadmap): add #354 — /cwd suggests itself in did-you-mean; self-referential loop | CONFLICTING | false | 2026-04-29T20:01:22Z | Not mergeable without conflict resolution; mapped into completed command UX review. |
|
||||
|
||||
## Final-gate stop condition for worker-1
|
||||
|
||||
Worker-1's release-readiness lane is complete when this report is committed and
|
||||
its checks pass. Overall G012 completion still requires the leader to integrate
|
||||
W2 quality-gate classification and W3/W4 PR/issue reconciliation evidence. This
|
||||
report does not claim the remote PR/issue backlog is resolved; it provides the
|
||||
fresh roadmap/board/readiness audit that those lanes can reference.
|
||||
150
docs/local-openai-compatible-providers.md
Normal file
150
docs/local-openai-compatible-providers.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# Local OpenAI-compatible providers and skills setup
|
||||
|
||||
This guide covers two common offline/local workflows:
|
||||
|
||||
1. running Claw against an OpenAI-compatible local model server such as Ollama, llama.cpp, or vLLM; and
|
||||
2. installing local skills from disk so Claw can discover them without network access.
|
||||
|
||||
## Claw is not Claude-only
|
||||
|
||||
Claw Code is a Claude-Code-shaped workflow/runtime, not a Claude-only product. It supports Anthropic directly and can target OpenAI-compatible, provider-routed, and local models depending on configuration. Non-Claude providers are supported honestly: they may require stricter tool-call and response-shape compatibility, and some slash/tool workflows can be rougher than first-party Anthropic/OpenAI paths. Provider-specific identity leaks are bugs, not intended product positioning.
|
||||
|
||||
If you need the most polished daily-driver experience for a specific non-Claude model today, compare that provider’s native tools. If you need runtime/provider hackability, Claw’s OpenAI-compatible route is the intended extension path.
|
||||
|
||||
## OpenAI-compatible routing basics
|
||||
|
||||
Set `OPENAI_BASE_URL` to the server’s `/v1` endpoint and set `OPENAI_API_KEY` to either the required token or a harmless placeholder for local servers that expect an Authorization header. The model name must match what the server exposes.
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:11434/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "qwen3:latest" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
Routing notes:
|
||||
|
||||
- Use the `openai/` prefix for OpenAI-compatible gateways when you need prefix routing to win over ambient Anthropic credentials, for example `--model "openai/gpt-4.1-mini"` with OpenRouter.
|
||||
- For local servers, prefer the exact model ID reported by the server (`qwen3:latest`, `llama3.2`, `Qwen/Qwen2.5-Coder-7B-Instruct`, etc.). If your local gateway exposes slash-containing IDs, use that exact slug.
|
||||
- If you have multiple provider keys in your environment, remove unrelated keys while smoke-testing a local route or choose a model prefix that unambiguously selects the intended provider.
|
||||
- Tool workflows need model/server support for OpenAI-compatible tool calls. Plain prompt smoke tests can pass even when slash/tool workflows still fail because the server returns an incompatible tool-call shape.
|
||||
|
||||
## Raw `/v1/chat/completions` smoke test
|
||||
|
||||
Before debugging Claw, verify the local server speaks the expected wire format:
|
||||
|
||||
```bash
|
||||
curl -sS "$OPENAI_BASE_URL/chat/completions" \
|
||||
-H "Authorization: Bearer ${OPENAI_API_KEY:-local-dev-token}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "qwen3:latest",
|
||||
"messages": [{"role": "user", "content": "Reply exactly HELLO_WORLD_123"}],
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
Expected result: a JSON response with one assistant message containing `HELLO_WORLD_123`. If this fails, fix the local server, model name, or auth token before changing Claw settings.
|
||||
|
||||
## Ollama
|
||||
|
||||
Start Ollama and pull a model:
|
||||
|
||||
```bash
|
||||
ollama pull qwen3:latest
|
||||
ollama serve
|
||||
```
|
||||
|
||||
In another shell:
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:11434/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "qwen3:latest" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
If Ollama is running without auth and your build accepts authless local OpenAI-compatible servers, `unset OPENAI_API_KEY` is also acceptable. Use a placeholder token rather than a real cloud API key for local testing.
|
||||
|
||||
## llama.cpp server
|
||||
|
||||
Start a llama.cpp OpenAI-compatible server with the model name you want Claw to send:
|
||||
|
||||
```bash
|
||||
llama-server -m ./models/qwen2.5-coder.gguf --host 127.0.0.1 --port 8080 --alias qwen2.5-coder
|
||||
```
|
||||
|
||||
Then smoke-test through Claw:
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:8080/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "qwen2.5-coder" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
## vLLM or another OpenAI-compatible server
|
||||
|
||||
Start vLLM with an OpenAI-compatible API server:
|
||||
|
||||
```bash
|
||||
vllm serve Qwen/Qwen2.5-Coder-7B-Instruct --host 127.0.0.1 --port 8000
|
||||
```
|
||||
|
||||
Then route Claw to it:
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:8000/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "Qwen/Qwen2.5-Coder-7B-Instruct" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
## Local skills install from disk
|
||||
|
||||
Skills are discovered from Claw skill roots such as `.claw/skills/` in a workspace and `~/.claw/skills/` for user-level installs. Legacy `.codex/skills/` roots may also be scanned for compatibility, but new local Claw projects should prefer `.claw/skills/`.
|
||||
|
||||
A skill directory should contain a `SKILL.md` file with frontmatter:
|
||||
|
||||
```text
|
||||
my-skill/
|
||||
└── SKILL.md
|
||||
```
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: my-skill
|
||||
description: Explain when this skill should be used.
|
||||
---
|
||||
|
||||
# My Skill
|
||||
|
||||
Instructions for the agent go here.
|
||||
```
|
||||
|
||||
Install a skill from a local path in the interactive REPL:
|
||||
|
||||
```text
|
||||
/skills install /absolute/path/to/my-skill
|
||||
/skills list
|
||||
/skills my-skill
|
||||
```
|
||||
|
||||
Or inspect skills from the direct CLI surface:
|
||||
|
||||
```bash
|
||||
claw skills --output-format json
|
||||
```
|
||||
|
||||
Offline install checklist:
|
||||
|
||||
- Install the specific skill directory, not only the repository root, unless that repository root itself contains `SKILL.md`.
|
||||
- Keep the frontmatter `name` aligned with the directory name users will type.
|
||||
- After installing, run `/skills list` or `claw skills --output-format json` to confirm the discovered name and source path.
|
||||
- If a skill invocation fails with an HTTP/provider error, the skill may have installed correctly but the current model/provider call failed. Run `claw doctor`, verify provider credentials, and try a simple prompt smoke test before reinstalling the skill.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
| Symptom | Check |
|
||||
|---|---|
|
||||
| Claw still asks for Anthropic credentials | Use an explicit OpenAI-compatible model route or remove unrelated Anthropic env vars during local smoke tests. |
|
||||
| `model not found` from local server | Use the exact model ID exposed by Ollama/llama.cpp/vLLM. |
|
||||
| Plain prompt works but tools fail | Confirm the model/server supports OpenAI-compatible tool calls and response shapes. |
|
||||
| Skill says installed but `/skills <name>` fails | Check `/skills list` for the discovered name and source; verify provider credentials separately with `claw doctor`. |
|
||||
| A local docs/log file contains secrets | Redact it before using `@path` file context or attaching it to an issue. |
|
||||
69
docs/navigation-file-context.md
Normal file
69
docs/navigation-file-context.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# Navigation and file context guide
|
||||
|
||||
This guide answers the common “how do I browse output?” and “how do I submit a file?” questions for Claw Code. Claw is an agent CLI, not a full file manager: terminal navigation comes from your shell or terminal, while file context is passed explicitly in prompts.
|
||||
|
||||
## Prompt and terminal navigation
|
||||
|
||||
Use your terminal’s normal controls for command history and long output:
|
||||
|
||||
- `Up` / `Down` usually move through shell or REPL prompt history.
|
||||
- `Ctrl-r` searches shell history in most shells.
|
||||
- Long command output is viewed with your terminal scrollback. In tmux, enter copy mode with `Ctrl-b [` then use arrows, PageUp/PageDown, search, or your mouse depending on tmux config.
|
||||
- If output is too large to scroll comfortably, redirect it to a file and give that file to Claw as context:
|
||||
```bash
|
||||
cargo test --workspace 2>&1 | tee logs/test-output.txt
|
||||
claw prompt "Use @logs/test-output.txt as context and summarize the failing tests."
|
||||
```
|
||||
|
||||
Claw may provide slash commands that inspect workspace state, but those commands do not replace your terminal’s scrollback or shell history.
|
||||
|
||||
## Submit repository files with `@path`
|
||||
|
||||
Mention files from the current workspace with `@` paths. Use relative paths from the repository or current working directory:
|
||||
|
||||
```text
|
||||
Read @src/app.ts and explain the bug.
|
||||
Compare @old.md and @new.md.
|
||||
Use @logs/error.txt as context and suggest a fix.
|
||||
Review @README.md and @docs/navigation-file-context.md for consistency.
|
||||
```
|
||||
|
||||
Tips:
|
||||
|
||||
- Prefer the smallest useful file set. Large directories or logs can consume context quickly.
|
||||
- Use exact paths when possible (`@rust/crates/runtime/src/lib.rs`) instead of vague descriptions.
|
||||
- For generated logs, save them under a temporary or ignored directory such as `logs/` and reference the file.
|
||||
- If the file is outside the repository, copy it into a safe workspace location first or use an app/UI attachment feature if your Claw surface supports attachments.
|
||||
|
||||
## Browse or inspect files
|
||||
|
||||
Claw can answer questions about files you reference, and you can ask it to inspect likely locations:
|
||||
|
||||
```text
|
||||
Find where provider routing is implemented and summarize the relevant files.
|
||||
Read @USAGE.md and tell me where local model setup is documented.
|
||||
Search for the command that handles skills install, then explain the control flow.
|
||||
```
|
||||
|
||||
For deterministic shell-side browsing, ordinary commands still work:
|
||||
|
||||
```bash
|
||||
find docs -maxdepth 2 -type f | sort
|
||||
rg -n "OPENAI_BASE_URL|skills install" USAGE.md docs rust
|
||||
sed -n '250,340p' USAGE.md
|
||||
```
|
||||
|
||||
## Attach external files where supported
|
||||
|
||||
Some UI surfaces let you drag and drop or attach files directly. When that is available, use attachments for files that should not be committed to the repo. In terminal-only usage, copy the file into the workspace, reference it with `@path`, then remove it when finished if it was temporary.
|
||||
|
||||
## Secret and credential safety
|
||||
|
||||
Do not paste real API keys, OAuth tokens, private logs, or customer data into prompts, issue comments, screenshots, or committed docs. Before submitting a file:
|
||||
|
||||
- Replace live keys with placeholders such as `sk-ant-REPLACE_ME`, `sk-or-v1-REPLACE_ME`, or `local-dev-token`.
|
||||
- Redact bearer tokens, cookies, session IDs, and private base URLs.
|
||||
- Prefer minimal reproductions over full production logs.
|
||||
- Keep `.env`, key files, and private logs out of git.
|
||||
|
||||
If a task requires credentials, describe the variable names and expected shapes instead of sharing the values.
|
||||
67
docs/pr-issue-resolution-gate.md
Normal file
67
docs/pr-issue-resolution-gate.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Claw Code 2.0 PR and Issue Resolution Gate
|
||||
|
||||
This gate was added to the Claw Code 2.0 Ultragoal after the explicit requirement:
|
||||
|
||||
> all PRs should be merged and all issues should be resolved if resolvable and correct.
|
||||
|
||||
## Scope
|
||||
|
||||
Before the Claw Code 2.0 Ultragoal can be marked complete:
|
||||
|
||||
1. Every open GitHub PR at the current final-gate snapshot must be triaged.
|
||||
2. PRs that are correct, compatible with Claw Code 2.0 direction, and pass required verification must be merged.
|
||||
3. PRs that are stale, incorrect, duplicative, unsafe, spam, or outside Claw Code scope must not be merged; each needs a recorded rationale.
|
||||
4. Every open GitHub issue at the current final-gate snapshot must be triaged.
|
||||
5. Issues that are resolvable and correct must be fixed or explicitly linked to a merged fix.
|
||||
6. Issues that are spam, duplicates, incorrect, unactionable, externally blocked, or not Claw Code work must be closed or labeled/commented with rationale when repository policy allows.
|
||||
7. The final completion audit must use a fresh GitHub snapshot, not only the planning snapshot.
|
||||
|
||||
## Current live snapshot
|
||||
|
||||
A fresh non-destructive snapshot was captured locally during G011 W3 execution:
|
||||
|
||||
- Command: `gh pr list --state open --limit 1000 --json number,title,state,updatedAt,url`
|
||||
- Command: `gh issue list --state open --limit 1000 --json number,title,state,updatedAt,url,labels`
|
||||
- Captured on: 2026-05-15T02:39:41Z during the active Ultragoal run.
|
||||
- Observed counts: 51 open PR records and 1000 open issue records from GitHub CLI list calls.
|
||||
- Most recent open PR in the snapshot: #3040, `fix: recognize OPENAI_API_KEY as valid auth for OpenAI-compatible endpoints`, updated 2026-05-14T11:35:23Z.
|
||||
- Most recent open issue in the snapshot: #3039, `How to install skills?`, updated 2026-05-14T08:14:36Z.
|
||||
- The issue snapshot hit the configured `--limit 1000`, so the final gate must treat the issue count as at least 1000 unless a higher-limit export or paginated ledger is captured.
|
||||
|
||||
These command outputs are evidence inputs, not final proof. The final gate must refresh them and compare deltas before any completion claim.
|
||||
|
||||
## Anti-slop triage templates
|
||||
|
||||
Use `docs/anti-slop-triage.md` plus the repository templates before acting on the live snapshot:
|
||||
|
||||
- `.github/ISSUE_TEMPLATE/anti_slop_triage.yml` records the initial issue classification, evidence, and non-destructive next action.
|
||||
- `.github/PULL_REQUEST_TEMPLATE.md` adds PR classification, verification, and resolution-gate checklist items.
|
||||
|
||||
The anti-slop classifications are: `actionable-bug`, `actionable-docs`, `actionable-feature`, `duplicate`, `spam-or-promotion`, `generated-slop-or-hallucinated`, `unsafe-or-security-sensitive`, `not-reproducible-yet`, and `externally-blocked`.
|
||||
|
||||
Automation lanes may recommend labels, comments, defer/close rationales, or merge candidates, but must not merge or close remote PRs/issues without maintainer-owned approval.
|
||||
|
||||
|
||||
## G012 final PR reconciliation snapshot
|
||||
|
||||
Worker-3 captured a fresh PR ledger for the final Claw Code 2.0 gate in `docs/pr-triage-g012-final-gate.json`.
|
||||
|
||||
- Captured on: 2026-05-15T02:58:00Z during G012 final-gate execution.
|
||||
- Commands: `gh pr list --state open --limit 100 ...` plus `gh pr view <number> ...` for per-PR file and merge-state evidence.
|
||||
- Observed count: 51 open PR records.
|
||||
- Merge action taken by worker-3: none. The safety policy requires correct, safe, non-conflicting, resolvable PRs with evidence; this snapshot found 32 PRs in `CONFLICTING`/`DIRTY` state and 19 `MERGEABLE` PRs that GitHub reported as `UNSTABLE` with no fresh check-rollup evidence in the live snapshot.
|
||||
- Docs-only candidate-review PRs: #3021 and #2824 remain deferred until content/source-of-truth review and fresh verification are available.
|
||||
|
||||
## Required final evidence
|
||||
|
||||
The final report must include:
|
||||
|
||||
- Fresh `gh pr list --state open` and `gh issue list --state open` snapshots.
|
||||
- A PR ledger with one row per PR: merge / reject / defer, reason, verification, commit/merge reference.
|
||||
- An issue ledger with one row per issue: fixed / duplicate / spam / invalid / deferred-with-rationale / externally-blocked, reason, and linked evidence.
|
||||
- Verification that no correct, mergeable PR remains unmerged without rationale.
|
||||
- Verification that no resolvable, correct issue remains open without a fix or rationale.
|
||||
|
||||
## Non-goals
|
||||
|
||||
This gate does not require merging unsafe, unverified, incompatible, spam, or incorrect contributions. It requires explicit evidence-backed triage and action for everything that is correct and resolvable.
|
||||
1461
docs/pr-triage-g012-final-gate.json
Normal file
1461
docs/pr-triage-g012-final-gate.json
Normal file
File diff suppressed because it is too large
Load Diff
58
docs/roadmap-pr-goals.md
Normal file
58
docs/roadmap-pr-goals.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# Roadmap PR goal intake
|
||||
|
||||
Captured: 2026-05-14 (Asia/Seoul) during the Claw Code 2.0 Ultragoal run.
|
||||
|
||||
Purpose: make the user's follow-up requirement durable — all roadmap PRs should be merged when correct/resolvable, and unresolved roadmap deltas should become Ultragoal work rather than being lost. This file is a tracked companion to the leader-owned `.omx/ultragoal/goals.json` and `.omx/ultragoal/ledger.jsonl` artifacts.
|
||||
|
||||
## Merge policy
|
||||
|
||||
- Merge only PRs that are still relevant to Claw Code 2.0, are non-draft, target `main`, and are conflict-free after a fresh mergeability refresh.
|
||||
- Prefer squash merges with a Lore-style body when GitHub allows a direct PR merge.
|
||||
- If a PR is documentation-only but documents a real roadmap gap, merging it is acceptable once checks/conflicts are clean.
|
||||
- If a PR is stale, duplicated by already-landed work, or not product-aligned, do not force-merge; record the rationale and map any still-correct requirement into G011/G012.
|
||||
- After merging roadmap PRs, refresh generated board artifacts (`.omx/cc2/board.json`, `.omx/cc2/board.md`) so Stream 0 coverage stays current.
|
||||
|
||||
## Open roadmap PRs with green historical checks
|
||||
|
||||
These are first-pass merge candidates, pending fresh mergeability and conflict checks against current `main`.
|
||||
|
||||
| PR | Title | Branch | Checks | Mergeable | URL |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2848 | docs(roadmap): add #333 — no in-session settings inspect command | `docs/roadmap-333-no-settings-inspect-command` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2848 |
|
||||
| #2846 | docs(roadmap): add #331 — export silently overwrites on repeated invocations | `docs/roadmap-331-export-filename-collision` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2846 |
|
||||
| #2869 | docs(roadmap): add #358 — history entries missing role field, no pagination | `docs/roadmap-348-history-entries-missing-role` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2869 |
|
||||
| #2850 | docs(roadmap): add #335 — session list omits created_at_ms field | `docs/roadmap-335-session-list-no-created-at` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2850 |
|
||||
| #2868 | docs(roadmap): add #356 — session list title always null; no rename command | `docs/roadmap-347-session-list-title-always-null` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2868 |
|
||||
| #2865 | docs(roadmap): add #362 — doctor auth false-positive: misses CLI session tokens | `docs/roadmap-345-doctor-auth-check-incomplete` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2865 |
|
||||
| #2864 | docs(roadmap): add #364 — /cost returns no cost_usd; identical to /stats | `docs/roadmap-344-cost-command-no-dollar-amount` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2864 |
|
||||
| #2867 | docs(roadmap): add #368 — export always appends .txt; response.file reflects mangled path | `docs/roadmap-346-export-forces-txt-extension` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2867 |
|
||||
| #2862 | docs(roadmap): add #342 — status json omits active session ID, workspace counters ambiguous | `docs/roadmap-342-v2` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2862 |
|
||||
| #2876 | docs(roadmap): add #354 — /cwd suggests itself in did-you-mean; self-referential loop | `docs/roadmap-354-cwd-self-referential-suggestion` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2876 |
|
||||
| #2872 | docs(roadmap): add #360 — /tokens, /stats, /cost identical output; no context-window or cost_usd | `docs/roadmap-349-tokens-stats-cost-identical` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2872 |
|
||||
|
||||
## Open roadmap PRs needing local validation or CI refresh
|
||||
|
||||
These have no check rollup in the live snapshot; validate locally or refresh CI before merging.
|
||||
|
||||
| PR | Title | Branch | Checks | Mergeable | URL |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2858 | docs(roadmap): add #343 — session subcommand resume-safety inconsistently enforced | `docs/roadmap-340-session-resume-safe-inconsistent` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2858 |
|
||||
| #2839 | docs(roadmap): add #330 — resume mode stats/cost always zero | `docs/roadmap-324-resume-stats-zero` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2839 |
|
||||
| #2841 | docs(roadmap): add #332 — doctor json missing top-level status field | `docs/roadmap-325-doctor-no-status-field` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2841 |
|
||||
| #2844 | docs(roadmap): add #336 — session subcommand resume inconsistency and type/kind error mismatch | `docs/roadmap-329-session-subcommand-resume-inconsistency` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2844 |
|
||||
| #2842 | docs(roadmap): add #334 — version json omits build_date and uses short sha only | `docs/roadmap-328-version-json-incomplete` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2842 |
|
||||
|
||||
## Product-fit review before merge
|
||||
|
||||
These may be broader than the Claw Code 2.0 roadmap scope and need a product-fit decision before merge.
|
||||
|
||||
| PR | Title | Branch | Checks | Mergeable | URL |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2824 | docs: personal assistant roadmap | `pr/docs-personal-assistant-roadmap` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2824 |
|
||||
|
||||
## Ultragoal mapping
|
||||
|
||||
- G003-G010: close implementation gaps that overlap a roadmap PR title if the requirement belongs to the active stream.
|
||||
- G011: reconcile ecosystem/ops/UX roadmap PRs and unresolved correct issues that do not fit earlier streams.
|
||||
- G012: final release gate must prove that every open roadmap PR was merged, closed as duplicate/obsolete, or converted into an explicit remaining goal with evidence.
|
||||
|
||||
195
docs/windows-install-release.md
Normal file
195
docs/windows-install-release.md
Normal file
@@ -0,0 +1,195 @@
|
||||
# Windows install and release quickstart
|
||||
|
||||
This page is the PowerShell-first path for installing, verifying, and safely switching providers on Windows. It is intentionally copyable without embedding live secrets.
|
||||
|
||||
## Choose an install path
|
||||
|
||||
### Option A: build from source in PowerShell
|
||||
|
||||
Use this when you are developing Claw Code or testing a local checkout.
|
||||
|
||||
```powershell
|
||||
git clone https://github.com/ultraworkers/claw-code
|
||||
Set-Location .\claw-code\rust
|
||||
cargo build --workspace
|
||||
.\target\debug\claw.exe --help
|
||||
.\target\debug\claw.exe doctor
|
||||
```
|
||||
|
||||
For an optimized local binary:
|
||||
|
||||
```powershell
|
||||
Set-Location .\claw-code\rust
|
||||
cargo build --workspace --release
|
||||
.\target\release\claw.exe --help
|
||||
```
|
||||
|
||||
### Option B: use a release artifact
|
||||
|
||||
Use this when a GitHub release publishes a Windows artifact. The release workflow publishes `claw-windows-x64.exe` plus `claw-windows-x64.exe.sha256`; if a future release wraps the binary in a ZIP, prefer the `windows-x86_64` / `pc-windows-msvc` asset and its matching checksum file.
|
||||
|
||||
```powershell
|
||||
$Asset = "claw-windows-x64.exe"
|
||||
$InstallRoot = "$env:LOCALAPPDATA\Programs\claw"
|
||||
New-Item -ItemType Directory -Force $InstallRoot | Out-Null
|
||||
|
||||
# Download $Asset and $Asset.sha256 from the release page, then verify them:
|
||||
$Actual = (Get-FileHash ".\$Asset" -Algorithm SHA256).Hash.ToLowerInvariant()
|
||||
$Expected = (Get-Content ".\$Asset.sha256" | Select-Object -First 1).Split()[0].ToLowerInvariant()
|
||||
if ($Actual -ne $Expected) { throw "checksum mismatch for $Asset" }
|
||||
|
||||
Copy-Item ".\$Asset" "$InstallRoot\claw.exe" -Force
|
||||
& "$InstallRoot\claw.exe" --help
|
||||
& "$InstallRoot\claw.exe" doctor
|
||||
```
|
||||
|
||||
To make that binary available in new PowerShell windows:
|
||||
|
||||
```powershell
|
||||
$InstallRoot = "$env:LOCALAPPDATA\Programs\claw"
|
||||
[Environment]::SetEnvironmentVariable(
|
||||
"Path",
|
||||
[Environment]::GetEnvironmentVariable("Path", "User") + ";$InstallRoot",
|
||||
"User"
|
||||
)
|
||||
```
|
||||
|
||||
Open a new terminal before running `claw --help` from another directory.
|
||||
|
||||
### Option C: WSL
|
||||
|
||||
The repository `install.sh` path is for Linux, macOS, and Windows via WSL. Run it from inside your WSL distribution, not from native PowerShell:
|
||||
|
||||
```powershell
|
||||
wsl --install
|
||||
wsl
|
||||
```
|
||||
|
||||
Then inside WSL:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ultraworkers/claw-code
|
||||
cd claw-code
|
||||
./install.sh
|
||||
```
|
||||
|
||||
## First-run health checks
|
||||
|
||||
Run these before using live prompts:
|
||||
|
||||
```powershell
|
||||
Set-Location .\claw-code\rust
|
||||
.\target\debug\claw.exe --help
|
||||
.\target\debug\claw.exe doctor
|
||||
.\target\debug\claw.exe status --output-format json
|
||||
.\target\debug\claw.exe config --output-format json
|
||||
```
|
||||
|
||||
`doctor`, `status`, `config`, and `version` support `--output-format json`; do not pass a separate `--json` flag.
|
||||
|
||||
## Safe credential setup
|
||||
|
||||
Set keys only in your local environment or a private `.env` file. Do not paste real keys into shell history shared with others, issue trackers, or documentation.
|
||||
|
||||
Current PowerShell session only:
|
||||
|
||||
```powershell
|
||||
$env:ANTHROPIC_API_KEY = "sk-ant-REPLACE_ME"
|
||||
```
|
||||
|
||||
Persist for future PowerShell windows:
|
||||
|
||||
```powershell
|
||||
setx ANTHROPIC_API_KEY "sk-ant-REPLACE_ME"
|
||||
```
|
||||
|
||||
Open a new terminal after `setx`. To remove a session-local key while testing provider switching:
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
```
|
||||
|
||||
## Safe provider switching examples
|
||||
|
||||
Provider routing is model-prefix first. When multiple credentials exist, choose an explicit model prefix so `claw` does not infer the wrong backend.
|
||||
|
||||
### Anthropic direct
|
||||
|
||||
```powershell
|
||||
$env:ANTHROPIC_API_KEY = "sk-ant-REPLACE_ME"
|
||||
Remove-Item Env:\OPENAI_BASE_URL -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
|
||||
.\target\debug\claw.exe --model "sonnet" prompt "reply with ready"
|
||||
```
|
||||
|
||||
### OpenAI-compatible gateway or OpenRouter
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:OPENAI_BASE_URL = "https://openrouter.ai/api/v1"
|
||||
$env:OPENAI_API_KEY = "sk-or-v1-REPLACE_ME"
|
||||
|
||||
.\target\debug\claw.exe --model "openai/gpt-4.1-mini" prompt "reply with ready"
|
||||
```
|
||||
|
||||
For the default OpenAI-compatible API, omit `OPENAI_BASE_URL` or set it to `https://api.openai.com/v1`, and keep the `openai/` or `gpt-` model prefix explicit.
|
||||
|
||||
### Local OpenAI-compatible server
|
||||
|
||||
Use a loopback URL and a placeholder token unless your local server requires a real one:
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:OPENAI_BASE_URL = "http://127.0.0.1:11434/v1"
|
||||
$env:OPENAI_API_KEY = "local-dev-token"
|
||||
|
||||
.\target\debug\claw.exe --model "llama3.2" prompt "reply with ready"
|
||||
```
|
||||
|
||||
If the local server is authless, remove `OPENAI_API_KEY` instead of putting a real cloud key into local testing:
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
```
|
||||
|
||||
### DashScope / Qwen
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:DASHSCOPE_API_KEY = "sk-REPLACE_ME"
|
||||
|
||||
.\target\debug\claw.exe --model "qwen-plus" prompt "reply with ready"
|
||||
```
|
||||
|
||||
## Windows and WSL notifications
|
||||
|
||||
Notification support is exposed through the `notifications` slash command in the interactive REPL. Use JSON/status commands first to confirm the CLI runs, then configure notifications from the REPL if your workflow needs them.
|
||||
|
||||
Native PowerShell smoke path:
|
||||
|
||||
```powershell
|
||||
Set-Location .\claw-code\rust
|
||||
.\target\debug\claw.exe
|
||||
# inside the REPL:
|
||||
/notifications
|
||||
```
|
||||
|
||||
WSL smoke path:
|
||||
|
||||
```bash
|
||||
cd claw-code/rust
|
||||
./target/debug/claw
|
||||
# inside the REPL:
|
||||
/notifications
|
||||
```
|
||||
|
||||
When moving between PowerShell and WSL, keep provider keys in the environment where `claw` is actually running; Windows user env vars set with `setx` are not automatically the same as WSL shell exports.
|
||||
|
||||
## Troubleshooting checklist
|
||||
|
||||
- `claw` not found: use `claw.exe` on Windows or run the binary by full path (`.\target\debug\claw.exe`).
|
||||
- `cargo` not found: reopen PowerShell after installing Rust from <https://rustup.rs/>.
|
||||
- `401 Invalid bearer token`: put `sk-ant-*` values in `ANTHROPIC_API_KEY`, not `ANTHROPIC_AUTH_TOKEN`.
|
||||
- Wrong provider selected: add an explicit model prefix such as `openai/gpt-4.1-mini`, `qwen-plus`, or `grok`.
|
||||
- Release ZIP extracted but command still fails: open a new terminal after updating the user `Path`, or call `& "$env:LOCALAPPDATA\Programs\claw\claw.exe"` directly.
|
||||
@@ -16,7 +16,7 @@ unsafe_code = "forbid"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
all = { level = "warn", priority = -1 }
|
||||
pedantic = { level = "warn", priority = -1 }
|
||||
pedantic = { level = "allow", priority = -1 }
|
||||
module_name_repetitions = "allow"
|
||||
missing_panics_doc = "allow"
|
||||
missing_errors_doc = "allow"
|
||||
|
||||
@@ -22,6 +22,8 @@ The harness runs these scripted scenarios against a fresh workspace and isolated
|
||||
8. `bash_permission_prompt_approved`
|
||||
9. `bash_permission_prompt_denied`
|
||||
10. `plugin_tool_roundtrip`
|
||||
11. `auto_compact_triggered`
|
||||
12. `token_cost_reporting`
|
||||
|
||||
## Run
|
||||
|
||||
@@ -37,7 +39,7 @@ cd rust/
|
||||
python3 scripts/run_mock_parity_diff.py
|
||||
```
|
||||
|
||||
Scenario-to-PARITY mappings live in `mock_parity_scenarios.json`.
|
||||
Scenario-to-PARITY mappings live in `mock_parity_scenarios.json`; keep this manifest aligned with `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs` and `PARITY.md` via `python3 scripts/run_mock_parity_diff.py --no-run`.
|
||||
|
||||
## Manual mock server
|
||||
|
||||
|
||||
@@ -145,7 +145,7 @@ Top-level commands:
|
||||
init
|
||||
```
|
||||
|
||||
`claw acp` is a local discoverability surface for editor-first users: it reports the current ACP/Zed status without starting the runtime. As of April 16, 2026, claw-code does **not** ship an ACP/Zed daemon entrypoint yet, and `claw acp serve` is only a status alias until the real protocol surface lands.
|
||||
`claw acp` is a local discoverability surface for editor-first users: it reports the current ACP/Zed status without starting the runtime. As of April 16, 2026, claw-code does **not** ship an ACP/Zed daemon or JSON-RPC entrypoint yet, and `claw acp serve` is only a status alias until the real protocol surface lands. Status queries exit 0 and expose the same machine-readable contract via `--output-format json`; malformed ACP invocations exit 1 with `kind: unsupported_acp_invocation`.
|
||||
|
||||
The command surface is moving quickly. For the canonical live help text, run:
|
||||
|
||||
|
||||
@@ -76,6 +76,7 @@ fn create_sample_request(message_count: usize) -> MessageRequest {
|
||||
presence_penalty: None,
|
||||
stop: None,
|
||||
reasoning_effort: None,
|
||||
extra_body: std::collections::BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,13 +20,15 @@ pub use prompt_cache::{
|
||||
};
|
||||
pub use providers::anthropic::{AnthropicClient, AnthropicClient as ApiClient, AuthSource};
|
||||
pub use providers::openai_compat::{
|
||||
build_chat_completion_request, flatten_tool_result_content, is_reasoning_model,
|
||||
model_rejects_is_error_field, model_requires_reasoning_content_in_history, translate_message,
|
||||
OpenAiCompatClient, OpenAiCompatConfig,
|
||||
build_chat_completion_request, check_request_body_size, estimate_request_body_size,
|
||||
flatten_tool_result_content, is_reasoning_model, model_rejects_is_error_field,
|
||||
model_requires_reasoning_content_in_history, translate_message, OpenAiCompatClient,
|
||||
OpenAiCompatConfig,
|
||||
};
|
||||
pub use providers::{
|
||||
detect_provider_kind, max_tokens_for_model, max_tokens_for_model_with_override,
|
||||
model_family_identity_for, model_family_identity_for_kind, resolve_model_alias, ProviderKind,
|
||||
model_family_identity_for, model_family_identity_for_kind, provider_diagnostics_for_model,
|
||||
resolve_model_alias, ProviderDiagnostics, ProviderKind,
|
||||
};
|
||||
pub use sse::{parse_frame, SseParser};
|
||||
pub use types::{
|
||||
|
||||
@@ -600,8 +600,9 @@ fn jitter_for_base(base: Duration) -> Duration {
|
||||
}
|
||||
let raw_nanos = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map(|elapsed| u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX))
|
||||
.unwrap_or(0);
|
||||
.map_or(0, |elapsed| {
|
||||
u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX)
|
||||
});
|
||||
let tick = JITTER_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
// splitmix64 finalizer — mixes the low bits so large bases still see
|
||||
// jitter across their full range instead of being clamped to subsec nanos.
|
||||
@@ -844,19 +845,17 @@ impl MessageStream {
|
||||
StreamEvent::MessageDelta(MessageDeltaEvent { usage, .. }) => {
|
||||
self.latest_usage = Some(usage.clone());
|
||||
}
|
||||
StreamEvent::MessageStop(_) => {
|
||||
if !self.usage_recorded {
|
||||
if let (Some(prompt_cache), Some(usage)) =
|
||||
(&self.prompt_cache, self.latest_usage.as_ref())
|
||||
{
|
||||
let record = prompt_cache.record_usage(&self.request, usage);
|
||||
*self
|
||||
.last_prompt_cache_record
|
||||
.lock()
|
||||
.unwrap_or_else(std::sync::PoisonError::into_inner) = Some(record);
|
||||
}
|
||||
self.usage_recorded = true;
|
||||
StreamEvent::MessageStop(_) if !self.usage_recorded => {
|
||||
if let (Some(prompt_cache), Some(usage)) =
|
||||
(&self.prompt_cache, self.latest_usage.as_ref())
|
||||
{
|
||||
let record = prompt_cache.record_usage(&self.request, usage);
|
||||
*self
|
||||
.last_prompt_cache_record
|
||||
.lock()
|
||||
.unwrap_or_else(std::sync::PoisonError::into_inner) = Some(record);
|
||||
}
|
||||
self.usage_recorded = true;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#![allow(clippy::cast_possible_truncation)]
|
||||
#![allow(dead_code)]
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
@@ -28,7 +29,7 @@ pub trait Provider {
|
||||
) -> ProviderFuture<'a, Self::Stream>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
pub enum ProviderKind {
|
||||
Anthropic,
|
||||
Xai,
|
||||
@@ -49,6 +50,74 @@ pub struct ModelTokenLimit {
|
||||
pub context_window_tokens: u32,
|
||||
}
|
||||
|
||||
/// HTTP request/response shape a provider speaks.
///
/// Serialized in snake_case for the machine-readable diagnostics surface.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ProviderWireProtocol {
    /// Anthropic Messages API shape.
    AnthropicMessages,
    /// OpenAI-style chat-completions shape (also used by xAI and
    /// OpenAI-compatible gateways).
    OpenAiChatCompletions,
}
|
||||
|
||||
/// How one capability in the provider capability matrix is handled.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ProviderFeatureSupport {
    /// The provider handles this capability natively.
    Supported,
    /// The capability is not available for this provider.
    Unsupported,
    /// Not provider-native: exposed to the model as an ordinary function
    /// tool instead (see the web_search/web_fetch diagnostics).
    PassthroughAsTool,
}
|
||||
|
||||
/// Static capability matrix entry for a single model/provider pairing, as
/// produced by `provider_capabilities_for_model`.
///
/// Field order is the JSON serialization order — keep it stable.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct ProviderCapabilityReport {
    pub provider: ProviderKind,
    pub wire_protocol: ProviderWireProtocol,
    // Environment variable names and default endpoint for this provider.
    pub auth_env: &'static str,
    pub base_url_env: &'static str,
    pub default_base_url: &'static str,
    // Per-feature support flags.
    pub tool_calls: ProviderFeatureSupport,
    pub streaming: ProviderFeatureSupport,
    pub streaming_usage: ProviderFeatureSupport,
    pub prompt_cache: ProviderFeatureSupport,
    pub custom_parameters: ProviderFeatureSupport,
    pub reasoning_effort: ProviderFeatureSupport,
    pub reasoning_content_history: ProviderFeatureSupport,
    pub fixed_sampling_reasoning_models: ProviderFeatureSupport,
    pub web_search: ProviderFeatureSupport,
    pub web_fetch: ProviderFeatureSupport,
}
|
||||
|
||||
/// Severity of a request-level provider diagnostic.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ProviderDiagnosticSeverity {
    /// Informational: behavior is expected but worth surfacing.
    Info,
    /// Warning: the request asks for something this provider will not honor.
    Warning,
}
|
||||
|
||||
/// One actionable note about how a request maps onto a provider,
/// produced by `provider_diagnostics_for_request`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct ProviderDiagnostic {
    // Stable machine-readable identifier (e.g. "reasoning_effort_unsupported").
    pub code: &'static str,
    pub severity: ProviderDiagnosticSeverity,
    // Human-readable explanation of what will happen.
    pub message: String,
    // Suggested remediation for the caller.
    pub action: String,
}
|
||||
|
||||
/// Model-level routing/behavior summary produced by
/// `provider_diagnostics_for_model` — a flat, serializable snapshot of how a
/// model id resolves and which provider quirks apply.
// The bools are independent reported facts, not encoded state, so the
// excessive-bools lint does not apply here.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct ProviderDiagnostics {
    // Model id exactly as the caller passed it.
    pub requested_model: String,
    // Model id after alias resolution (see `resolve_model_alias`).
    pub resolved_model: String,
    pub provider: ProviderKind,
    pub auth_env: &'static str,
    pub base_url_env: &'static str,
    pub default_base_url: &'static str,
    // True for OpenAI and xAI (chat-completions wire protocol).
    pub openai_compatible: bool,
    pub reasoning_model: bool,
    pub preserves_reasoning_content_in_history: bool,
    // Reasoning models get tuning params (temperature etc.) stripped.
    pub strips_tuning_params: bool,
    pub supports_stream_usage: bool,
    pub honors_proxy_env: bool,
    pub supports_extra_body_params: bool,
    pub preserves_slash_model_ids_on_custom_base_url: bool,
}
|
||||
|
||||
const MODEL_REGISTRY: &[(&str, ProviderMetadata)] = &[
|
||||
(
|
||||
"opus",
|
||||
@@ -219,6 +288,55 @@ pub fn metadata_for_model(model: &str) -> Option<ProviderMetadata> {
|
||||
None
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn provider_diagnostics_for_model(model: &str) -> ProviderDiagnostics {
|
||||
let resolved_model = resolve_model_alias(model);
|
||||
let metadata =
|
||||
metadata_for_model(&resolved_model).unwrap_or_else(|| {
|
||||
match detect_provider_kind(&resolved_model) {
|
||||
ProviderKind::Anthropic => ProviderMetadata {
|
||||
provider: ProviderKind::Anthropic,
|
||||
auth_env: "ANTHROPIC_API_KEY",
|
||||
base_url_env: "ANTHROPIC_BASE_URL",
|
||||
default_base_url: anthropic::DEFAULT_BASE_URL,
|
||||
},
|
||||
ProviderKind::Xai => ProviderMetadata {
|
||||
provider: ProviderKind::Xai,
|
||||
auth_env: "XAI_API_KEY",
|
||||
base_url_env: "XAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_XAI_BASE_URL,
|
||||
},
|
||||
ProviderKind::OpenAi => ProviderMetadata {
|
||||
provider: ProviderKind::OpenAi,
|
||||
auth_env: "OPENAI_API_KEY",
|
||||
base_url_env: "OPENAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_OPENAI_BASE_URL,
|
||||
},
|
||||
}
|
||||
});
|
||||
let openai_compatible = matches!(metadata.provider, ProviderKind::OpenAi | ProviderKind::Xai);
|
||||
let reasoning_model = openai_compatible && openai_compat::is_reasoning_model(&resolved_model);
|
||||
|
||||
ProviderDiagnostics {
|
||||
requested_model: model.to_string(),
|
||||
resolved_model: resolved_model.clone(),
|
||||
provider: metadata.provider,
|
||||
auth_env: metadata.auth_env,
|
||||
base_url_env: metadata.base_url_env,
|
||||
default_base_url: metadata.default_base_url,
|
||||
openai_compatible,
|
||||
reasoning_model,
|
||||
preserves_reasoning_content_in_history: openai_compatible
|
||||
&& openai_compat::model_requires_reasoning_content_in_history(&resolved_model),
|
||||
strips_tuning_params: reasoning_model,
|
||||
supports_stream_usage: metadata.provider == ProviderKind::OpenAi
|
||||
&& metadata.default_base_url == openai_compat::DEFAULT_OPENAI_BASE_URL,
|
||||
honors_proxy_env: true,
|
||||
supports_extra_body_params: openai_compatible,
|
||||
preserves_slash_model_ids_on_custom_base_url: metadata.provider == ProviderKind::OpenAi,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn detect_provider_kind(model: &str) -> ProviderKind {
|
||||
if let Some(metadata) = metadata_for_model(model) {
|
||||
@@ -263,6 +381,208 @@ pub fn model_family_identity_for(model: &str) -> runtime::ModelFamilyIdentity {
|
||||
model_family_identity_for_kind(detect_provider_kind(model))
|
||||
}
|
||||
|
||||
/// Builds the static [`ProviderCapabilityReport`] for `model`.
///
/// Registry misses fall back to provider-kind defaults. NOTE(review): unlike
/// `provider_diagnostics_for_model`, this uses the model id as passed in —
/// aliases are not resolved before the DeepSeek reasoning-history check;
/// confirm that is intended.
#[must_use]
pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
    let metadata = metadata_for_model(model).unwrap_or_else(|| {
        let provider = detect_provider_kind(model);
        metadata_for_provider_kind(provider)
    });

    // Per-provider feature differences, in this tuple order:
    // (wire_protocol, streaming_usage, prompt_cache, custom_parameters,
    //  reasoning_effort, reasoning_content_history,
    //  fixed_sampling_reasoning_models).
    let (
        wire_protocol,
        streaming_usage,
        prompt_cache,
        custom_parameters,
        reasoning_effort,
        reasoning_content_history,
        fixed_sampling_reasoning_models,
    ) = match metadata.provider {
        ProviderKind::Anthropic => (
            ProviderWireProtocol::AnthropicMessages,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Supported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Unsupported,
        ),
        ProviderKind::Xai => (
            ProviderWireProtocol::OpenAiChatCompletions,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Supported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Supported,
        ),
        ProviderKind::OpenAi => (
            ProviderWireProtocol::OpenAiChatCompletions,
            ProviderFeatureSupport::Supported,
            ProviderFeatureSupport::Unsupported,
            ProviderFeatureSupport::Supported,
            ProviderFeatureSupport::Supported,
            // DeepSeek V4-style models must see prior assistant thinking
            // echoed back as `reasoning_content`.
            if openai_compat::model_requires_reasoning_content_in_history(model) {
                ProviderFeatureSupport::Supported
            } else {
                ProviderFeatureSupport::Unsupported
            },
            ProviderFeatureSupport::Supported,
        ),
    };

    ProviderCapabilityReport {
        provider: metadata.provider,
        wire_protocol,
        auth_env: metadata.auth_env,
        base_url_env: metadata.base_url_env,
        default_base_url: metadata.default_base_url,
        // Tool calls and streaming are supported on every provider.
        tool_calls: ProviderFeatureSupport::Supported,
        streaming: ProviderFeatureSupport::Supported,
        streaming_usage,
        prompt_cache,
        custom_parameters,
        reasoning_effort,
        reasoning_content_history,
        fixed_sampling_reasoning_models,
        // Web tools are never provider-native; they pass through as
        // ordinary function tools everywhere.
        web_search: ProviderFeatureSupport::PassthroughAsTool,
        web_fetch: ProviderFeatureSupport::PassthroughAsTool,
    }
}
|
||||
|
||||
/// Inspects a [`MessageRequest`] against the capability matrix of its model
/// and returns actionable [`ProviderDiagnostic`]s (possibly empty) describing
/// mismatches: unsupported `reasoning_effort`, stripped tuning parameters,
/// DeepSeek reasoning-history requirements, and web-tool passthrough.
#[must_use]
pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
    let capabilities = provider_capabilities_for_model(&request.model);
    let mut diagnostics = Vec::new();

    // Warn when the caller sets `reasoning_effort` but the provider ignores it.
    if request.reasoning_effort.is_some()
        && capabilities.reasoning_effort == ProviderFeatureSupport::Unsupported
    {
        diagnostics.push(ProviderDiagnostic {
            code: "reasoning_effort_unsupported",
            severity: ProviderDiagnosticSeverity::Warning,
            message: format!(
                "{} does not map `reasoning_effort` for model `{}`.",
                provider_label(capabilities.provider),
                request.model
            ),
            action: "Remove `reasoning_effort` or route to an OpenAI-compatible reasoning model such as `openai/o4-mini`.".to_string(),
        });
    }

    // Info when tuning parameters will be silently dropped for a
    // fixed-sampling reasoning model.
    if openai_compat::is_reasoning_model(&request.model)
        && has_openai_tuning_parameters(request)
        && capabilities.fixed_sampling_reasoning_models == ProviderFeatureSupport::Supported
    {
        diagnostics.push(ProviderDiagnostic {
            code: "reasoning_model_fixed_sampling",
            severity: ProviderDiagnosticSeverity::Info,
            message: format!(
                "Model `{}` is treated as a fixed-sampling reasoning model; tuning parameters are omitted before the provider call.",
                request.model
            ),
            action: "Leave temperature/top_p/frequency_penalty/presence_penalty unset for reasoning models to match provider validation rules.".to_string(),
        });
    }

    // Info for DeepSeek V4-style models that need thinking history echoed back.
    if openai_compat::model_requires_reasoning_content_in_history(&request.model) {
        diagnostics.push(ProviderDiagnostic {
            code: "deepseek_v4_reasoning_history",
            severity: ProviderDiagnosticSeverity::Info,
            message: format!(
                "Model `{}` requires assistant thinking history to be echoed as `reasoning_content`.",
                request.model
            ),
            action: "Keep prior assistant Thinking blocks in history; the OpenAI-compatible serializer will emit `reasoning_content` for DeepSeek V4 models.".to_string(),
        });
    }

    // Web tools are passed through as plain function tools on every provider.
    if declares_tool(request, "web_search") {
        diagnostics.push(web_passthrough_diagnostic(
            "web_search_passthrough_tool",
            "web_search",
            capabilities.provider,
        ));
    }
    if declares_tool(request, "web_fetch") {
        diagnostics.push(web_passthrough_diagnostic(
            "web_fetch_passthrough_tool",
            "web_fetch",
            capabilities.provider,
        ));
    }

    diagnostics
}
|
||||
|
||||
#[must_use]
|
||||
fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
match provider {
|
||||
ProviderKind::Anthropic => ProviderMetadata {
|
||||
provider,
|
||||
auth_env: "ANTHROPIC_API_KEY",
|
||||
base_url_env: "ANTHROPIC_BASE_URL",
|
||||
default_base_url: anthropic::DEFAULT_BASE_URL,
|
||||
},
|
||||
ProviderKind::Xai => ProviderMetadata {
|
||||
provider,
|
||||
auth_env: "XAI_API_KEY",
|
||||
base_url_env: "XAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_XAI_BASE_URL,
|
||||
},
|
||||
ProviderKind::OpenAi => ProviderMetadata {
|
||||
provider,
|
||||
auth_env: "OPENAI_API_KEY",
|
||||
base_url_env: "OPENAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_OPENAI_BASE_URL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
match provider {
|
||||
ProviderKind::Anthropic => "Anthropic",
|
||||
ProviderKind::Xai => "xAI",
|
||||
ProviderKind::OpenAi => "OpenAI-compatible",
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
request.temperature.is_some()
|
||||
|| request.top_p.is_some()
|
||||
|| request.frequency_penalty.is_some()
|
||||
|| request.presence_penalty.is_some()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
request.tools.as_ref().is_some_and(|tools| {
|
||||
tools
|
||||
.iter()
|
||||
.any(|tool| tool.name.eq_ignore_ascii_case(tool_name))
|
||||
})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn web_passthrough_diagnostic(
|
||||
code: &'static str,
|
||||
tool_name: &'static str,
|
||||
provider: ProviderKind,
|
||||
) -> ProviderDiagnostic {
|
||||
ProviderDiagnostic {
|
||||
code,
|
||||
severity: ProviderDiagnosticSeverity::Info,
|
||||
message: format!(
|
||||
"`{tool_name}` is exposed to {} as a normal function tool, not as a provider-native web capability.",
|
||||
provider_label(provider)
|
||||
),
|
||||
action: format!(
|
||||
"Provide a local `{tool_name}` tool implementation or route through a provider adapter that explicitly supports native web tools."
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn max_tokens_for_model(model: &str) -> u32 {
|
||||
let canonical = resolve_model_alias(model);
|
||||
@@ -272,9 +592,7 @@ pub fn max_tokens_for_model(model: &str) -> u32 {
|
||||
64_000
|
||||
};
|
||||
|
||||
model_token_limit(model)
|
||||
.map(|limit| heuristic.min(limit.max_output_tokens))
|
||||
.unwrap_or(heuristic)
|
||||
model_token_limit(model).map_or(heuristic, |limit| heuristic.min(limit.max_output_tokens))
|
||||
}
|
||||
|
||||
/// Returns the effective max output tokens for a model, preferring a plugin
|
||||
@@ -498,7 +816,9 @@ mod tests {
|
||||
anthropic_missing_credentials, anthropic_missing_credentials_hint, detect_provider_kind,
|
||||
load_dotenv_file, max_tokens_for_model, max_tokens_for_model_with_override,
|
||||
model_family_identity_for, model_family_identity_for_kind, model_token_limit, parse_dotenv,
|
||||
preflight_message_request, resolve_model_alias, ProviderKind,
|
||||
preflight_message_request, provider_capabilities_for_model,
|
||||
provider_diagnostics_for_request, resolve_model_alias, ProviderFeatureSupport,
|
||||
ProviderKind, ProviderWireProtocol,
|
||||
};
|
||||
|
||||
/// Serializes every test in this module that mutates process-wide
|
||||
@@ -593,6 +913,105 @@ mod tests {
|
||||
assert_eq!(xai_identity, runtime::ModelFamilyIdentity::Generic);
|
||||
}
|
||||
|
||||
// Snapshots the per-provider capability matrix so differences between
// OpenAI-compatible backends (stream usage, reasoning effort, prompt cache,
// web-tool passthrough) stay pinned by a test.
#[test]
fn provider_capability_matrix_snapshots_openai_compat_differences() {
    let openai = provider_capabilities_for_model("openai/gpt-4.1-mini");
    assert_eq!(openai.provider, ProviderKind::OpenAi);
    assert_eq!(
        openai.wire_protocol,
        ProviderWireProtocol::OpenAiChatCompletions
    );
    assert_eq!(openai.auth_env, "OPENAI_API_KEY");
    assert_eq!(openai.streaming_usage, ProviderFeatureSupport::Supported);
    assert_eq!(openai.reasoning_effort, ProviderFeatureSupport::Supported);
    assert_eq!(openai.web_search, ProviderFeatureSupport::PassthroughAsTool);
    assert_eq!(openai.web_fetch, ProviderFeatureSupport::PassthroughAsTool);

    // DeepSeek V4-style models require reasoning history support.
    let deepseek = provider_capabilities_for_model("openai/deepseek-v4-pro");
    assert_eq!(
        deepseek.reasoning_content_history,
        ProviderFeatureSupport::Supported
    );

    // xAI: chat-completions wire, but no reasoning effort or stream usage.
    let xai = provider_capabilities_for_model("grok-3");
    assert_eq!(xai.provider, ProviderKind::Xai);
    assert_eq!(xai.auth_env, "XAI_API_KEY");
    assert_eq!(xai.reasoning_effort, ProviderFeatureSupport::Unsupported);
    assert_eq!(xai.streaming_usage, ProviderFeatureSupport::Unsupported);

    // Anthropic: Messages wire, prompt cache yes, custom params no.
    let anthropic = provider_capabilities_for_model("claude-sonnet-4-6");
    assert_eq!(anthropic.provider, ProviderKind::Anthropic);
    assert_eq!(
        anthropic.wire_protocol,
        ProviderWireProtocol::AnthropicMessages
    );
    assert_eq!(anthropic.prompt_cache, ProviderFeatureSupport::Supported);
    assert_eq!(
        anthropic.custom_parameters,
        ProviderFeatureSupport::Unsupported
    );
}
|
||||
|
||||
// A DeepSeek V4 request that declares web_search/web_fetch tools should get
// the reasoning-history diagnostic plus one passthrough diagnostic per web
// tool, each with an actionable remediation.
#[test]
fn provider_diagnostics_explain_deepseek_reasoning_and_web_tool_passthrough() {
    let request = MessageRequest {
        model: "openai/deepseek-v4-pro".to_string(),
        max_tokens: 1024,
        messages: vec![InputMessage::user_text("research this")],
        tools: Some(vec![
            ToolDefinition {
                name: "web_search".to_string(),
                description: Some("Search the web".to_string()),
                input_schema: json!({"type": "object"}),
            },
            ToolDefinition {
                name: "web_fetch".to_string(),
                description: Some("Fetch a URL".to_string()),
                input_schema: json!({"type": "object"}),
            },
        ]),
        stream: true,
        ..Default::default()
    };

    let diagnostics = provider_diagnostics_for_request(&request);
    let codes = diagnostics
        .iter()
        .map(|diagnostic| diagnostic.code)
        .collect::<Vec<_>>();

    assert!(codes.contains(&"deepseek_v4_reasoning_history"));
    assert!(codes.contains(&"web_search_passthrough_tool"));
    assert!(codes.contains(&"web_fetch_passthrough_tool"));
    // The passthrough action text should point at a provider adapter.
    assert!(diagnostics
        .iter()
        .any(|diagnostic| diagnostic.action.contains("provider adapter")));
}
||||
|
||||
// An xAI reasoning model with `reasoning_effort` plus a tuning parameter
// should warn (effort not mapped) and inform (fixed sampling strips tuning).
#[test]
fn provider_diagnostics_warn_for_unsupported_reasoning_effort() {
    let request = MessageRequest {
        model: "grok-3-mini".to_string(),
        max_tokens: 1024,
        messages: vec![InputMessage::user_text("think")],
        reasoning_effort: Some("high".to_string()),
        temperature: Some(0.7),
        ..Default::default()
    };

    let diagnostics = provider_diagnostics_for_request(&request);
    let codes = diagnostics
        .iter()
        .map(|diagnostic| diagnostic.code)
        .collect::<Vec<_>>();

    assert!(codes.contains(&"reasoning_effort_unsupported"));
    assert!(codes.contains(&"reasoning_model_fixed_sampling"));
    assert!(diagnostics.iter().any(|diagnostic| diagnostic
        .message
        .contains("does not map `reasoning_effort`")));
}
|
||||
|
||||
#[test]
|
||||
fn openai_namespaced_model_routes_to_openai_not_anthropic() {
|
||||
// Regression: "openai/gpt-4.1-mini" was misrouted to Anthropic when
|
||||
@@ -673,6 +1092,19 @@ mod tests {
|
||||
assert_eq!(super::resolve_model_alias("KIMI"), "kimi-k2.5"); // case insensitive
|
||||
}
|
||||
|
||||
// Model-level diagnostics for a DeepSeek V4 id should report the
// OpenAI-compatible routing facts (auth env, reasoning history, extra body,
// proxy handling, slash-model-id preservation).
#[test]
fn provider_diagnostics_explain_openai_compatible_capabilities() {
    let diagnostics = super::provider_diagnostics_for_model("openai/deepseek-v4-pro");

    assert_eq!(diagnostics.provider, ProviderKind::OpenAi);
    assert_eq!(diagnostics.auth_env, "OPENAI_API_KEY");
    assert!(diagnostics.openai_compatible);
    assert!(diagnostics.preserves_reasoning_content_in_history);
    assert!(diagnostics.supports_extra_body_params);
    assert!(diagnostics.honors_proxy_env);
    assert!(diagnostics.preserves_slash_model_ids_on_custom_base_url);
}
|
||||
|
||||
#[test]
|
||||
fn keeps_existing_max_token_heuristic() {
|
||||
assert_eq!(max_tokens_for_model("opus"), 32_000);
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, VecDeque};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
@@ -145,6 +146,12 @@ impl OpenAiCompatClient {
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_http_client(mut self, http: reqwest::Client) -> Self {
|
||||
self.http = http;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_retry_policy(
|
||||
mut self,
|
||||
@@ -267,14 +274,18 @@ impl OpenAiCompatClient {
|
||||
request: &MessageRequest,
|
||||
) -> Result<reqwest::Response, ApiError> {
|
||||
// Pre-flight check: verify request body size against provider limits
|
||||
check_request_body_size(request, self.config())?;
|
||||
check_request_body_size_for_base_url(request, self.config(), &self.base_url)?;
|
||||
|
||||
let request_url = chat_completions_endpoint(&self.base_url);
|
||||
self.http
|
||||
.post(&request_url)
|
||||
.header("content-type", "application/json")
|
||||
.bearer_auth(&self.api_key)
|
||||
.json(&build_chat_completion_request(request, self.config()))
|
||||
.json(&build_chat_completion_request_for_base_url(
|
||||
request,
|
||||
self.config(),
|
||||
&self.base_url,
|
||||
))
|
||||
.send()
|
||||
.await
|
||||
.map_err(ApiError::from)
|
||||
@@ -327,8 +338,9 @@ fn jitter_for_base(base: Duration) -> Duration {
|
||||
}
|
||||
let raw_nanos = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map(|elapsed| u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX))
|
||||
.unwrap_or(0);
|
||||
.map_or(0, |elapsed| {
|
||||
u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX)
|
||||
});
|
||||
let tick = JITTER_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
let mut mixed = raw_nanos
|
||||
.wrapping_add(tick)
|
||||
@@ -463,6 +475,7 @@ impl StreamState {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_lines)]
|
||||
fn ingest_chunk(&mut self, chunk: ChatCompletionChunk) -> Result<Vec<StreamEvent>, ApiError> {
|
||||
let mut events = Vec::new();
|
||||
if !self.message_started {
|
||||
@@ -488,12 +501,7 @@ impl StreamState {
|
||||
}
|
||||
|
||||
if let Some(usage) = chunk.usage {
|
||||
self.usage = Some(Usage {
|
||||
input_tokens: usage.prompt_tokens,
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: 0,
|
||||
output_tokens: usage.completion_tokens,
|
||||
});
|
||||
self.usage = Some(usage.normalized());
|
||||
}
|
||||
|
||||
for choice in chunk.choices {
|
||||
@@ -771,6 +779,29 @@ struct OpenAiUsage {
|
||||
prompt_tokens: u32,
|
||||
#[serde(default)]
|
||||
completion_tokens: u32,
|
||||
#[serde(default)]
|
||||
prompt_tokens_details: Option<OpenAiPromptTokensDetails>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct OpenAiPromptTokensDetails {
|
||||
#[serde(default)]
|
||||
cached_tokens: u32,
|
||||
}
|
||||
|
||||
impl OpenAiUsage {
|
||||
fn normalized(&self) -> Usage {
|
||||
let cached_tokens = self
|
||||
.prompt_tokens_details
|
||||
.as_ref()
|
||||
.map_or(0, |details| details.cached_tokens);
|
||||
Usage {
|
||||
input_tokens: self.prompt_tokens.saturating_sub(cached_tokens),
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: cached_tokens,
|
||||
output_tokens: self.completion_tokens,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -855,7 +886,7 @@ pub fn is_reasoning_model(model: &str) -> bool {
|
||||
|| canonical.contains("thinking")
|
||||
}
|
||||
|
||||
/// Returns true for OpenAI-compatible DeepSeek V4 models that require prior
|
||||
/// Returns true for OpenAI-compatible `DeepSeek` V4 models that require prior
|
||||
/// assistant reasoning to be echoed back as `reasoning_content` in history.
|
||||
#[must_use]
|
||||
pub fn model_requires_reasoning_content_in_history(model: &str) -> bool {
|
||||
@@ -867,6 +898,7 @@ pub fn model_requires_reasoning_content_in_history(model: &str) -> bool {
|
||||
/// Strip routing prefix (e.g., "openai/gpt-4" → "gpt-4") for the wire.
|
||||
/// The prefix is used only to select transport; the backend expects the
|
||||
/// bare model id.
|
||||
#[allow(dead_code)]
|
||||
fn strip_routing_prefix(model: &str) -> &str {
|
||||
if let Some(pos) = model.find('/') {
|
||||
let prefix = &model[..pos];
|
||||
@@ -882,10 +914,51 @@ fn strip_routing_prefix(model: &str) -> &str {
|
||||
}
|
||||
}
|
||||
|
||||
fn wire_model_for_base_url<'a>(
|
||||
model: &'a str,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> Cow<'a, str> {
|
||||
let Some(pos) = model.find('/') else {
|
||||
return Cow::Borrowed(model);
|
||||
};
|
||||
let prefix = &model[..pos];
|
||||
let lowered_prefix = prefix.to_ascii_lowercase();
|
||||
|
||||
if lowered_prefix == "openai" {
|
||||
let trimmed_base_url = base_url.trim_end_matches('/');
|
||||
let default_openai = DEFAULT_OPENAI_BASE_URL.trim_end_matches('/');
|
||||
if config.provider_name == "OpenAI" && trimmed_base_url != default_openai {
|
||||
// OpenAI-compatible gateways such as OpenRouter commonly use
|
||||
// slash-containing model slugs (for example `openai/gpt-4.1-mini`).
|
||||
// Preserve the slug when the user configured a non-default OpenAI
|
||||
// base URL; the prefix still routed to the OpenAI-compatible client,
|
||||
// but the gateway owns the final model namespace.
|
||||
return Cow::Borrowed(model);
|
||||
}
|
||||
return Cow::Borrowed(&model[pos + 1..]);
|
||||
}
|
||||
|
||||
if matches!(lowered_prefix.as_str(), "xai" | "grok" | "qwen" | "kimi") {
|
||||
return Cow::Borrowed(&model[pos + 1..]);
|
||||
}
|
||||
|
||||
Cow::Borrowed(model)
|
||||
}
|
||||
|
||||
/// Estimate the serialized JSON size of a request payload in bytes.
|
||||
/// This is a pre-flight check to avoid hitting provider-specific size limits.
|
||||
#[must_use]
|
||||
pub fn estimate_request_body_size(request: &MessageRequest, config: OpenAiCompatConfig) -> usize {
|
||||
let payload = build_chat_completion_request(request, config);
|
||||
estimate_request_body_size_for_base_url(request, config, &read_base_url(config))
|
||||
}
|
||||
|
||||
fn estimate_request_body_size_for_base_url(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> usize {
|
||||
let payload = build_chat_completion_request_for_base_url(request, config, base_url);
|
||||
// serde_json::to_vec gives us the exact byte size of the serialized JSON
|
||||
serde_json::to_vec(&payload).map_or(0, |v| v.len())
|
||||
}
|
||||
@@ -897,7 +970,15 @@ pub fn check_request_body_size(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
) -> Result<(), ApiError> {
|
||||
let estimated_bytes = estimate_request_body_size(request, config);
|
||||
check_request_body_size_for_base_url(request, config, &read_base_url(config))
|
||||
}
|
||||
|
||||
fn check_request_body_size_for_base_url(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> Result<(), ApiError> {
|
||||
let estimated_bytes = estimate_request_body_size_for_base_url(request, config, base_url);
|
||||
let max_bytes = config.max_request_body_bytes;
|
||||
|
||||
if estimated_bytes > max_bytes {
|
||||
@@ -913,9 +994,18 @@ pub fn check_request_body_size(
|
||||
|
||||
/// Builds a chat completion request payload from a `MessageRequest`.
|
||||
/// Public for benchmarking purposes.
|
||||
#[must_use]
|
||||
pub fn build_chat_completion_request(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
) -> Value {
|
||||
build_chat_completion_request_for_base_url(request, config, &read_base_url(config))
|
||||
}
|
||||
|
||||
fn build_chat_completion_request_for_base_url(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> Value {
|
||||
let mut messages = Vec::new();
|
||||
if let Some(system) = request.system.as_ref().filter(|value| !value.is_empty()) {
|
||||
@@ -924,8 +1014,10 @@ pub fn build_chat_completion_request(
|
||||
"content": system,
|
||||
}));
|
||||
}
|
||||
// Strip routing prefix (e.g., "openai/gpt-4" → "gpt-4") for the wire.
|
||||
let wire_model = strip_routing_prefix(&request.model);
|
||||
// Resolve the transport routing prefix into the wire model. Custom
|
||||
// OpenAI-compatible gateways may require slash-containing slugs intact.
|
||||
let wire_model = wire_model_for_base_url(&request.model, config, base_url);
|
||||
let wire_model = wire_model.as_ref();
|
||||
for message in &request.messages {
|
||||
messages.extend(translate_message(message, wire_model));
|
||||
}
|
||||
@@ -994,9 +1086,29 @@ pub fn build_chat_completion_request(
|
||||
payload["reasoning_effort"] = json!(effort);
|
||||
}
|
||||
|
||||
for (key, value) in &request.extra_body {
|
||||
if is_protected_extra_body_key(key) {
|
||||
continue;
|
||||
}
|
||||
payload[key] = value.clone();
|
||||
}
|
||||
|
||||
payload
|
||||
}
|
||||
|
||||
fn is_protected_extra_body_key(key: &str) -> bool {
|
||||
matches!(
|
||||
key,
|
||||
"model"
|
||||
| "messages"
|
||||
| "stream"
|
||||
| "tools"
|
||||
| "tool_choice"
|
||||
| "max_tokens"
|
||||
| "max_completion_tokens"
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns true for models that do NOT support the `is_error` field in tool results.
|
||||
/// kimi models (via Moonshot AI/Dashscope) reject this field with 400 Bad Request.
|
||||
/// Returns true for models that do NOT support the `is_error` field in tool results.
|
||||
@@ -1083,8 +1195,7 @@ pub fn translate_message(message: &InputMessage, model: &str) -> Vec<Value> {
|
||||
}
|
||||
Some(msg)
|
||||
}
|
||||
InputContentBlock::Thinking { .. } => None,
|
||||
InputContentBlock::ToolUse { .. } => None,
|
||||
InputContentBlock::Thinking { .. } | InputContentBlock::ToolUse { .. } => None,
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
@@ -1294,18 +1405,10 @@ fn normalize_response(
|
||||
.finish_reason
|
||||
.map(|value| normalize_finish_reason(&value)),
|
||||
stop_sequence: None,
|
||||
usage: Usage {
|
||||
input_tokens: response
|
||||
.usage
|
||||
.as_ref()
|
||||
.map_or(0, |usage| usage.prompt_tokens),
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: 0,
|
||||
output_tokens: response
|
||||
.usage
|
||||
.as_ref()
|
||||
.map_or(0, |usage| usage.completion_tokens),
|
||||
},
|
||||
usage: response
|
||||
.usage
|
||||
.as_ref()
|
||||
.map_or_else(Usage::default, OpenAiUsage::normalized),
|
||||
request_id: None,
|
||||
})
|
||||
}
|
||||
@@ -1515,6 +1618,7 @@ mod tests {
|
||||
ToolChoice, ToolDefinition, ToolResultContentBlock,
|
||||
};
|
||||
use serde_json::json;
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
|
||||
#[test]
|
||||
@@ -1949,6 +2053,7 @@ mod tests {
|
||||
presence_penalty: Some(0.3),
|
||||
stop: Some(vec!["\n".to_string()]),
|
||||
reasoning_effort: None,
|
||||
extra_body: BTreeMap::new(),
|
||||
};
|
||||
let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
|
||||
assert_eq!(payload["temperature"], 0.7);
|
||||
@@ -1958,6 +2063,39 @@ mod tests {
|
||||
assert_eq!(payload["stop"], json!(["\n"]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_body_params_are_passed_through_without_overriding_core_fields() {
|
||||
let mut extra_body = BTreeMap::new();
|
||||
extra_body.insert(
|
||||
"web_search_options".to_string(),
|
||||
json!({"search_context_size": "medium"}),
|
||||
);
|
||||
extra_body.insert("parallel_tool_calls".to_string(), json!(false));
|
||||
extra_body.insert("model".to_string(), json!("bad-override"));
|
||||
extra_body.insert("messages".to_string(), json!([]));
|
||||
extra_body.insert("max_tokens".to_string(), json!(1));
|
||||
|
||||
let payload = build_chat_completion_request(
|
||||
&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
max_tokens: 1024,
|
||||
messages: vec![InputMessage::user_text("hello")],
|
||||
extra_body,
|
||||
..Default::default()
|
||||
},
|
||||
OpenAiCompatConfig::openai(),
|
||||
);
|
||||
|
||||
assert_eq!(payload["model"], json!("gpt-4o"));
|
||||
assert_eq!(payload["max_tokens"], json!(1024));
|
||||
assert_eq!(payload["messages"].as_array().map(Vec::len), Some(1));
|
||||
assert_eq!(
|
||||
payload["web_search_options"],
|
||||
json!({"search_context_size": "medium"})
|
||||
);
|
||||
assert_eq!(payload["parallel_tool_calls"], json!(false));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reasoning_model_strips_tuning_params() {
|
||||
let request = MessageRequest {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use runtime::{pricing_for_model, TokenUsage, UsageCostEstimate};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
@@ -31,6 +33,14 @@ pub struct MessageRequest {
|
||||
/// Silently ignored by backends that do not support it.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub reasoning_effort: Option<String>,
|
||||
/// Provider-specific OpenAI-compatible request body parameters. These are
|
||||
/// copied into the final JSON payload after core fields are populated so
|
||||
/// users can opt into gateway features such as `web_search_options`,
|
||||
/// `parallel_tool_calls`, or custom local-server switches without waiting
|
||||
/// for first-class typed fields. Core protocol keys are protected and cannot
|
||||
/// be overridden through this map.
|
||||
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||
pub extra_body: BTreeMap<String, Value>,
|
||||
}
|
||||
|
||||
impl MessageRequest {
|
||||
|
||||
@@ -2,12 +2,13 @@ use std::collections::HashMap;
|
||||
use std::ffi::OsString;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Mutex as StdMutex, OnceLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use api::{
|
||||
ApiError, ContentBlockDelta, ContentBlockDeltaEvent, ContentBlockStartEvent,
|
||||
ContentBlockStopEvent, InputContentBlock, InputMessage, MessageDeltaEvent, MessageRequest,
|
||||
OpenAiCompatClient, OpenAiCompatConfig, OutputContentBlock, ProviderClient, StreamEvent,
|
||||
ToolChoice, ToolDefinition,
|
||||
build_http_client_with, ApiError, ContentBlockDelta, ContentBlockDeltaEvent,
|
||||
ContentBlockStartEvent, ContentBlockStopEvent, InputContentBlock, InputMessage,
|
||||
MessageDeltaEvent, MessageRequest, OpenAiCompatClient, OpenAiCompatConfig, OutputContentBlock,
|
||||
ProviderClient, ProxyConfig, StreamEvent, ToolChoice, ToolDefinition,
|
||||
};
|
||||
use serde_json::json;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
@@ -25,7 +26,7 @@ async fn send_message_uses_openai_compatible_endpoint_and_auth() {
|
||||
"\"message\":{\"role\":\"assistant\",\"content\":\"Hello from Grok\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":11,\"completion_tokens\":5}",
|
||||
"\"usage\":{\"prompt_tokens\":11,\"completion_tokens\":5,\"prompt_tokens_details\":{\"cached_tokens\":3}}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
@@ -42,6 +43,9 @@ async fn send_message_uses_openai_compatible_endpoint_and_auth() {
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(response.model, "grok-3");
|
||||
assert_eq!(response.usage.input_tokens, 8);
|
||||
assert_eq!(response.usage.cache_read_input_tokens, 3);
|
||||
assert_eq!(response.usage.output_tokens, 5);
|
||||
assert_eq!(response.total_tokens(), 16);
|
||||
assert_eq!(
|
||||
response.content,
|
||||
@@ -63,6 +67,56 @@ async fn send_message_uses_openai_compatible_endpoint_and_auth() {
|
||||
assert_eq!(body["tools"][0]["type"], json!("function"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_message_passes_optional_openai_compatible_parameters_on_wire() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let body = concat!(
|
||||
"{",
|
||||
"\"id\":\"chatcmpl_params\",",
|
||||
"\"model\":\"gpt-4o\",",
|
||||
"\"choices\":[{",
|
||||
"\"message\":{\"role\":\"assistant\",\"content\":\"Parameters preserved\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":3,\"completion_tokens\":2}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response("200 OK", "application/json", body)],
|
||||
)
|
||||
.await;
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url());
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
temperature: Some(0.2),
|
||||
top_p: Some(0.8),
|
||||
frequency_penalty: Some(0.15),
|
||||
presence_penalty: Some(0.25),
|
||||
stop: Some(vec!["END".to_string()]),
|
||||
reasoning_effort: Some("low".to_string()),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(response.total_tokens(), 5);
|
||||
|
||||
let captured = state.lock().await;
|
||||
let request = captured.first().expect("server should capture request");
|
||||
let body: serde_json::Value = serde_json::from_str(&request.body).expect("json body");
|
||||
assert_eq!(body["model"], json!("gpt-4o"));
|
||||
assert_eq!(body["temperature"], json!(0.2));
|
||||
assert_eq!(body["top_p"], json!(0.8));
|
||||
assert_eq!(body["frequency_penalty"], json!(0.15));
|
||||
assert_eq!(body["presence_penalty"], json!(0.25));
|
||||
assert_eq!(body["stop"], json!(["END"]));
|
||||
assert_eq!(body["reasoning_effort"], json!("low"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_message_preserves_deepseek_reasoning_content_before_text() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
@@ -107,6 +161,59 @@ async fn send_message_preserves_deepseek_reasoning_content_before_text() {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn custom_openai_gateway_preserves_slash_model_ids_and_extra_body_params() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let body = concat!(
|
||||
"{",
|
||||
"\"id\":\"chatcmpl_slash_model\",",
|
||||
"\"model\":\"openai/gpt-4.1-mini\",",
|
||||
"\"choices\":[{",
|
||||
"\"message\":{\"role\":\"assistant\",\"content\":\"Gateway accepted slug\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":3,\"completion_tokens\":2}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response("200 OK", "application/json", body)],
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut extra_body = std::collections::BTreeMap::new();
|
||||
extra_body.insert(
|
||||
"web_search_options".to_string(),
|
||||
json!({"search_context_size": "low"}),
|
||||
);
|
||||
extra_body.insert("parallel_tool_calls".to_string(), json!(false));
|
||||
extra_body.insert("model".to_string(), json!("malicious-override"));
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url());
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "openai/gpt-4.1-mini".to_string(),
|
||||
extra_body,
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("gateway request should succeed");
|
||||
|
||||
assert_eq!(response.model, "openai/gpt-4.1-mini");
|
||||
assert_eq!(response.total_tokens(), 5);
|
||||
|
||||
let captured = state.lock().await;
|
||||
let request = captured.first().expect("captured request");
|
||||
let body: serde_json::Value = serde_json::from_str(&request.body).expect("json body");
|
||||
assert_eq!(body["model"], json!("openai/gpt-4.1-mini"));
|
||||
assert_eq!(
|
||||
body["web_search_options"],
|
||||
json!({"search_context_size": "low"})
|
||||
);
|
||||
assert_eq!(body["parallel_tool_calls"], json!(false));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_message_blocks_oversized_xai_requests_before_the_http_call() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
@@ -277,6 +384,65 @@ async fn stream_message_normalizes_text_and_multiple_tool_calls() {
|
||||
assert!(request.body.contains("\"stream\":true"));
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn stream_message_retries_retryable_sse_handshake_failures() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let sse = concat!(
|
||||
"data: {\"id\":\"chatcmpl_stream_retry\",\"model\":\"gpt-4o\",\"choices\":[{\"delta\":{\"content\":\"Recovered\"}}]}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_stream_retry\",\"choices\":[{\"delta\":{},\"finish_reason\":\"stop\"}]}\n\n",
|
||||
"data: [DONE]\n\n"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![
|
||||
http_response(
|
||||
"500 Internal Server Error",
|
||||
"application/json",
|
||||
"{\"error\":{\"message\":\"try again\",\"type\":\"server_error\",\"code\":500}}",
|
||||
),
|
||||
http_response_with_headers(
|
||||
"200 OK",
|
||||
"text/event-stream",
|
||||
sse,
|
||||
&[("x-request-id", "req_stream_retry")],
|
||||
),
|
||||
],
|
||||
)
|
||||
.await;
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url())
|
||||
.with_retry_policy(1, Duration::ZERO, Duration::ZERO);
|
||||
let mut stream = client
|
||||
.stream_message(&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("stream should retry once then start");
|
||||
|
||||
assert_eq!(stream.request_id(), Some("req_stream_retry"));
|
||||
let mut events = Vec::new();
|
||||
while let Some(event) = stream.next_event().await.expect("event should parse") {
|
||||
events.push(event);
|
||||
}
|
||||
assert!(events.iter().any(|event| matches!(
|
||||
event,
|
||||
StreamEvent::ContentBlockDelta(ContentBlockDeltaEvent {
|
||||
delta: ContentBlockDelta::TextDelta { text },
|
||||
..
|
||||
}) if text == "Recovered"
|
||||
)));
|
||||
|
||||
let captured = state.lock().await;
|
||||
assert_eq!(captured.len(), 2, "one original request plus one retry");
|
||||
for request in captured.iter() {
|
||||
let body: serde_json::Value = serde_json::from_str(&request.body).expect("json body");
|
||||
assert_eq!(body["stream"], json!(true));
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
@@ -284,7 +450,7 @@ async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
let sse = concat!(
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"model\":\"gpt-5\",\"choices\":[{\"delta\":{\"content\":\"Hi\"}}]}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"choices\":[{\"delta\":{},\"finish_reason\":\"stop\"}]}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"choices\":[],\"usage\":{\"prompt_tokens\":9,\"completion_tokens\":4}}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"choices\":[],\"usage\":{\"prompt_tokens\":9,\"completion_tokens\":4,\"prompt_tokens_details\":{\"cached_tokens\":2}}}\n\n",
|
||||
"data: [DONE]\n\n"
|
||||
);
|
||||
let server = spawn_server(
|
||||
@@ -339,8 +505,10 @@ async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
|
||||
match &events[4] {
|
||||
StreamEvent::MessageDelta(MessageDeltaEvent { usage, .. }) => {
|
||||
assert_eq!(usage.input_tokens, 9);
|
||||
assert_eq!(usage.input_tokens, 7);
|
||||
assert_eq!(usage.cache_read_input_tokens, 2);
|
||||
assert_eq!(usage.output_tokens, 4);
|
||||
assert_eq!(usage.total_tokens(), 13);
|
||||
}
|
||||
other => panic!("expected message delta, got {other:?}"),
|
||||
}
|
||||
@@ -353,6 +521,44 @@ async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
assert_eq!(body["stream_options"], json!({"include_usage": true}));
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn openai_compatible_client_honors_http_proxy_for_requests() {
|
||||
let _lock = env_lock();
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let proxy = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response(
|
||||
"200 OK",
|
||||
"application/json",
|
||||
"{\"id\":\"chatcmpl_proxy\",\"model\":\"gpt-4o\",\"choices\":[{\"message\":{\"role\":\"assistant\",\"content\":\"Via proxy\",\"tool_calls\":[]},\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":4,\"completion_tokens\":3}}",
|
||||
)],
|
||||
)
|
||||
.await;
|
||||
let proxied_http = build_http_client_with(&ProxyConfig::from_proxy_url(proxy.base_url()))
|
||||
.expect("proxy client should build");
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_http_client(proxied_http)
|
||||
.with_base_url("http://origin.invalid/v1");
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("proxy should return the OpenAI-compatible response");
|
||||
|
||||
assert_eq!(response.total_tokens(), 7);
|
||||
let captured = state.lock().await;
|
||||
let request = captured.first().expect("proxy should capture request");
|
||||
assert_eq!(request.path, "http://origin.invalid/v1/chat/completions");
|
||||
assert_eq!(
|
||||
request.headers.get("authorization").map(String::as_str),
|
||||
Some("Bearer openai-test-key")
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn provider_client_dispatches_xai_requests_from_env() {
|
||||
|
||||
@@ -221,11 +221,11 @@ const SLASH_COMMAND_SPECS: &[SlashCommandSpec] = &[
|
||||
SlashCommandSpec {
|
||||
name: "session",
|
||||
aliases: &[],
|
||||
summary: "List, switch, fork, or delete managed local sessions",
|
||||
summary: "List, check, switch, fork, or delete managed local sessions",
|
||||
argument_hint: Some(
|
||||
"[list|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
"[list|exists <session-id>|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
),
|
||||
resume_supported: false,
|
||||
resume_supported: true,
|
||||
},
|
||||
SlashCommandSpec {
|
||||
name: "plugin",
|
||||
@@ -1590,7 +1590,17 @@ fn parse_session_command(args: &[&str]) -> Result<SlashCommand, SlashCommandPars
|
||||
action: Some("list".to_string()),
|
||||
target: None,
|
||||
}),
|
||||
["list", ..] => Err(usage_error("session", "[list|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]")),
|
||||
["list", ..] => Err(usage_error("session", "[list|exists <session-id>|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]")),
|
||||
["exists"] => Err(usage_error("session exists", "<session-id>")),
|
||||
["exists", target] => Ok(SlashCommand::Session {
|
||||
action: Some("exists".to_string()),
|
||||
target: Some((*target).to_string()),
|
||||
}),
|
||||
["exists", ..] => Err(command_error(
|
||||
"Unexpected arguments for /session exists.",
|
||||
"session",
|
||||
"/session exists <session-id>",
|
||||
)),
|
||||
["switch"] => Err(usage_error("session switch", "<session-id>")),
|
||||
["switch", target] => Ok(SlashCommand::Session {
|
||||
action: Some("switch".to_string()),
|
||||
@@ -1637,10 +1647,10 @@ fn parse_session_command(args: &[&str]) -> Result<SlashCommand, SlashCommandPars
|
||||
)),
|
||||
[action, ..] => Err(command_error(
|
||||
&format!(
|
||||
"Unknown /session action '{action}'. Use list, switch <session-id>, fork [branch-name], or delete <session-id> [--force]."
|
||||
"Unknown /session action '{action}'. Use list, exists <session-id>, switch <session-id>, fork [branch-name], or delete <session-id> [--force]."
|
||||
),
|
||||
"session",
|
||||
"/session [list|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
"/session [list|exists <session-id>|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
)),
|
||||
}
|
||||
}
|
||||
@@ -2392,8 +2402,8 @@ pub fn handle_skills_slash_command(args: Option<&str>, cwd: &Path) -> std::io::R
|
||||
|| args.starts_with("describe ") =>
|
||||
{
|
||||
let name = args
|
||||
.splitn(2, ' ')
|
||||
.nth(1)
|
||||
.split_once(' ')
|
||||
.map(|(_, name)| name)
|
||||
.unwrap_or_default()
|
||||
.trim()
|
||||
.to_lowercase();
|
||||
@@ -2457,8 +2467,8 @@ pub fn handle_skills_slash_command_json(args: Option<&str>, cwd: &Path) -> std::
|
||||
|| args.starts_with("describe ") =>
|
||||
{
|
||||
let name = args
|
||||
.splitn(2, ' ')
|
||||
.nth(1)
|
||||
.split_once(' ')
|
||||
.map(|(_, name)| name)
|
||||
.unwrap_or_default()
|
||||
.trim()
|
||||
.to_lowercase();
|
||||
@@ -2622,6 +2632,7 @@ pub fn resolve_skill_path(cwd: &Path, skill: &str) -> std::io::Result<PathBuf> {
|
||||
))
|
||||
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn render_mcp_report_for(
|
||||
loader: &ConfigLoader,
|
||||
cwd: &Path,
|
||||
@@ -2719,6 +2730,7 @@ fn render_mcp_unsupported_action_json(action: &str, hint: &str) -> Value {
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn render_mcp_report_json_for(
|
||||
loader: &ConfigLoader,
|
||||
cwd: &Path,
|
||||
@@ -3790,6 +3802,7 @@ fn render_mcp_server_report(
|
||||
format!(" Working directory {}", cwd.display()),
|
||||
format!(" Name {server_name}"),
|
||||
format!(" Scope {}", config_source_label(server.scope)),
|
||||
format!(" Required {}", server.required),
|
||||
format!(
|
||||
" Transport {}",
|
||||
mcp_transport_label(&server.config)
|
||||
@@ -4188,6 +4201,7 @@ fn mcp_server_details_json(config: &McpServerConfig) -> Value {
|
||||
fn mcp_server_json(name: &str, server: &ScopedMcpServerConfig) -> Value {
|
||||
json!({
|
||||
"name": name,
|
||||
"required": server.required,
|
||||
"scope": config_source_json(server.scope),
|
||||
"transport": mcp_transport_json(&server.config),
|
||||
"summary": mcp_server_summary(&server.config),
|
||||
@@ -4315,8 +4329,8 @@ mod tests {
|
||||
DefinitionSource, SkillOrigin, SkillRoot, SkillSlashDispatch, SlashCommand,
|
||||
};
|
||||
use plugins::{
|
||||
PluginError, PluginKind, PluginLoadFailure, PluginManager, PluginManagerConfig,
|
||||
PluginMetadata, PluginSummary,
|
||||
PluginError, PluginKind, PluginLifecycle, PluginLoadFailure, PluginManager,
|
||||
PluginManagerConfig, PluginMetadata, PluginSummary,
|
||||
};
|
||||
use runtime::{
|
||||
CompactionConfig, ConfigLoader, ContentBlock, ConversationMessage, MessageRole, Session,
|
||||
@@ -4590,6 +4604,13 @@ mod tests {
|
||||
target: Some("abc123".to_string())
|
||||
}))
|
||||
);
|
||||
assert_eq!(
|
||||
SlashCommand::parse("/session exists abc123"),
|
||||
Ok(Some(SlashCommand::Session {
|
||||
action: Some("exists".to_string()),
|
||||
target: Some("abc123".to_string())
|
||||
}))
|
||||
);
|
||||
assert_eq!(
|
||||
SlashCommand::parse("/plugins install demo"),
|
||||
Ok(Some(SlashCommand::Plugins {
|
||||
@@ -5127,6 +5148,7 @@ mod tests {
|
||||
root: None,
|
||||
},
|
||||
enabled: true,
|
||||
lifecycle: PluginLifecycle::default(),
|
||||
},
|
||||
PluginSummary {
|
||||
metadata: PluginMetadata {
|
||||
@@ -5140,6 +5162,7 @@ mod tests {
|
||||
root: None,
|
||||
},
|
||||
enabled: false,
|
||||
lifecycle: PluginLifecycle::default(),
|
||||
},
|
||||
]);
|
||||
|
||||
@@ -5166,6 +5189,7 @@ mod tests {
|
||||
root: None,
|
||||
},
|
||||
enabled: true,
|
||||
lifecycle: PluginLifecycle::default(),
|
||||
}],
|
||||
&[PluginLoadFailure::new(
|
||||
PathBuf::from("/tmp/broken-plugin"),
|
||||
@@ -5580,6 +5604,7 @@ mod tests {
|
||||
"command": "uvx",
|
||||
"args": ["alpha-server"],
|
||||
"env": {"ALPHA_TOKEN": "secret"},
|
||||
"required": true,
|
||||
"toolCallTimeoutMs": 1200
|
||||
},
|
||||
"remote": {
|
||||
@@ -5625,6 +5650,7 @@ mod tests {
|
||||
let show = super::render_mcp_report_for(&loader, &workspace, Some("show alpha"))
|
||||
.expect("mcp show report should render");
|
||||
assert!(show.contains("Name alpha"));
|
||||
assert!(show.contains("Required true"));
|
||||
assert!(show.contains("Command uvx"));
|
||||
assert!(show.contains("Args alpha-server"));
|
||||
assert!(show.contains("Env keys ALPHA_TOKEN"));
|
||||
@@ -5657,6 +5683,7 @@ mod tests {
|
||||
"command": "uvx",
|
||||
"args": ["alpha-server"],
|
||||
"env": {"ALPHA_TOKEN": "secret"},
|
||||
"required": true,
|
||||
"toolCallTimeoutMs": 1200
|
||||
},
|
||||
"remote": {
|
||||
@@ -5693,6 +5720,7 @@ mod tests {
|
||||
assert_eq!(list["action"], "list");
|
||||
assert_eq!(list["configured_servers"], 2);
|
||||
assert_eq!(list["servers"][0]["name"], "alpha");
|
||||
assert_eq!(list["servers"][0]["required"], true);
|
||||
assert_eq!(list["servers"][0]["transport"]["id"], "stdio");
|
||||
assert_eq!(list["servers"][0]["details"]["command"], "uvx");
|
||||
assert_eq!(list["servers"][1]["name"], "remote");
|
||||
@@ -5708,6 +5736,7 @@ mod tests {
|
||||
assert_eq!(show["action"], "show");
|
||||
assert_eq!(show["found"], true);
|
||||
assert_eq!(show["server"]["name"], "alpha");
|
||||
assert_eq!(show["server"]["required"], true);
|
||||
assert_eq!(show["server"]["details"]["env_keys"][0], "ALPHA_TOKEN");
|
||||
assert_eq!(show["server"]["details"]["tool_call_timeout_ms"], 1200);
|
||||
|
||||
|
||||
@@ -248,7 +248,6 @@ fn detect_scenario(request: &MessageRequest) -> Option<Scenario> {
|
||||
.split_whitespace()
|
||||
.find_map(|token| token.strip_prefix(SCENARIO_PREFIX))
|
||||
.and_then(Scenario::parse),
|
||||
InputContentBlock::Thinking { .. } => None,
|
||||
_ => None,
|
||||
})
|
||||
})
|
||||
|
||||
@@ -648,6 +648,7 @@ impl RegisteredPlugin {
|
||||
PluginSummary {
|
||||
metadata: self.metadata().clone(),
|
||||
enabled: self.enabled,
|
||||
lifecycle: self.definition.lifecycle().clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -656,6 +657,18 @@ impl RegisteredPlugin {
|
||||
pub struct PluginSummary {
|
||||
pub metadata: PluginMetadata,
|
||||
pub enabled: bool,
|
||||
pub lifecycle: PluginLifecycle,
|
||||
}
|
||||
|
||||
impl PluginSummary {
|
||||
#[must_use]
|
||||
pub fn lifecycle_state(&self) -> &'static str {
|
||||
if self.enabled {
|
||||
"ready"
|
||||
} else {
|
||||
"disabled"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -3319,7 +3332,7 @@ mod tests {
|
||||
let config_home = temp_dir("installed-report-home");
|
||||
let bundled_root = temp_dir("installed-report-bundled");
|
||||
let install_root = config_home.join("plugins").join("installed");
|
||||
write_external_plugin(&install_root.join("valid"), "installed-valid", "1.0.0");
|
||||
write_lifecycle_plugin(&install_root.join("valid"), "installed-valid", "1.0.0");
|
||||
write_broken_plugin(&install_root.join("broken"), "installed-broken");
|
||||
|
||||
let mut config = PluginManagerConfig::new(&config_home);
|
||||
@@ -3334,6 +3347,14 @@ mod tests {
|
||||
|
||||
// then
|
||||
assert!(report.registry().contains("installed-valid@external"));
|
||||
let summaries = report.summaries();
|
||||
let valid = summaries
|
||||
.iter()
|
||||
.find(|summary| summary.metadata.id == "installed-valid@external")
|
||||
.expect("valid plugin summary should be present");
|
||||
assert_eq!(valid.lifecycle_state(), "disabled");
|
||||
assert_eq!(valid.lifecycle.init.len(), 1);
|
||||
assert_eq!(valid.lifecycle.shutdown.len(), 1);
|
||||
assert_eq!(report.failures().len(), 1);
|
||||
assert!(report.failures()[0]
|
||||
.plugin_root
|
||||
|
||||
502
rust/crates/runtime/src/approval_tokens.rs
Normal file
502
rust/crates/runtime/src/approval_tokens.rs
Normal file
@@ -0,0 +1,502 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
/// Machine-readable policy exception scope that an approval token may override.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalScope {
|
||||
pub policy: String,
|
||||
pub action: String,
|
||||
pub repository: Option<String>,
|
||||
pub branch: Option<String>,
|
||||
}
|
||||
|
||||
impl ApprovalScope {
|
||||
#[must_use]
|
||||
pub fn new(policy: impl Into<String>, action: impl Into<String>) -> Self {
|
||||
Self {
|
||||
policy: policy.into(),
|
||||
action: action.into(),
|
||||
repository: None,
|
||||
branch: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_repository(mut self, repository: impl Into<String>) -> Self {
|
||||
self.repository = Some(repository.into());
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_branch(mut self, branch: impl Into<String>) -> Self {
|
||||
self.branch = Some(branch.into());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor/session hop recorded when an approval is delegated or consumed.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalDelegationHop {
|
||||
pub actor: String,
|
||||
pub session_id: Option<String>,
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
impl ApprovalDelegationHop {
|
||||
#[must_use]
|
||||
pub fn new(actor: impl Into<String>, reason: impl Into<String>) -> Self {
|
||||
Self {
|
||||
actor: actor.into(),
|
||||
session_id: None,
|
||||
reason: reason.into(),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_session_id(mut self, session_id: impl Into<String>) -> Self {
|
||||
self.session_id = Some(session_id.into());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Current lifecycle state for a policy-exception approval token.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ApprovalTokenStatus {
|
||||
Pending,
|
||||
Granted,
|
||||
Consumed,
|
||||
Expired,
|
||||
Revoked,
|
||||
}
|
||||
|
||||
impl ApprovalTokenStatus {
|
||||
#[must_use]
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
Self::Pending => "approval_pending",
|
||||
Self::Granted => "approval_granted",
|
||||
Self::Consumed => "approval_consumed",
|
||||
Self::Expired => "approval_expired",
|
||||
Self::Revoked => "approval_revoked",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Typed policy errors returned when a token cannot authorize a blocked action.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ApprovalTokenError {
|
||||
NoApproval,
|
||||
ApprovalPending,
|
||||
ApprovalExpired,
|
||||
ApprovalRevoked,
|
||||
ApprovalAlreadyConsumed,
|
||||
ScopeMismatch {
|
||||
expected: Box<ApprovalScope>,
|
||||
actual: Box<ApprovalScope>,
|
||||
},
|
||||
UnauthorizedDelegate {
|
||||
expected: String,
|
||||
actual: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl ApprovalTokenError {
|
||||
#[must_use]
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
Self::NoApproval => "no_approval",
|
||||
Self::ApprovalPending => "approval_pending",
|
||||
Self::ApprovalExpired => "approval_expired",
|
||||
Self::ApprovalRevoked => "approval_revoked",
|
||||
Self::ApprovalAlreadyConsumed => "approval_already_consumed",
|
||||
Self::ScopeMismatch { .. } => "approval_scope_mismatch",
|
||||
Self::UnauthorizedDelegate { .. } => "approval_unauthorized_delegate",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Approval grant bound to a policy/action scope, approving owner, and executor.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalTokenGrant {
|
||||
pub token: String,
|
||||
pub scope: ApprovalScope,
|
||||
pub approving_actor: String,
|
||||
pub approved_executor: String,
|
||||
pub status: ApprovalTokenStatus,
|
||||
pub expires_at_epoch_seconds: Option<u64>,
|
||||
pub max_uses: u32,
|
||||
pub uses: u32,
|
||||
delegation_chain: Vec<ApprovalDelegationHop>,
|
||||
}
|
||||
|
||||
impl ApprovalTokenGrant {
|
||||
#[must_use]
|
||||
pub fn pending(
|
||||
token: impl Into<String>,
|
||||
scope: ApprovalScope,
|
||||
approving_actor: impl Into<String>,
|
||||
approved_executor: impl Into<String>,
|
||||
) -> Self {
|
||||
Self {
|
||||
token: token.into(),
|
||||
scope,
|
||||
approving_actor: approving_actor.into(),
|
||||
approved_executor: approved_executor.into(),
|
||||
status: ApprovalTokenStatus::Pending,
|
||||
expires_at_epoch_seconds: None,
|
||||
max_uses: 1,
|
||||
uses: 0,
|
||||
delegation_chain: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn granted(
|
||||
token: impl Into<String>,
|
||||
scope: ApprovalScope,
|
||||
approving_actor: impl Into<String>,
|
||||
approved_executor: impl Into<String>,
|
||||
) -> Self {
|
||||
Self::pending(token, scope, approving_actor, approved_executor).approve()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn approve(mut self) -> Self {
|
||||
self.status = ApprovalTokenStatus::Granted;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn expires_at(mut self, epoch_seconds: u64) -> Self {
|
||||
self.expires_at_epoch_seconds = Some(epoch_seconds);
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_max_uses(mut self, max_uses: u32) -> Self {
|
||||
self.max_uses = max_uses.max(1);
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_delegation_hop(mut self, hop: ApprovalDelegationHop) -> Self {
|
||||
self.delegation_chain.push(hop);
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn delegation_chain(&self) -> &[ApprovalDelegationHop] {
|
||||
&self.delegation_chain
|
||||
}
|
||||
}
|
||||
|
||||
/// Auditable result of verifying or consuming an approval token.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalTokenAudit {
|
||||
pub token: String,
|
||||
pub scope: ApprovalScope,
|
||||
pub approving_actor: String,
|
||||
pub executing_actor: String,
|
||||
pub status: ApprovalTokenStatus,
|
||||
pub delegated_execution: bool,
|
||||
pub delegation_chain: Vec<ApprovalDelegationHop>,
|
||||
pub uses: u32,
|
||||
pub max_uses: u32,
|
||||
}
|
||||
|
||||
/// In-memory approval-token ledger with one-time-use and replay protection.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Default)]
|
||||
pub struct ApprovalTokenLedger {
|
||||
grants: BTreeMap<String, ApprovalTokenGrant>,
|
||||
}
|
||||
|
||||
impl ApprovalTokenLedger {
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, grant: ApprovalTokenGrant) {
|
||||
self.grants.insert(grant.token.clone(), grant);
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn get(&self, token: &str) -> Option<&ApprovalTokenGrant> {
|
||||
self.grants.get(token)
|
||||
}
|
||||
|
||||
pub fn revoke(&mut self, token: &str) -> Result<ApprovalTokenAudit, ApprovalTokenError> {
|
||||
let grant = self
|
||||
.grants
|
||||
.get_mut(token)
|
||||
.ok_or(ApprovalTokenError::NoApproval)?;
|
||||
grant.status = ApprovalTokenStatus::Revoked;
|
||||
Ok(Self::audit_for(grant, &grant.approved_executor))
|
||||
}
|
||||
|
||||
pub fn verify(
|
||||
&self,
|
||||
token: &str,
|
||||
scope: &ApprovalScope,
|
||||
executing_actor: &str,
|
||||
now_epoch_seconds: u64,
|
||||
) -> Result<ApprovalTokenAudit, ApprovalTokenError> {
|
||||
let grant = self
|
||||
.grants
|
||||
.get(token)
|
||||
.ok_or(ApprovalTokenError::NoApproval)?;
|
||||
Self::validate_grant(grant, scope, executing_actor, now_epoch_seconds)?;
|
||||
Ok(Self::audit_for(grant, executing_actor))
|
||||
}
|
||||
|
||||
pub fn consume(
|
||||
&mut self,
|
||||
token: &str,
|
||||
scope: &ApprovalScope,
|
||||
executing_actor: &str,
|
||||
now_epoch_seconds: u64,
|
||||
) -> Result<ApprovalTokenAudit, ApprovalTokenError> {
|
||||
let grant = self
|
||||
.grants
|
||||
.get_mut(token)
|
||||
.ok_or(ApprovalTokenError::NoApproval)?;
|
||||
Self::validate_grant(grant, scope, executing_actor, now_epoch_seconds)?;
|
||||
grant.uses += 1;
|
||||
if grant.uses >= grant.max_uses {
|
||||
grant.status = ApprovalTokenStatus::Consumed;
|
||||
}
|
||||
Ok(Self::audit_for(grant, executing_actor))
|
||||
}
|
||||
|
||||
fn validate_grant(
|
||||
grant: &ApprovalTokenGrant,
|
||||
scope: &ApprovalScope,
|
||||
executing_actor: &str,
|
||||
now_epoch_seconds: u64,
|
||||
) -> Result<(), ApprovalTokenError> {
|
||||
match grant.status {
|
||||
ApprovalTokenStatus::Pending => return Err(ApprovalTokenError::ApprovalPending),
|
||||
ApprovalTokenStatus::Consumed => {
|
||||
return Err(ApprovalTokenError::ApprovalAlreadyConsumed)
|
||||
}
|
||||
ApprovalTokenStatus::Expired => return Err(ApprovalTokenError::ApprovalExpired),
|
||||
ApprovalTokenStatus::Revoked => return Err(ApprovalTokenError::ApprovalRevoked),
|
||||
ApprovalTokenStatus::Granted => {}
|
||||
}
|
||||
|
||||
if grant
|
||||
.expires_at_epoch_seconds
|
||||
.is_some_and(|expires_at| now_epoch_seconds > expires_at)
|
||||
{
|
||||
return Err(ApprovalTokenError::ApprovalExpired);
|
||||
}
|
||||
|
||||
if grant.uses >= grant.max_uses {
|
||||
return Err(ApprovalTokenError::ApprovalAlreadyConsumed);
|
||||
}
|
||||
|
||||
if grant.scope != *scope {
|
||||
return Err(ApprovalTokenError::ScopeMismatch {
|
||||
expected: Box::new(grant.scope.clone()),
|
||||
actual: Box::new(scope.clone()),
|
||||
});
|
||||
}
|
||||
|
||||
if grant.approved_executor != executing_actor {
|
||||
return Err(ApprovalTokenError::UnauthorizedDelegate {
|
||||
expected: grant.approved_executor.clone(),
|
||||
actual: executing_actor.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn audit_for(grant: &ApprovalTokenGrant, executing_actor: &str) -> ApprovalTokenAudit {
|
||||
let mut delegation_chain = grant.delegation_chain.clone();
|
||||
if delegation_chain.is_empty() {
|
||||
delegation_chain.push(ApprovalDelegationHop::new(
|
||||
grant.approving_actor.clone(),
|
||||
"approval granted",
|
||||
));
|
||||
}
|
||||
if grant.approving_actor != executing_actor
|
||||
&& !delegation_chain
|
||||
.iter()
|
||||
.any(|hop| hop.actor == executing_actor)
|
||||
{
|
||||
delegation_chain.push(ApprovalDelegationHop::new(
|
||||
executing_actor.to_string(),
|
||||
"delegated execution",
|
||||
));
|
||||
}
|
||||
|
||||
ApprovalTokenAudit {
|
||||
token: grant.token.clone(),
|
||||
scope: grant.scope.clone(),
|
||||
approving_actor: grant.approving_actor.clone(),
|
||||
executing_actor: executing_actor.to_string(),
|
||||
status: grant.status,
|
||||
delegated_execution: grant.approving_actor != executing_actor,
|
||||
delegation_chain,
|
||||
uses: grant.uses,
|
||||
max_uses: grant.max_uses,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
ApprovalDelegationHop, ApprovalScope, ApprovalTokenError, ApprovalTokenGrant,
|
||||
ApprovalTokenLedger, ApprovalTokenStatus,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn approval_token_blocks_until_owner_grants_policy_exception() {
|
||||
let mut ledger = ApprovalTokenLedger::new();
|
||||
let scope = ApprovalScope::new("main_push_forbidden", "git push")
|
||||
.with_repository("sisyphus/claw-code")
|
||||
.with_branch("main");
|
||||
ledger.insert(ApprovalTokenGrant::pending(
|
||||
"tok-pending",
|
||||
scope.clone(),
|
||||
"repo-owner",
|
||||
"release-bot",
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
ledger.verify("tok-missing", &scope, "release-bot", 10),
|
||||
Err(ApprovalTokenError::NoApproval)
|
||||
));
|
||||
assert!(matches!(
|
||||
ledger.verify("tok-pending", &scope, "release-bot", 10),
|
||||
Err(ApprovalTokenError::ApprovalPending)
|
||||
));
|
||||
|
||||
ledger.insert(ApprovalTokenGrant::granted(
|
||||
"tok-granted",
|
||||
scope.clone(),
|
||||
"repo-owner",
|
||||
"release-bot",
|
||||
));
|
||||
let audit = ledger
|
||||
.verify("tok-granted", &scope, "release-bot", 10)
|
||||
.expect("owner approval should verify");
|
||||
|
||||
assert_eq!(audit.status, ApprovalTokenStatus::Granted);
|
||||
assert_eq!(audit.approving_actor, "repo-owner");
|
||||
assert_eq!(audit.executing_actor, "release-bot");
|
||||
assert!(audit.delegated_execution);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn approval_token_is_one_time_use_and_rejects_replay() {
|
||||
let mut ledger = ApprovalTokenLedger::new();
|
||||
let scope = ApprovalScope::new("release_requires_owner", "release publish")
|
||||
.with_repository("sisyphus/claw-code");
|
||||
ledger.insert(ApprovalTokenGrant::granted(
|
||||
"tok-once",
|
||||
scope.clone(),
|
||||
"owner",
|
||||
"release-bot",
|
||||
));
|
||||
|
||||
let first = ledger
|
||||
.consume("tok-once", &scope, "release-bot", 10)
|
||||
.expect("first use should consume token");
|
||||
assert_eq!(first.status, ApprovalTokenStatus::Consumed);
|
||||
assert_eq!(first.uses, 1);
|
||||
|
||||
assert!(matches!(
|
||||
ledger.consume("tok-once", &scope, "release-bot", 11),
|
||||
Err(ApprovalTokenError::ApprovalAlreadyConsumed)
|
||||
));
|
||||
assert_eq!(
|
||||
ledger.get("tok-once").map(|grant| grant.status),
|
||||
Some(ApprovalTokenStatus::Consumed)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn approval_token_rejects_scope_expansion_expiry_and_revocation() {
|
||||
let mut ledger = ApprovalTokenLedger::new();
|
||||
let scope = ApprovalScope::new("main_push_forbidden", "git push")
|
||||
.with_repository("sisyphus/claw-code")
|
||||
.with_branch("main");
|
||||
let dev_scope = ApprovalScope::new("main_push_forbidden", "git push")
|
||||
.with_repository("sisyphus/claw-code")
|
||||
.with_branch("dev");
|
||||
|
||||
ledger.insert(
|
||||
ApprovalTokenGrant::granted("tok-expiring", scope.clone(), "owner", "bot")
|
||||
.expires_at(20),
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
ledger.verify("tok-expiring", &dev_scope, "bot", 10),
|
||||
Err(ApprovalTokenError::ScopeMismatch { .. })
|
||||
));
|
||||
assert!(matches!(
|
||||
ledger.verify("tok-expiring", &scope, "bot", 21),
|
||||
Err(ApprovalTokenError::ApprovalExpired)
|
||||
));
|
||||
|
||||
ledger.insert(ApprovalTokenGrant::granted(
|
||||
"tok-revoked",
|
||||
scope.clone(),
|
||||
"owner",
|
||||
"bot",
|
||||
));
|
||||
let revoked = ledger
|
||||
.revoke("tok-revoked")
|
||||
.expect("revocation should be audited");
|
||||
assert_eq!(revoked.status, ApprovalTokenStatus::Revoked);
|
||||
assert!(matches!(
|
||||
ledger.verify("tok-revoked", &scope, "bot", 10),
|
||||
Err(ApprovalTokenError::ApprovalRevoked)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn approval_token_preserves_delegation_traceability() {
|
||||
let mut ledger = ApprovalTokenLedger::new();
|
||||
let scope = ApprovalScope::new("deploy_requires_owner", "deploy prod");
|
||||
ledger.insert(
|
||||
ApprovalTokenGrant::granted("tok-delegated", scope.clone(), "owner", "deploy-bot")
|
||||
.with_delegation_hop(
|
||||
ApprovalDelegationHop::new("owner", "owner approval")
|
||||
.with_session_id("session-owner"),
|
||||
)
|
||||
.with_delegation_hop(
|
||||
ApprovalDelegationHop::new("lead-agent", "handoff to deploy bot")
|
||||
.with_session_id("session-lead"),
|
||||
),
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
ledger.verify("tok-delegated", &scope, "unexpected-bot", 10),
|
||||
Err(ApprovalTokenError::UnauthorizedDelegate { expected, actual })
|
||||
if expected == "deploy-bot" && actual == "unexpected-bot"
|
||||
));
|
||||
|
||||
let audit = ledger
|
||||
.consume("tok-delegated", &scope, "deploy-bot", 10)
|
||||
.expect("approved delegate should consume token");
|
||||
let actors = audit
|
||||
.delegation_chain
|
||||
.iter()
|
||||
.map(|hop| hop.actor.as_str())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(audit.delegated_execution);
|
||||
assert_eq!(actors, vec!["owner", "lead-agent", "deploy-bot"]);
|
||||
assert_eq!(
|
||||
audit.delegation_chain[0].session_id.as_deref(),
|
||||
Some("session-owner")
|
||||
);
|
||||
assert_eq!(
|
||||
audit.delegation_chain[1].session_id.as_deref(),
|
||||
Some("session-lead")
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,7 @@ use std::process::{Command, Stdio};
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use tokio::process::Command as TokioCommand;
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::time::timeout;
|
||||
@@ -176,27 +177,10 @@ async fn execute_bash_async(
|
||||
let mut command = prepare_tokio_command(&input.command, &cwd, &sandbox_status, true);
|
||||
|
||||
let output_result = if let Some(timeout_ms) = input.timeout {
|
||||
match timeout(Duration::from_millis(timeout_ms), command.output()).await {
|
||||
Ok(result) => (result?, false),
|
||||
Err(_) => {
|
||||
return Ok(BashCommandOutput {
|
||||
stdout: String::new(),
|
||||
stderr: format!("Command exceeded timeout of {timeout_ms} ms"),
|
||||
raw_output_path: None,
|
||||
interrupted: true,
|
||||
is_image: None,
|
||||
background_task_id: None,
|
||||
backgrounded_by_user: None,
|
||||
assistant_auto_backgrounded: None,
|
||||
dangerously_disable_sandbox: input.dangerously_disable_sandbox,
|
||||
return_code_interpretation: Some(String::from("timeout")),
|
||||
no_output_expected: Some(true),
|
||||
structured_content: None,
|
||||
persisted_output_path: None,
|
||||
persisted_output_size: None,
|
||||
sandbox_status: Some(sandbox_status),
|
||||
});
|
||||
}
|
||||
if let Ok(result) = timeout(Duration::from_millis(timeout_ms), command.output()).await {
|
||||
(result?, false)
|
||||
} else {
|
||||
return Ok(timeout_output(&input, timeout_ms, sandbox_status));
|
||||
}
|
||||
} else {
|
||||
(command.output().await?, false)
|
||||
@@ -233,6 +217,67 @@ async fn execute_bash_async(
|
||||
})
|
||||
}
|
||||
|
||||
fn timeout_output(
|
||||
input: &BashCommandInput,
|
||||
timeout_ms: u64,
|
||||
sandbox_status: SandboxStatus,
|
||||
) -> BashCommandOutput {
|
||||
let is_test = is_test_command(&input.command);
|
||||
let return_code_interpretation = if is_test { "test.hung" } else { "timeout" };
|
||||
BashCommandOutput {
|
||||
stdout: String::new(),
|
||||
stderr: format!("Command exceeded timeout of {timeout_ms} ms"),
|
||||
raw_output_path: None,
|
||||
interrupted: true,
|
||||
is_image: None,
|
||||
background_task_id: None,
|
||||
backgrounded_by_user: None,
|
||||
assistant_auto_backgrounded: None,
|
||||
dangerously_disable_sandbox: input.dangerously_disable_sandbox,
|
||||
return_code_interpretation: Some(String::from(return_code_interpretation)),
|
||||
no_output_expected: Some(true),
|
||||
structured_content: Some(vec![test_timeout_provenance(
|
||||
&input.command,
|
||||
timeout_ms,
|
||||
is_test,
|
||||
)]),
|
||||
persisted_output_path: None,
|
||||
persisted_output_size: None,
|
||||
sandbox_status: Some(sandbox_status),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_test_command(command: &str) -> bool {
|
||||
let normalized = command
|
||||
.split_whitespace()
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ")
|
||||
.to_ascii_lowercase();
|
||||
normalized.contains("cargo test")
|
||||
|| normalized.contains("cargo nextest")
|
||||
|| normalized.contains("npm test")
|
||||
|| normalized.contains("pnpm test")
|
||||
|| normalized.contains("yarn test")
|
||||
|| normalized.contains("pytest")
|
||||
}
|
||||
|
||||
fn test_timeout_provenance(
|
||||
command: &str,
|
||||
timeout_ms: u64,
|
||||
classified_as_test_hang: bool,
|
||||
) -> serde_json::Value {
|
||||
json!({
|
||||
"event": if classified_as_test_hang { "test.hung" } else { "command.timeout" },
|
||||
"failureClass": if classified_as_test_hang { "test_hang" } else { "timeout" },
|
||||
"data": {
|
||||
"command": command,
|
||||
"timeoutMs": timeout_ms,
|
||||
"provenance": "bash.timeout",
|
||||
"classification": if classified_as_test_hang { "test.hung" } else { "timeout" }
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn sandbox_status_for_input(input: &BashCommandInput, cwd: &std::path::Path) -> SandboxStatus {
|
||||
let config = ConfigLoader::default_for(cwd).load().map_or_else(
|
||||
|_| SandboxConfig::default(),
|
||||
@@ -349,6 +394,31 @@ mod tests {
|
||||
|
||||
assert!(!output.sandbox_status.expect("sandbox status").enabled);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn timed_out_test_command_is_classified_as_hung_test_with_provenance() {
|
||||
let output = execute_bash(BashCommandInput {
|
||||
command: String::from("sleep 1 # cargo test slow_case"),
|
||||
timeout: Some(1),
|
||||
description: None,
|
||||
run_in_background: Some(false),
|
||||
dangerously_disable_sandbox: Some(false),
|
||||
namespace_restrictions: Some(false),
|
||||
isolate_network: Some(false),
|
||||
filesystem_mode: Some(FilesystemIsolationMode::WorkspaceOnly),
|
||||
allowed_mounts: None,
|
||||
})
|
||||
.expect("bash command should return structured timeout");
|
||||
|
||||
assert!(output.interrupted);
|
||||
assert_eq!(
|
||||
output.return_code_interpretation.as_deref(),
|
||||
Some("test.hung")
|
||||
);
|
||||
let structured = output.structured_content.expect("structured content");
|
||||
assert_eq!(structured[0]["event"], "test.hung");
|
||||
assert_eq!(structured[0]["data"]["provenance"], "bash.timeout");
|
||||
}
|
||||
}
|
||||
|
||||
/// Maximum output bytes before truncation (16 KiB, matching upstream).
|
||||
|
||||
@@ -212,8 +212,7 @@ fn summarize_messages(messages: &[ConversationMessage]) -> String {
|
||||
.filter_map(|block| match block {
|
||||
ContentBlock::ToolUse { name, .. } => Some(name.as_str()),
|
||||
ContentBlock::ToolResult { tool_name, .. } => Some(tool_name.as_str()),
|
||||
ContentBlock::Text { .. } => None,
|
||||
ContentBlock::Thinking { .. } => None,
|
||||
ContentBlock::Text { .. } | ContentBlock::Thinking { .. } => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
tool_names.sort_unstable();
|
||||
|
||||
@@ -101,6 +101,7 @@ pub struct McpConfigCollection {
|
||||
/// MCP server config paired with the scope that defined it.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ScopedMcpServerConfig {
|
||||
pub required: bool,
|
||||
pub scope: ConfigSource,
|
||||
pub config: McpServerConfig,
|
||||
}
|
||||
@@ -414,6 +415,17 @@ impl RuntimeConfig {
|
||||
pub fn trusted_roots(&self) -> &[String] {
|
||||
&self.feature_config.trusted_roots
|
||||
}
|
||||
|
||||
/// Merge config-level default trusted roots with per-call roots.
|
||||
///
|
||||
/// Config roots are defaults and are kept first; per-call roots extend the
|
||||
/// allowlist for a specific worker/session creation request. Duplicates are
|
||||
/// removed without reordering the first occurrence so evidence remains
|
||||
/// deterministic while avoiding repeated trust checks.
|
||||
#[must_use]
|
||||
pub fn trusted_roots_with_overrides(&self, per_call_roots: &[String]) -> Vec<String> {
|
||||
merge_trusted_roots(self.trusted_roots(), per_call_roots)
|
||||
}
|
||||
}
|
||||
|
||||
impl RuntimeFeatureConfig {
|
||||
@@ -483,6 +495,22 @@ impl RuntimeFeatureConfig {
|
||||
pub fn trusted_roots(&self) -> &[String] {
|
||||
&self.trusted_roots
|
||||
}
|
||||
|
||||
/// Merge this config's default trusted roots with per-call roots.
|
||||
#[must_use]
|
||||
pub fn trusted_roots_with_overrides(&self, per_call_roots: &[String]) -> Vec<String> {
|
||||
merge_trusted_roots(self.trusted_roots(), per_call_roots)
|
||||
}
|
||||
}
|
||||
|
||||
fn merge_trusted_roots(config_roots: &[String], per_call_roots: &[String]) -> Vec<String> {
|
||||
let mut merged = Vec::with_capacity(config_roots.len() + per_call_roots.len());
|
||||
for root in config_roots.iter().chain(per_call_roots.iter()) {
|
||||
if !merged.contains(root) {
|
||||
merged.push(root.clone());
|
||||
}
|
||||
}
|
||||
merged
|
||||
}
|
||||
|
||||
impl ProviderFallbackConfig {
|
||||
@@ -725,6 +753,12 @@ fn merge_mcp_servers(
|
||||
target.insert(
|
||||
name.clone(),
|
||||
ScopedMcpServerConfig {
|
||||
required: optional_bool(
|
||||
expect_object(value, &format!("{}: mcpServers.{name}", path.display()))?,
|
||||
"required",
|
||||
&format!("{}: mcpServers.{name}", path.display()),
|
||||
)?
|
||||
.unwrap_or(false),
|
||||
scope: source,
|
||||
config: parsed,
|
||||
},
|
||||
@@ -1245,8 +1279,8 @@ fn push_unique(target: &mut Vec<String>, value: String) {
|
||||
mod tests {
|
||||
use super::{
|
||||
deep_merge_objects, parse_permission_mode_label, ConfigLoader, ConfigSource,
|
||||
McpServerConfig, McpTransport, ResolvedPermissionMode, RuntimeHookConfig,
|
||||
RuntimePluginConfig, CLAW_SETTINGS_SCHEMA_NAME,
|
||||
McpServerConfig, McpTransport, ResolvedPermissionMode, RuntimeFeatureConfig,
|
||||
RuntimeHookConfig, RuntimePluginConfig, CLAW_SETTINGS_SCHEMA_NAME,
|
||||
};
|
||||
use crate::json::JsonValue;
|
||||
use crate::sandbox::FilesystemIsolationMode;
|
||||
@@ -1502,6 +1536,51 @@ mod tests {
|
||||
fs::remove_dir_all(root).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trusted_roots_with_overrides_preserves_config_defaults_and_adds_per_call_roots() {
|
||||
// given
|
||||
let root = temp_dir();
|
||||
let cwd = root.join("project");
|
||||
let home = root.join("home").join(".claw");
|
||||
fs::create_dir_all(&home).expect("home config dir");
|
||||
fs::create_dir_all(&cwd).expect("project dir");
|
||||
fs::write(
|
||||
home.join("settings.json"),
|
||||
r#"{"trustedRoots": ["/tmp/config-default", "/tmp/shared"]}"#,
|
||||
)
|
||||
.expect("write settings");
|
||||
|
||||
// when
|
||||
let loaded = ConfigLoader::new(&cwd, &home)
|
||||
.load()
|
||||
.expect("config should load");
|
||||
let merged = loaded.trusted_roots_with_overrides(&[
|
||||
"/tmp/per-call".to_string(),
|
||||
"/tmp/shared".to_string(),
|
||||
]);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
merged,
|
||||
["/tmp/config-default", "/tmp/shared", "/tmp/per-call"]
|
||||
);
|
||||
|
||||
fs::remove_dir_all(root).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn runtime_feature_trusted_roots_with_overrides_matches_runtime_config_merge() {
|
||||
let config = RuntimeFeatureConfig {
|
||||
trusted_roots: vec!["/tmp/config".to_string()],
|
||||
..RuntimeFeatureConfig::default()
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
config.trusted_roots_with_overrides(&["/tmp/per-call".to_string()]),
|
||||
["/tmp/config", "/tmp/per-call"]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trusted_roots_default_is_empty_when_unset() {
|
||||
// given
|
||||
@@ -1538,7 +1617,8 @@ mod tests {
|
||||
"stdio-server": {
|
||||
"command": "uvx",
|
||||
"args": ["mcp-server"],
|
||||
"env": {"TOKEN": "secret"}
|
||||
"env": {"TOKEN": "secret"},
|
||||
"required": true
|
||||
},
|
||||
"remote-server": {
|
||||
"type": "http",
|
||||
@@ -1587,6 +1667,7 @@ mod tests {
|
||||
.get("stdio-server")
|
||||
.expect("stdio server should exist");
|
||||
assert_eq!(stdio_server.scope, ConfigSource::User);
|
||||
assert!(stdio_server.required);
|
||||
assert_eq!(stdio_server.transport(), McpTransport::Stdio);
|
||||
|
||||
let remote_server = loaded
|
||||
@@ -1594,6 +1675,7 @@ mod tests {
|
||||
.get("remote-server")
|
||||
.expect("remote server should exist");
|
||||
assert_eq!(remote_server.scope, ConfigSource::Local);
|
||||
assert!(!remote_server.required);
|
||||
assert_eq!(remote_server.transport(), McpTransport::Ws);
|
||||
match &remote_server.config {
|
||||
McpServerConfig::Ws(config) => {
|
||||
|
||||
@@ -307,11 +307,23 @@ pub fn edit_file(
|
||||
|
||||
/// Expands a glob pattern and returns matching filenames.
|
||||
pub fn glob_search(pattern: &str, path: Option<&str>) -> io::Result<GlobSearchOutput> {
|
||||
glob_search_impl(pattern, path, None)
|
||||
}
|
||||
|
||||
fn glob_search_impl(
|
||||
pattern: &str,
|
||||
path: Option<&str>,
|
||||
workspace_root: Option<&Path>,
|
||||
) -> io::Result<GlobSearchOutput> {
|
||||
let started = Instant::now();
|
||||
let base_dir = path
|
||||
.map(normalize_path)
|
||||
.transpose()?
|
||||
.unwrap_or(std::env::current_dir()?);
|
||||
let canonical_root = workspace_root.map(canonicalize_workspace_root);
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
validate_workspace_boundary(&base_dir, root)?;
|
||||
}
|
||||
let search_pattern = if Path::new(pattern).is_absolute() {
|
||||
pattern.to_owned()
|
||||
} else {
|
||||
@@ -329,6 +341,12 @@ pub fn glob_search(pattern: &str, path: Option<&str>) -> io::Result<GlobSearchOu
|
||||
let compiled = Pattern::new(pat)
|
||||
.map_err(|error| io::Error::new(io::ErrorKind::InvalidInput, error.to_string()))?;
|
||||
let walk_root = derive_glob_walk_root(pat);
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
let canonical_walk_root = walk_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| walk_root.clone());
|
||||
validate_workspace_boundary(&canonical_walk_root, root)?;
|
||||
}
|
||||
let entries = WalkDir::new(&walk_root)
|
||||
.into_iter()
|
||||
.filter_entry(|entry| !should_skip_glob_dir(entry));
|
||||
@@ -338,6 +356,10 @@ pub fn glob_search(pattern: &str, path: Option<&str>) -> io::Result<GlobSearchOu
|
||||
&& compiled.matches_path(candidate)
|
||||
&& seen.insert(candidate.to_path_buf())
|
||||
{
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
let canonical_candidate = candidate.canonicalize()?;
|
||||
validate_workspace_boundary(&canonical_candidate, root)?;
|
||||
}
|
||||
matches.push(candidate.to_path_buf());
|
||||
}
|
||||
}
|
||||
@@ -367,12 +389,23 @@ pub fn glob_search(pattern: &str, path: Option<&str>) -> io::Result<GlobSearchOu
|
||||
|
||||
/// Runs a regex search over workspace files with optional context lines.
|
||||
pub fn grep_search(input: &GrepSearchInput) -> io::Result<GrepSearchOutput> {
|
||||
grep_search_impl(input, None)
|
||||
}
|
||||
|
||||
fn grep_search_impl(
|
||||
input: &GrepSearchInput,
|
||||
workspace_root: Option<&Path>,
|
||||
) -> io::Result<GrepSearchOutput> {
|
||||
let base_path = input
|
||||
.path
|
||||
.as_deref()
|
||||
.map(normalize_path)
|
||||
.transpose()?
|
||||
.unwrap_or(std::env::current_dir()?);
|
||||
let canonical_root = workspace_root.map(canonicalize_workspace_root);
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
validate_workspace_boundary(&base_path, root)?;
|
||||
}
|
||||
|
||||
let regex = RegexBuilder::new(&input.pattern)
|
||||
.case_insensitive(input.case_insensitive.unwrap_or(false))
|
||||
@@ -398,6 +431,10 @@ pub fn grep_search(input: &GrepSearchInput) -> io::Result<GrepSearchOutput> {
|
||||
let mut total_matches = 0usize;
|
||||
|
||||
for file_path in collect_search_files(&base_path)? {
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
let canonical_file = file_path.canonicalize()?;
|
||||
validate_workspace_boundary(&canonical_file, root)?;
|
||||
}
|
||||
if !matches_optional_filters(&file_path, glob_filter.as_ref(), file_type) {
|
||||
continue;
|
||||
}
|
||||
@@ -447,27 +484,21 @@ pub fn grep_search(input: &GrepSearchInput) -> io::Result<GrepSearchOutput> {
|
||||
|
||||
let (filenames, applied_limit, applied_offset) =
|
||||
apply_limit(filenames, input.head_limit, input.offset);
|
||||
let content_output = if output_mode == "content" {
|
||||
let (lines, limit, offset) = apply_limit(content_lines, input.head_limit, input.offset);
|
||||
return Ok(GrepSearchOutput {
|
||||
mode: Some(output_mode),
|
||||
num_files: filenames.len(),
|
||||
if output_mode == "content" {
|
||||
return Ok(build_grep_content_output(
|
||||
output_mode,
|
||||
filenames,
|
||||
num_lines: Some(lines.len()),
|
||||
content: Some(lines.join("\n")),
|
||||
num_matches: None,
|
||||
applied_limit: limit,
|
||||
applied_offset: offset,
|
||||
});
|
||||
} else {
|
||||
None
|
||||
};
|
||||
content_lines,
|
||||
input.head_limit,
|
||||
input.offset,
|
||||
));
|
||||
}
|
||||
|
||||
Ok(GrepSearchOutput {
|
||||
mode: Some(output_mode.clone()),
|
||||
num_files: filenames.len(),
|
||||
filenames,
|
||||
content: content_output,
|
||||
content: None,
|
||||
num_lines: None,
|
||||
num_matches: (output_mode == "count").then_some(total_matches),
|
||||
applied_limit,
|
||||
@@ -475,6 +506,32 @@ pub fn grep_search(input: &GrepSearchInput) -> io::Result<GrepSearchOutput> {
|
||||
})
|
||||
}
|
||||
|
||||
fn build_grep_content_output(
|
||||
output_mode: String,
|
||||
filenames: Vec<String>,
|
||||
content_lines: Vec<String>,
|
||||
head_limit: Option<usize>,
|
||||
offset: Option<usize>,
|
||||
) -> GrepSearchOutput {
|
||||
let (lines, limit, offset) = apply_limit(content_lines, head_limit, offset);
|
||||
GrepSearchOutput {
|
||||
mode: Some(output_mode),
|
||||
num_files: filenames.len(),
|
||||
filenames,
|
||||
num_lines: Some(lines.len()),
|
||||
content: Some(lines.join("\n")),
|
||||
num_matches: None,
|
||||
applied_limit: limit,
|
||||
applied_offset: offset,
|
||||
}
|
||||
}
|
||||
|
||||
fn canonicalize_workspace_root(workspace_root: &Path) -> PathBuf {
|
||||
workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf())
|
||||
}
|
||||
|
||||
fn should_skip_glob_dir(entry: &DirEntry) -> bool {
|
||||
entry.file_type().is_dir()
|
||||
&& entry
|
||||
@@ -625,9 +682,7 @@ pub fn read_file_in_workspace(
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<ReadFileOutput> {
|
||||
let absolute_path = normalize_path(path)?;
|
||||
let canonical_root = workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf());
|
||||
let canonical_root = canonicalize_workspace_root(workspace_root);
|
||||
validate_workspace_boundary(&absolute_path, &canonical_root)?;
|
||||
read_file(path, offset, limit)
|
||||
}
|
||||
@@ -640,9 +695,7 @@ pub fn write_file_in_workspace(
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<WriteFileOutput> {
|
||||
let absolute_path = normalize_path_allow_missing(path)?;
|
||||
let canonical_root = workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf());
|
||||
let canonical_root = canonicalize_workspace_root(workspace_root);
|
||||
validate_workspace_boundary(&absolute_path, &canonical_root)?;
|
||||
write_file(path, content)
|
||||
}
|
||||
@@ -657,13 +710,30 @@ pub fn edit_file_in_workspace(
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<EditFileOutput> {
|
||||
let absolute_path = normalize_path(path)?;
|
||||
let canonical_root = workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf());
|
||||
let canonical_root = canonicalize_workspace_root(workspace_root);
|
||||
validate_workspace_boundary(&absolute_path, &canonical_root)?;
|
||||
edit_file(path, old_string, new_string, replace_all)
|
||||
}
|
||||
|
||||
/// Expand a glob pattern with workspace boundary enforcement.
|
||||
#[allow(dead_code)]
|
||||
pub fn glob_search_in_workspace(
|
||||
pattern: &str,
|
||||
path: Option<&str>,
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<GlobSearchOutput> {
|
||||
glob_search_impl(pattern, path, Some(workspace_root))
|
||||
}
|
||||
|
||||
/// Search file contents with workspace boundary enforcement.
|
||||
#[allow(dead_code)]
|
||||
pub fn grep_search_in_workspace(
|
||||
input: &GrepSearchInput,
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<GrepSearchOutput> {
|
||||
grep_search_impl(input, Some(workspace_root))
|
||||
}
|
||||
|
||||
/// Check whether a path is a symlink that resolves outside the workspace.
|
||||
#[allow(dead_code)]
|
||||
pub fn is_symlink_escape(path: &Path, workspace_root: &Path) -> io::Result<bool> {
|
||||
@@ -708,7 +778,7 @@ mod tests {
|
||||
use super::{
|
||||
component_contains_glob, derive_glob_walk_root, edit_file, expand_braces, glob_search,
|
||||
grep_search, is_symlink_escape, read_file, read_file_in_workspace, write_file,
|
||||
GrepSearchInput, MAX_WRITE_SIZE,
|
||||
write_file_in_workspace, GrepSearchInput, MAX_WRITE_SIZE,
|
||||
};
|
||||
|
||||
fn temp_path(name: &str) -> std::path::PathBuf {
|
||||
@@ -808,6 +878,68 @@ mod tests {
|
||||
assert!(!is_symlink_escape(&normal, &workspace).expect("check should succeed"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn workspace_read_rejects_symlink_escape_regression_3007_class() {
|
||||
let workspace = temp_path("workspace-read-symlink-escape");
|
||||
let outside = temp_path("workspace-read-symlink-target");
|
||||
std::fs::create_dir_all(&workspace).expect("workspace dir should be created");
|
||||
std::fs::create_dir_all(&outside).expect("outside dir should be created");
|
||||
let outside_file = outside.join("secret.txt");
|
||||
std::fs::write(&outside_file, "outside secret").expect("outside file should write");
|
||||
|
||||
let link_path = workspace.join("linked-secret.txt");
|
||||
std::os::unix::fs::symlink(&outside_file, &link_path).expect("symlink should create");
|
||||
|
||||
let result =
|
||||
read_file_in_workspace(link_path.to_string_lossy().as_ref(), None, None, &workspace);
|
||||
|
||||
assert!(result.is_err(), "symlink escape must be rejected");
|
||||
let error = result.unwrap_err();
|
||||
assert_eq!(error.kind(), std::io::ErrorKind::PermissionDenied);
|
||||
assert!(
|
||||
error.to_string().contains("escapes workspace"),
|
||||
"error should explain workspace escape: {error}"
|
||||
);
|
||||
|
||||
let _ = std::fs::remove_dir_all(&workspace);
|
||||
let _ = std::fs::remove_dir_all(&outside);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn workspace_write_rejects_parent_symlink_escape_regression_3007_class() {
|
||||
let workspace = temp_path("workspace-write-symlink-escape");
|
||||
let outside = temp_path("workspace-write-symlink-target");
|
||||
std::fs::create_dir_all(&workspace).expect("workspace dir should be created");
|
||||
std::fs::create_dir_all(&outside).expect("outside dir should be created");
|
||||
|
||||
let link_dir = workspace.join("linked-outside");
|
||||
std::os::unix::fs::symlink(&outside, &link_dir).expect("symlink dir should create");
|
||||
let escaped_child = link_dir.join("created.txt");
|
||||
|
||||
let result = write_file_in_workspace(
|
||||
escaped_child.to_string_lossy().as_ref(),
|
||||
"must not escape",
|
||||
&workspace,
|
||||
);
|
||||
|
||||
assert!(result.is_err(), "parent symlink escape must be rejected");
|
||||
let error = result.unwrap_err();
|
||||
assert_eq!(error.kind(), std::io::ErrorKind::PermissionDenied);
|
||||
assert!(
|
||||
error.to_string().contains("escapes workspace"),
|
||||
"error should explain workspace escape: {error}"
|
||||
);
|
||||
assert!(
|
||||
!outside.join("created.txt").exists(),
|
||||
"write should not create through an escaping symlink"
|
||||
);
|
||||
|
||||
let _ = std::fs::remove_dir_all(&workspace);
|
||||
let _ = std::fs::remove_dir_all(&outside);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn globs_and_greps_directory() {
|
||||
let dir = temp_path("search-dir");
|
||||
|
||||
399
rust/crates/runtime/src/g004_conformance.rs
Normal file
399
rust/crates/runtime/src/g004_conformance.rs
Normal file
@@ -0,0 +1,399 @@
|
||||
//! Machine-checkable conformance helpers for G004 event/report contract bundles.
|
||||
//!
|
||||
//! The harness intentionally validates JSON-shaped artifacts instead of owning the
|
||||
//! lane-event, report, or approval-token implementations. This keeps it usable by
|
||||
//! independent implementation lanes and by golden fixtures produced outside the
|
||||
//! runtime crate.
|
||||
|
||||
use serde_json::Value;
|
||||
|
||||
const BUNDLE_SCHEMA_VERSION: &str = "g004.contract.bundle.v1";
|
||||
const REPORT_SCHEMA_VERSION: &str = "g004.report.v1";
|
||||
|
||||
/// A single conformance validation failure.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct G004ConformanceError {
|
||||
/// JSON pointer-ish path to the invalid field.
|
||||
pub path: String,
|
||||
/// Human-readable reason the field failed validation.
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl G004ConformanceError {
|
||||
fn new(path: impl Into<String>, message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
path: path.into(),
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate a G004 golden contract bundle.
|
||||
///
|
||||
/// The bundle shape is deliberately small and cross-lane:
|
||||
/// - `laneEvents[]` must expose stable event identity, ordering/provenance, and
|
||||
/// terminal dedupe fingerprints.
|
||||
/// - `reports[]` must expose schema identity, content hash, projection/redaction
|
||||
/// provenance, capability negotiation, fact/hypothesis/negative-evidence
|
||||
/// labels, confidence, and field-level delta attribution.
|
||||
/// - `approvalTokens[]` must expose owner/scope, delegation chain, one-time-use,
|
||||
/// and replay-prevention fields.
|
||||
#[must_use]
|
||||
pub fn validate_g004_contract_bundle(bundle: &Value) -> Vec<G004ConformanceError> {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
require_string_eq(bundle, "/schemaVersion", BUNDLE_SCHEMA_VERSION, &mut errors);
|
||||
validate_lane_events(bundle.get("laneEvents"), "/laneEvents", &mut errors);
|
||||
validate_reports(bundle.get("reports"), "/reports", &mut errors);
|
||||
validate_approval_tokens(bundle.get("approvalTokens"), "/approvalTokens", &mut errors);
|
||||
|
||||
errors
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_g004_contract_bundle_valid(bundle: &Value) -> bool {
|
||||
validate_g004_contract_bundle(bundle).is_empty()
|
||||
}
|
||||
|
||||
fn validate_lane_events(value: Option<&Value>, path: &str, errors: &mut Vec<G004ConformanceError>) {
|
||||
let Some(events) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let mut previous_seq = None;
|
||||
for (index, event) in events.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(event, "/event", &format!("{base}/event"), errors);
|
||||
require_non_empty_string_at(event, "/status", &format!("{base}/status"), errors);
|
||||
require_non_empty_string_at(event, "/emittedAt", &format!("{base}/emittedAt"), errors);
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/provenance",
|
||||
&format!("{base}/metadata/provenance"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/emitterIdentity",
|
||||
&format!("{base}/metadata/emitterIdentity"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/environmentLabel",
|
||||
&format!("{base}/metadata/environmentLabel"),
|
||||
errors,
|
||||
);
|
||||
|
||||
match get_path(event, "/metadata/seq").and_then(Value::as_u64) {
|
||||
Some(seq) => {
|
||||
if let Some(previous) = previous_seq {
|
||||
if seq <= previous {
|
||||
errors.push(G004ConformanceError::new(
|
||||
format!("{base}/metadata/seq"),
|
||||
"sequence must be strictly increasing",
|
||||
));
|
||||
}
|
||||
}
|
||||
previous_seq = Some(seq);
|
||||
}
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
format!("{base}/metadata/seq"),
|
||||
"required u64 field missing",
|
||||
)),
|
||||
}
|
||||
|
||||
if is_terminal_event_value(event.get("event")) {
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/eventFingerprint",
|
||||
&format!("{base}/metadata/eventFingerprint"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_reports(value: Option<&Value>, path: &str, errors: &mut Vec<G004ConformanceError>) {
|
||||
let Some(reports) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, report) in reports.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_string_eq_at(
|
||||
report,
|
||||
"/schemaVersion",
|
||||
&format!("{base}/schemaVersion"),
|
||||
REPORT_SCHEMA_VERSION,
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(report, "/reportId", &format!("{base}/reportId"), errors);
|
||||
require_non_empty_string_at(
|
||||
report,
|
||||
"/identity/contentHash",
|
||||
&format!("{base}/identity/contentHash"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
report,
|
||||
"/projection/provenance",
|
||||
&format!("{base}/projection/provenance"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
report,
|
||||
"/redaction/provenance",
|
||||
&format!("{base}/redaction/provenance"),
|
||||
errors,
|
||||
);
|
||||
non_empty_array(
|
||||
get_path(report, "/consumerCapabilities"),
|
||||
&format!("{base}/consumerCapabilities"),
|
||||
errors,
|
||||
);
|
||||
validate_findings(
|
||||
get_path(report, "/findings"),
|
||||
&format!("{base}/findings"),
|
||||
errors,
|
||||
);
|
||||
validate_field_deltas(
|
||||
get_path(report, "/fieldDeltas"),
|
||||
&format!("{base}/fieldDeltas"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_findings(value: Option<&Value>, path: &str, errors: &mut Vec<G004ConformanceError>) {
|
||||
let Some(findings) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, finding) in findings.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_one_of_at(
|
||||
finding,
|
||||
"/kind",
|
||||
&format!("{base}/kind"),
|
||||
&["fact", "hypothesis", "negative_evidence"],
|
||||
errors,
|
||||
);
|
||||
require_one_of_at(
|
||||
finding,
|
||||
"/confidence",
|
||||
&format!("{base}/confidence"),
|
||||
&["low", "medium", "high"],
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(finding, "/statement", &format!("{base}/statement"), errors);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_field_deltas(
|
||||
value: Option<&Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
let Some(deltas) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, delta) in deltas.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(delta, "/field", &format!("{base}/field"), errors);
|
||||
require_non_empty_string_at(
|
||||
delta,
|
||||
"/previousHash",
|
||||
&format!("{base}/previousHash"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
delta,
|
||||
"/currentHash",
|
||||
&format!("{base}/currentHash"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
delta,
|
||||
"/attribution",
|
||||
&format!("{base}/attribution"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_approval_tokens(
|
||||
value: Option<&Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
let Some(tokens) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, token) in tokens.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(token, "/tokenId", &format!("{base}/tokenId"), errors);
|
||||
require_non_empty_string_at(token, "/owner", &format!("{base}/owner"), errors);
|
||||
require_non_empty_string_at(token, "/scope", &format!("{base}/scope"), errors);
|
||||
require_non_empty_string_at(token, "/issuedAt", &format!("{base}/issuedAt"), errors);
|
||||
require_bool_true_at(token, "/oneTimeUse", &format!("{base}/oneTimeUse"), errors);
|
||||
require_non_empty_string_at(
|
||||
token,
|
||||
"/replayPreventionNonce",
|
||||
&format!("{base}/replayPreventionNonce"),
|
||||
errors,
|
||||
);
|
||||
validate_delegation_chain(
|
||||
get_path(token, "/delegationChain"),
|
||||
&format!("{base}/delegationChain"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_delegation_chain(
|
||||
value: Option<&Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
let Some(chain) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, hop) in chain.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(hop, "/from", &format!("{base}/from"), errors);
|
||||
require_non_empty_string_at(hop, "/to", &format!("{base}/to"), errors);
|
||||
require_non_empty_string_at(hop, "/action", &format!("{base}/action"), errors);
|
||||
require_non_empty_string_at(hop, "/at", &format!("{base}/at"), errors);
|
||||
}
|
||||
}
|
||||
|
||||
fn non_empty_array<'a>(
|
||||
value: Option<&'a Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) -> Option<&'a Vec<Value>> {
|
||||
match value.and_then(Value::as_array) {
|
||||
Some(array) if !array.is_empty() => Some(array),
|
||||
Some(_) => {
|
||||
errors.push(G004ConformanceError::new(path, "array must not be empty"));
|
||||
None
|
||||
}
|
||||
None => {
|
||||
errors.push(G004ConformanceError::new(
|
||||
path,
|
||||
"required array field missing",
|
||||
));
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn require_string_eq(
|
||||
root: &Value,
|
||||
path: &str,
|
||||
expected: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
require_string_eq_at(root, path, path, expected, errors);
|
||||
}
|
||||
|
||||
fn require_string_eq_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
expected: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_str) {
|
||||
Some(actual) if actual == expected => {}
|
||||
Some(actual) => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
format!("expected '{expected}', got '{actual}'"),
|
||||
)),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required string field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_non_empty_string_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_str) {
|
||||
Some(value) if !value.trim().is_empty() => {}
|
||||
Some(_) => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"string must not be empty",
|
||||
)),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required string field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_one_of_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
allowed: &[&str],
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_str) {
|
||||
Some(value) if allowed.contains(&value) => {}
|
||||
Some(value) => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
format!("'{value}' is not one of {}", allowed.join(", ")),
|
||||
)),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required string field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_bool_true_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_bool) {
|
||||
Some(true) => {}
|
||||
Some(false) => errors.push(G004ConformanceError::new(error_path, "must be true")),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required boolean field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_terminal_event_value(value: Option<&Value>) -> bool {
|
||||
matches!(
|
||||
value.and_then(Value::as_str),
|
||||
Some("lane.finished" | "lane.failed" | "lane.merged" | "lane.superseded" | "lane.closed")
|
||||
)
|
||||
}
|
||||
|
||||
fn get_path<'a>(root: &'a Value, path: &str) -> Option<&'a Value> {
|
||||
if let Some(value) = root.pointer(path) {
|
||||
return Some(value);
|
||||
}
|
||||
|
||||
let segments = path.trim_start_matches('/').split('/').collect::<Vec<_>>();
|
||||
for index in 1..segments.len() {
|
||||
let relative = format!("/{}", segments[index..].join("/"));
|
||||
if let Some(value) = root.pointer(&relative) {
|
||||
return Some(value);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
@@ -27,19 +27,38 @@ impl std::fmt::Display for GreenLevel {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct GreenContract {
|
||||
pub required_level: GreenLevel,
|
||||
pub requirements: Vec<GreenContractRequirement>,
|
||||
pub block_known_flakes: bool,
|
||||
}
|
||||
|
||||
impl GreenContract {
|
||||
#[must_use]
|
||||
pub fn new(required_level: GreenLevel) -> Self {
|
||||
Self { required_level }
|
||||
Self {
|
||||
required_level,
|
||||
requirements: Vec::new(),
|
||||
block_known_flakes: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate(self, observed_level: Option<GreenLevel>) -> GreenContractOutcome {
|
||||
pub fn merge_ready(required_level: GreenLevel) -> Self {
|
||||
Self {
|
||||
required_level,
|
||||
requirements: vec![
|
||||
GreenContractRequirement::TestCommandProvenance,
|
||||
GreenContractRequirement::BaseBranchFreshness,
|
||||
GreenContractRequirement::RecoveryAttemptContext,
|
||||
],
|
||||
block_known_flakes: true,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate(&self, observed_level: Option<GreenLevel>) -> GreenContractOutcome {
|
||||
match observed_level {
|
||||
Some(level) if level >= self.required_level => GreenContractOutcome::Satisfied {
|
||||
required_level: self.required_level,
|
||||
@@ -53,11 +72,170 @@ impl GreenContract {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_satisfied_by(self, observed_level: GreenLevel) -> bool {
|
||||
pub fn evaluate_evidence(&self, evidence: &GreenEvidence) -> GreenEvidenceOutcome {
|
||||
let mut missing = Vec::new();
|
||||
let mut blocking_flakes = Vec::new();
|
||||
|
||||
if evidence.observed_level < self.required_level {
|
||||
missing.push(GreenContractRequirement::RequiredLevel);
|
||||
}
|
||||
|
||||
for requirement in &self.requirements {
|
||||
match requirement {
|
||||
GreenContractRequirement::TestCommandProvenance
|
||||
if !evidence.has_passing_test_command() =>
|
||||
{
|
||||
missing.push(*requirement);
|
||||
}
|
||||
GreenContractRequirement::BaseBranchFreshness if !evidence.base_branch_fresh => {
|
||||
missing.push(*requirement);
|
||||
}
|
||||
GreenContractRequirement::RecoveryAttemptContext
|
||||
if !evidence.recovery_attempt_context_recorded =>
|
||||
{
|
||||
missing.push(*requirement);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
if self.block_known_flakes {
|
||||
blocking_flakes = evidence
|
||||
.known_flakes
|
||||
.iter()
|
||||
.filter(|flake| flake.blocks_green)
|
||||
.cloned()
|
||||
.collect();
|
||||
}
|
||||
|
||||
if missing.is_empty() && blocking_flakes.is_empty() {
|
||||
GreenEvidenceOutcome::Satisfied {
|
||||
required_level: self.required_level,
|
||||
observed_level: evidence.observed_level,
|
||||
}
|
||||
} else {
|
||||
GreenEvidenceOutcome::Unsatisfied {
|
||||
required_level: self.required_level,
|
||||
observed_level: evidence.observed_level,
|
||||
missing,
|
||||
blocking_flakes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_satisfied_by(&self, observed_level: GreenLevel) -> bool {
|
||||
observed_level >= self.required_level
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct GreenEvidence {
|
||||
pub observed_level: GreenLevel,
|
||||
pub test_commands: Vec<TestCommandProvenance>,
|
||||
pub base_branch_fresh: bool,
|
||||
pub known_flakes: Vec<KnownFlake>,
|
||||
pub recovery_attempt_context_recorded: bool,
|
||||
}
|
||||
|
||||
impl GreenEvidence {
|
||||
#[must_use]
|
||||
pub fn new(observed_level: GreenLevel) -> Self {
|
||||
Self {
|
||||
observed_level,
|
||||
test_commands: Vec::new(),
|
||||
base_branch_fresh: false,
|
||||
known_flakes: Vec::new(),
|
||||
recovery_attempt_context_recorded: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_test_command(mut self, command: impl Into<String>, exit_code: i32) -> Self {
|
||||
self.test_commands.push(TestCommandProvenance {
|
||||
command: command.into(),
|
||||
exit_code,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_base_branch_fresh(mut self, is_fresh: bool) -> Self {
|
||||
self.base_branch_fresh = is_fresh;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_known_flake(mut self, test_name: impl Into<String>, blocks_green: bool) -> Self {
|
||||
self.known_flakes.push(KnownFlake {
|
||||
test_name: test_name.into(),
|
||||
blocks_green,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_recovery_attempt_context(mut self, recorded: bool) -> Self {
|
||||
self.recovery_attempt_context_recorded = recorded;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn has_passing_test_command(&self) -> bool {
|
||||
self.test_commands.iter().any(TestCommandProvenance::passed)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct TestCommandProvenance {
|
||||
pub command: String,
|
||||
pub exit_code: i32,
|
||||
}
|
||||
|
||||
impl TestCommandProvenance {
|
||||
#[must_use]
|
||||
pub fn passed(&self) -> bool {
|
||||
self.exit_code == 0 && !self.command.trim().is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct KnownFlake {
|
||||
pub test_name: String,
|
||||
pub blocks_green: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum GreenContractRequirement {
|
||||
RequiredLevel,
|
||||
TestCommandProvenance,
|
||||
BaseBranchFreshness,
|
||||
RecoveryAttemptContext,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(tag = "outcome", rename_all = "snake_case")]
|
||||
pub enum GreenEvidenceOutcome {
|
||||
Satisfied {
|
||||
required_level: GreenLevel,
|
||||
observed_level: GreenLevel,
|
||||
},
|
||||
Unsatisfied {
|
||||
required_level: GreenLevel,
|
||||
observed_level: GreenLevel,
|
||||
missing: Vec<GreenContractRequirement>,
|
||||
blocking_flakes: Vec<KnownFlake>,
|
||||
},
|
||||
}
|
||||
|
||||
impl GreenEvidenceOutcome {
|
||||
#[must_use]
|
||||
pub fn is_satisfied(&self) -> bool {
|
||||
matches!(self, Self::Satisfied { .. })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(tag = "outcome", rename_all = "snake_case")]
|
||||
pub enum GreenContractOutcome {
|
||||
@@ -149,4 +327,83 @@ mod tests {
|
||||
}
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn merge_ready_contract_requires_provenance_beyond_test_level() {
|
||||
// given
|
||||
let contract = GreenContract::merge_ready(GreenLevel::Workspace);
|
||||
let evidence = GreenEvidence::new(GreenLevel::Workspace)
|
||||
.with_test_command("cargo test --manifest-path rust/Cargo.toml", 0);
|
||||
|
||||
// when
|
||||
let outcome = contract.evaluate_evidence(&evidence);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
outcome,
|
||||
GreenEvidenceOutcome::Unsatisfied {
|
||||
required_level: GreenLevel::Workspace,
|
||||
observed_level: GreenLevel::Workspace,
|
||||
missing: vec![
|
||||
GreenContractRequirement::BaseBranchFreshness,
|
||||
GreenContractRequirement::RecoveryAttemptContext,
|
||||
],
|
||||
blocking_flakes: vec![],
|
||||
}
|
||||
);
|
||||
assert!(!outcome.is_satisfied());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn merge_ready_contract_accepts_complete_test_provenance_context() {
|
||||
// given
|
||||
let contract = GreenContract::merge_ready(GreenLevel::Workspace);
|
||||
let evidence = GreenEvidence::new(GreenLevel::MergeReady)
|
||||
.with_test_command("cargo test --manifest-path rust/Cargo.toml", 0)
|
||||
.with_base_branch_fresh(true)
|
||||
.with_recovery_attempt_context(true);
|
||||
|
||||
// when
|
||||
let outcome = contract.evaluate_evidence(&evidence);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
outcome,
|
||||
GreenEvidenceOutcome::Satisfied {
|
||||
required_level: GreenLevel::Workspace,
|
||||
observed_level: GreenLevel::MergeReady,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn known_blocking_flake_prevents_green_contract_satisfaction() {
|
||||
// given
|
||||
let contract = GreenContract::merge_ready(GreenLevel::Workspace);
|
||||
let evidence = GreenEvidence::new(GreenLevel::MergeReady)
|
||||
.with_test_command("cargo test --manifest-path rust/Cargo.toml", 0)
|
||||
.with_base_branch_fresh(true)
|
||||
.with_recovery_attempt_context(true)
|
||||
.with_known_flake(
|
||||
"session_lifecycle_prefers_running_process_over_idle_shell",
|
||||
true,
|
||||
);
|
||||
|
||||
// when
|
||||
let outcome = contract.evaluate_evidence(&evidence);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
outcome,
|
||||
GreenEvidenceOutcome::Unsatisfied {
|
||||
required_level: GreenLevel::Workspace,
|
||||
observed_level: GreenLevel::MergeReady,
|
||||
missing: vec![],
|
||||
blocking_flakes: vec![KnownFlake {
|
||||
test_name: "session_lifecycle_prefers_running_process_over_idle_shell"
|
||||
.to_string(),
|
||||
blocks_green: true,
|
||||
}],
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -449,18 +449,21 @@ pub fn compute_event_fingerprint(
|
||||
status: &LaneEventStatus,
|
||||
data: Option<&serde_json::Value>,
|
||||
) -> String {
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
let mut hasher = DefaultHasher::new();
|
||||
format!("{event:?}").hash(&mut hasher);
|
||||
format!("{status:?}").hash(&mut hasher);
|
||||
if let Some(d) = data {
|
||||
serde_json::to_string(d)
|
||||
.unwrap_or_default()
|
||||
.hash(&mut hasher);
|
||||
let payload = serde_json::json!({
|
||||
"event": event,
|
||||
"status": status,
|
||||
"data": data,
|
||||
});
|
||||
let canonical = serde_json::to_vec(&payload).unwrap_or_default();
|
||||
let digest = Sha256::digest(canonical);
|
||||
let mut fingerprint = String::with_capacity(16);
|
||||
for byte in &digest[..8] {
|
||||
use std::fmt::Write as _;
|
||||
write!(&mut fingerprint, "{byte:02x}").expect("writing to String should not fail");
|
||||
}
|
||||
format!("{:016x}", hasher.finish())
|
||||
fingerprint
|
||||
}
|
||||
|
||||
/// Classification of event terminality for reconciliation.
|
||||
@@ -1045,6 +1048,7 @@ impl LaneEvent {
|
||||
emitted_at,
|
||||
)
|
||||
.with_optional_detail(detail)
|
||||
.with_terminal_fingerprint()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
@@ -1098,7 +1102,7 @@ impl LaneEvent {
|
||||
event =
|
||||
event.with_data(serde_json::to_value(subphase).expect("subphase should serialize"));
|
||||
}
|
||||
event
|
||||
event.with_terminal_fingerprint()
|
||||
}
|
||||
|
||||
/// Ship prepared — §4.44.5
|
||||
@@ -1170,6 +1174,21 @@ impl LaneEvent {
|
||||
#[must_use]
|
||||
pub fn with_data(mut self, data: Value) -> Self {
|
||||
self.data = Some(data);
|
||||
if is_terminal_event(self.event) {
|
||||
self = self.with_terminal_fingerprint();
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn with_terminal_fingerprint(mut self) -> Self {
|
||||
if is_terminal_event(self.event) {
|
||||
self.metadata.event_fingerprint = Some(compute_event_fingerprint(
|
||||
&self.event,
|
||||
&self.status,
|
||||
self.data.as_ref(),
|
||||
));
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -1375,6 +1394,39 @@ mod tests {
|
||||
assert_eq!(round_trip.event, LaneEventName::ShipPushedMain);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn convenience_terminal_events_attach_and_refresh_fingerprints() {
|
||||
let finished = LaneEvent::finished("2026-04-04T00:00:00Z", Some("done".to_string()));
|
||||
let initial_fingerprint = finished
|
||||
.metadata
|
||||
.event_fingerprint
|
||||
.clone()
|
||||
.expect("finished events should carry terminal fingerprint");
|
||||
|
||||
let with_payload = finished.with_data(json!({"result": "ok", "attempt": 1}));
|
||||
assert!(with_payload.metadata.event_fingerprint.is_some());
|
||||
assert_ne!(
|
||||
Some(initial_fingerprint),
|
||||
with_payload.metadata.event_fingerprint,
|
||||
"payload changes must refresh the actionable terminal fingerprint"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tool_style_finished_events_dedupe_after_payload_is_added() {
|
||||
let first = LaneEvent::finished("2026-04-04T00:00:00Z", Some("done".to_string()))
|
||||
.with_data(json!({"result": "ok"}));
|
||||
let duplicate = LaneEvent::finished("2026-04-04T00:00:01Z", Some("done again".to_string()))
|
||||
.with_data(json!({"result": "ok"}));
|
||||
|
||||
assert_eq!(
|
||||
first.metadata.event_fingerprint,
|
||||
duplicate.metadata.event_fingerprint
|
||||
);
|
||||
let deduped = dedupe_terminal_events(&[first, duplicate]);
|
||||
assert_eq!(deduped.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn commit_events_can_carry_worktree_and_supersession_metadata() {
|
||||
let event = LaneEvent::commit_created(
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
//! MCP plumbing, tool-facing file operations, and the core conversation loop
|
||||
//! that drives interactive and one-shot turns.
|
||||
|
||||
mod approval_tokens;
|
||||
mod bash;
|
||||
pub mod bash_validation;
|
||||
mod bootstrap;
|
||||
@@ -13,6 +14,7 @@ mod config;
|
||||
pub mod config_validate;
|
||||
mod conversation;
|
||||
mod file_ops;
|
||||
pub mod g004_conformance;
|
||||
mod git_context;
|
||||
pub mod green_contract;
|
||||
mod hooks;
|
||||
@@ -33,6 +35,7 @@ mod policy_engine;
|
||||
mod prompt;
|
||||
pub mod recovery_recipes;
|
||||
mod remote;
|
||||
mod report_schema;
|
||||
pub mod sandbox;
|
||||
mod session;
|
||||
pub mod session_control;
|
||||
@@ -49,6 +52,10 @@ mod trust_resolver;
|
||||
mod usage;
|
||||
pub mod worker_boot;
|
||||
|
||||
pub use approval_tokens::{
|
||||
ApprovalDelegationHop, ApprovalScope, ApprovalTokenAudit, ApprovalTokenError,
|
||||
ApprovalTokenGrant, ApprovalTokenLedger, ApprovalTokenStatus,
|
||||
};
|
||||
pub use bash::{execute_bash, BashCommandInput, BashCommandOutput};
|
||||
pub use bootstrap::{BootstrapPhase, BootstrapPlan};
|
||||
pub use branch_lock::{detect_branch_lock_collisions, BranchLockCollision, BranchLockIntent};
|
||||
@@ -74,9 +81,10 @@ pub use conversation::{
|
||||
ToolExecutor, TurnSummary,
|
||||
};
|
||||
pub use file_ops::{
|
||||
edit_file, glob_search, grep_search, read_file, write_file, EditFileOutput, GlobSearchOutput,
|
||||
GrepSearchInput, GrepSearchOutput, ReadFileOutput, StructuredPatchHunk, TextFilePayload,
|
||||
WriteFileOutput,
|
||||
edit_file, edit_file_in_workspace, glob_search, glob_search_in_workspace, grep_search,
|
||||
grep_search_in_workspace, read_file, read_file_in_workspace, write_file,
|
||||
write_file_in_workspace, EditFileOutput, GlobSearchOutput, GrepSearchInput, GrepSearchOutput,
|
||||
ReadFileOutput, StructuredPatchHunk, TextFilePayload, WriteFileOutput,
|
||||
};
|
||||
pub use git_context::{GitCommitEntry, GitContext};
|
||||
pub use hooks::{
|
||||
@@ -127,22 +135,31 @@ pub use plugin_lifecycle::{
|
||||
PluginState, ResourceInfo, ServerHealth, ServerStatus, ToolInfo,
|
||||
};
|
||||
pub use policy_engine::{
|
||||
evaluate, DiffScope, GreenLevel, LaneBlocker, LaneContext, PolicyAction, PolicyCondition,
|
||||
PolicyEngine, PolicyRule, ReconcileReason, ReviewStatus,
|
||||
evaluate, evaluate_with_events, ApprovalToken, DiffScope, GreenLevel, LaneBlocker, LaneContext,
|
||||
PolicyAction, PolicyCondition, PolicyDecisionEvent, PolicyDecisionKind, PolicyEngine,
|
||||
PolicyEvaluation, PolicyRule, ReconcileReason, ReviewStatus,
|
||||
};
|
||||
pub use prompt::{
|
||||
load_system_prompt, prepend_bullets, ContextFile, ModelFamilyIdentity, ProjectContext,
|
||||
PromptBuildError, SystemPromptBuilder, FRONTIER_MODEL_NAME, SYSTEM_PROMPT_DYNAMIC_BOUNDARY,
|
||||
};
|
||||
pub use recovery_recipes::{
|
||||
attempt_recovery, recipe_for, EscalationPolicy, FailureScenario, RecoveryContext,
|
||||
RecoveryEvent, RecoveryRecipe, RecoveryResult, RecoveryStep,
|
||||
attempt_recovery, recipe_for, EscalationPolicy, FailureScenario, RecoveryAttemptState,
|
||||
RecoveryAttemptType, RecoveryCommandResult, RecoveryContext, RecoveryEvent,
|
||||
RecoveryLedgerEntry, RecoveryRecipe, RecoveryResult, RecoveryStatusReport, RecoveryStep,
|
||||
};
|
||||
pub use remote::{
|
||||
inherited_upstream_proxy_env, no_proxy_list, read_token, upstream_proxy_ws_url,
|
||||
RemoteSessionContext, UpstreamProxyBootstrap, UpstreamProxyState, DEFAULT_REMOTE_BASE_URL,
|
||||
DEFAULT_SESSION_TOKEN_PATH, DEFAULT_SYSTEM_CA_BUNDLE, NO_PROXY_HOSTS, UPSTREAM_PROXY_ENV_KEYS,
|
||||
};
|
||||
pub use report_schema::{
|
||||
canonicalize_report, project_report, report_content_hash, report_schema_v1_registry,
|
||||
CanonicalReportV1, ClaimKind, ConsumerCapabilities, FieldDelta, FieldDeltaState,
|
||||
NegativeEvidence, NegativeFindingStatus, ProjectionProvenance, RedactionProvenance,
|
||||
ReportClaim, ReportConfidence, ReportIdentity, ReportProjectionV1, ReportSchemaField,
|
||||
ReportSchemaRegistry, SensitivityClass, DEFAULT_PROJECTION_POLICY_V1, REPORT_SCHEMA_V1,
|
||||
};
|
||||
pub use sandbox::{
|
||||
build_linux_sandbox_command, detect_container_environment, detect_container_environment_from,
|
||||
resolve_sandbox_status, resolve_sandbox_status_for_request, ContainerEnvironment,
|
||||
@@ -151,7 +168,7 @@ pub use sandbox::{
|
||||
};
|
||||
pub use session::{
|
||||
ContentBlock, ConversationMessage, MessageRole, Session, SessionCompaction, SessionError,
|
||||
SessionFork, SessionPromptEntry,
|
||||
SessionFork, SessionHeartbeat, SessionLiveness, SessionPromptEntry,
|
||||
};
|
||||
pub use sse::{IncrementalSseParser, SseEvent};
|
||||
pub use stale_base::{
|
||||
@@ -162,7 +179,10 @@ pub use stale_branch::{
|
||||
apply_policy, check_freshness, BranchFreshness, StaleBranchAction, StaleBranchEvent,
|
||||
StaleBranchPolicy,
|
||||
};
|
||||
pub use task_packet::{validate_packet, TaskPacket, TaskPacketValidationError, ValidatedPacket};
|
||||
pub use task_packet::{
|
||||
validate_packet, TaskPacket, TaskPacketValidationError, TaskResource, ValidatedPacket,
|
||||
};
|
||||
pub use task_registry::{LaneBoard, LaneBoardEntry, LaneFreshness, LaneHeartbeat};
|
||||
#[cfg(test)]
|
||||
pub use trust_resolver::{TrustConfig, TrustDecision, TrustEvent, TrustPolicy, TrustResolver};
|
||||
pub use usage::{
|
||||
|
||||
@@ -117,7 +117,7 @@ pub fn scoped_mcp_config_hash(config: &ScopedMcpServerConfig) -> String {
|
||||
format!("claudeai-proxy|{}|{}", proxy.url, proxy.id)
|
||||
}
|
||||
};
|
||||
stable_hex_hash(&rendered)
|
||||
stable_hex_hash(&format!("required:{}|{rendered}", config.required))
|
||||
}
|
||||
|
||||
fn render_command_signature(command: &[String]) -> String {
|
||||
@@ -275,10 +275,12 @@ mod tests {
|
||||
oauth: None,
|
||||
});
|
||||
let user = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::User,
|
||||
config: base_config.clone(),
|
||||
};
|
||||
let local = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: base_config,
|
||||
};
|
||||
@@ -288,6 +290,7 @@ mod tests {
|
||||
);
|
||||
|
||||
let changed = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Http(McpRemoteServerConfig {
|
||||
url: "https://vendor.example/v2/mcp".to_string(),
|
||||
|
||||
@@ -143,6 +143,7 @@ mod tests {
|
||||
#[test]
|
||||
fn bootstraps_stdio_servers_into_transport_targets() {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::User,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "uvx".to_string(),
|
||||
@@ -176,6 +177,7 @@ mod tests {
|
||||
#[test]
|
||||
fn bootstraps_remote_servers_with_oauth_auth() {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Project,
|
||||
config: McpServerConfig::Http(McpRemoteServerConfig {
|
||||
url: "https://vendor.example/mcp".to_string(),
|
||||
@@ -213,6 +215,7 @@ mod tests {
|
||||
#[test]
|
||||
fn bootstraps_websocket_and_sdk_transports_without_oauth() {
|
||||
let ws = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Ws(McpWebSocketServerConfig {
|
||||
url: "wss://vendor.example/mcp".to_string(),
|
||||
@@ -221,6 +224,7 @@ mod tests {
|
||||
}),
|
||||
};
|
||||
let sdk = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Sdk(McpSdkServerConfig {
|
||||
name: "sdk-server".to_string(),
|
||||
|
||||
@@ -230,6 +230,7 @@ pub struct ManagedMcpTool {
|
||||
pub struct UnsupportedMcpServer {
|
||||
pub server_name: String,
|
||||
pub transport: McpTransport,
|
||||
pub required: bool,
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
@@ -237,6 +238,7 @@ pub struct UnsupportedMcpServer {
|
||||
pub struct McpDiscoveryFailure {
|
||||
pub server_name: String,
|
||||
pub phase: McpLifecyclePhase,
|
||||
pub required: bool,
|
||||
pub error: String,
|
||||
pub recoverable: bool,
|
||||
pub context: BTreeMap<String, String>,
|
||||
@@ -366,7 +368,7 @@ impl McpServerManagerError {
|
||||
) && matches!(self, Self::Transport { .. } | Self::Timeout { .. })
|
||||
}
|
||||
|
||||
fn discovery_failure(&self, server_name: &str) -> McpDiscoveryFailure {
|
||||
fn discovery_failure(&self, server_name: &str, required: bool) -> McpDiscoveryFailure {
|
||||
let phase = self.lifecycle_phase();
|
||||
let recoverable = self.recoverable();
|
||||
let context = self.error_context();
|
||||
@@ -374,6 +376,7 @@ impl McpServerManagerError {
|
||||
McpDiscoveryFailure {
|
||||
server_name: server_name.to_string(),
|
||||
phase,
|
||||
required,
|
||||
error: self.to_string(),
|
||||
recoverable,
|
||||
context,
|
||||
@@ -447,7 +450,10 @@ fn unsupported_server_failed_server(server: &UnsupportedMcpServer) -> McpFailedS
|
||||
McpLifecyclePhase::ServerRegistration,
|
||||
Some(server.server_name.clone()),
|
||||
server.reason.clone(),
|
||||
BTreeMap::from([("transport".to_string(), format!("{:?}", server.transport))]),
|
||||
BTreeMap::from([
|
||||
("transport".to_string(), format!("{:?}", server.transport)),
|
||||
("required".to_string(), server.required.to_string()),
|
||||
]),
|
||||
false,
|
||||
),
|
||||
}
|
||||
@@ -464,14 +470,16 @@ struct ManagedMcpServer {
|
||||
bootstrap: McpClientBootstrap,
|
||||
process: Option<McpStdioProcess>,
|
||||
initialized: bool,
|
||||
required: bool,
|
||||
}
|
||||
|
||||
impl ManagedMcpServer {
|
||||
fn new(bootstrap: McpClientBootstrap) -> Self {
|
||||
fn new(bootstrap: McpClientBootstrap, required: bool) -> Self {
|
||||
Self {
|
||||
bootstrap,
|
||||
process: None,
|
||||
initialized: false,
|
||||
required,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -498,11 +506,15 @@ impl McpServerManager {
|
||||
for (server_name, server_config) in servers {
|
||||
if server_config.transport() == McpTransport::Stdio {
|
||||
let bootstrap = McpClientBootstrap::from_scoped_config(server_name, server_config);
|
||||
managed_servers.insert(server_name.clone(), ManagedMcpServer::new(bootstrap));
|
||||
managed_servers.insert(
|
||||
server_name.clone(),
|
||||
ManagedMcpServer::new(bootstrap, server_config.required),
|
||||
);
|
||||
} else {
|
||||
unsupported_servers.push(UnsupportedMcpServer {
|
||||
server_name: server_name.clone(),
|
||||
transport: server_config.transport(),
|
||||
required: server_config.required,
|
||||
reason: format!(
|
||||
"transport {:?} is not supported by McpServerManager",
|
||||
server_config.transport()
|
||||
@@ -576,7 +588,11 @@ impl McpServerManager {
|
||||
}
|
||||
Err(error) => {
|
||||
self.clear_routes_for_server(&server_name);
|
||||
failed_servers.push(error.discovery_failure(&server_name));
|
||||
let required = self
|
||||
.servers
|
||||
.get(&server_name)
|
||||
.is_some_and(|server| server.required);
|
||||
failed_servers.push(error.discovery_failure(&server_name, required));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -590,7 +606,11 @@ impl McpServerManager {
|
||||
failure.phase,
|
||||
Some(failure.server_name.clone()),
|
||||
failure.error.clone(),
|
||||
failure.context.clone(),
|
||||
{
|
||||
let mut context = failure.context.clone();
|
||||
context.insert("required".to_string(), failure.required.to_string());
|
||||
context
|
||||
},
|
||||
failure.recoverable,
|
||||
),
|
||||
})
|
||||
@@ -1765,6 +1785,7 @@ mod tests {
|
||||
|
||||
fn sample_bootstrap(script_path: &Path) -> McpClientBootstrap {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "/bin/sh".to_string(),
|
||||
@@ -1832,6 +1853,7 @@ mod tests {
|
||||
]);
|
||||
env.extend(extra_env);
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
@@ -1874,6 +1896,7 @@ mod tests {
|
||||
#[test]
|
||||
fn rejects_non_stdio_bootstrap() {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Sdk(crate::config::McpSdkServerConfig {
|
||||
name: "sdk-server".to_string(),
|
||||
@@ -2310,6 +2333,7 @@ mod tests {
|
||||
let servers = BTreeMap::from([(
|
||||
"slow".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
@@ -2363,6 +2387,7 @@ mod tests {
|
||||
let servers = BTreeMap::from([(
|
||||
"broken".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
@@ -2701,6 +2726,7 @@ mod tests {
|
||||
(
|
||||
"broken".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: true,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: broken_script_path.display().to_string(),
|
||||
@@ -2722,6 +2748,7 @@ mod tests {
|
||||
);
|
||||
assert_eq!(report.failed_servers.len(), 1);
|
||||
assert_eq!(report.failed_servers[0].server_name, "broken");
|
||||
assert!(report.failed_servers[0].required);
|
||||
assert_eq!(
|
||||
report.failed_servers[0].phase,
|
||||
McpLifecyclePhase::InitializeHandshake
|
||||
@@ -2742,6 +2769,14 @@ mod tests {
|
||||
assert_eq!(degraded.working_servers, vec!["alpha".to_string()]);
|
||||
assert_eq!(degraded.failed_servers.len(), 1);
|
||||
assert_eq!(degraded.failed_servers[0].server_name, "broken");
|
||||
assert_eq!(
|
||||
degraded.failed_servers[0]
|
||||
.error
|
||||
.context
|
||||
.get("required")
|
||||
.map(String::as_str),
|
||||
Some("true")
|
||||
);
|
||||
assert_eq!(
|
||||
degraded.failed_servers[0].phase,
|
||||
McpLifecyclePhase::InitializeHandshake
|
||||
@@ -2777,6 +2812,7 @@ mod tests {
|
||||
(
|
||||
"http".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: true,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Http(McpRemoteServerConfig {
|
||||
url: "https://example.test/mcp".to_string(),
|
||||
@@ -2789,6 +2825,7 @@ mod tests {
|
||||
(
|
||||
"sdk".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Sdk(McpSdkServerConfig {
|
||||
name: "sdk-server".to_string(),
|
||||
@@ -2798,6 +2835,7 @@ mod tests {
|
||||
(
|
||||
"ws".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Ws(McpWebSocketServerConfig {
|
||||
url: "wss://example.test/mcp".to_string(),
|
||||
@@ -2813,11 +2851,14 @@ mod tests {
|
||||
|
||||
assert_eq!(unsupported.len(), 3);
|
||||
assert_eq!(unsupported[0].server_name, "http");
|
||||
assert!(unsupported[0].required);
|
||||
assert_eq!(unsupported[1].server_name, "sdk");
|
||||
assert_eq!(unsupported[2].server_name, "ws");
|
||||
let failed = unsupported_server_failed_server(&unsupported[0]);
|
||||
assert_eq!(failed.phase, McpLifecyclePhase::ServerRegistration);
|
||||
assert_eq!(
|
||||
unsupported_server_failed_server(&unsupported[0]).phase,
|
||||
McpLifecyclePhase::ServerRegistration
|
||||
failed.error.context.get("required").map(String::as_str),
|
||||
Some("true")
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -442,6 +442,7 @@ mod tests {
|
||||
log_path: &Path,
|
||||
) -> ScopedMcpServerConfig {
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
|
||||
@@ -61,6 +61,25 @@ pub enum PluginState {
|
||||
}
|
||||
|
||||
impl PluginState {
|
||||
#[must_use]
|
||||
pub fn startup_event(&self) -> Option<PluginLifecycleEvent> {
|
||||
match self {
|
||||
Self::Healthy => Some(PluginLifecycleEvent::StartupHealthy),
|
||||
Self::Degraded { .. } => Some(PluginLifecycleEvent::StartupDegraded),
|
||||
Self::Failed { .. } => Some(PluginLifecycleEvent::StartupFailed),
|
||||
Self::Unconfigured
|
||||
| Self::Validated
|
||||
| Self::Starting
|
||||
| Self::ShuttingDown
|
||||
| Self::Stopped => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_startup_terminal(&self) -> bool {
|
||||
self.startup_event().is_some()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn from_servers(servers: &[ServerHealth]) -> Self {
|
||||
if servers.is_empty() {
|
||||
@@ -122,6 +141,11 @@ pub struct PluginHealthcheck {
|
||||
}
|
||||
|
||||
impl PluginHealthcheck {
|
||||
#[must_use]
|
||||
pub fn startup_event(&self) -> Option<PluginLifecycleEvent> {
|
||||
self.state.startup_event()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn new(plugin_name: impl Into<String>, servers: Vec<ServerHealth>) -> Self {
|
||||
let state = PluginState::from_servers(&servers);
|
||||
@@ -343,6 +367,41 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn startup_event_maps_terminal_health_states() {
|
||||
// given
|
||||
let healthy =
|
||||
PluginHealthcheck::new("healthy-plugin", vec![healthy_server("alpha", &["search"])]);
|
||||
let degraded = PluginHealthcheck::new(
|
||||
"degraded-plugin",
|
||||
vec![
|
||||
healthy_server("alpha", &["search"]),
|
||||
failed_server("beta", &["write"], "connection refused"),
|
||||
],
|
||||
);
|
||||
let failed = PluginHealthcheck::new(
|
||||
"failed-plugin",
|
||||
vec![failed_server("beta", &["write"], "connection refused")],
|
||||
);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
healthy.startup_event(),
|
||||
Some(PluginLifecycleEvent::StartupHealthy)
|
||||
);
|
||||
assert_eq!(
|
||||
degraded.startup_event(),
|
||||
Some(PluginLifecycleEvent::StartupDegraded)
|
||||
);
|
||||
assert_eq!(
|
||||
failed.startup_event(),
|
||||
Some(PluginLifecycleEvent::StartupFailed)
|
||||
);
|
||||
assert!(healthy.state.is_startup_terminal());
|
||||
assert_eq!(PluginState::Starting.startup_event(), None);
|
||||
assert!(!PluginState::Starting.is_startup_terminal());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn full_lifecycle_happy_path() {
|
||||
// given
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub type GreenLevel = u8;
|
||||
|
||||
const STALE_BRANCH_THRESHOLD: Duration = Duration::from_secs(60 * 60);
|
||||
const STALE_BRANCH_THRESHOLD: Duration = Duration::from_hours(1);
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct PolicyRule {
|
||||
@@ -46,6 +48,11 @@ pub enum PolicyCondition {
|
||||
ReviewPassed,
|
||||
ScopedDiff,
|
||||
TimedOut { duration: Duration },
|
||||
RetryAvailable,
|
||||
RebaseRequired,
|
||||
StaleCleanupRequired,
|
||||
ApprovalTokenPresent,
|
||||
ApprovalTokenMissing,
|
||||
}
|
||||
|
||||
impl PolicyCondition {
|
||||
@@ -58,7 +65,9 @@ impl PolicyCondition {
|
||||
Self::Or(conditions) => conditions
|
||||
.iter()
|
||||
.any(|condition| condition.matches(context)),
|
||||
Self::GreenAt { level } => context.green_level >= *level,
|
||||
Self::GreenAt { level } => {
|
||||
context.green_contract_satisfied && context.green_level >= *level
|
||||
}
|
||||
Self::StaleBranch => context.branch_freshness >= STALE_BRANCH_THRESHOLD,
|
||||
Self::StartupBlocked => context.blocker == LaneBlocker::Startup,
|
||||
Self::LaneCompleted => context.completed,
|
||||
@@ -66,6 +75,11 @@ impl PolicyCondition {
|
||||
Self::ReviewPassed => context.review_status == ReviewStatus::Approved,
|
||||
Self::ScopedDiff => context.diff_scope == DiffScope::Scoped,
|
||||
Self::TimedOut { duration } => context.branch_freshness >= *duration,
|
||||
Self::RetryAvailable => context.retry_count < context.retry_limit,
|
||||
Self::RebaseRequired => context.rebase_required,
|
||||
Self::StaleCleanupRequired => context.stale_cleanup_required,
|
||||
Self::ApprovalTokenPresent => context.approval_token.is_some(),
|
||||
Self::ApprovalTokenMissing => context.approval_token.is_none(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -75,11 +89,15 @@ pub enum PolicyAction {
|
||||
MergeToDev,
|
||||
MergeForward,
|
||||
RecoverOnce,
|
||||
Retry { reason: String },
|
||||
Rebase { reason: String },
|
||||
Escalate { reason: String },
|
||||
CloseoutLane,
|
||||
CleanupSession,
|
||||
CleanupStale { reason: String },
|
||||
Reconcile { reason: ReconcileReason },
|
||||
Notify { channel: String },
|
||||
RequireApprovalToken { operation: String },
|
||||
Block { reason: String },
|
||||
Chain(Vec<PolicyAction>),
|
||||
}
|
||||
@@ -130,16 +148,61 @@ pub enum DiffScope {
|
||||
Scoped,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ApprovalToken {
|
||||
pub token_id: String,
|
||||
pub operation: String,
|
||||
pub granted_by: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum PolicyDecisionKind {
|
||||
Retry,
|
||||
Rebase,
|
||||
Merge,
|
||||
Escalate,
|
||||
StaleCleanup,
|
||||
ApprovalRequired,
|
||||
Notify,
|
||||
Block,
|
||||
Closeout,
|
||||
Reconcile,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct PolicyDecisionEvent {
|
||||
pub lane_id: String,
|
||||
pub rule_name: String,
|
||||
pub priority: u32,
|
||||
pub kind: PolicyDecisionKind,
|
||||
pub explanation: String,
|
||||
pub approval_token_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct PolicyEvaluation {
|
||||
pub actions: Vec<PolicyAction>,
|
||||
pub events: Vec<PolicyDecisionEvent>,
|
||||
}
|
||||
|
||||
#[allow(clippy::struct_excessive_bools)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct LaneContext {
|
||||
pub lane_id: String,
|
||||
pub green_level: GreenLevel,
|
||||
pub green_contract_satisfied: bool,
|
||||
pub branch_freshness: Duration,
|
||||
pub blocker: LaneBlocker,
|
||||
pub review_status: ReviewStatus,
|
||||
pub diff_scope: DiffScope,
|
||||
pub completed: bool,
|
||||
pub reconciled: bool,
|
||||
pub retry_count: u32,
|
||||
pub retry_limit: u32,
|
||||
pub rebase_required: bool,
|
||||
pub stale_cleanup_required: bool,
|
||||
pub approval_token: Option<ApprovalToken>,
|
||||
}
|
||||
|
||||
impl LaneContext {
|
||||
@@ -156,12 +219,18 @@ impl LaneContext {
|
||||
Self {
|
||||
lane_id: lane_id.into(),
|
||||
green_level,
|
||||
green_contract_satisfied: false,
|
||||
branch_freshness,
|
||||
blocker,
|
||||
review_status,
|
||||
diff_scope,
|
||||
completed,
|
||||
reconciled: false,
|
||||
retry_count: 0,
|
||||
retry_limit: 1,
|
||||
rebase_required: false,
|
||||
stale_cleanup_required: false,
|
||||
approval_token: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,14 +240,51 @@ impl LaneContext {
|
||||
Self {
|
||||
lane_id: lane_id.into(),
|
||||
green_level: 0,
|
||||
green_contract_satisfied: false,
|
||||
branch_freshness: Duration::from_secs(0),
|
||||
blocker: LaneBlocker::None,
|
||||
review_status: ReviewStatus::Pending,
|
||||
diff_scope: DiffScope::Full,
|
||||
completed: true,
|
||||
reconciled: true,
|
||||
retry_count: 0,
|
||||
retry_limit: 1,
|
||||
rebase_required: false,
|
||||
stale_cleanup_required: false,
|
||||
approval_token: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_green_contract_satisfied(mut self, satisfied: bool) -> Self {
|
||||
self.green_contract_satisfied = satisfied;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_retry_state(mut self, retry_count: u32, retry_limit: u32) -> Self {
|
||||
self.retry_count = retry_count;
|
||||
self.retry_limit = retry_limit;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_rebase_required(mut self, required: bool) -> Self {
|
||||
self.rebase_required = required;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_stale_cleanup_required(mut self, required: bool) -> Self {
|
||||
self.stale_cleanup_required = required;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_approval_token(mut self, token: ApprovalToken) -> Self {
|
||||
self.approval_token = Some(token);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
@@ -202,17 +308,119 @@ impl PolicyEngine {
|
||||
pub fn evaluate(&self, context: &LaneContext) -> Vec<PolicyAction> {
|
||||
evaluate(self, context)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate_with_events(&self, context: &LaneContext) -> PolicyEvaluation {
|
||||
evaluate_with_events(self, context)
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate(engine: &PolicyEngine, context: &LaneContext) -> Vec<PolicyAction> {
|
||||
evaluate_with_events(engine, context).actions
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate_with_events(engine: &PolicyEngine, context: &LaneContext) -> PolicyEvaluation {
|
||||
let mut actions = Vec::new();
|
||||
let mut events = Vec::new();
|
||||
for rule in &engine.rules {
|
||||
if rule.matches(context) {
|
||||
let before = actions.len();
|
||||
rule.action.flatten_into(&mut actions);
|
||||
for action in &actions[before..] {
|
||||
events.push(decision_event(rule, context, action));
|
||||
}
|
||||
}
|
||||
}
|
||||
actions
|
||||
PolicyEvaluation { actions, events }
|
||||
}
|
||||
|
||||
fn decision_event(
|
||||
rule: &PolicyRule,
|
||||
context: &LaneContext,
|
||||
action: &PolicyAction,
|
||||
) -> PolicyDecisionEvent {
|
||||
let (kind, explanation) = match action {
|
||||
PolicyAction::MergeToDev | PolicyAction::MergeForward => (
|
||||
PolicyDecisionKind::Merge,
|
||||
format!(
|
||||
"rule '{}' allows merge action for lane {}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::RecoverOnce | PolicyAction::Retry { reason: _ } => (
|
||||
PolicyDecisionKind::Retry,
|
||||
format!(
|
||||
"rule '{}' allows retry {}/{} for lane {}",
|
||||
rule.name, context.retry_count, context.retry_limit, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::Rebase { reason } => (
|
||||
PolicyDecisionKind::Rebase,
|
||||
format!("rule '{}' requires rebase: {reason}", rule.name),
|
||||
),
|
||||
PolicyAction::Escalate { reason } => (
|
||||
PolicyDecisionKind::Escalate,
|
||||
format!(
|
||||
"rule '{}' escalates lane {}: {reason}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::CleanupStale { reason } => (
|
||||
PolicyDecisionKind::StaleCleanup,
|
||||
format!("rule '{}' requests cleanup: {reason}", rule.name),
|
||||
),
|
||||
PolicyAction::CleanupSession => (
|
||||
PolicyDecisionKind::StaleCleanup,
|
||||
format!("rule '{}' requests session cleanup", rule.name),
|
||||
),
|
||||
PolicyAction::CloseoutLane => (
|
||||
PolicyDecisionKind::Closeout,
|
||||
format!("rule '{}' closes out lane {}", rule.name, context.lane_id),
|
||||
),
|
||||
PolicyAction::Reconcile { reason } => (
|
||||
PolicyDecisionKind::Reconcile,
|
||||
format!(
|
||||
"rule '{}' reconciles lane {}: {reason:?}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::Notify { channel } => (
|
||||
PolicyDecisionKind::Notify,
|
||||
format!("rule '{}' notifies {channel}", rule.name),
|
||||
),
|
||||
PolicyAction::RequireApprovalToken { operation } => (
|
||||
PolicyDecisionKind::ApprovalRequired,
|
||||
format!(
|
||||
"rule '{}' requires approval token for {operation}",
|
||||
rule.name
|
||||
),
|
||||
),
|
||||
PolicyAction::Block { reason } => (
|
||||
PolicyDecisionKind::Block,
|
||||
format!(
|
||||
"rule '{}' blocks lane {}: {reason}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::Chain(_) => (
|
||||
PolicyDecisionKind::Notify,
|
||||
format!("rule '{}' expanded a chained action", rule.name),
|
||||
),
|
||||
};
|
||||
|
||||
PolicyDecisionEvent {
|
||||
lane_id: context.lane_id.clone(),
|
||||
rule_name: rule.name.clone(),
|
||||
priority: rule.priority,
|
||||
kind,
|
||||
explanation,
|
||||
approval_token_id: context
|
||||
.approval_token
|
||||
.as_ref()
|
||||
.map(|token| token.token_id.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -220,8 +428,9 @@ mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use super::{
|
||||
evaluate, DiffScope, LaneBlocker, LaneContext, PolicyAction, PolicyCondition, PolicyEngine,
|
||||
PolicyRule, ReconcileReason, ReviewStatus, STALE_BRANCH_THRESHOLD,
|
||||
evaluate, ApprovalToken, DiffScope, LaneBlocker, LaneContext, PolicyAction,
|
||||
PolicyCondition, PolicyDecisionKind, PolicyEngine, PolicyRule, ReconcileReason,
|
||||
ReviewStatus, STALE_BRANCH_THRESHOLD,
|
||||
};
|
||||
|
||||
fn default_context() -> LaneContext {
|
||||
@@ -257,7 +466,8 @@ mod tests {
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
);
|
||||
)
|
||||
.with_green_contract_satisfied(true);
|
||||
|
||||
// when
|
||||
let actions = engine.evaluate(&context);
|
||||
@@ -266,6 +476,36 @@ mod tests {
|
||||
assert_eq!(actions, vec![PolicyAction::MergeToDev]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn merge_rule_blocks_when_green_tests_lack_contract_provenance() {
|
||||
// given
|
||||
let engine = PolicyEngine::new(vec![PolicyRule::new(
|
||||
"merge-to-dev",
|
||||
PolicyCondition::And(vec![
|
||||
PolicyCondition::GreenAt { level: 2 },
|
||||
PolicyCondition::ScopedDiff,
|
||||
PolicyCondition::ReviewPassed,
|
||||
]),
|
||||
PolicyAction::MergeToDev,
|
||||
20,
|
||||
)]);
|
||||
let context = LaneContext::new(
|
||||
"lane-7",
|
||||
3,
|
||||
Duration::from_secs(5),
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
);
|
||||
|
||||
// when
|
||||
let actions = engine.evaluate(&context);
|
||||
|
||||
// then
|
||||
assert!(actions.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stale_branch_rule_fires_at_threshold() {
|
||||
// given
|
||||
@@ -468,7 +708,8 @@ mod tests {
|
||||
ReviewStatus::Pending,
|
||||
DiffScope::Full,
|
||||
false,
|
||||
);
|
||||
)
|
||||
.with_green_contract_satisfied(true);
|
||||
|
||||
// when
|
||||
let actions = engine.evaluate(&context);
|
||||
@@ -489,6 +730,121 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::duration_suboptimal_units, clippy::too_many_lines)]
|
||||
fn executable_decision_table_emits_retry_rebase_merge_escalate_cleanup_and_approval_events() {
|
||||
let engine = PolicyEngine::new(vec![
|
||||
PolicyRule::new(
|
||||
"retry-available",
|
||||
PolicyCondition::RetryAvailable,
|
||||
PolicyAction::Retry {
|
||||
reason: "transient failure".to_string(),
|
||||
},
|
||||
1,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"rebase-required",
|
||||
PolicyCondition::RebaseRequired,
|
||||
PolicyAction::Rebase {
|
||||
reason: "base branch moved".to_string(),
|
||||
},
|
||||
2,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"stale-cleanup",
|
||||
PolicyCondition::StaleCleanupRequired,
|
||||
PolicyAction::CleanupStale {
|
||||
reason: "lease expired".to_string(),
|
||||
},
|
||||
3,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"approval-required",
|
||||
PolicyCondition::ApprovalTokenMissing,
|
||||
PolicyAction::RequireApprovalToken {
|
||||
operation: "merge".to_string(),
|
||||
},
|
||||
4,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"merge-approved",
|
||||
PolicyCondition::And(vec![
|
||||
PolicyCondition::ApprovalTokenPresent,
|
||||
PolicyCondition::GreenAt { level: 2 },
|
||||
PolicyCondition::ScopedDiff,
|
||||
PolicyCondition::ReviewPassed,
|
||||
]),
|
||||
PolicyAction::MergeToDev,
|
||||
5,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"retry-exhausted",
|
||||
PolicyCondition::TimedOut {
|
||||
duration: Duration::from_secs(60),
|
||||
},
|
||||
PolicyAction::Escalate {
|
||||
reason: "lane timed out".to_string(),
|
||||
},
|
||||
6,
|
||||
),
|
||||
]);
|
||||
|
||||
let missing_token_context = LaneContext::new(
|
||||
"lane-cc2",
|
||||
2,
|
||||
Duration::from_secs(90),
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
)
|
||||
.with_green_contract_satisfied(true)
|
||||
.with_retry_state(0, 1)
|
||||
.with_rebase_required(true)
|
||||
.with_stale_cleanup_required(true);
|
||||
|
||||
let missing = engine.evaluate_with_events(&missing_token_context);
|
||||
assert!(missing.actions.contains(&PolicyAction::Retry {
|
||||
reason: "transient failure".to_string()
|
||||
}));
|
||||
assert!(missing.actions.contains(&PolicyAction::Rebase {
|
||||
reason: "base branch moved".to_string()
|
||||
}));
|
||||
assert!(missing.actions.contains(&PolicyAction::CleanupStale {
|
||||
reason: "lease expired".to_string()
|
||||
}));
|
||||
assert!(missing
|
||||
.actions
|
||||
.contains(&PolicyAction::RequireApprovalToken {
|
||||
operation: "merge".to_string()
|
||||
}));
|
||||
assert!(missing.actions.contains(&PolicyAction::Escalate {
|
||||
reason: "lane timed out".to_string()
|
||||
}));
|
||||
assert!(missing
|
||||
.events
|
||||
.iter()
|
||||
.any(|event| event.kind == PolicyDecisionKind::ApprovalRequired
|
||||
&& event.explanation.contains("approval token")));
|
||||
|
||||
let approved_context = missing_token_context.with_approval_token(ApprovalToken {
|
||||
token_id: "approval-123".to_string(),
|
||||
operation: "merge".to_string(),
|
||||
granted_by: "leader".to_string(),
|
||||
});
|
||||
let approved = engine.evaluate_with_events(&approved_context);
|
||||
assert!(approved.actions.contains(&PolicyAction::MergeToDev));
|
||||
let merge_event = approved
|
||||
.events
|
||||
.iter()
|
||||
.find(|event| event.kind == PolicyDecisionKind::Merge)
|
||||
.expect("merge event should be emitted");
|
||||
assert_eq!(
|
||||
merge_event.approval_token_id.as_deref(),
|
||||
Some("approval-123")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reconciled_lane_emits_reconcile_and_cleanup() {
|
||||
// given — a lane where branch is already merged, no PR needed, session stale
|
||||
|
||||
@@ -121,6 +121,21 @@ pub enum RecoveryResult {
|
||||
},
|
||||
}
|
||||
|
||||
/// Type of recovery execution represented in the ledger.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RecoveryAttemptType {
|
||||
Automatic,
|
||||
}
|
||||
|
||||
/// Result for one executable recovery command/step.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RecoveryCommandResult {
|
||||
pub command: RecoveryStep,
|
||||
pub status: RecoveryAttemptState,
|
||||
pub result: String,
|
||||
}
|
||||
|
||||
/// Structured event emitted during recovery.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
@@ -135,14 +150,59 @@ pub enum RecoveryEvent {
|
||||
Escalated,
|
||||
}
|
||||
|
||||
/// Machine-readable recovery progress for one failure scenario.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RecoveryLedgerEntry {
|
||||
pub recipe_id: String,
|
||||
pub attempt_type: RecoveryAttemptType,
|
||||
pub trigger: FailureScenario,
|
||||
pub attempt_count: u32,
|
||||
pub retry_limit: u32,
|
||||
pub attempts_remaining: u32,
|
||||
pub state: RecoveryAttemptState,
|
||||
pub started_at: Option<String>,
|
||||
pub finished_at: Option<String>,
|
||||
pub command_results: Vec<RecoveryCommandResult>,
|
||||
pub result: Option<RecoveryResult>,
|
||||
pub last_failure_summary: Option<String>,
|
||||
pub escalation_reason: Option<String>,
|
||||
}
|
||||
|
||||
/// Current state of a recovery recipe attempt.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RecoveryAttemptState {
|
||||
Queued,
|
||||
Running,
|
||||
Succeeded,
|
||||
Failed,
|
||||
Exhausted,
|
||||
}
|
||||
|
||||
/// Machine-readable status projection for callers that need to
|
||||
/// distinguish an untouched scenario from an exhausted recovery.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RecoveryStatusReport {
|
||||
pub scenario: FailureScenario,
|
||||
pub attempted: bool,
|
||||
pub state: Option<RecoveryAttemptState>,
|
||||
pub attempt_count: u32,
|
||||
pub retry_limit: Option<u32>,
|
||||
pub attempts_remaining: Option<u32>,
|
||||
pub escalation_reason: Option<String>,
|
||||
}
|
||||
|
||||
/// Minimal context for tracking recovery state and emitting events.
|
||||
///
|
||||
/// Holds per-scenario attempt counts, a structured event log, and an
|
||||
/// optional simulation knob for controlling step outcomes during tests.
|
||||
/// Holds per-scenario attempt counts, a structured event log, a recovery
|
||||
/// attempt ledger, and an optional simulation knob for controlling step
|
||||
/// outcomes during tests.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct RecoveryContext {
|
||||
attempts: HashMap<FailureScenario, u32>,
|
||||
events: Vec<RecoveryEvent>,
|
||||
ledger: HashMap<FailureScenario, RecoveryLedgerEntry>,
|
||||
clock_tick: u64,
|
||||
/// Optional step index at which simulated execution fails.
|
||||
/// `None` means all steps succeed.
|
||||
fail_at_step: Option<usize>,
|
||||
@@ -172,6 +232,51 @@ impl RecoveryContext {
|
||||
pub fn attempt_count(&self, scenario: &FailureScenario) -> u32 {
|
||||
self.attempts.get(scenario).copied().unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Returns the machine-readable recovery ledger entry for a scenario.
|
||||
#[must_use]
|
||||
pub fn ledger_entry(&self, scenario: &FailureScenario) -> Option<&RecoveryLedgerEntry> {
|
||||
self.ledger.get(scenario)
|
||||
}
|
||||
|
||||
/// Returns all recovery ledger entries currently tracked by this context.
|
||||
#[must_use]
|
||||
pub fn ledger_entries(&self) -> Vec<&RecoveryLedgerEntry> {
|
||||
let mut entries: Vec<_> = self.ledger.values().collect();
|
||||
entries.sort_by(|left, right| left.recipe_id.cmp(&right.recipe_id));
|
||||
entries
|
||||
}
|
||||
|
||||
/// Returns a compact machine-readable recovery status for a scenario,
|
||||
/// including `attempted = false` when no ledger entry exists yet.
|
||||
#[must_use]
|
||||
pub fn status_report(&self, scenario: &FailureScenario) -> RecoveryStatusReport {
|
||||
self.ledger_entry(scenario).map_or(
|
||||
RecoveryStatusReport {
|
||||
scenario: *scenario,
|
||||
attempted: false,
|
||||
state: None,
|
||||
attempt_count: 0,
|
||||
retry_limit: None,
|
||||
attempts_remaining: None,
|
||||
escalation_reason: None,
|
||||
},
|
||||
|entry| RecoveryStatusReport {
|
||||
scenario: *scenario,
|
||||
attempted: entry.attempt_count > 0,
|
||||
state: Some(entry.state),
|
||||
attempt_count: entry.attempt_count,
|
||||
retry_limit: Some(entry.retry_limit),
|
||||
attempts_remaining: Some(entry.attempts_remaining),
|
||||
escalation_reason: entry.escalation_reason.clone(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn next_timestamp(&mut self) -> String {
|
||||
self.clock_tick += 1;
|
||||
format!("recovery-ledger-tick-{}", self.clock_tick)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the known recovery recipe for the given failure scenario.
|
||||
@@ -233,18 +338,51 @@ pub fn recipe_for(scenario: &FailureScenario) -> RecoveryRecipe {
|
||||
/// Looks up the recipe, enforces the one-attempt-before-escalation
|
||||
/// policy, simulates step execution (controlled by the context), and
|
||||
/// emits structured [`RecoveryEvent`]s for every attempt.
|
||||
#[allow(clippy::too_many_lines)]
|
||||
pub fn attempt_recovery(scenario: &FailureScenario, ctx: &mut RecoveryContext) -> RecoveryResult {
|
||||
let recipe = recipe_for(scenario);
|
||||
let attempt_count = ctx.attempts.entry(*scenario).or_insert(0);
|
||||
let recipe_id = scenario.to_string();
|
||||
ctx.ledger
|
||||
.entry(*scenario)
|
||||
.or_insert_with(|| RecoveryLedgerEntry {
|
||||
recipe_id: recipe_id.clone(),
|
||||
attempt_type: RecoveryAttemptType::Automatic,
|
||||
trigger: *scenario,
|
||||
attempt_count: 0,
|
||||
retry_limit: recipe.max_attempts,
|
||||
attempts_remaining: recipe.max_attempts,
|
||||
state: RecoveryAttemptState::Queued,
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
command_results: Vec::new(),
|
||||
result: None,
|
||||
last_failure_summary: None,
|
||||
escalation_reason: None,
|
||||
});
|
||||
|
||||
let current_attempts = ctx.attempt_count(scenario);
|
||||
|
||||
// Enforce one automatic recovery attempt before escalation.
|
||||
if *attempt_count >= recipe.max_attempts {
|
||||
if current_attempts >= recipe.max_attempts {
|
||||
let result = RecoveryResult::EscalationRequired {
|
||||
reason: format!(
|
||||
"max recovery attempts ({}) exceeded for {}",
|
||||
recipe.max_attempts, scenario
|
||||
),
|
||||
};
|
||||
let finished_at = ctx.next_timestamp();
|
||||
if let Some(entry) = ctx.ledger.get_mut(scenario) {
|
||||
entry.attempt_count = current_attempts;
|
||||
entry.attempts_remaining = 0;
|
||||
entry.state = RecoveryAttemptState::Exhausted;
|
||||
entry.finished_at = Some(finished_at);
|
||||
entry.result = Some(result.clone());
|
||||
let RecoveryResult::EscalationRequired { reason } = &result else {
|
||||
unreachable!("exhaustion always produces escalation");
|
||||
};
|
||||
entry.last_failure_summary = Some(reason.clone());
|
||||
entry.escalation_reason = Some(reason.clone());
|
||||
}
|
||||
ctx.events.push(RecoveryEvent::RecoveryAttempted {
|
||||
scenario: *scenario,
|
||||
recipe,
|
||||
@@ -254,19 +392,44 @@ pub fn attempt_recovery(scenario: &FailureScenario, ctx: &mut RecoveryContext) -
|
||||
return result;
|
||||
}
|
||||
|
||||
*attempt_count += 1;
|
||||
let updated_attempts = ctx.attempts.entry(*scenario).or_insert(0);
|
||||
*updated_attempts += 1;
|
||||
let updated_attempts = *updated_attempts;
|
||||
let started_at = ctx.next_timestamp();
|
||||
if let Some(entry) = ctx.ledger.get_mut(scenario) {
|
||||
entry.attempt_count = updated_attempts;
|
||||
entry.attempts_remaining = recipe.max_attempts.saturating_sub(updated_attempts);
|
||||
entry.state = RecoveryAttemptState::Running;
|
||||
entry.started_at = Some(started_at);
|
||||
entry.finished_at = None;
|
||||
entry.command_results.clear();
|
||||
entry.result = None;
|
||||
entry.last_failure_summary = None;
|
||||
entry.escalation_reason = None;
|
||||
}
|
||||
|
||||
// Execute steps, honoring the optional fail_at_step simulation.
|
||||
let fail_index = ctx.fail_at_step;
|
||||
let mut executed = Vec::new();
|
||||
let mut command_results = Vec::new();
|
||||
let mut failed = false;
|
||||
|
||||
for (i, step) in recipe.steps.iter().enumerate() {
|
||||
if fail_index == Some(i) {
|
||||
command_results.push(RecoveryCommandResult {
|
||||
command: step.clone(),
|
||||
status: RecoveryAttemptState::Failed,
|
||||
result: format!("step {i} failed for {scenario}"),
|
||||
});
|
||||
failed = true;
|
||||
break;
|
||||
}
|
||||
executed.push(step.clone());
|
||||
command_results.push(RecoveryCommandResult {
|
||||
command: step.clone(),
|
||||
status: RecoveryAttemptState::Succeeded,
|
||||
result: format!("step {i} succeeded for {scenario}"),
|
||||
});
|
||||
}
|
||||
|
||||
let result = if failed {
|
||||
@@ -288,6 +451,29 @@ pub fn attempt_recovery(scenario: &FailureScenario, ctx: &mut RecoveryContext) -
|
||||
};
|
||||
|
||||
// Emit the attempt as structured event data.
|
||||
let finished_at = ctx.next_timestamp();
|
||||
if let Some(entry) = ctx.ledger.get_mut(scenario) {
|
||||
entry.finished_at = Some(finished_at);
|
||||
entry.command_results = command_results;
|
||||
entry.result = Some(result.clone());
|
||||
match &result {
|
||||
RecoveryResult::Recovered { .. } => {
|
||||
entry.state = RecoveryAttemptState::Succeeded;
|
||||
}
|
||||
RecoveryResult::PartialRecovery { remaining, .. } => {
|
||||
entry.state = RecoveryAttemptState::Failed;
|
||||
entry.last_failure_summary = Some(format!(
|
||||
"{} step(s) remaining after partial recovery",
|
||||
remaining.len()
|
||||
));
|
||||
}
|
||||
RecoveryResult::EscalationRequired { reason } => {
|
||||
entry.state = RecoveryAttemptState::Exhausted;
|
||||
entry.last_failure_summary = Some(reason.clone());
|
||||
entry.escalation_reason = Some(reason.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx.events.push(RecoveryEvent::RecoveryAttempted {
|
||||
scenario: *scenario,
|
||||
recipe,
|
||||
@@ -499,6 +685,126 @@ mod tests {
|
||||
assert_eq!(ctx.attempt_count(&FailureScenario::PromptMisdelivery), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_context_exposes_machine_readable_ledger() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new();
|
||||
|
||||
// when
|
||||
let result = attempt_recovery(&FailureScenario::StaleBranch, &mut ctx);
|
||||
|
||||
// then
|
||||
assert_eq!(result, RecoveryResult::Recovered { steps_taken: 2 });
|
||||
let entry = ctx
|
||||
.ledger_entry(&FailureScenario::StaleBranch)
|
||||
.expect("stale branch ledger entry");
|
||||
assert_eq!(entry.recipe_id, "stale_branch");
|
||||
assert_eq!(entry.attempt_type, RecoveryAttemptType::Automatic);
|
||||
assert_eq!(entry.trigger, FailureScenario::StaleBranch);
|
||||
assert_eq!(entry.attempt_count, 1);
|
||||
assert_eq!(entry.retry_limit, 1);
|
||||
assert_eq!(entry.attempts_remaining, 0);
|
||||
assert_eq!(entry.state, RecoveryAttemptState::Succeeded);
|
||||
assert!(entry.started_at.is_some());
|
||||
assert!(entry.finished_at.is_some());
|
||||
assert_eq!(
|
||||
entry.result,
|
||||
Some(RecoveryResult::Recovered { steps_taken: 2 })
|
||||
);
|
||||
assert_eq!(entry.command_results.len(), 2);
|
||||
assert_eq!(entry.command_results[0].command, RecoveryStep::RebaseBranch);
|
||||
assert_eq!(
|
||||
entry.command_results[0].status,
|
||||
RecoveryAttemptState::Succeeded
|
||||
);
|
||||
assert_eq!(entry.last_failure_summary, None);
|
||||
assert_eq!(entry.escalation_reason, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_ledger_records_exhausted_escalation_reason() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new();
|
||||
let scenario = FailureScenario::PromptMisdelivery;
|
||||
|
||||
// when
|
||||
let _ = attempt_recovery(&scenario, &mut ctx);
|
||||
let result = attempt_recovery(&scenario, &mut ctx);
|
||||
|
||||
// then
|
||||
assert!(matches!(result, RecoveryResult::EscalationRequired { .. }));
|
||||
let entry = ctx.ledger_entry(&scenario).expect("ledger entry");
|
||||
assert_eq!(entry.state, RecoveryAttemptState::Exhausted);
|
||||
assert_eq!(entry.attempt_count, 1);
|
||||
assert_eq!(entry.attempts_remaining, 0);
|
||||
assert!(matches!(
|
||||
entry.result,
|
||||
Some(RecoveryResult::EscalationRequired { .. })
|
||||
));
|
||||
assert!(entry
|
||||
.escalation_reason
|
||||
.as_deref()
|
||||
.expect("escalation reason")
|
||||
.contains("max recovery attempts"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_status_report_distinguishes_not_attempted_from_exhausted() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new();
|
||||
let scenario = FailureScenario::PromptMisdelivery;
|
||||
|
||||
// then — no ledger entry is not the same as exhausted.
|
||||
let not_attempted = ctx.status_report(&scenario);
|
||||
assert!(!not_attempted.attempted);
|
||||
assert_eq!(not_attempted.state, None);
|
||||
assert_eq!(not_attempted.attempt_count, 0);
|
||||
assert_eq!(not_attempted.retry_limit, None);
|
||||
|
||||
// when — one allowed attempt then one extra attempt.
|
||||
let _ = attempt_recovery(&scenario, &mut ctx);
|
||||
let _ = attempt_recovery(&scenario, &mut ctx);
|
||||
|
||||
// then
|
||||
let exhausted = ctx.status_report(&scenario);
|
||||
assert!(exhausted.attempted);
|
||||
assert_eq!(exhausted.state, Some(RecoveryAttemptState::Exhausted));
|
||||
assert_eq!(exhausted.attempt_count, 1);
|
||||
assert_eq!(exhausted.retry_limit, Some(1));
|
||||
assert_eq!(exhausted.attempts_remaining, Some(0));
|
||||
assert!(exhausted
|
||||
.escalation_reason
|
||||
.as_deref()
|
||||
.is_some_and(|reason| reason.contains("max recovery attempts")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_ledger_records_failed_command_result() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new().with_fail_at_step(1);
|
||||
let scenario = FailureScenario::PartialPluginStartup;
|
||||
|
||||
// when
|
||||
let result = attempt_recovery(&scenario, &mut ctx);
|
||||
|
||||
// then
|
||||
assert!(matches!(result, RecoveryResult::PartialRecovery { .. }));
|
||||
let entry = ctx.ledger_entry(&scenario).expect("ledger entry");
|
||||
assert_eq!(entry.state, RecoveryAttemptState::Failed);
|
||||
assert_eq!(entry.command_results.len(), 2);
|
||||
assert_eq!(
|
||||
entry.command_results[0].status,
|
||||
RecoveryAttemptState::Succeeded
|
||||
);
|
||||
assert_eq!(
|
||||
entry.command_results[1].status,
|
||||
RecoveryAttemptState::Failed
|
||||
);
|
||||
assert!(entry.command_results[1]
|
||||
.result
|
||||
.contains("partial_plugin_startup"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stale_branch_recipe_has_rebase_then_clean_build() {
|
||||
// given
|
||||
|
||||
552
rust/crates/runtime/src/report_schema.rs
Normal file
552
rust/crates/runtime/src/report_schema.rs
Normal file
@@ -0,0 +1,552 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
pub const REPORT_SCHEMA_V1: &str = "claw.report.v1";
|
||||
pub const DEFAULT_PROJECTION_POLICY_V1: &str = "claw.report.projection.v1";
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ClaimKind {
|
||||
ObservedFact,
|
||||
Inference,
|
||||
Hypothesis,
|
||||
Recommendation,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ReportConfidence {
|
||||
High,
|
||||
Medium,
|
||||
Low,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum SensitivityClass {
|
||||
Public,
|
||||
Internal,
|
||||
OperatorOnly,
|
||||
Secret,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum FieldDeltaState {
|
||||
Changed,
|
||||
Unchanged,
|
||||
Cleared,
|
||||
CarriedForward,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum NegativeFindingStatus {
|
||||
NotObservedInCheckedScope,
|
||||
UnknownNotChecked,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportClaim {
|
||||
pub id: String,
|
||||
pub kind: ClaimKind,
|
||||
pub text: String,
|
||||
pub confidence: ReportConfidence,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub evidence: Vec<String>,
|
||||
pub sensitivity: SensitivityClass,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct NegativeEvidence {
|
||||
pub id: String,
|
||||
pub status: NegativeFindingStatus,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub checked_surfaces: Vec<String>,
|
||||
pub query: String,
|
||||
pub window: String,
|
||||
pub sensitivity: SensitivityClass,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct FieldDelta {
|
||||
pub field: String,
|
||||
pub state: FieldDeltaState,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub previous_hash: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub current_hash: Option<String>,
|
||||
pub attribution: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportIdentity {
|
||||
pub report_id: String,
|
||||
pub content_hash: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct CanonicalReportV1 {
|
||||
pub schema_version: String,
|
||||
pub identity: ReportIdentity,
|
||||
pub generated_at: String,
|
||||
pub producer: String,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub claims: Vec<ReportClaim>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub negative_evidence: Vec<NegativeEvidence>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub field_deltas: Vec<FieldDelta>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ConsumerCapabilities {
|
||||
pub consumer: String,
|
||||
#[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
|
||||
pub schema_versions: BTreeSet<String>,
|
||||
#[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
|
||||
pub field_families: BTreeSet<String>,
|
||||
pub max_sensitivity: SensitivityClass,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RedactionProvenance {
|
||||
pub field_path: String,
|
||||
pub reason: String,
|
||||
pub policy_id: String,
|
||||
pub original_hash: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ProjectionProvenance {
|
||||
pub policy_id: String,
|
||||
pub source_schema_version: String,
|
||||
pub source_report_id: String,
|
||||
pub source_content_hash: String,
|
||||
pub consumer: String,
|
||||
pub downgraded: bool,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub omitted_field_families: Vec<String>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub redactions: Vec<RedactionProvenance>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportProjectionV1 {
|
||||
pub schema_version: String,
|
||||
pub projection_id: String,
|
||||
pub view: String,
|
||||
pub provenance: ProjectionProvenance,
|
||||
pub payload: Value,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportSchemaField {
|
||||
pub id: String,
|
||||
pub description: String,
|
||||
pub required: bool,
|
||||
pub field_family: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportSchemaRegistry {
|
||||
pub schema_version: String,
|
||||
pub compatibility: String,
|
||||
pub fields: Vec<ReportSchemaField>,
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn report_schema_v1_registry() -> ReportSchemaRegistry {
|
||||
ReportSchemaRegistry {
|
||||
schema_version: REPORT_SCHEMA_V1.to_string(),
|
||||
compatibility: "additive fields are compatible; missing required fields are breaking"
|
||||
.to_string(),
|
||||
fields: vec![
|
||||
field(
|
||||
"identity.report_id",
|
||||
"stable canonical report identity",
|
||||
true,
|
||||
"identity",
|
||||
),
|
||||
field(
|
||||
"identity.content_hash",
|
||||
"hash of canonical payload excluding identity",
|
||||
true,
|
||||
"identity",
|
||||
),
|
||||
field(
|
||||
"claims[].kind",
|
||||
"fact/inference/hypothesis/recommendation label",
|
||||
true,
|
||||
"claims",
|
||||
),
|
||||
field(
|
||||
"claims[].confidence",
|
||||
"confidence bucket for the claim",
|
||||
true,
|
||||
"claims",
|
||||
),
|
||||
field(
|
||||
"claims[].evidence",
|
||||
"evidence ids supporting a claim",
|
||||
false,
|
||||
"claims",
|
||||
),
|
||||
field(
|
||||
"negative_evidence[]",
|
||||
"searched-and-not-found findings with checked scope",
|
||||
false,
|
||||
"negative_evidence",
|
||||
),
|
||||
field(
|
||||
"field_deltas[]",
|
||||
"field-level changed/unchanged/cleared/carried-forward attribution",
|
||||
false,
|
||||
"field_deltas",
|
||||
),
|
||||
field(
|
||||
"projection.provenance.redactions[]",
|
||||
"redaction policy provenance for projected fields",
|
||||
false,
|
||||
"projection",
|
||||
),
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn canonicalize_report(mut report: CanonicalReportV1) -> CanonicalReportV1 {
|
||||
report.schema_version = REPORT_SCHEMA_V1.to_string();
|
||||
report.claims.sort_by(|a, b| a.id.cmp(&b.id));
|
||||
report.negative_evidence.sort_by(|a, b| a.id.cmp(&b.id));
|
||||
report.field_deltas.sort_by(|a, b| a.field.cmp(&b.field));
|
||||
let content_hash = report_content_hash(&report);
|
||||
if report.identity.report_id.is_empty() {
|
||||
report.identity.report_id = format!("report-{content_hash}");
|
||||
}
|
||||
report.identity.content_hash = content_hash;
|
||||
report
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn report_content_hash(report: &CanonicalReportV1) -> String {
|
||||
let mut hashable = report.clone();
|
||||
hashable.identity.report_id.clear();
|
||||
hashable.identity.content_hash.clear();
|
||||
stable_json_hash(&serde_json::to_value(hashable).expect("report should serialize"))
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn project_report(
|
||||
report: &CanonicalReportV1,
|
||||
capabilities: &ConsumerCapabilities,
|
||||
view: impl Into<String>,
|
||||
) -> ReportProjectionV1 {
|
||||
let view = view.into();
|
||||
let supports_schema = capabilities.schema_versions.contains(REPORT_SCHEMA_V1);
|
||||
let mut omitted_field_families = Vec::new();
|
||||
let mut redactions = Vec::new();
|
||||
let mut payload = serde_json::Map::new();
|
||||
|
||||
payload.insert(
|
||||
"identity".to_string(),
|
||||
serde_json::to_value(&report.identity).expect("identity serializes"),
|
||||
);
|
||||
payload.insert(
|
||||
"generated_at".to_string(),
|
||||
Value::String(report.generated_at.clone()),
|
||||
);
|
||||
payload.insert(
|
||||
"producer".to_string(),
|
||||
Value::String(report.producer.clone()),
|
||||
);
|
||||
|
||||
if supports_family(capabilities, "claims") {
|
||||
let claims = report
|
||||
.claims
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(index, claim)| redact_claim(index, claim, capabilities, &mut redactions))
|
||||
.collect::<Vec<_>>();
|
||||
payload.insert("claims".to_string(), Value::Array(claims));
|
||||
} else {
|
||||
omitted_field_families.push("claims".to_string());
|
||||
}
|
||||
|
||||
if supports_family(capabilities, "negative_evidence") {
|
||||
payload.insert(
|
||||
"negative_evidence".to_string(),
|
||||
serde_json::to_value(&report.negative_evidence).expect("negative evidence serializes"),
|
||||
);
|
||||
} else {
|
||||
omitted_field_families.push("negative_evidence".to_string());
|
||||
}
|
||||
|
||||
if supports_family(capabilities, "field_deltas") {
|
||||
payload.insert(
|
||||
"field_deltas".to_string(),
|
||||
serde_json::to_value(&report.field_deltas).expect("field deltas serialize"),
|
||||
);
|
||||
} else {
|
||||
omitted_field_families.push("field_deltas".to_string());
|
||||
}
|
||||
|
||||
let downgraded =
|
||||
!supports_schema || !omitted_field_families.is_empty() || !redactions.is_empty();
|
||||
let provenance = ProjectionProvenance {
|
||||
policy_id: DEFAULT_PROJECTION_POLICY_V1.to_string(),
|
||||
source_schema_version: report.schema_version.clone(),
|
||||
source_report_id: report.identity.report_id.clone(),
|
||||
source_content_hash: report.identity.content_hash.clone(),
|
||||
consumer: capabilities.consumer.clone(),
|
||||
downgraded,
|
||||
omitted_field_families,
|
||||
redactions,
|
||||
};
|
||||
let mut projection = ReportProjectionV1 {
|
||||
schema_version: REPORT_SCHEMA_V1.to_string(),
|
||||
projection_id: String::new(),
|
||||
view,
|
||||
provenance,
|
||||
payload: Value::Object(payload),
|
||||
};
|
||||
projection.projection_id = stable_json_hash(&serde_json::json!({
|
||||
"view": projection.view,
|
||||
"provenance": projection.provenance,
|
||||
"payload": projection.payload,
|
||||
}));
|
||||
projection
|
||||
}
|
||||
|
||||
fn field(id: &str, description: &str, required: bool, field_family: &str) -> ReportSchemaField {
|
||||
ReportSchemaField {
|
||||
id: id.to_string(),
|
||||
description: description.to_string(),
|
||||
required,
|
||||
field_family: field_family.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn supports_family(capabilities: &ConsumerCapabilities, family: &str) -> bool {
|
||||
capabilities.field_families.is_empty() || capabilities.field_families.contains(family)
|
||||
}
|
||||
|
||||
fn redact_claim(
|
||||
index: usize,
|
||||
claim: &ReportClaim,
|
||||
capabilities: &ConsumerCapabilities,
|
||||
redactions: &mut Vec<RedactionProvenance>,
|
||||
) -> Option<Value> {
|
||||
if claim.sensitivity <= capabilities.max_sensitivity {
|
||||
return Some(serde_json::to_value(claim).expect("claim serializes"));
|
||||
}
|
||||
if claim.sensitivity == SensitivityClass::Secret {
|
||||
redactions.push(RedactionProvenance {
|
||||
field_path: format!("claims[{index}]"),
|
||||
reason: "omitted: sensitivity exceeds consumer policy".to_string(),
|
||||
policy_id: DEFAULT_PROJECTION_POLICY_V1.to_string(),
|
||||
original_hash: stable_json_hash(
|
||||
&serde_json::to_value(claim).expect("claim serializes"),
|
||||
),
|
||||
});
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut redacted = claim.clone();
|
||||
let original_hash = stable_json_hash(&serde_json::to_value(claim).expect("claim serializes"));
|
||||
redacted.text = "<redacted>".to_string();
|
||||
redacted.evidence.clear();
|
||||
redactions.push(RedactionProvenance {
|
||||
field_path: format!("claims[{index}].text"),
|
||||
reason: "transformed: sensitivity exceeds consumer policy".to_string(),
|
||||
policy_id: DEFAULT_PROJECTION_POLICY_V1.to_string(),
|
||||
original_hash,
|
||||
});
|
||||
Some(serde_json::to_value(redacted).expect("redacted claim serializes"))
|
||||
}
|
||||
|
||||
fn stable_json_hash(value: &Value) -> String {
|
||||
let normalized = normalize_json(value);
|
||||
let bytes = serde_json::to_vec(&normalized).expect("normalized json should serialize");
|
||||
let digest = Sha256::digest(bytes);
|
||||
let mut hash = String::with_capacity(16);
|
||||
for byte in &digest[..8] {
|
||||
use std::fmt::Write as _;
|
||||
write!(&mut hash, "{byte:02x}").expect("writing to String should not fail");
|
||||
}
|
||||
hash
|
||||
}
|
||||
|
||||
fn normalize_json(value: &Value) -> Value {
|
||||
match value {
|
||||
Value::Array(values) => Value::Array(values.iter().map(normalize_json).collect()),
|
||||
Value::Object(map) => {
|
||||
let sorted = map
|
||||
.iter()
|
||||
.map(|(key, value)| (key.clone(), normalize_json(value)))
|
||||
.collect::<BTreeMap<_, _>>();
|
||||
serde_json::to_value(sorted).expect("sorted map should serialize")
|
||||
}
|
||||
other => other.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{
        canonicalize_report, project_report, report_schema_v1_registry, CanonicalReportV1,
        ClaimKind, ConsumerCapabilities, FieldDelta, FieldDeltaState, NegativeEvidence,
        NegativeFindingStatus, ReportClaim, ReportConfidence, ReportIdentity, SensitivityClass,
        REPORT_SCHEMA_V1,
    };

    /// Builds a canonicalized report containing one claim at each sensitivity
    /// level (Secret, Internal, Public), one negative-evidence finding, and
    /// one cleared field delta. Identity fields start empty and are filled in
    /// by `canonicalize_report`.
    fn fixture_report() -> CanonicalReportV1 {
        canonicalize_report(CanonicalReportV1 {
            schema_version: String::new(),
            identity: ReportIdentity {
                report_id: String::new(),
                content_hash: String::new(),
            },
            generated_at: "2026-05-14T00:00:00Z".to_string(),
            producer: "worker-1".to_string(),
            claims: vec![
                ReportClaim {
                    id: "claim-secret".to_string(),
                    kind: ClaimKind::ObservedFact,
                    text: "secret token appeared in logs".to_string(),
                    confidence: ReportConfidence::High,
                    evidence: vec!["log:secret".to_string()],
                    sensitivity: SensitivityClass::Secret,
                },
                ReportClaim {
                    id: "claim-hypothesis".to_string(),
                    kind: ClaimKind::Hypothesis,
                    text: "transport restart likely caused the retry".to_string(),
                    confidence: ReportConfidence::Medium,
                    evidence: vec!["event:transport".to_string()],
                    sensitivity: SensitivityClass::Internal,
                },
                ReportClaim {
                    id: "claim-fact".to_string(),
                    kind: ClaimKind::ObservedFact,
                    text: "lane finished once".to_string(),
                    confidence: ReportConfidence::High,
                    evidence: vec!["event:lane.finished".to_string()],
                    sensitivity: SensitivityClass::Public,
                },
            ],
            negative_evidence: vec![NegativeEvidence {
                id: "neg-blocker".to_string(),
                status: NegativeFindingStatus::NotObservedInCheckedScope,
                checked_surfaces: vec!["lane_events".to_string(), "worker_status".to_string()],
                query: "current blocker".to_string(),
                window: "2026-05-14T00:00:00Z/2026-05-14T00:05:00Z".to_string(),
                sensitivity: SensitivityClass::Public,
            }],
            field_deltas: vec![FieldDelta {
                field: "blocker".to_string(),
                state: FieldDeltaState::Cleared,
                previous_hash: Some("prev123".to_string()),
                current_hash: None,
                attribution: "lane.failed reconciled to lane.finished".to_string(),
            }],
        })
    }

    /// Builds consumer capabilities for the "clawhip" consumer supporting the
    /// given field families (empty slice = empty set, i.e. no families) and a
    /// sensitivity ceiling.
    fn capabilities(families: &[&str], max_sensitivity: SensitivityClass) -> ConsumerCapabilities {
        ConsumerCapabilities {
            consumer: "clawhip".to_string(),
            schema_versions: [REPORT_SCHEMA_V1.to_string()].into_iter().collect(),
            field_families: families
                .iter()
                .map(|family| (*family).to_string())
                .collect(),
            max_sensitivity,
        }
    }

    #[test]
    fn report_schema_registry_is_self_describing() {
        let registry = report_schema_v1_registry();
        assert_eq!(registry.schema_version, REPORT_SCHEMA_V1);
        // The registry must describe claims, negative evidence, and the
        // projection's own redaction provenance.
        assert!(registry
            .fields
            .iter()
            .any(|field| field.id == "claims[].kind"));
        assert!(registry
            .fields
            .iter()
            .any(|field| field.id == "negative_evidence[]"));
        assert!(registry
            .fields
            .iter()
            .any(|field| field.id == "projection.provenance.redactions[]"));
    }

    #[test]
    fn canonical_report_labels_claims_negative_evidence_and_deltas() {
        let report = fixture_report();
        assert_eq!(report.schema_version, REPORT_SCHEMA_V1);
        assert!(report.identity.report_id.starts_with("report-"));
        assert_eq!(report.identity.content_hash.len(), 16);
        // The fixture lists claim-secret first, but the canonical report puts
        // claim-fact at index 0 — canonicalization reorders claims.
        assert_eq!(report.claims[0].id, "claim-fact");
        assert_eq!(report.claims[1].kind, ClaimKind::Hypothesis);
        assert_eq!(report.claims[1].confidence, ReportConfidence::Medium);
        assert_eq!(
            report.negative_evidence[0].status,
            NegativeFindingStatus::NotObservedInCheckedScope
        );
        assert_eq!(report.field_deltas[0].state, FieldDeltaState::Cleared);
    }

    #[test]
    fn projections_are_deterministic_and_record_redaction_provenance() {
        let report = fixture_report();
        // Public ceiling: the Internal claim gets transformed, the Secret
        // claim gets omitted.
        let capabilities = capabilities(
            &["claims", "negative_evidence", "field_deltas"],
            SensitivityClass::Public,
        );

        let first = project_report(&report, &capabilities, "delta_brief");
        let second = project_report(&report, &capabilities, "delta_brief");

        // Projecting twice must yield byte-identical results.
        assert_eq!(first, second);
        assert_eq!(first.provenance.source_report_id, report.identity.report_id);
        assert_eq!(
            first.provenance.source_content_hash,
            report.identity.content_hash
        );
        assert!(first.provenance.downgraded);
        assert_eq!(first.provenance.redactions.len(), 2);
        assert!(first
            .provenance
            .redactions
            .iter()
            .any(|redaction| redaction.field_path == "claims[1].text"));
        assert!(first
            .provenance
            .redactions
            .iter()
            .any(|redaction| redaction.field_path == "claims[2]"));
    }

    #[test]
    fn capability_negotiation_omits_unsupported_field_families() {
        let report = fixture_report();
        // Consumer only understands the "claims" family; the other two
        // families must be omitted and recorded as such.
        let capabilities = capabilities(&["claims"], SensitivityClass::Internal);
        let projection = project_report(&report, &capabilities, "legacy_clawhip");

        assert!(projection.provenance.downgraded);
        assert_eq!(
            projection.provenance.omitted_field_families,
            vec!["negative_evidence".to_string(), "field_deltas".to_string()]
        );
        assert!(projection.payload.get("claims").is_some());
        assert!(projection.payload.get("negative_evidence").is_none());
        assert!(projection.payload.get("field_deltas").is_none());
    }
}
|
||||
@@ -298,8 +298,7 @@ fn unshare_user_namespace_works() -> bool {
|
||||
.stdout(std::process::Stdio::null())
|
||||
.stderr(std::process::Stdio::null())
|
||||
.status()
|
||||
.map(|s| s.success())
|
||||
.unwrap_or(false)
|
||||
.is_ok_and(|status| status.success())
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -8,10 +8,14 @@ use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::json::{JsonError, JsonValue};
|
||||
use crate::usage::TokenUsage;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
const SESSION_VERSION: u32 = 1;
|
||||
const ROTATE_AFTER_BYTES: u64 = 256 * 1024;
|
||||
const MAX_ROTATED_FILES: usize = 3;
|
||||
const MAX_JSONL_FIELD_CHARS: usize = 16 * 1024;
|
||||
const JSONL_TRUNCATION_MARKER: &str = "… [truncated for session JSONL]";
|
||||
const JSONL_REDACTION_MARKER: &str = "[redacted]";
|
||||
static SESSION_ID_COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
static LAST_TIMESTAMP_MS: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
@@ -82,6 +86,25 @@ struct SessionPersistence {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
/// Running-state liveness classification for a session heartbeat.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SessionLiveness {
    /// Transport is alive and the last health check is within the stall
    /// threshold.
    Healthy,
    /// Transport is alive but the last health check is older than the stall
    /// threshold.
    Stalled,
    /// The underlying transport is no longer alive; overrides all other
    /// classifications.
    TransportDead,
    /// Transport is alive but no health check has ever been recorded.
    Unknown,
}
|
||||
|
||||
/// Heartbeat emitted from canonical session state, independent of terminal rendering.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct SessionHeartbeat {
|
||||
pub session_id: String,
|
||||
pub observed_at_ms: u64,
|
||||
pub transport_alive: bool,
|
||||
pub liveness: SessionLiveness,
|
||||
}
|
||||
|
||||
/// Persisted conversational state for the runtime and CLI session manager.
|
||||
///
|
||||
/// `workspace_root` binds the session to the worktree it was created in. The
|
||||
@@ -250,6 +273,35 @@ impl Session {
|
||||
self.push_message(ConversationMessage::user_text(text))
|
||||
}
|
||||
|
||||
/// Records that a health check was observed at `timestamp_ms`, feeding the
/// liveness classification in `heartbeat_at`.
///
/// Also calls `touch` (presumably refreshing the session's last-activity
/// bookkeeping — confirm against the `touch` implementation).
pub fn record_health_check(&mut self, timestamp_ms: u64) {
    self.last_health_check_ms = Some(timestamp_ms);
    self.touch();
}
|
||||
|
||||
#[must_use]
|
||||
pub fn heartbeat_at(
|
||||
&self,
|
||||
now_ms: u64,
|
||||
stalled_after_ms: u64,
|
||||
transport_alive: bool,
|
||||
) -> SessionHeartbeat {
|
||||
let liveness = match (transport_alive, self.last_health_check_ms) {
|
||||
(false, _) => SessionLiveness::TransportDead,
|
||||
(true, Some(last)) if now_ms.saturating_sub(last) <= stalled_after_ms => {
|
||||
SessionLiveness::Healthy
|
||||
}
|
||||
(true, Some(_)) => SessionLiveness::Stalled,
|
||||
(true, None) => SessionLiveness::Unknown,
|
||||
};
|
||||
|
||||
SessionHeartbeat {
|
||||
session_id: self.session_id.clone(),
|
||||
observed_at_ms: now_ms,
|
||||
transport_alive,
|
||||
liveness,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record_compaction(&mut self, summary: impl Into<String>, removed_message_count: usize) {
|
||||
self.touch();
|
||||
let count = self.compaction.as_ref().map_or(1, |value| value.count + 1);
|
||||
@@ -871,7 +923,7 @@ impl SessionCompaction {
|
||||
);
|
||||
object.insert(
|
||||
"summary".to_string(),
|
||||
JsonValue::String(self.summary.clone()),
|
||||
JsonValue::String(sanitize_jsonl_field(&self.summary)),
|
||||
);
|
||||
Ok(JsonValue::Object(object))
|
||||
}
|
||||
@@ -931,7 +983,10 @@ impl SessionPromptEntry {
|
||||
"timestamp_ms".to_string(),
|
||||
JsonValue::Number(i64::try_from(self.timestamp_ms).unwrap_or(i64::MAX)),
|
||||
);
|
||||
object.insert("text".to_string(), JsonValue::String(self.text.clone()));
|
||||
object.insert(
|
||||
"text".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(&self.text)),
|
||||
);
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
@@ -949,10 +1004,165 @@ impl SessionPromptEntry {
|
||||
fn message_record(message: &ConversationMessage) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
object.insert("type".to_string(), JsonValue::String("message".to_string()));
|
||||
object.insert("message".to_string(), message.to_json());
|
||||
object.insert("message".to_string(), persisted_message_json(message));
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
fn persisted_message_json(message: &ConversationMessage) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
object.insert(
|
||||
"role".to_string(),
|
||||
JsonValue::String(
|
||||
match message.role {
|
||||
MessageRole::System => "system",
|
||||
MessageRole::User => "user",
|
||||
MessageRole::Assistant => "assistant",
|
||||
MessageRole::Tool => "tool",
|
||||
}
|
||||
.to_string(),
|
||||
),
|
||||
);
|
||||
object.insert(
|
||||
"blocks".to_string(),
|
||||
JsonValue::Array(message.blocks.iter().map(persisted_block_json).collect()),
|
||||
);
|
||||
if let Some(usage) = message.usage {
|
||||
object.insert("usage".to_string(), usage_to_json(usage));
|
||||
}
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
fn persisted_block_json(block: &ContentBlock) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
match block {
|
||||
ContentBlock::Text { text } => {
|
||||
object.insert("type".to_string(), JsonValue::String("text".to_string()));
|
||||
object.insert(
|
||||
"text".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(text)),
|
||||
);
|
||||
}
|
||||
ContentBlock::Thinking {
|
||||
thinking,
|
||||
signature,
|
||||
} => {
|
||||
object.insert(
|
||||
"type".to_string(),
|
||||
JsonValue::String("thinking".to_string()),
|
||||
);
|
||||
object.insert(
|
||||
"thinking".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(thinking)),
|
||||
);
|
||||
if let Some(signature) = signature {
|
||||
object.insert(
|
||||
"signature".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(signature)),
|
||||
);
|
||||
}
|
||||
}
|
||||
ContentBlock::ToolUse { id, name, input } => {
|
||||
object.insert(
|
||||
"type".to_string(),
|
||||
JsonValue::String("tool_use".to_string()),
|
||||
);
|
||||
object.insert(
|
||||
"id".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(id)),
|
||||
);
|
||||
object.insert("name".to_string(), JsonValue::String(name.clone()));
|
||||
object.insert(
|
||||
"input".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(input)),
|
||||
);
|
||||
}
|
||||
ContentBlock::ToolResult {
|
||||
tool_use_id,
|
||||
tool_name,
|
||||
output,
|
||||
is_error,
|
||||
} => {
|
||||
object.insert(
|
||||
"type".to_string(),
|
||||
JsonValue::String("tool_result".to_string()),
|
||||
);
|
||||
object.insert(
|
||||
"tool_use_id".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(tool_use_id)),
|
||||
);
|
||||
object.insert(
|
||||
"tool_name".to_string(),
|
||||
JsonValue::String(tool_name.clone()),
|
||||
);
|
||||
object.insert(
|
||||
"output".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(output)),
|
||||
);
|
||||
object.insert("is_error".to_string(), JsonValue::Bool(*is_error));
|
||||
}
|
||||
}
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
fn sanitize_jsonl_field(value: &str) -> String {
|
||||
truncate_jsonl_field(&redact_jsonl_secrets(value))
|
||||
}
|
||||
|
||||
fn truncate_jsonl_field(value: &str) -> String {
|
||||
let char_count = value.chars().count();
|
||||
if char_count <= MAX_JSONL_FIELD_CHARS {
|
||||
return value.to_string();
|
||||
}
|
||||
|
||||
let keep = MAX_JSONL_FIELD_CHARS.saturating_sub(JSONL_TRUNCATION_MARKER.chars().count());
|
||||
let mut truncated = value.chars().take(keep).collect::<String>();
|
||||
truncated.push_str(JSONL_TRUNCATION_MARKER);
|
||||
truncated
|
||||
}
|
||||
|
||||
fn redact_jsonl_secrets(value: &str) -> String {
|
||||
let mut redacted = value.to_string();
|
||||
for marker in [
|
||||
"ANTHROPIC_API_KEY=",
|
||||
"ANTHROPIC_AUTH_TOKEN=",
|
||||
"OPENAI_API_KEY=",
|
||||
"DASHSCOPE_API_KEY=",
|
||||
"XAI_API_KEY=",
|
||||
"Authorization: Bearer ",
|
||||
"authorization: Bearer ",
|
||||
"Bearer sk-",
|
||||
"sk-ant-",
|
||||
] {
|
||||
redacted = redact_after_marker(&redacted, marker);
|
||||
}
|
||||
redacted
|
||||
}
|
||||
|
||||
/// Replaces the token following each occurrence of `marker` in `value` with
/// `JSONL_REDACTION_MARKER`.
///
/// The secret is taken to run from the end of the marker up to (but not
/// including) the first whitespace character or one of `'`, `"`, `,`, `}`,
/// `]`; if no such delimiter exists, the remainder of the string is treated
/// as the secret. The marker itself and the delimiter are preserved in the
/// output.
fn redact_after_marker(value: &str, marker: &str) -> String {
    let mut output = String::with_capacity(value.len());
    let mut rest = value;

    while let Some(index) = rest.find(marker) {
        // Copy everything up to and including the marker, then the
        // replacement text in place of the secret.
        let (before, after_before) = rest.split_at(index);
        output.push_str(before);
        output.push_str(marker);
        output.push_str(JSONL_REDACTION_MARKER);

        // `after_before` still starts with the marker itself; skip past it
        // to the first byte of the secret.
        let secret_start = marker.len();
        let after_marker = &after_before[secret_start..];
        // End of the secret: first delimiter-like character, or end of input.
        let secret_end = after_marker
            .char_indices()
            .find_map(|(idx, ch)| {
                (ch.is_whitespace() || matches!(ch, '\'' | '"' | ',' | '}' | ']')).then_some(idx)
            })
            .unwrap_or(after_marker.len());
        // Resume scanning after the secret; the delimiter (if any) is kept
        // and will be copied on the next iteration or the final push below.
        rest = &after_marker[secret_end..];
    }

    output.push_str(rest);
    output
}
|
||||
|
||||
fn usage_to_json(usage: TokenUsage) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
object.insert(
|
||||
@@ -1315,6 +1525,54 @@ mod tests {
|
||||
assert_eq!(restored.messages[0], ConversationMessage::user_text("hi"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn jsonl_persistence_redacts_and_truncates_oversized_payload_fields() {
|
||||
let path = temp_session_path("jsonl-safeguards");
|
||||
let secret = "sk-live-secret-should-not-persist";
|
||||
let oversized_output = format!(
|
||||
"OPENAI_API_KEY={secret}\n{}",
|
||||
"tool-output ".repeat(super::MAX_JSONL_FIELD_CHARS)
|
||||
);
|
||||
let mut session = Session::new();
|
||||
session
|
||||
.push_message(ConversationMessage::assistant(vec![
|
||||
ContentBlock::ToolUse {
|
||||
id: "tool-1".to_string(),
|
||||
name: "bash".to_string(),
|
||||
input: format!("Authorization: Bearer {secret}"),
|
||||
},
|
||||
]))
|
||||
.expect("tool use should append");
|
||||
session
|
||||
.push_message(ConversationMessage::tool_result(
|
||||
"tool-1",
|
||||
"bash",
|
||||
oversized_output,
|
||||
false,
|
||||
))
|
||||
.expect("tool result should append");
|
||||
|
||||
session.save_to_path(&path).expect("session should save");
|
||||
let persisted = fs::read_to_string(&path).expect("session jsonl should read");
|
||||
let restored = Session::load_from_path(&path).expect("session should load");
|
||||
fs::remove_file(&path).expect("temp file should be removable");
|
||||
|
||||
assert!(
|
||||
!persisted.contains(secret),
|
||||
"secret leaked into JSONL: {persisted}"
|
||||
);
|
||||
assert!(persisted.contains(super::JSONL_REDACTION_MARKER));
|
||||
assert!(persisted.contains(super::JSONL_TRUNCATION_MARKER));
|
||||
|
||||
let ContentBlock::ToolResult { output, .. } = &restored.messages[1].blocks[0] else {
|
||||
panic!("restored second message should be a tool result");
|
||||
};
|
||||
assert!(!output.contains(secret));
|
||||
assert!(output.contains(super::JSONL_REDACTION_MARKER));
|
||||
assert!(output.ends_with(super::JSONL_TRUNCATION_MARKER));
|
||||
assert!(output.chars().count() <= super::MAX_JSONL_FIELD_CHARS);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn persists_compaction_metadata() {
|
||||
let path = temp_session_path("compaction");
|
||||
@@ -1599,4 +1857,26 @@ mod workspace_sessions_dir_tests {
|
||||
fs::remove_dir_all(&tmp_a).ok();
|
||||
fs::remove_dir_all(&tmp_b).ok();
|
||||
}
|
||||
#[test]
|
||||
fn session_heartbeat_classifies_healthy_stalled_transport_dead_and_unknown() {
|
||||
let mut session = Session::new();
|
||||
assert_eq!(
|
||||
session.heartbeat_at(1_000, 500, true).liveness,
|
||||
SessionLiveness::Unknown
|
||||
);
|
||||
|
||||
session.record_health_check(800);
|
||||
assert_eq!(
|
||||
session.heartbeat_at(1_000, 500, true).liveness,
|
||||
SessionLiveness::Healthy
|
||||
);
|
||||
assert_eq!(
|
||||
session.heartbeat_at(2_000, 500, true).liveness,
|
||||
SessionLiveness::Stalled
|
||||
);
|
||||
assert_eq!(
|
||||
session.heartbeat_at(1_000, 500, false).liveness,
|
||||
SessionLiveness::TransportDead
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -163,6 +163,17 @@ impl SessionStore {
|
||||
})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn session_exists(&self, reference: &str) -> bool {
|
||||
self.resolve_reference(reference).is_ok()
|
||||
}
|
||||
|
||||
pub fn delete_session(&self, reference: &str) -> Result<SessionHandle, SessionControlError> {
|
||||
let handle = self.resolve_reference(reference)?;
|
||||
fs::remove_file(&handle.path)?;
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
pub fn load_session(
|
||||
&self,
|
||||
reference: &str,
|
||||
@@ -480,6 +491,30 @@ pub fn load_managed_session(reference: &str) -> Result<LoadedManagedSession, Ses
|
||||
load_managed_session_for(env::current_dir()?, reference)
|
||||
}
|
||||
|
||||
pub fn managed_session_exists(reference: &str) -> Result<bool, SessionControlError> {
|
||||
managed_session_exists_for(env::current_dir()?, reference)
|
||||
}
|
||||
|
||||
pub fn managed_session_exists_for(
|
||||
base_dir: impl AsRef<Path>,
|
||||
reference: &str,
|
||||
) -> Result<bool, SessionControlError> {
|
||||
let store = SessionStore::from_cwd(base_dir)?;
|
||||
Ok(store.session_exists(reference))
|
||||
}
|
||||
|
||||
pub fn delete_managed_session(reference: &str) -> Result<SessionHandle, SessionControlError> {
|
||||
delete_managed_session_for(env::current_dir()?, reference)
|
||||
}
|
||||
|
||||
pub fn delete_managed_session_for(
|
||||
base_dir: impl AsRef<Path>,
|
||||
reference: &str,
|
||||
) -> Result<SessionHandle, SessionControlError> {
|
||||
let store = SessionStore::from_cwd(base_dir)?;
|
||||
store.delete_session(reference)
|
||||
}
|
||||
|
||||
pub fn load_managed_session_for(
|
||||
base_dir: impl AsRef<Path>,
|
||||
reference: &str,
|
||||
@@ -569,10 +604,10 @@ fn path_is_within_workspace(path: &Path, workspace_root: &Path) -> bool {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
create_managed_session_handle_for, fork_managed_session_for, is_session_reference_alias,
|
||||
list_managed_sessions_for, load_managed_session_for, resolve_session_reference_for,
|
||||
workspace_fingerprint, ManagedSessionSummary, SessionControlError, SessionStore,
|
||||
LATEST_SESSION_REFERENCE,
|
||||
create_managed_session_handle_for, delete_managed_session_for, fork_managed_session_for,
|
||||
is_session_reference_alias, list_managed_sessions_for, load_managed_session_for,
|
||||
managed_session_exists_for, resolve_session_reference_for, workspace_fingerprint,
|
||||
ManagedSessionSummary, SessionControlError, SessionStore, LATEST_SESSION_REFERENCE,
|
||||
};
|
||||
use crate::session::Session;
|
||||
use std::fs;
|
||||
@@ -996,6 +1031,32 @@ mod tests {
|
||||
fs::remove_dir_all(base).expect("temp dir should clean up");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn session_exists_and_delete_are_scoped_to_workspace_store() {
|
||||
// given
|
||||
let base = temp_dir();
|
||||
fs::create_dir_all(&base).expect("base dir should exist");
|
||||
let store = SessionStore::from_cwd(&base).expect("store should build");
|
||||
let session = persist_session_via_store(&store, "delete me");
|
||||
|
||||
// when
|
||||
assert!(
|
||||
managed_session_exists_for(&base, &session.session_id).expect("exists should run"),
|
||||
"persisted session should exist before deletion"
|
||||
);
|
||||
let deleted =
|
||||
delete_managed_session_for(&base, &session.session_id).expect("delete should succeed");
|
||||
|
||||
// then
|
||||
assert_eq!(deleted.id, session.session_id);
|
||||
assert!(!deleted.path.exists(), "session file should be removed");
|
||||
assert!(
|
||||
!managed_session_exists_for(&base, &session.session_id).expect("exists should run"),
|
||||
"deleted session should not exist"
|
||||
);
|
||||
fs::remove_dir_all(base).expect("temp dir should clean up");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn session_store_fork_stays_in_same_namespace() {
|
||||
// given
|
||||
|
||||
@@ -38,10 +38,38 @@ pub struct TaskPacket {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub worktree: Option<String>,
|
||||
pub branch_policy: String,
|
||||
/// Legacy verification commands kept for compatibility with existing task packets.
|
||||
#[serde(default)]
|
||||
pub acceptance_tests: Vec<String>,
|
||||
/// Human-readable acceptance criteria for the task objective.
|
||||
#[serde(default)]
|
||||
pub acceptance_criteria: Vec<String>,
|
||||
/// Files, directories, services, or other resources the task is allowed to touch.
|
||||
#[serde(default)]
|
||||
pub resources: Vec<TaskResource>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub model: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub provider: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub permission_profile: Option<String>,
|
||||
pub commit_policy: String,
|
||||
/// Legacy reporting contract kept for compatibility with existing task packets.
|
||||
pub reporting_contract: String,
|
||||
#[serde(default)]
|
||||
pub reporting_targets: Vec<String>,
|
||||
/// Legacy escalation policy kept for compatibility with existing task packets.
|
||||
pub escalation_policy: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub recovery_policy: Option<String>,
|
||||
#[serde(default)]
|
||||
pub verification_plan: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct TaskResource {
|
||||
pub kind: String,
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
@@ -91,16 +119,25 @@ pub fn validate_packet(packet: TaskPacket) -> Result<ValidatedPacket, TaskPacket
|
||||
validate_required("repo", &packet.repo, &mut errors);
|
||||
validate_required("branch_policy", &packet.branch_policy, &mut errors);
|
||||
validate_required("commit_policy", &packet.commit_policy, &mut errors);
|
||||
validate_required(
|
||||
"reporting_contract",
|
||||
&packet.reporting_contract,
|
||||
&mut errors,
|
||||
);
|
||||
validate_required("escalation_policy", &packet.escalation_policy, &mut errors);
|
||||
if packet.reporting_contract.trim().is_empty() && packet.reporting_targets.is_empty() {
|
||||
errors.push("reporting_contract or reporting_targets must not be empty".to_string());
|
||||
}
|
||||
if packet.escalation_policy.trim().is_empty()
|
||||
&& packet
|
||||
.recovery_policy
|
||||
.as_ref()
|
||||
.is_none_or(|policy| policy.trim().is_empty())
|
||||
{
|
||||
errors.push("escalation_policy or recovery_policy must not be empty".to_string());
|
||||
}
|
||||
|
||||
// Validate scope-specific requirements
|
||||
validate_scope_requirements(&packet, &mut errors);
|
||||
|
||||
if packet.acceptance_tests.is_empty() && packet.acceptance_criteria.is_empty() {
|
||||
errors.push("acceptance_tests or acceptance_criteria must not be empty".to_string());
|
||||
}
|
||||
|
||||
for (index, test) in packet.acceptance_tests.iter().enumerate() {
|
||||
if test.trim().is_empty() {
|
||||
errors.push(format!(
|
||||
@@ -109,6 +146,43 @@ pub fn validate_packet(packet: TaskPacket) -> Result<ValidatedPacket, TaskPacket
|
||||
}
|
||||
}
|
||||
|
||||
for (index, criterion) in packet.acceptance_criteria.iter().enumerate() {
|
||||
if criterion.trim().is_empty() {
|
||||
errors.push(format!(
|
||||
"acceptance_criteria contains an empty value at index {index}"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
for (index, resource) in packet.resources.iter().enumerate() {
|
||||
if resource.kind.trim().is_empty() || resource.value.trim().is_empty() {
|
||||
errors.push(format!(
|
||||
"resources contains an incomplete entry at index {index}"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
validate_optional("model", packet.model.as_deref(), &mut errors);
|
||||
validate_optional("provider", packet.provider.as_deref(), &mut errors);
|
||||
validate_optional(
|
||||
"permission_profile",
|
||||
packet.permission_profile.as_deref(),
|
||||
&mut errors,
|
||||
);
|
||||
validate_optional(
|
||||
"recovery_policy",
|
||||
packet.recovery_policy.as_deref(),
|
||||
&mut errors,
|
||||
);
|
||||
|
||||
for (index, step) in packet.verification_plan.iter().enumerate() {
|
||||
if step.trim().is_empty() {
|
||||
errors.push(format!(
|
||||
"verification_plan contains an empty value at index {index}"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if errors.is_empty() {
|
||||
Ok(ValidatedPacket(packet))
|
||||
} else {
|
||||
@@ -142,6 +216,12 @@ fn validate_required(field: &str, value: &str, errors: &mut Vec<String>) {
|
||||
}
|
||||
}
|
||||
|
||||
/// Records an error when an optional string field is present but blank.
///
/// Absent (`None`) values are acceptable; only `Some` values consisting
/// entirely of whitespace are rejected.
fn validate_optional(field: &str, value: Option<&str>, errors: &mut Vec<String>) {
    let Some(value) = value else { return };
    if value.trim().is_empty() {
        errors.push(format!("{field} must not be empty when present"));
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -158,9 +238,20 @@ mod tests {
|
||||
"cargo build --workspace".to_string(),
|
||||
"cargo test --workspace".to_string(),
|
||||
],
|
||||
acceptance_criteria: vec!["packet can launch without pane scraping".to_string()],
|
||||
resources: vec![TaskResource {
|
||||
kind: "file".to_string(),
|
||||
value: "rust/crates/runtime/src/task_packet.rs".to_string(),
|
||||
}],
|
||||
model: Some("gpt-5.5".to_string()),
|
||||
provider: Some("openai".to_string()),
|
||||
permission_profile: Some("workspace-write".to_string()),
|
||||
commit_policy: "single verified commit".to_string(),
|
||||
reporting_contract: "print build result, test result, commit sha".to_string(),
|
||||
reporting_targets: vec!["leader".to_string()],
|
||||
escalation_policy: "stop only on destructive ambiguity".to_string(),
|
||||
recovery_policy: Some("retry once then escalate".to_string()),
|
||||
verification_plan: vec!["cargo test -p runtime task_packet".to_string()],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,9 +274,20 @@ mod tests {
|
||||
repo: String::new(),
|
||||
branch_policy: "\t".to_string(),
|
||||
acceptance_tests: vec!["ok".to_string(), " ".to_string()],
|
||||
acceptance_criteria: vec![" ".to_string()],
|
||||
resources: vec![TaskResource {
|
||||
kind: " ".to_string(),
|
||||
value: "resource".to_string(),
|
||||
}],
|
||||
model: Some(" ".to_string()),
|
||||
provider: Some("openai".to_string()),
|
||||
permission_profile: Some("workspace-write".to_string()),
|
||||
commit_policy: String::new(),
|
||||
reporting_contract: String::new(),
|
||||
reporting_targets: Vec::new(),
|
||||
escalation_policy: String::new(),
|
||||
recovery_policy: None,
|
||||
verification_plan: vec![" ".to_string()],
|
||||
};
|
||||
|
||||
let error = validate_packet(packet).expect_err("packet should be rejected");
|
||||
@@ -202,6 +304,51 @@ mod tests {
|
||||
.contains(&"acceptance_tests contains an empty value at index 1".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn legacy_packet_json_deserializes_with_defaulted_cc2_fields() {
|
||||
let legacy = r#"{
|
||||
"objective": "Legacy packet",
|
||||
"scope": "workspace",
|
||||
"repo": "claw-code",
|
||||
"branch_policy": "origin/main only",
|
||||
"acceptance_tests": ["cargo test"],
|
||||
"commit_policy": "single commit",
|
||||
"reporting_contract": "report sha",
|
||||
"escalation_policy": "ask leader"
|
||||
}"#;
|
||||
|
||||
let packet: TaskPacket = serde_json::from_str(legacy).expect("legacy packet should load");
|
||||
|
||||
assert_eq!(packet.objective, "Legacy packet");
|
||||
assert!(packet.acceptance_criteria.is_empty());
|
||||
assert!(packet.resources.is_empty());
|
||||
assert_eq!(packet.model, None);
|
||||
validate_packet(packet).expect("legacy packet remains valid through aliases");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rich_cc2_packet_fields_roundtrip_and_validate() {
|
||||
let packet = sample_packet();
|
||||
let json = serde_json::to_value(&packet).expect("packet should serialize");
|
||||
|
||||
assert_eq!(
|
||||
json["acceptance_criteria"][0],
|
||||
"packet can launch without pane scraping"
|
||||
);
|
||||
assert_eq!(json["resources"][0]["kind"], "file");
|
||||
assert_eq!(json["model"], "gpt-5.5");
|
||||
assert_eq!(json["provider"], "openai");
|
||||
assert_eq!(json["permission_profile"], "workspace-write");
|
||||
assert_eq!(json["recovery_policy"], "retry once then escalate");
|
||||
assert_eq!(
|
||||
json["verification_plan"][0],
|
||||
"cargo test -p runtime task_packet"
|
||||
);
|
||||
|
||||
let roundtrip: TaskPacket = serde_json::from_value(json).expect("rich packet roundtrips");
|
||||
validate_packet(roundtrip).expect("rich packet validates");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialization_roundtrip_preserves_packet() {
|
||||
let packet = sample_packet();
|
||||
@@ -210,4 +357,96 @@ mod tests {
|
||||
serde_json::from_str(&serialized).expect("packet should deserialize");
|
||||
assert_eq!(deserialized, packet);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn legacy_packet_json_deserializes_with_defaults() {
|
||||
let legacy = r#"{
|
||||
"objective": "Ship legacy task packet",
|
||||
"scope": "module",
|
||||
"scope_path": "runtime/task system",
|
||||
"repo": "claw-code-parity",
|
||||
"worktree": "/tmp/wt-legacy",
|
||||
"branch_policy": "origin/main only",
|
||||
"acceptance_tests": ["cargo test --workspace"],
|
||||
"commit_policy": "single verified commit",
|
||||
"reporting_contract": "print build result, test result, commit sha",
|
||||
"escalation_policy": "manual escalation"
|
||||
}"#;
|
||||
|
||||
let packet: TaskPacket = serde_json::from_str(legacy).expect("legacy packet should parse");
|
||||
|
||||
assert_eq!(packet.acceptance_criteria, Vec::<String>::new());
|
||||
assert_eq!(packet.permission_profile, None);
|
||||
assert_eq!(packet.model, None);
|
||||
assert_eq!(packet.provider, None);
|
||||
assert_eq!(packet.recovery_policy, None);
|
||||
assert_eq!(packet.commit_policy, "single verified commit");
|
||||
assert_eq!(
|
||||
packet.reporting_contract,
|
||||
"print build result, test result, commit sha"
|
||||
);
|
||||
assert_eq!(packet.escalation_policy, "manual escalation");
|
||||
validate_packet(packet).expect("legacy packet should remain valid");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn new_schema_fields_validate_without_legacy_acceptance_tests() {
|
||||
let mut packet = sample_packet();
|
||||
packet.acceptance_tests.clear();
|
||||
packet.reporting_contract.clear();
|
||||
packet.escalation_policy.clear();
|
||||
|
||||
validate_packet(packet).expect("new schema fields should be sufficient");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn scoped_packets_require_scope_path() {
|
||||
for scope in [TaskScope::Module, TaskScope::SingleFile, TaskScope::Custom] {
|
||||
let mut packet = sample_packet();
|
||||
packet.scope = scope;
|
||||
packet.scope_path = Some(" ".to_string());
|
||||
|
||||
let error = validate_packet(packet).expect_err("scoped packet should require path");
|
||||
assert!(error
|
||||
.errors()
|
||||
.contains(&format!("scope_path is required for scope '{scope}'")));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn modern_required_groups_report_missing_fallbacks() {
|
||||
let mut packet = sample_packet();
|
||||
packet.acceptance_criteria.clear();
|
||||
packet.acceptance_tests.clear();
|
||||
packet.recovery_policy = None;
|
||||
packet.escalation_policy.clear();
|
||||
packet.reporting_targets.clear();
|
||||
packet.reporting_contract.clear();
|
||||
|
||||
let error = validate_packet(packet).expect_err("packet should require task policies");
|
||||
|
||||
for expected in [
|
||||
"acceptance_tests or acceptance_criteria must not be empty",
|
||||
"escalation_policy or recovery_policy must not be empty",
|
||||
"reporting_contract or reporting_targets must not be empty",
|
||||
] {
|
||||
assert!(error.errors().contains(&expected.to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn permission_profile_serializes_as_optional_string() {
|
||||
let mut packet = sample_packet();
|
||||
packet.permission_profile = Some("danger-full-access".to_string());
|
||||
|
||||
let json = serde_json::to_value(&packet).expect("packet should serialize");
|
||||
assert_eq!(json["permission_profile"], "danger-full-access");
|
||||
|
||||
let roundtrip: TaskPacket =
|
||||
serde_json::from_value(json).expect("packet should deserialize");
|
||||
assert_eq!(
|
||||
roundtrip.permission_profile.as_deref(),
|
||||
Some("danger-full-access")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ use crate::{validate_packet, TaskPacket, TaskPacketValidationError};
|
||||
pub enum TaskStatus {
|
||||
Created,
|
||||
Running,
|
||||
Blocked,
|
||||
Completed,
|
||||
Failed,
|
||||
Stopped,
|
||||
@@ -24,6 +25,7 @@ impl std::fmt::Display for TaskStatus {
|
||||
match self {
|
||||
Self::Created => write!(f, "created"),
|
||||
Self::Running => write!(f, "running"),
|
||||
Self::Blocked => write!(f, "blocked"),
|
||||
Self::Completed => write!(f, "completed"),
|
||||
Self::Failed => write!(f, "failed"),
|
||||
Self::Stopped => write!(f, "stopped"),
|
||||
@@ -43,6 +45,54 @@ pub struct Task {
|
||||
pub messages: Vec<TaskMessage>,
|
||||
pub output: String,
|
||||
pub team_id: Option<String>,
|
||||
pub heartbeat: Option<LaneHeartbeat>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum LaneFreshness {
|
||||
Healthy,
|
||||
Stalled,
|
||||
TransportDead,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct LaneHeartbeat {
|
||||
pub observed_at: u64,
|
||||
pub transport_alive: bool,
|
||||
pub status: String,
|
||||
}
|
||||
|
||||
impl LaneHeartbeat {
|
||||
#[must_use]
|
||||
pub fn freshness_at(&self, now: u64, stalled_after_secs: u64) -> LaneFreshness {
|
||||
if !self.transport_alive {
|
||||
return LaneFreshness::TransportDead;
|
||||
}
|
||||
if now.saturating_sub(self.observed_at) > stalled_after_secs {
|
||||
return LaneFreshness::Stalled;
|
||||
}
|
||||
LaneFreshness::Healthy
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct LaneBoardEntry {
|
||||
pub task_id: String,
|
||||
pub prompt: String,
|
||||
pub status: TaskStatus,
|
||||
pub team_id: Option<String>,
|
||||
pub heartbeat: Option<LaneHeartbeat>,
|
||||
pub freshness: LaneFreshness,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct LaneBoard {
|
||||
pub generated_at: u64,
|
||||
pub active: Vec<LaneBoardEntry>,
|
||||
pub blocked: Vec<LaneBoardEntry>,
|
||||
pub finished: Vec<LaneBoardEntry>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@@ -114,6 +164,7 @@ impl TaskRegistry {
|
||||
messages: Vec::new(),
|
||||
output: String::new(),
|
||||
team_id: None,
|
||||
heartbeat: None,
|
||||
};
|
||||
inner.tasks.insert(task_id, task.clone());
|
||||
task
|
||||
@@ -134,6 +185,67 @@ impl TaskRegistry {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn update_heartbeat(&self, task_id: &str, heartbeat: LaneHeartbeat) -> Result<(), String> {
|
||||
let mut inner = self.inner.lock().expect("registry lock poisoned");
|
||||
let task = inner
|
||||
.tasks
|
||||
.get_mut(task_id)
|
||||
.ok_or_else(|| format!("task not found: {task_id}"))?;
|
||||
task.heartbeat = Some(heartbeat);
|
||||
task.updated_at = now_secs();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn lane_board(&self, stalled_after_secs: u64) -> LaneBoard {
|
||||
let now = now_secs();
|
||||
self.lane_board_at(now, stalled_after_secs)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn lane_board_at(&self, now: u64, stalled_after_secs: u64) -> LaneBoard {
|
||||
let inner = self.inner.lock().expect("registry lock poisoned");
|
||||
let mut board = LaneBoard {
|
||||
generated_at: now,
|
||||
active: Vec::new(),
|
||||
blocked: Vec::new(),
|
||||
finished: Vec::new(),
|
||||
};
|
||||
|
||||
for task in inner.tasks.values() {
|
||||
let freshness = task
|
||||
.heartbeat
|
||||
.as_ref()
|
||||
.map_or(LaneFreshness::Unknown, |heartbeat| {
|
||||
heartbeat.freshness_at(now, stalled_after_secs)
|
||||
});
|
||||
let entry = LaneBoardEntry {
|
||||
task_id: task.task_id.clone(),
|
||||
prompt: task.prompt.clone(),
|
||||
status: task.status,
|
||||
team_id: task.team_id.clone(),
|
||||
heartbeat: task.heartbeat.clone(),
|
||||
freshness,
|
||||
};
|
||||
|
||||
match task.status {
|
||||
TaskStatus::Running | TaskStatus::Created => board.active.push(entry),
|
||||
TaskStatus::Blocked => board.blocked.push(entry),
|
||||
TaskStatus::Completed | TaskStatus::Failed | TaskStatus::Stopped => {
|
||||
board.finished.push(entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
board
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn lane_status_json_at(&self, now: u64, stalled_after_secs: u64) -> serde_json::Value {
|
||||
serde_json::to_value(self.lane_board_at(now, stalled_after_secs))
|
||||
.expect("lane board should serialize")
|
||||
}
|
||||
|
||||
pub fn stop(&self, task_id: &str) -> Result<Task, String> {
|
||||
let mut inner = self.inner.lock().expect("registry lock poisoned");
|
||||
let task = inner
|
||||
@@ -260,9 +372,20 @@ mod tests {
|
||||
repo: "claw-code-parity".to_string(),
|
||||
branch_policy: "origin/main only".to_string(),
|
||||
acceptance_tests: vec!["cargo test --workspace".to_string()],
|
||||
acceptance_criteria: vec!["task is inspectable".to_string()],
|
||||
resources: vec![crate::TaskResource {
|
||||
kind: "module".to_string(),
|
||||
value: "runtime/task system".to_string(),
|
||||
}],
|
||||
model: Some("gpt-5.5".to_string()),
|
||||
provider: Some("openai".to_string()),
|
||||
permission_profile: Some("workspace-write".to_string()),
|
||||
commit_policy: "single commit".to_string(),
|
||||
reporting_contract: "print commit sha".to_string(),
|
||||
reporting_targets: vec!["leader".to_string()],
|
||||
escalation_policy: "manual escalation".to_string(),
|
||||
recovery_policy: Some("retry once".to_string()),
|
||||
verification_plan: vec!["cargo test --workspace".to_string()],
|
||||
};
|
||||
|
||||
let task = registry
|
||||
@@ -340,6 +463,68 @@ mod tests {
|
||||
assert_eq!(output, "line 1\nline 2\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lane_board_groups_active_blocked_finished_and_reports_freshness() {
|
||||
let registry = TaskRegistry::new();
|
||||
let active = registry.create("active", None);
|
||||
let blocked = registry.create("blocked", None);
|
||||
let finished = registry.create("finished", None);
|
||||
|
||||
registry
|
||||
.set_status(&active.task_id, TaskStatus::Running)
|
||||
.expect("running status");
|
||||
registry
|
||||
.set_status(&blocked.task_id, TaskStatus::Blocked)
|
||||
.expect("blocked status");
|
||||
registry
|
||||
.set_status(&finished.task_id, TaskStatus::Completed)
|
||||
.expect("completed status");
|
||||
registry
|
||||
.update_heartbeat(
|
||||
&active.task_id,
|
||||
LaneHeartbeat {
|
||||
observed_at: 100,
|
||||
transport_alive: true,
|
||||
status: "running".to_string(),
|
||||
},
|
||||
)
|
||||
.expect("heartbeat");
|
||||
registry
|
||||
.update_heartbeat(
|
||||
&blocked.task_id,
|
||||
LaneHeartbeat {
|
||||
observed_at: 10,
|
||||
transport_alive: true,
|
||||
status: "waiting".to_string(),
|
||||
},
|
||||
)
|
||||
.expect("heartbeat");
|
||||
registry
|
||||
.update_heartbeat(
|
||||
&finished.task_id,
|
||||
LaneHeartbeat {
|
||||
observed_at: 100,
|
||||
transport_alive: false,
|
||||
status: "done".to_string(),
|
||||
},
|
||||
)
|
||||
.expect("heartbeat");
|
||||
|
||||
let board = registry.lane_board_at(110, 30);
|
||||
|
||||
assert_eq!(board.active.len(), 1);
|
||||
assert_eq!(board.active[0].freshness, LaneFreshness::Healthy);
|
||||
assert_eq!(board.blocked.len(), 1);
|
||||
assert_eq!(board.blocked[0].freshness, LaneFreshness::Stalled);
|
||||
assert_eq!(board.finished.len(), 1);
|
||||
assert_eq!(board.finished[0].freshness, LaneFreshness::TransportDead);
|
||||
|
||||
let json = registry.lane_status_json_at(110, 30);
|
||||
assert_eq!(json["active"][0]["status"], "running");
|
||||
assert_eq!(json["blocked"][0]["freshness"], "stalled");
|
||||
assert_eq!(json["finished"][0]["freshness"], "transport_dead");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn assigns_team_and_removes_task() {
|
||||
let registry = TaskRegistry::new();
|
||||
@@ -375,6 +560,7 @@ mod tests {
|
||||
let cases = [
|
||||
(TaskStatus::Created, "created"),
|
||||
(TaskStatus::Running, "running"),
|
||||
(TaskStatus::Blocked, "blocked"),
|
||||
(TaskStatus::Completed, "completed"),
|
||||
(TaskStatus::Failed, "failed"),
|
||||
(TaskStatus::Stopped, "stopped"),
|
||||
@@ -392,6 +578,7 @@ mod tests {
|
||||
vec![
|
||||
("created".to_string(), "created"),
|
||||
("running".to_string(), "running"),
|
||||
("blocked".to_string(), "blocked"),
|
||||
("completed".to_string(), "completed"),
|
||||
("failed".to_string(), "failed"),
|
||||
("stopped".to_string(), "stopped"),
|
||||
@@ -478,6 +665,7 @@ mod tests {
|
||||
assert!(task.messages.is_empty());
|
||||
assert!(task.output.is_empty());
|
||||
assert_eq!(task.team_id, None);
|
||||
assert_eq!(task.heartbeat, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -122,13 +122,37 @@ pub enum StartupFailureClassification {
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct StartupHealthSummary {
|
||||
/// Whether this subsystem appeared healthy at timeout.
|
||||
pub healthy: bool,
|
||||
/// Stable placeholder/source string until deeper transport and MCP probes are wired in.
|
||||
pub summary: String,
|
||||
}
|
||||
|
||||
impl StartupHealthSummary {
|
||||
fn observed(name: &str, healthy: bool) -> Self {
|
||||
let status = if healthy { "healthy" } else { "unhealthy" };
|
||||
Self {
|
||||
healthy,
|
||||
summary: format!("{name}_{status}_placeholder"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Evidence bundle collected when worker startup times out without clear evidence.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct StartupEvidenceBundle {
|
||||
/// Last known worker lifecycle state before timeout
|
||||
pub last_lifecycle_state: WorkerStatus,
|
||||
/// Timestamp of the last lifecycle state transition, unix epoch seconds
|
||||
pub last_lifecycle_at: u64,
|
||||
/// The pane/command that was being executed
|
||||
pub pane_command: String,
|
||||
/// Timestamp when the pane/command snapshot was observed, unix epoch seconds
|
||||
pub pane_observed_at: u64,
|
||||
/// Timestamp when the worker command was started, unix epoch seconds
|
||||
pub command_started_at: u64,
|
||||
/// Timestamp when prompt was sent (if any), unix epoch seconds
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub prompt_sent_at: Option<u64>,
|
||||
@@ -146,8 +170,12 @@ pub struct StartupEvidenceBundle {
|
||||
pub tool_permission_allow_scope: Option<ToolPermissionAllowScope>,
|
||||
/// Transport health summary (true = healthy/responsive)
|
||||
pub transport_healthy: bool,
|
||||
/// Typed transport health placeholder for future concrete probes
|
||||
pub transport_health: StartupHealthSummary,
|
||||
/// MCP health summary (true = all servers healthy)
|
||||
pub mcp_healthy: bool,
|
||||
/// Typed MCP health placeholder for future concrete probes
|
||||
pub mcp_health: StartupHealthSummary,
|
||||
/// Seconds since worker creation
|
||||
pub elapsed_seconds: u64,
|
||||
}
|
||||
@@ -225,6 +253,7 @@ pub struct Worker {
|
||||
pub auto_recover_prompt_misdelivery: bool,
|
||||
pub prompt_delivery_attempts: u32,
|
||||
pub prompt_in_flight: bool,
|
||||
pub prompt_sent_at: Option<u64>,
|
||||
pub last_prompt: Option<String>,
|
||||
pub expected_receipt: Option<WorkerTaskReceipt>,
|
||||
pub replay_prompt: Option<String>,
|
||||
@@ -274,6 +303,7 @@ impl WorkerRegistry {
|
||||
auto_recover_prompt_misdelivery,
|
||||
prompt_delivery_attempts: 0,
|
||||
prompt_in_flight: false,
|
||||
prompt_sent_at: None,
|
||||
last_prompt: None,
|
||||
expected_receipt: None,
|
||||
replay_prompt: None,
|
||||
@@ -528,6 +558,7 @@ impl WorkerRegistry {
|
||||
|
||||
worker.prompt_delivery_attempts += 1;
|
||||
worker.prompt_in_flight = true;
|
||||
worker.prompt_sent_at = Some(now_secs());
|
||||
worker.last_prompt = Some(next_prompt.clone());
|
||||
worker.expected_receipt = task_receipt;
|
||||
worker.replay_prompt = None;
|
||||
@@ -579,6 +610,7 @@ impl WorkerRegistry {
|
||||
worker.last_error = None;
|
||||
worker.prompt_delivery_attempts = 0;
|
||||
worker.prompt_in_flight = false;
|
||||
worker.prompt_sent_at = None;
|
||||
push_event(
|
||||
worker,
|
||||
WorkerEventKind::Restarted,
|
||||
@@ -696,12 +728,11 @@ impl WorkerRegistry {
|
||||
// Build evidence bundle
|
||||
let evidence = StartupEvidenceBundle {
|
||||
last_lifecycle_state: worker.status,
|
||||
last_lifecycle_at: worker.updated_at,
|
||||
pane_command: pane_command.to_string(),
|
||||
prompt_sent_at: if worker.prompt_delivery_attempts > 0 {
|
||||
Some(worker.updated_at)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
pane_observed_at: now,
|
||||
command_started_at: worker.created_at,
|
||||
prompt_sent_at: worker.prompt_sent_at,
|
||||
prompt_acceptance_state: worker.status == WorkerStatus::Running
|
||||
&& !worker.prompt_in_flight,
|
||||
trust_prompt_detected: worker
|
||||
@@ -716,7 +747,9 @@ impl WorkerRegistry {
|
||||
.map(|event| now.saturating_sub(event.timestamp)),
|
||||
tool_permission_allow_scope,
|
||||
transport_healthy,
|
||||
transport_health: StartupHealthSummary::observed("transport", transport_healthy),
|
||||
mcp_healthy,
|
||||
mcp_health: StartupHealthSummary::observed("mcp", mcp_healthy),
|
||||
elapsed_seconds: elapsed,
|
||||
};
|
||||
|
||||
@@ -1840,8 +1873,16 @@ mod tests {
|
||||
"last state should be spawning"
|
||||
);
|
||||
assert_eq!(evidence.pane_command, "cargo test");
|
||||
assert!(evidence.command_started_at <= evidence.pane_observed_at);
|
||||
assert!(evidence.last_lifecycle_at <= evidence.pane_observed_at);
|
||||
assert!(!evidence.transport_healthy);
|
||||
assert!(!evidence.transport_health.healthy);
|
||||
assert!(evidence
|
||||
.transport_health
|
||||
.summary
|
||||
.contains("transport_unhealthy"));
|
||||
assert!(evidence.mcp_healthy);
|
||||
assert!(evidence.mcp_health.healthy);
|
||||
assert_eq!(*classification, StartupFailureClassification::TransportDead);
|
||||
}
|
||||
_ => panic!(
|
||||
@@ -1932,11 +1973,53 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn startup_timeout_preserves_original_prompt_sent_timestamp() {
|
||||
let registry = WorkerRegistry::new();
|
||||
let worker = registry.create("/tmp/repo-prompt-timestamp", &[], true);
|
||||
|
||||
registry
|
||||
.observe(&worker.worker_id, "Ready for input\n>")
|
||||
.expect("ready observe should succeed");
|
||||
let prompted = registry
|
||||
.send_prompt(
|
||||
&worker.worker_id,
|
||||
Some("Run timestamp-sensitive work"),
|
||||
None,
|
||||
)
|
||||
.expect("prompt send should succeed");
|
||||
let sent_at = prompted
|
||||
.prompt_sent_at
|
||||
.expect("prompt send should record a prompt timestamp");
|
||||
|
||||
let timed_out = registry
|
||||
.observe_startup_timeout(&worker.worker_id, "claw worker", true, true)
|
||||
.expect("startup timeout observe should succeed");
|
||||
|
||||
let event = timed_out
|
||||
.events
|
||||
.iter()
|
||||
.find(|e| e.kind == WorkerEventKind::StartupNoEvidence)
|
||||
.expect("startup no evidence event should exist");
|
||||
|
||||
match event.payload.as_ref() {
|
||||
Some(WorkerEventPayload::StartupNoEvidence { evidence, .. }) => {
|
||||
assert_eq!(evidence.prompt_sent_at, Some(sent_at));
|
||||
assert!(evidence.last_lifecycle_at <= evidence.pane_observed_at);
|
||||
assert!(evidence.command_started_at <= sent_at);
|
||||
}
|
||||
_ => panic!("expected StartupNoEvidence payload"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn startup_evidence_bundle_serializes_correctly() {
|
||||
let bundle = StartupEvidenceBundle {
|
||||
last_lifecycle_state: WorkerStatus::Running,
|
||||
last_lifecycle_at: 1_234_567_889,
|
||||
pane_command: "test command".to_string(),
|
||||
pane_observed_at: 1_234_567_891,
|
||||
command_started_at: 1_234_567_800,
|
||||
prompt_sent_at: Some(1_234_567_890),
|
||||
prompt_acceptance_state: false,
|
||||
trust_prompt_detected: true,
|
||||
@@ -1944,7 +2027,9 @@ mod tests {
|
||||
tool_permission_prompt_age_seconds: None,
|
||||
tool_permission_allow_scope: None,
|
||||
transport_healthy: true,
|
||||
transport_health: StartupHealthSummary::observed("transport", true),
|
||||
mcp_healthy: false,
|
||||
mcp_health: StartupHealthSummary::observed("mcp", false),
|
||||
elapsed_seconds: 60,
|
||||
};
|
||||
|
||||
@@ -1953,8 +2038,13 @@ mod tests {
|
||||
assert!(json.contains("\"pane_command\""));
|
||||
assert!(json.contains("\"prompt_sent_at\":1234567890"));
|
||||
assert!(json.contains("\"trust_prompt_detected\":true"));
|
||||
assert!(json.contains("\"last_lifecycle_at\":1234567889"));
|
||||
assert!(json.contains("\"pane_observed_at\":1234567891"));
|
||||
assert!(json.contains("\"command_started_at\":1234567800"));
|
||||
assert!(json.contains("\"transport_healthy\":true"));
|
||||
assert!(json.contains("\"transport_health\""));
|
||||
assert!(json.contains("\"mcp_healthy\":false"));
|
||||
assert!(json.contains("\"mcp_health\""));
|
||||
|
||||
let deserialized: StartupEvidenceBundle =
|
||||
serde_json::from_str(&json).expect("should deserialize");
|
||||
@@ -1966,7 +2056,10 @@ mod tests {
|
||||
fn classify_startup_failure_detects_transport_dead() {
|
||||
let evidence = StartupEvidenceBundle {
|
||||
last_lifecycle_state: WorkerStatus::Spawning,
|
||||
last_lifecycle_at: 10,
|
||||
pane_command: "test".to_string(),
|
||||
pane_observed_at: 40,
|
||||
command_started_at: 1,
|
||||
prompt_sent_at: None,
|
||||
prompt_acceptance_state: false,
|
||||
trust_prompt_detected: false,
|
||||
@@ -1974,7 +2067,9 @@ mod tests {
|
||||
tool_permission_prompt_age_seconds: None,
|
||||
tool_permission_allow_scope: None,
|
||||
transport_healthy: false,
|
||||
transport_health: StartupHealthSummary::observed("transport", false),
|
||||
mcp_healthy: true,
|
||||
mcp_health: StartupHealthSummary::observed("mcp", true),
|
||||
elapsed_seconds: 30,
|
||||
};
|
||||
|
||||
@@ -1986,7 +2081,10 @@ mod tests {
|
||||
fn classify_startup_failure_defaults_to_unknown() {
|
||||
let evidence = StartupEvidenceBundle {
|
||||
last_lifecycle_state: WorkerStatus::Spawning,
|
||||
last_lifecycle_at: 10,
|
||||
pane_command: "test".to_string(),
|
||||
pane_observed_at: 40,
|
||||
command_started_at: 1,
|
||||
prompt_sent_at: None,
|
||||
prompt_acceptance_state: false,
|
||||
trust_prompt_detected: false,
|
||||
@@ -1994,7 +2092,9 @@ mod tests {
|
||||
tool_permission_prompt_age_seconds: None,
|
||||
tool_permission_allow_scope: None,
|
||||
transport_healthy: true,
|
||||
transport_health: StartupHealthSummary::observed("transport", true),
|
||||
mcp_healthy: true,
|
||||
mcp_health: StartupHealthSummary::observed("mcp", true),
|
||||
elapsed_seconds: 10,
|
||||
};
|
||||
|
||||
@@ -2002,13 +2102,44 @@ mod tests {
|
||||
assert_eq!(classification, StartupFailureClassification::Unknown);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn classify_startup_failure_detects_prompt_misdelivery_after_timeout() {
|
||||
let evidence = StartupEvidenceBundle {
|
||||
last_lifecycle_state: WorkerStatus::ReadyForPrompt,
|
||||
last_lifecycle_at: 10,
|
||||
pane_command: "test".to_string(),
|
||||
pane_observed_at: 45,
|
||||
command_started_at: 1,
|
||||
prompt_sent_at: Some(10),
|
||||
prompt_acceptance_state: false,
|
||||
trust_prompt_detected: false,
|
||||
tool_permission_prompt_detected: false,
|
||||
tool_permission_prompt_age_seconds: None,
|
||||
tool_permission_allow_scope: None,
|
||||
transport_healthy: true,
|
||||
transport_health: StartupHealthSummary::observed("transport", true),
|
||||
mcp_healthy: true,
|
||||
mcp_health: StartupHealthSummary::observed("mcp", true),
|
||||
elapsed_seconds: 31,
|
||||
};
|
||||
|
||||
let classification = classify_startup_failure(&evidence);
|
||||
assert_eq!(
|
||||
classification,
|
||||
StartupFailureClassification::PromptMisdelivery
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn classify_startup_failure_detects_worker_crashed() {
|
||||
// Worker crashed scenario: transport healthy but MCP unhealthy
|
||||
// Don't have prompt in flight (no prompt_sent_at) to avoid matching PromptAcceptanceTimeout
|
||||
let evidence = StartupEvidenceBundle {
|
||||
last_lifecycle_state: WorkerStatus::Spawning,
|
||||
last_lifecycle_at: 10,
|
||||
pane_command: "test".to_string(),
|
||||
pane_observed_at: 40,
|
||||
command_started_at: 1,
|
||||
prompt_sent_at: None, // No prompt sent yet
|
||||
prompt_acceptance_state: false,
|
||||
trust_prompt_detected: false,
|
||||
@@ -2016,7 +2147,9 @@ mod tests {
|
||||
tool_permission_prompt_age_seconds: None,
|
||||
tool_permission_allow_scope: None,
|
||||
transport_healthy: true,
|
||||
mcp_healthy: false, // MCP unhealthy but transport healthy suggests crash
|
||||
transport_health: StartupHealthSummary::observed("transport", true),
|
||||
mcp_healthy: false,
|
||||
mcp_health: StartupHealthSummary::observed("mcp", false), // MCP unhealthy but transport healthy suggests crash
|
||||
elapsed_seconds: 45,
|
||||
};
|
||||
|
||||
|
||||
81
rust/crates/runtime/tests/fixtures/g004_contract_bundle.valid.json
vendored
Normal file
81
rust/crates/runtime/tests/fixtures/g004_contract_bundle.valid.json
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
{
|
||||
"schemaVersion": "g004.contract.bundle.v1",
|
||||
"laneEvents": [
|
||||
{
|
||||
"event": "lane.started",
|
||||
"status": "running",
|
||||
"emittedAt": "2026-05-14T00:00:00Z",
|
||||
"metadata": {
|
||||
"seq": 1,
|
||||
"provenance": "live_lane",
|
||||
"emitterIdentity": "worker-1",
|
||||
"environmentLabel": "team-g004"
|
||||
}
|
||||
},
|
||||
{
|
||||
"event": "lane.finished",
|
||||
"status": "completed",
|
||||
"emittedAt": "2026-05-14T00:00:10Z",
|
||||
"metadata": {
|
||||
"seq": 2,
|
||||
"provenance": "live_lane",
|
||||
"emitterIdentity": "worker-1",
|
||||
"environmentLabel": "team-g004",
|
||||
"eventFingerprint": "terminal-fp-001"
|
||||
}
|
||||
}
|
||||
],
|
||||
"reports": [
|
||||
{
|
||||
"schemaVersion": "g004.report.v1",
|
||||
"reportId": "report-g004-fixture",
|
||||
"identity": { "contentHash": "sha256:report-content" },
|
||||
"projection": { "provenance": "runtime.event_projection.v1" },
|
||||
"redaction": { "provenance": "runtime.redaction_policy.v1" },
|
||||
"consumerCapabilities": ["facts", "field_deltas", "redaction_provenance"],
|
||||
"findings": [
|
||||
{
|
||||
"kind": "fact",
|
||||
"confidence": "high",
|
||||
"statement": "lane event reached terminal state"
|
||||
},
|
||||
{
|
||||
"kind": "hypothesis",
|
||||
"confidence": "medium",
|
||||
"statement": "consumer can reconcile the terminal fingerprint"
|
||||
},
|
||||
{
|
||||
"kind": "negative_evidence",
|
||||
"confidence": "high",
|
||||
"statement": "no duplicate terminal event appears in this fixture"
|
||||
}
|
||||
],
|
||||
"fieldDeltas": [
|
||||
{
|
||||
"field": "/laneEvents/1/status",
|
||||
"previousHash": "sha256:running",
|
||||
"currentHash": "sha256:completed",
|
||||
"attribution": "worker-1 terminal reconciliation"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"approvalTokens": [
|
||||
{
|
||||
"tokenId": "approval-token-fixture",
|
||||
"owner": "leader-fixed",
|
||||
"scope": "g004.contract.bundle.fixture",
|
||||
"issuedAt": "2026-05-14T00:00:01Z",
|
||||
"oneTimeUse": true,
|
||||
"replayPreventionNonce": "nonce-fixture-001",
|
||||
"delegationChain": [
|
||||
{
|
||||
"from": "leader-fixed",
|
||||
"to": "worker-3",
|
||||
"action": "validate-g004-contract-fixture",
|
||||
"at": "2026-05-14T00:00:02Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
11
rust/crates/runtime/tests/fixtures/report_schema_v1/README.md
vendored
Normal file
11
rust/crates/runtime/tests/fixtures/report_schema_v1/README.md
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Report schema v1 fixture set
|
||||
|
||||
Validated by `cargo test -p runtime report_schema -- --nocapture`.
|
||||
|
||||
The in-code fixture in `runtime::report_schema::tests::fixture_report` covers:
|
||||
- fact / hypothesis / confidence labels
|
||||
- negative evidence with checked surfaces and query window
|
||||
- field-level delta attribution
|
||||
- canonical report id plus content hash
|
||||
- deterministic projection/redaction provenance
|
||||
- consumer capability negotiation and downgraded projections
|
||||
80
rust/crates/runtime/tests/g004_conformance.rs
Normal file
80
rust/crates/runtime/tests/g004_conformance.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use runtime::g004_conformance::{is_g004_contract_bundle_valid, validate_g004_contract_bundle};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
fn valid_bundle() -> Value {
|
||||
serde_json::from_str(include_str!("fixtures/g004_contract_bundle.valid.json"))
|
||||
.expect("valid fixture JSON should parse")
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn valid_g004_contract_bundle_fixture_passes_conformance() {
|
||||
let fixture = valid_bundle();
|
||||
|
||||
let errors = validate_g004_contract_bundle(&fixture);
|
||||
|
||||
assert!(
|
||||
errors.is_empty(),
|
||||
"unexpected conformance errors: {errors:?}"
|
||||
);
|
||||
assert!(is_g004_contract_bundle_valid(&fixture));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn g004_conformance_reports_machine_readable_paths_for_contract_gaps() {
|
||||
let invalid = json!({
|
||||
"schemaVersion": "g004.contract.bundle.v1",
|
||||
"laneEvents": [
|
||||
{
|
||||
"event": "lane.finished",
|
||||
"status": "completed",
|
||||
"emittedAt": "2026-05-14T00:00:10Z",
|
||||
"metadata": {
|
||||
"seq": 1,
|
||||
"provenance": "live_lane",
|
||||
"emitterIdentity": "worker-1",
|
||||
"environmentLabel": "team-g004"
|
||||
}
|
||||
}
|
||||
],
|
||||
"reports": [
|
||||
{
|
||||
"schemaVersion": "g004.report.v1",
|
||||
"reportId": "report-with-gaps",
|
||||
"identity": { "contentHash": "sha256:report-content" },
|
||||
"projection": { "provenance": "runtime.event_projection.v1" },
|
||||
"redaction": { "provenance": "runtime.redaction_policy.v1" },
|
||||
"consumerCapabilities": [],
|
||||
"findings": [
|
||||
{
|
||||
"kind": "guess",
|
||||
"confidence": "certain",
|
||||
"statement": "bad labels should be rejected"
|
||||
}
|
||||
],
|
||||
"fieldDeltas": []
|
||||
}
|
||||
],
|
||||
"approvalTokens": [
|
||||
{
|
||||
"tokenId": "approval-token-fixture",
|
||||
"owner": "leader-fixed",
|
||||
"scope": "g004.contract.bundle.fixture",
|
||||
"issuedAt": "2026-05-14T00:00:01Z",
|
||||
"oneTimeUse": false,
|
||||
"replayPreventionNonce": "nonce-fixture-001",
|
||||
"delegationChain": []
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
let errors = validate_g004_contract_bundle(&invalid);
|
||||
let paths: Vec<&str> = errors.iter().map(|error| error.path.as_str()).collect();
|
||||
|
||||
assert!(paths.contains(&"/laneEvents/0/metadata/eventFingerprint"));
|
||||
assert!(paths.contains(&"/reports/0/consumerCapabilities"));
|
||||
assert!(paths.contains(&"/reports/0/findings/0/kind"));
|
||||
assert!(paths.contains(&"/reports/0/findings/0/confidence"));
|
||||
assert!(paths.contains(&"/reports/0/fieldDeltas"));
|
||||
assert!(paths.contains(&"/approvalTokens/0/oneTimeUse"));
|
||||
assert!(paths.contains(&"/approvalTokens/0/delegationChain"));
|
||||
}
|
||||
@@ -22,7 +22,7 @@ fn stale_branch_detection_flows_into_policy_engine() {
|
||||
let stale_context = LaneContext::new(
|
||||
"stale-lane",
|
||||
0,
|
||||
Duration::from_secs(2 * 60 * 60), // 2 hours stale
|
||||
Duration::from_hours(2), // 2 hours stale
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Pending,
|
||||
DiffScope::Full,
|
||||
@@ -49,7 +49,7 @@ fn fresh_branch_does_not_trigger_stale_policy() {
|
||||
let fresh_context = LaneContext::new(
|
||||
"fresh-lane",
|
||||
0,
|
||||
Duration::from_secs(30 * 60), // 30 min stale — under 1 hour threshold
|
||||
Duration::from_mins(30), // 30 min stale — under 1 hour threshold
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Pending,
|
||||
DiffScope::Full,
|
||||
@@ -96,9 +96,7 @@ fn green_contract_unsatisfied_blocks_merge() {
|
||||
false,
|
||||
);
|
||||
|
||||
// This is a conceptual test — we need a way to express "requires workspace green"
|
||||
// Currently LaneContext has raw green_level: u8, not a contract
|
||||
// For now we just verify the policy condition works
|
||||
// The context has a test level but lacks the full green contract, so merge stays blocked.
|
||||
let engine = PolicyEngine::new(vec![PolicyRule::new(
|
||||
"workspace-green-required",
|
||||
PolicyCondition::GreenAt { level: 3 }, // GreenLevel::Workspace
|
||||
@@ -212,8 +210,8 @@ fn end_to_end_stale_lane_gets_merge_forward_action() {
|
||||
// when: build context and evaluate policy
|
||||
let context = LaneContext::new(
|
||||
"lane-9411",
|
||||
3, // Workspace green
|
||||
Duration::from_secs(5 * 60 * 60), // 5 hours stale, definitely over threshold
|
||||
3, // Workspace green
|
||||
Duration::from_hours(5), // 5 hours stale, definitely over threshold
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
@@ -261,13 +259,14 @@ fn end_to_end_stale_lane_gets_merge_forward_action() {
|
||||
fn fresh_approved_lane_gets_merge_action() {
|
||||
let context = LaneContext::new(
|
||||
"fresh-approved-lane",
|
||||
3, // Workspace green
|
||||
Duration::from_secs(30 * 60), // 30 min — under 1 hour threshold = fresh
|
||||
3, // Workspace green
|
||||
Duration::from_mins(30), // 30 min — under 1 hour threshold = fresh
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
);
|
||||
)
|
||||
.with_green_contract_satisfied(true);
|
||||
|
||||
let engine = PolicyEngine::new(vec![PolicyRule::new(
|
||||
"merge-if-green-approved-not-stale",
|
||||
@@ -347,7 +346,7 @@ fn worker_provider_failure_flows_through_recovery_to_policy() {
|
||||
// (Simulating the policy check that would happen after successful recovery)
|
||||
let recovery_success = matches!(result, RecoveryResult::Recovered { .. });
|
||||
let green_level = 3; // Workspace green
|
||||
let not_stale = Duration::from_secs(30 * 60); // 30 min — fresh
|
||||
let not_stale = Duration::from_mins(30); // 30 min — fresh
|
||||
|
||||
let post_recovery_context = LaneContext::new(
|
||||
"recovered-lane",
|
||||
@@ -357,7 +356,8 @@ fn worker_provider_failure_flows_through_recovery_to_policy() {
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
);
|
||||
)
|
||||
.with_green_contract_satisfied(true);
|
||||
|
||||
let policy_engine = PolicyEngine::new(vec![
|
||||
// Rule: if recovered from failure + green + approved -> merge
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -215,6 +215,48 @@ fn doctor_command_runs_as_a_local_shell_entrypoint() {
|
||||
fs::remove_dir_all(temp_dir).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn local_smoke_commands_do_not_require_live_credentials() {
|
||||
let temp_dir = unique_temp_dir("offline-local-smoke path with spaces");
|
||||
let config_home = temp_dir.join("home with spaces").join(".claw");
|
||||
fs::create_dir_all(&config_home).expect("config home should exist");
|
||||
fs::create_dir_all(&temp_dir).expect("temp dir should exist");
|
||||
|
||||
for args in [
|
||||
&["help"][..],
|
||||
&["status"][..],
|
||||
&["config", "env"][..],
|
||||
&["doctor"][..],
|
||||
] {
|
||||
let output = offline_command_in(&temp_dir, &config_home)
|
||||
.args(args)
|
||||
.output()
|
||||
.unwrap_or_else(|error| panic!("claw {args:?} should launch: {error}"));
|
||||
|
||||
assert_success(&output);
|
||||
let stdout = String::from_utf8(output.stdout).expect("stdout should be utf8");
|
||||
let stderr = String::from_utf8(output.stderr).expect("stderr should be utf8");
|
||||
assert!(
|
||||
stdout.contains("claw")
|
||||
|| stdout.contains("Status")
|
||||
|| stdout.contains("Config")
|
||||
|| stdout.contains("Doctor"),
|
||||
"unexpected stdout for {args:?}: {stdout}"
|
||||
);
|
||||
assert!(
|
||||
!stderr.contains("missing Anthropic credentials")
|
||||
&& !stderr.contains("auth_unavailable"),
|
||||
"local smoke command {args:?} should not require live credentials: {stderr}"
|
||||
);
|
||||
assert!(
|
||||
!stdout.contains("Thinking"),
|
||||
"local smoke command {args:?} should not enter prompt runtime"
|
||||
);
|
||||
}
|
||||
|
||||
fs::remove_dir_all(temp_dir).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn local_subcommand_help_does_not_fall_through_to_runtime_or_provider_calls() {
|
||||
let temp_dir = unique_temp_dir("subcommand-help");
|
||||
@@ -258,6 +300,19 @@ fn local_subcommand_help_does_not_fall_through_to_runtime_or_provider_calls() {
|
||||
fs::remove_dir_all(temp_dir).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
fn offline_command_in(cwd: &Path, config_home: &Path) -> Command {
|
||||
let mut command = command_in(cwd);
|
||||
command
|
||||
.env("CLAW_CONFIG_HOME", config_home)
|
||||
.env_remove("ANTHROPIC_API_KEY")
|
||||
.env_remove("ANTHROPIC_AUTH_TOKEN")
|
||||
.env_remove("OPENAI_API_KEY")
|
||||
.env_remove("XAI_API_KEY")
|
||||
.env_remove("DASHSCOPE_API_KEY")
|
||||
.env("ANTHROPIC_BASE_URL", "http://127.0.0.1:9");
|
||||
command
|
||||
}
|
||||
|
||||
fn command_in(cwd: &Path) -> Command {
|
||||
let mut command = Command::new(env!("CARGO_BIN_EXE_claw"));
|
||||
command.current_dir(cwd);
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user