mirror of
https://github.com/instructkr/claude-code.git
synced 2026-05-17 03:16:44 +00:00
Compare commits
46 Commits
main
...
feat/jobdo
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
eb4b1ebc9b | ||
|
|
2fcb85ce4e | ||
|
|
f1103332d0 | ||
|
|
186d42f979 | ||
|
|
5f8d1b92a6 | ||
|
|
84466bbb6c | ||
|
|
fbcbe9d8d5 | ||
|
|
dd0993c157 | ||
|
|
b903e1605f | ||
|
|
de368a2615 | ||
|
|
af306d489e | ||
|
|
fef249d9e7 | ||
|
|
7724bf98fd | ||
|
|
70b2f6a66f | ||
|
|
1d155e4304 | ||
|
|
0b5dffb9da | ||
|
|
932710a626 | ||
|
|
3262cb3a87 | ||
|
|
8247d7d2eb | ||
|
|
517d7e224e | ||
|
|
c73423871b | ||
|
|
373dd9b848 | ||
|
|
11f9e8a5a2 | ||
|
|
97c4b130dc | ||
|
|
290ab7e41f | ||
|
|
ded0c5bbc1 | ||
|
|
40c17d8f2a | ||
|
|
b048de8899 | ||
|
|
5a18e3aa1a | ||
|
|
7fb95e95f6 | ||
|
|
60925fa9f7 | ||
|
|
01dca90e95 | ||
|
|
524edb2b2e | ||
|
|
455bdec06c | ||
|
|
85de7f9814 | ||
|
|
178c8fac28 | ||
|
|
d453eedae6 | ||
|
|
79a9f0e6f6 | ||
|
|
4813a2b351 | ||
|
|
3f4d46d7b4 | ||
|
|
6a76cc7c08 | ||
|
|
527c0f971c | ||
|
|
504d238af1 | ||
|
|
41a6091355 | ||
|
|
bc94870a54 | ||
|
|
ee3aa29a5e |
54
.github/ISSUE_TEMPLATE/anti_slop_triage.yml
vendored
54
.github/ISSUE_TEMPLATE/anti_slop_triage.yml
vendored
@@ -1,54 +0,0 @@
|
||||
name: Anti-slop triage
|
||||
about: Classify low-signal, duplicate, generated, or unsafe reports before engineering work starts.
|
||||
title: "triage: "
|
||||
labels: ["needs-triage"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Use this form for issue intake that needs evidence-backed classification before anyone closes, fixes, or escalates it.
|
||||
Do not paste secrets, live tokens, private logs, or non-public customer data.
|
||||
- type: dropdown
|
||||
id: classification
|
||||
attributes:
|
||||
label: Initial classification
|
||||
description: Pick the strongest current classification. Update it if evidence changes.
|
||||
options:
|
||||
- actionable-bug
|
||||
- actionable-docs
|
||||
- actionable-feature
|
||||
- duplicate
|
||||
- spam-or-promotion
|
||||
- generated-slop-or-hallucinated
|
||||
- unsafe-or-security-sensitive
|
||||
- not-reproducible-yet
|
||||
- externally-blocked
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: evidence
|
||||
attributes:
|
||||
label: Evidence
|
||||
description: Link the PR, issue, command output, docs page, reproduction, duplicate, or policy that supports the classification.
|
||||
placeholder: "Evidence: ..."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: safe_next_action
|
||||
attributes:
|
||||
label: Safe next action
|
||||
description: State the next non-destructive action. If closure or merge is proposed, name the required owner/gate.
|
||||
placeholder: "Next action: label only / request repro / link duplicate / fix docs / defer with rationale / owner review required"
|
||||
validations:
|
||||
required: true
|
||||
- type: checkboxes
|
||||
id: guardrails
|
||||
attributes:
|
||||
label: Guardrails
|
||||
options:
|
||||
- label: I did not close, merge, or mutate remote state as part of this triage-only report.
|
||||
required: true
|
||||
- label: I checked for duplicates or related PRs/issues before recommending action.
|
||||
required: true
|
||||
- label: If this touches credentials, security, or private data, I avoided public reproduction details and routed to the appropriate private/security path.
|
||||
required: true
|
||||
17
.github/PULL_REQUEST_TEMPLATE.md
vendored
17
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,17 +0,0 @@
|
||||
## Summary
|
||||
- TBD
|
||||
|
||||
## Anti-slop triage
|
||||
- Classification: <!-- actionable-fix | docs-only | duplicate | generated-slop | unsafe | out-of-scope | needs-maintainer-decision -->
|
||||
- Evidence: <!-- issue link, repro command, failing test, docs source, or duplicate PR -->
|
||||
- Non-destructive review result: <!-- merge candidate | request changes | close/defer with rationale | needs owner gate -->
|
||||
|
||||
## Verification
|
||||
- [ ] Targeted tests/docs checks ran, or the gap is explicitly recorded.
|
||||
- [ ] `git diff --check` passes.
|
||||
- [ ] No live secrets, tokens, private logs, or unrelated generated churn are included.
|
||||
|
||||
## Resolution gate
|
||||
- [ ] If this PR resolves an issue, the issue number and fix evidence are linked.
|
||||
- [ ] If this PR should not merge, the rejection/defer rationale is evidence-backed and does not rely on vibes.
|
||||
- [ ] I did not merge/close remote PRs or issues from an automation lane without owner approval.
|
||||
169
.github/scripts/check_release_readiness.py
vendored
169
.github/scripts/check_release_readiness.py
vendored
@@ -1,169 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Validate release-readiness docs that are easy to regress.
|
||||
|
||||
The check is intentionally dependency-free so it can run on developer machines,
|
||||
Windows CI, and minimal release jobs. It validates:
|
||||
|
||||
* required repository policy files exist;
|
||||
* local Markdown links and image targets resolve;
|
||||
* local heading anchors referenced from Markdown resolve; and
|
||||
* command examples do not present the deprecated `cargo install claw-code`
|
||||
package as an executable install path.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from urllib.parse import unquote, urlparse
|
||||
import re
|
||||
import sys
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[2]
|
||||
|
||||
REQUIRED_POLICY_FILES = [
|
||||
"LICENSE",
|
||||
"CONTRIBUTING.md",
|
||||
"SECURITY.md",
|
||||
"SUPPORT.md",
|
||||
"CODE_OF_CONDUCT.md",
|
||||
]
|
||||
|
||||
MARKDOWN_ROOTS = [
|
||||
ROOT / "README.md",
|
||||
ROOT / "USAGE.md",
|
||||
ROOT / "PARITY.md",
|
||||
ROOT / "PHILOSOPHY.md",
|
||||
ROOT / "ROADMAP.md",
|
||||
ROOT / "CONTRIBUTING.md",
|
||||
ROOT / "SECURITY.md",
|
||||
ROOT / "SUPPORT.md",
|
||||
ROOT / "CODE_OF_CONDUCT.md",
|
||||
ROOT / "docs",
|
||||
ROOT / "rust" / "README.md",
|
||||
ROOT / "rust" / "USAGE.md",
|
||||
ROOT / "rust" / "MOCK_PARITY_HARNESS.md",
|
||||
]
|
||||
|
||||
LINK_PATTERN = re.compile(r"(?<!!)\[[^\]\n]+\]\(([^)\s]+)(?:\s+\"[^\"]*\")?\)")
|
||||
HTML_LINK_PATTERN = re.compile(r"""<(?:a|img)\b[^>]*(?:href|src)=["']([^"']+)["']""", re.I)
|
||||
FENCE_PATTERN = re.compile(r"```(?P<lang>[^\n`]*)\n(?P<body>.*?)```", re.S)
|
||||
|
||||
|
||||
def iter_markdown_files() -> list[Path]:
|
||||
files: set[Path] = set()
|
||||
for entry in MARKDOWN_ROOTS:
|
||||
if entry.is_file():
|
||||
files.add(entry)
|
||||
elif entry.is_dir():
|
||||
files.update(entry.rglob("*.md"))
|
||||
return sorted(files)
|
||||
|
||||
|
||||
def github_anchor(heading: str) -> str:
|
||||
anchor = heading.strip().lower()
|
||||
anchor = re.sub(r"<[^>]+>", "", anchor)
|
||||
anchor = re.sub(r"`([^`]*)`", r"\1", anchor)
|
||||
anchor = re.sub(r"[^a-z0-9 _-]", "", anchor)
|
||||
anchor = anchor.replace(" ", "-")
|
||||
anchor = re.sub(r"-+", "-", anchor)
|
||||
return anchor.strip("-")
|
||||
|
||||
|
||||
def anchors_for(path: Path) -> set[str]:
|
||||
anchors: set[str] = set()
|
||||
for line in path.read_text(encoding="utf-8").splitlines():
|
||||
match = re.match(r"^(#{1,6})\s+(.+?)\s*#*\s*$", line)
|
||||
if match:
|
||||
anchors.add(github_anchor(match.group(2)))
|
||||
return anchors
|
||||
|
||||
|
||||
def is_external(target: str) -> bool:
|
||||
parsed = urlparse(target)
|
||||
return parsed.scheme in {"http", "https", "mailto"}
|
||||
|
||||
|
||||
def validate_policies(errors: list[str]) -> None:
|
||||
for relative in REQUIRED_POLICY_FILES:
|
||||
path = ROOT / relative
|
||||
if not path.is_file():
|
||||
errors.append(f"missing required policy file: {relative}")
|
||||
|
||||
|
||||
def validate_markdown_links(errors: list[str]) -> None:
|
||||
anchor_cache: dict[Path, set[str]] = {}
|
||||
for path in iter_markdown_files():
|
||||
text = path.read_text(encoding="utf-8")
|
||||
candidates = [m.group(1) for m in LINK_PATTERN.finditer(text)]
|
||||
candidates.extend(m.group(1) for m in HTML_LINK_PATTERN.finditer(text))
|
||||
for target in candidates:
|
||||
if (
|
||||
not target
|
||||
or is_external(target)
|
||||
or target.startswith(("mailto:", "tel:", "data:"))
|
||||
):
|
||||
continue
|
||||
link_path, _, raw_anchor = target.partition("#")
|
||||
if not link_path:
|
||||
destination = path
|
||||
else:
|
||||
destination = (path.parent / unquote(link_path)).resolve()
|
||||
try:
|
||||
destination.relative_to(ROOT)
|
||||
except ValueError:
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}: link escapes repo root: {target}"
|
||||
)
|
||||
continue
|
||||
if not destination.exists():
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}: missing local link target: {target}"
|
||||
)
|
||||
continue
|
||||
if raw_anchor and destination.suffix.lower() == ".md":
|
||||
anchor = unquote(raw_anchor).lower()
|
||||
anchor_cache.setdefault(destination, anchors_for(destination))
|
||||
if anchor not in anchor_cache[destination]:
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}: missing anchor `{raw_anchor}` in "
|
||||
f"{destination.relative_to(ROOT)}"
|
||||
)
|
||||
|
||||
|
||||
def validate_command_examples(errors: list[str]) -> None:
|
||||
for path in iter_markdown_files():
|
||||
text = path.read_text(encoding="utf-8")
|
||||
for match in FENCE_PATTERN.finditer(text):
|
||||
lang = match.group("lang").strip().lower()
|
||||
if lang not in {"bash", "sh", "shell", "zsh", "powershell", "ps1"}:
|
||||
continue
|
||||
body = match.group("body")
|
||||
for offset, line in enumerate(body.splitlines(), start=1):
|
||||
stripped = line.strip()
|
||||
if not stripped or stripped.startswith(("#", ">")):
|
||||
continue
|
||||
if re.search(r"\bcargo\s+install\s+claw-code\b", stripped):
|
||||
line_no = text.count("\n", 0, match.start()) + offset + 1
|
||||
errors.append(
|
||||
f"{path.relative_to(ROOT)}:{line_no}: deprecated "
|
||||
"`cargo install claw-code` appears in an executable "
|
||||
"command block; use build-from-source docs instead"
|
||||
)
|
||||
|
||||
|
||||
def main() -> int:
|
||||
errors: list[str] = []
|
||||
validate_policies(errors)
|
||||
validate_markdown_links(errors)
|
||||
validate_command_examples(errors)
|
||||
if errors:
|
||||
print("release-readiness check failed:", file=sys.stderr)
|
||||
for error in errors:
|
||||
print(f" - {error}", file=sys.stderr)
|
||||
return 1
|
||||
print("release-readiness check passed")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
15
.github/workflows/release.yml
vendored
15
.github/workflows/release.yml
vendored
@@ -32,10 +32,6 @@ jobs:
|
||||
os: macos-14
|
||||
bin: claw
|
||||
artifact_name: claw-macos-arm64
|
||||
- name: windows-x64
|
||||
os: windows-latest
|
||||
bin: claw.exe
|
||||
artifact_name: claw-windows-x64.exe
|
||||
defaults:
|
||||
run:
|
||||
working-directory: rust
|
||||
@@ -51,27 +47,22 @@ jobs:
|
||||
- name: Build release binary
|
||||
run: cargo build --release -p rusty-claude-cli
|
||||
|
||||
- name: Package artifact and checksum
|
||||
- name: Package artifact
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p dist
|
||||
cp "target/release/${{ matrix.bin }}" "dist/${{ matrix.artifact_name }}"
|
||||
chmod +x "dist/${{ matrix.artifact_name }}"
|
||||
(cd dist && sha256sum "${{ matrix.artifact_name }}" > "${{ matrix.artifact_name }}.sha256")
|
||||
|
||||
- name: Upload workflow artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.artifact_name }}
|
||||
path: |
|
||||
rust/dist/${{ matrix.artifact_name }}
|
||||
rust/dist/${{ matrix.artifact_name }}.sha256
|
||||
path: rust/dist/${{ matrix.artifact_name }}
|
||||
|
||||
- name: Upload release asset
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: |
|
||||
rust/dist/${{ matrix.artifact_name }}
|
||||
rust/dist/${{ matrix.artifact_name }}.sha256
|
||||
files: rust/dist/${{ matrix.artifact_name }}
|
||||
fail_on_unmatched_files: true
|
||||
|
||||
53
.github/workflows/rust-ci.yml
vendored
53
.github/workflows/rust-ci.yml
vendored
@@ -9,14 +9,8 @@ on:
|
||||
paths:
|
||||
- .github/workflows/rust-ci.yml
|
||||
- .github/scripts/check_doc_source_of_truth.py
|
||||
- .github/scripts/check_release_readiness.py
|
||||
- .github/FUNDING.yml
|
||||
- CODE_OF_CONDUCT.md
|
||||
- CONTRIBUTING.md
|
||||
- LICENSE
|
||||
- README.md
|
||||
- SECURITY.md
|
||||
- SUPPORT.md
|
||||
- USAGE.md
|
||||
- PARITY.md
|
||||
- PHILOSOPHY.md
|
||||
@@ -29,14 +23,8 @@ on:
|
||||
paths:
|
||||
- .github/workflows/rust-ci.yml
|
||||
- .github/scripts/check_doc_source_of_truth.py
|
||||
- .github/scripts/check_release_readiness.py
|
||||
- .github/FUNDING.yml
|
||||
- CODE_OF_CONDUCT.md
|
||||
- CONTRIBUTING.md
|
||||
- LICENSE
|
||||
- README.md
|
||||
- SECURITY.md
|
||||
- SUPPORT.md
|
||||
- USAGE.md
|
||||
- PARITY.md
|
||||
- PHILOSOPHY.md
|
||||
@@ -70,8 +58,6 @@ jobs:
|
||||
python-version: "3.x"
|
||||
- name: Check docs and metadata for stale branding
|
||||
run: python .github/scripts/check_doc_source_of_truth.py
|
||||
- name: Check release policy docs and local links
|
||||
run: python .github/scripts/check_release_readiness.py
|
||||
|
||||
fmt:
|
||||
name: cargo fmt
|
||||
@@ -112,42 +98,3 @@ jobs:
|
||||
workspaces: rust -> target
|
||||
- name: Run workspace clippy
|
||||
run: cargo clippy --workspace
|
||||
|
||||
windows-smoke:
|
||||
name: windows PowerShell smoke
|
||||
runs-on: windows-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: rust
|
||||
shell: pwsh
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: rust -> target
|
||||
- name: Build CLI for Windows smoke
|
||||
run: cargo build -p rusty-claude-cli
|
||||
- name: Smoke local commands without live credentials
|
||||
env:
|
||||
ANTHROPIC_API_KEY: ""
|
||||
ANTHROPIC_AUTH_TOKEN: ""
|
||||
OPENAI_API_KEY: ""
|
||||
XAI_API_KEY: ""
|
||||
DASHSCOPE_API_KEY: ""
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
$env:CLAW_CONFIG_HOME = Join-Path $env:RUNNER_TEMP "claw config home"
|
||||
New-Item -ItemType Directory -Force -Path $env:CLAW_CONFIG_HOME | Out-Null
|
||||
$workspace = Join-Path $env:RUNNER_TEMP "claw path smoke"
|
||||
New-Item -ItemType Directory -Force -Path $workspace | Out-Null
|
||||
$claw = Join-Path $env:GITHUB_WORKSPACE "rust\target\debug\claw.exe"
|
||||
Push-Location $workspace
|
||||
try {
|
||||
& $claw help
|
||||
& $claw status
|
||||
& $claw config env
|
||||
& $claw doctor
|
||||
} finally {
|
||||
Pop-Location
|
||||
}
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -8,7 +8,8 @@ archive/
|
||||
# Claw Code local artifacts
|
||||
.claw/settings.local.json
|
||||
.claw/sessions/
|
||||
# #160/#166: default session storage directory (flush-transcript output,
|
||||
# dogfood runs, etc.). Claws specifying --directory elsewhere are fine.
|
||||
.port_sessions/
|
||||
.clawhip/
|
||||
status-help.txt
|
||||
# Legacy Python port session scratch artifacts
|
||||
.port_sessions/
|
||||
|
||||
14886
.omx/cc2/board.json
14886
.omx/cc2/board.json
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,429 +0,0 @@
|
||||
{
|
||||
"schema_version": "cc2.issue_parity_intake.v1",
|
||||
"generated_at": "2026-05-14T08:02:00Z",
|
||||
"task_id": "3",
|
||||
"owner": "worker-2",
|
||||
"goal": "G001-stream0-board",
|
||||
"notes": [
|
||||
"Leader owns Ultragoal; this artifact does not mutate .omx/ultragoal.",
|
||||
"Rows are scoped intake/classification evidence for Worker 1/Task 2 board integration."
|
||||
],
|
||||
"source_manifest": {
|
||||
"claw_open_latest": {
|
||||
"path": ".omx/research/claw-open-latest.json",
|
||||
"sha256_prefix_from_plan": "89e3e027fa735f38",
|
||||
"covered_issue_numbers": [3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038]
|
||||
},
|
||||
"claw_issues": {
|
||||
"path": ".omx/research/claw-issues.json",
|
||||
"sha256_prefix_from_plan": "e64fdba7df3b78ed",
|
||||
"covered_issue_numbers": [2997, 3003, 3004, 3005, 3006, 3007, 3020, 3023]
|
||||
},
|
||||
"opencode": {
|
||||
"repo_path": ".omx/research/repos/opencode",
|
||||
"metadata_path": ".omx/research/opencode-repo.json",
|
||||
"issues_path": ".omx/research/opencode-issues.json",
|
||||
"head_from_plan": "27ac53aaacc677b1401c4e75ca7a7dadf8b2c349"
|
||||
},
|
||||
"codex": {
|
||||
"repo_path": ".omx/research/repos/codex",
|
||||
"metadata_path": ".omx/research/codex-repo.json",
|
||||
"issues_path": ".omx/research/codex-issues.json",
|
||||
"head_from_plan": "6a225e4005209f2325ab3c681c7c6beba2907d4d"
|
||||
}
|
||||
},
|
||||
"issue_clusters": [
|
||||
{
|
||||
"id": "CC2-ISSUE-3007",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3007",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3007,
|
||||
"title": "Permission modes do not enforce path scope on file tools or shell expansion in bash",
|
||||
"theme": "security/path-scope",
|
||||
"release_bucket": "alpha_blocker",
|
||||
"lifecycle_status": "active",
|
||||
"roadmap_anchor": "ROADMAP.md#11-policy-engine-for-autonomous-coding; ROADMAP.md#9-green-ness-contract",
|
||||
"dependencies": ["permission path canonicalization", "file tool target validation", "bash command/path validation reachability", "policy regression fixtures"],
|
||||
"verification_required": ["workspace-write cannot read/write/delete outside workspace", "shell expansion and symlink traversal are rejected or policy-blocked", "file tools and bash use the same target-scope decision record"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Security/sandbox escape class; plan names #3007 as alpha blocker."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3020",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3020",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3020,
|
||||
"title": "OpenAI-compatible model IDs with slashes are stripped before request",
|
||||
"theme": "provider/model-routing",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#provider-routing-model-name-prefix-must-win-over-env-var-presence-fixed-2026-04-08-0530c50",
|
||||
"dependencies": ["provider profile contract", "wire model-id preservation option", "routing-prefix source reporting"],
|
||||
"verification_required": ["OpenAI-compatible endpoint receives exact model id when preservation is enabled", "status JSON reports raw model input, route, and wire model id"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Core provider correctness but below alpha state/security contracts unless it blocks the selected alpha model path."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3006",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3006",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3006,
|
||||
"title": "Not Working in windows",
|
||||
"theme": "windows/install",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["Windows support policy", "PowerShell install path", "dependency/version matrix", "diagnostic setup output"],
|
||||
"verification_required": ["fresh Windows/PowerShell setup smoke documented", "unsupported native paths fail with actionable WSL2/native guidance"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Real adoption blocker; plan places Windows/install in beta adoption overlay."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3005",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3005",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3005,
|
||||
"title": "DeepSeek V4-flash/pro fails with 400 Bad Request (missing reasoning_content) while deepseek-reasoner works",
|
||||
"theme": "provider/response-shape",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#5-failure-taxonomy; ROADMAP.md#provider-routing-model-name-prefix-must-win-over-env-var-presence-fixed-2026-04-08-0530c50",
|
||||
"dependencies": ["OpenAI-compatible diagnostics playbook", "provider error taxonomy", "reasoning/thinking field compatibility tests"],
|
||||
"verification_required": ["provider 400 response classified with actionable remediation", "DeepSeek-compatible response-shape fixture does not hide assistant output"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Provider compatibility issue that shares the #3032 diagnostics lane."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3004",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3004",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3004,
|
||||
"title": "When can we adapt to zed?",
|
||||
"theme": "ide/acp",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#phase-5-plugin-and-mcp-lifecycle-maturity",
|
||||
"dependencies": ["stable session/control API", "plugin/MCP lifecycle", "engine API or ACP bridge decision"],
|
||||
"verification_required": ["Zed/ACP smoke once core state/control contracts exist"],
|
||||
"deferral_rationale": "IDE integration is valuable but should wait until boot/session/event/control truth surfaces are stable.",
|
||||
"classification_rationale": "Matches plan's GA ecosystem lane for Zed/ACP."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3003",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3003",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3003,
|
||||
"title": ".claude/sessions should not be submitted to repo",
|
||||
"theme": "session-hygiene/gitignore",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#9-green-ness-contract; ROADMAP.md#8-recovery-recipes-for-common-failures",
|
||||
"dependencies": ["artifact ignore policy", "session storage boundary docs", "repo hygiene check"],
|
||||
"verification_required": ["session directories are ignored", "status/doctor warns about tracked session artifacts"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Small but user-visible session hygiene and data-leak prevention item."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-2997",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/2997",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 2997,
|
||||
"title": "License?",
|
||||
"theme": "docs/license",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["maintainer license decision", "LICENSE file", "README/USAGE attribution wording"],
|
||||
"verification_required": ["repository license file exists", "package metadata and docs reference the same license"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Adoption/readiness documentation gap; requires maintainer decision before implementation."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3023",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3023",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3023,
|
||||
"title": "Protect claw-code from AI slop PRs",
|
||||
"theme": "repo-hygiene/anti-slop",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["contributor policy", "PR quality gate selection", "false-positive review escape hatch"],
|
||||
"verification_required": ["selected PR quality gate runs on sample good/bad PR fixtures", "maintainers can override false positives"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Protects project throughput but should not precede alpha core safety contracts."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3028",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3028",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3028,
|
||||
"title": "docs: add navigation and file-context usage guide",
|
||||
"theme": "docs/navigation-context",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#7-human-ux-still-leaks-into-claw-workflows",
|
||||
"dependencies": ["current TUI/shell key behavior inventory", "file context syntax docs", "secret-handling guidance"],
|
||||
"verification_required": ["docs include terminal history, scrollback, @file context, attach/external file caveats", "examples work against current CLI"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Documentation support item from latest open issue refresh."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3029",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3029",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3029,
|
||||
"title": "build: add cross-platform installer path and release artifact quickstart",
|
||||
"theme": "install/distribution",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#immediate-backlog-from-current-real-pain",
|
||||
"dependencies": ["release artifact policy", "install.sh/install.ps1 contract", "PATH/update/uninstall instructions"],
|
||||
"verification_required": ["install quickstart smoke on supported OS/arch", "failed install prints actionable diagnostics"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Distribution friction belongs in adoption overlay."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3030",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3030",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3030,
|
||||
"title": "feat: make provider/model setup less env-var-driven",
|
||||
"theme": "provider/setup-profiles",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#3-structured-session-control-api; ROADMAP.md#145-boot-preflight-doctor-contract",
|
||||
"dependencies": ["provider profiles", "setup wizard or dry-run", "secret redaction", "base-url/model smoke test"],
|
||||
"verification_required": ["setup validates provider route without echoing keys", "session-only versus persisted profile behavior is explicit"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Directly reduces current provider setup support churn."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3031",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3031",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3031,
|
||||
"title": "feat: auto-compact or clearly recover from context-window provider errors",
|
||||
"theme": "session-recovery/context-window",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#8-recovery-recipes-for-common-failures; ROADMAP.md#158-compact_messages_if_needed-drops-turns-silently-no-structured-compaction-event-emitted",
|
||||
"dependencies": ["provider error classifier", "safe compact retry policy", "compaction event/audit trail", "retry loop cap"],
|
||||
"verification_required": ["context-window error either compacts+retries once safely or emits exact recovery command", "compaction event is machine-visible"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Recovery reliability item; promoted only if selected alpha provider path hits it."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3032",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3032",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3032,
|
||||
"title": "docs: add OpenAI-compatible/local provider diagnostics playbook",
|
||||
"theme": "provider/diagnostics-docs",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#5-failure-taxonomy",
|
||||
"dependencies": ["raw chat-completions smoke tests", "tool-call response-shape examples", "provider failure taxonomy"],
|
||||
"verification_required": ["playbook distinguishes Claw bugs from wrapper/tool-call-shape bugs", "curl examples cover non-streaming and streaming tool calls"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Shared diagnostic lane for #3005/#3020/local model reports."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3033",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3033",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3033,
|
||||
"title": "feat: add minimal claw serve JSON-RPC engine API",
|
||||
"theme": "engine-api/control-plane",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#3-structured-session-control-api; ROADMAP.md#phase-4-claws-first-task-execution",
|
||||
"dependencies": ["stable session state API", "event schema v1", "permission policy contract", "cancel/prompt stream semantics"],
|
||||
"verification_required": ["protocol conformance fixtures for session/create prompt/stream cancel error", "capability negotiation backwards compatibility"],
|
||||
"deferral_rationale": "Engine API should expose, not invent, stable core control-plane semantics after alpha contracts land.",
|
||||
"classification_rationale": "Useful integration surface but too broad for alpha unless narrowed to existing session control API."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3034",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3034",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3034,
|
||||
"title": "docs: define evidence-gated Hermes handoff loop for Claw Code execution",
|
||||
"theme": "sdlc/evidence-handoff",
|
||||
"release_bucket": "post_2_0_research",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#4-canonical-lane-event-schema; ROADMAP.md#10-typed-task-packet-format",
|
||||
"dependencies": ["typed task packet", "evidence bundle schema", "report gate status vocabulary"],
|
||||
"verification_required": ["handoff packet fixture validates scope/success/test evidence fields", "post-flight gate consumes evidence instead of free-text summary"],
|
||||
"deferral_rationale": "Can inform event/report/task contracts, but Hermes-specific loop should stay research/docs until core schemas are stable.",
|
||||
"classification_rationale": "Only the generic evidence-gated contract is Claw 2.0; Hermes branding is not core."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3035",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3035",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3035,
|
||||
"title": "fix: improve compacted session resume discoverability",
|
||||
"theme": "session-resume/discoverability",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#8-recovery-recipes-for-common-failures; ROADMAP.md#160-session_store-has-no-list_sessions-delete_session-or-session_exists",
|
||||
"dependencies": ["session enumeration", "latest-session workspace search boundary", "compacted session marker"],
|
||||
"verification_required": ["/resume latest finds newest eligible compacted session", "/session or status lists resumable compacted sessions with path/id"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Session recovery/adoption item."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3036",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3036",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3036,
|
||||
"title": "docs: add official Ollama/llama.cpp/vLLM local model examples",
|
||||
"theme": "provider/local-docs",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#145-boot-preflight-doctor-contract; ROADMAP.md#5-failure-taxonomy",
|
||||
"dependencies": ["known-good local provider examples", "raw /v1 smoke test", "tool-call limitation warning"],
|
||||
"verification_required": ["docs include Ollama/llama.cpp/vLLM examples and HELLO smoke", "tool-call caveats are explicit"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Local provider adoption support."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3037",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3037",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3037,
|
||||
"title": "docs: clarify Claw Code positioning as multi-provider Claude-Code-shaped runtime",
|
||||
"theme": "docs/product-positioning",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"roadmap_anchor": "ROADMAP.md#goal; ROADMAP.md#definition-of-clawable",
|
||||
"dependencies": ["README positioning copy", "provider support truth table", "identity leak bug policy"],
|
||||
"verification_required": ["README/docs answer Claude-only question directly", "provider support wording matches implemented routes"],
|
||||
"deferral_rationale": null,
|
||||
"classification_rationale": "Clarifies product identity for adoption without broad implementation."
|
||||
},
|
||||
{
|
||||
"id": "CC2-ISSUE-3038",
|
||||
"source_anchor": "https://github.com/ultraworkers/claw-code/issues/3038",
|
||||
"source_type": "github_issue",
|
||||
"source_number": 3038,
|
||||
"title": "roadmap: track skills/plugins/marketplace ecosystem gap after core UX stabilizes",
|
||||
"theme": "plugin-marketplace/ecosystem",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"roadmap_anchor": "ROADMAP.md#13-first-class-pluginmcp-lifecycle-contract; ROADMAP.md#14-mcp-end-to-end-lifecycle-parity",
|
||||
"dependencies": ["plugin/MCP lifecycle contract", "extension point inventory", "discovery/install/update flow design"],
|
||||
"verification_required": ["extension point inventory exists", "marketplace work explicitly depends on core UX stabilization"],
|
||||
"deferral_rationale": "Marketplace breadth should wait until core setup/auth/provider/session UX and plugin lifecycle are reliable.",
|
||||
"classification_rationale": "Matches plan's ga_ecosystem/post-2.0 caution for marketplace parity."
|
||||
}
|
||||
],
|
||||
"parity_rows": [
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-PLUGIN-ECOSYSTEM",
|
||||
"source_anchor": "anomalyco/opencode@27ac53aa packages/app/web/desktop/plugin/sdk/extensions/zed/slack/containers plus issue #3038",
|
||||
"source_type": "repo_clone_and_local_issue",
|
||||
"title": "Plugin/skills/marketplace ecosystem inventory",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"dependencies": ["Claw plugin/MCP lifecycle contract", "current extension-point inventory"],
|
||||
"verification_required": ["inventory maps current Claw plugin/skill/MCP extension points before marketplace implementation"],
|
||||
"deferral_rationale": "Adapt ecosystem discovery only after core setup/provider/session reliability is stable."
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-PERMISSION-PRESETS",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27464 and ROADMAP.md#11-policy-engine-for-autonomous-coding",
|
||||
"source_type": "external_issue_and_roadmap",
|
||||
"title": "Quick permission preset switching mapped onto Claw policy profiles",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["policy profile model", "approval-token audit trail"],
|
||||
"verification_required": ["preset switch is visible in status/report output and cannot bypass path-scope enforcement"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-CUSTOM-PROVIDER-PARAMS",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27462 and #3030/#3032",
|
||||
"source_type": "external_issue_and_local_issue",
|
||||
"title": "Custom API parameter passthrough for provider profiles",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["provider profile schema", "secret redaction", "request audit surface"],
|
||||
"verification_required": ["custom params are schema-validated, redacted, and visible as provenance without leaking secrets"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-TODOWRITE-AUTOCOMPLETE",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27453 and ROADMAP.md#10-typed-task-packet-format",
|
||||
"source_type": "external_issue_and_roadmap",
|
||||
"title": "Task/Todo completion assistance via typed task lifecycle",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"dependencies": ["typed task packet", "task lifecycle events", "evidence-gated completion"],
|
||||
"verification_required": ["auto-complete suggestions cannot mark work complete without evidence bundle or explicit user approval"],
|
||||
"deferral_rationale": "Useful UX should follow, not precede, typed task lifecycle and evidence contract."
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-OPENCODE-WINDOWS-DISTRIBUTION",
|
||||
"source_anchor": "https://github.com/anomalyco/opencode/issues/27476 https://github.com/anomalyco/opencode/issues/27459 https://github.com/anomalyco/opencode/issues/27470 and #3006/#3029",
|
||||
"source_type": "external_issues_and_local_issues",
|
||||
"title": "Windows/GLIBC/distribution reliability parity lessons",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["install artifact matrix", "Windows encoding guidance", "minimum Linux/GLIBC support statement"],
|
||||
"verification_required": ["release quickstart documents supported OS matrix and known terminal/encoding caveats"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-GRANULAR-PERMISSIONS",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22595 and Codex docs permissions/app/plugin concepts",
|
||||
"source_type": "external_issue_and_docs",
|
||||
"title": "Granular app/plugin permissions adapted to Claw policy engine",
|
||||
"release_bucket": "alpha_blocker",
|
||||
"lifecycle_status": "active",
|
||||
"dependencies": ["permission enforcer path-scope fix", "plugin/MCP capability model", "approval-token replay protection"],
|
||||
"verification_required": ["granular permission grants do not widen workspace path scope implicitly"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-SESSION-RECOVERY",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22619 https://github.com/openai/codex/issues/22597 https://github.com/openai/codex/issues/22593 and #3035",
|
||||
"source_type": "external_issues_and_local_issue",
|
||||
"title": "Safe local session/thread recovery without storage amplification",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["session enumeration", "resume latest boundary", "JSONL/storage compaction policy"],
|
||||
"verification_required": ["recoverable sessions are discoverable and session forks avoid unbounded duplicate history"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-PROXY-NETWORK",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22623 and #3032",
|
||||
"source_type": "external_issue_and_local_issue",
|
||||
"title": "Provider/network diagnostics include proxy behavior",
|
||||
"release_bucket": "beta_adoption",
|
||||
"lifecycle_status": "open",
|
||||
"dependencies": ["HTTP client proxy detection", "provider diagnostics playbook"],
|
||||
"verification_required": ["diagnostics report whether proxy env/config is honored for provider calls"],
|
||||
"deferral_rationale": null
|
||||
},
|
||||
{
|
||||
"id": "CC2-PARITY-CODEX-CLI-AGENT-FLAG",
|
||||
"source_anchor": "https://github.com/openai/codex/issues/22615 and ROADMAP.md#10-typed-task-packet-format",
|
||||
"source_type": "external_issue_and_roadmap",
|
||||
"title": "CLI flag for agent/subagent mode mapped to Claw typed task packets",
|
||||
"release_bucket": "ga_ecosystem",
|
||||
"lifecycle_status": "deferred_with_rationale",
|
||||
"dependencies": ["typed task packet", "session control API", "policy-scoped worker launch"],
|
||||
"verification_required": ["CLI agent mode cannot bypass task policy or evidence requirements"],
|
||||
"deferral_rationale": "Implement only after core task/session control contracts are stable."
|
||||
}
|
||||
],
|
||||
"coverage": {
|
||||
"required_latest_open_range_3028_3038": [3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038],
|
||||
"required_existing_issue_numbers": [3007, 3006, 3020, 3005, 3003, 2997, 3023, 3004],
|
||||
"issue_rows_expected": 19,
|
||||
"parity_rows_expected_minimum": 6
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
# CC2 Issue / Parity Intake Mapping
|
||||
|
||||
Generated by `worker-2` for team task 3 (`G001 issue/parity intake mapping`). This is a board-integration fragment for Stream 0; it intentionally does **not** mutate `.omx/ultragoal`.
|
||||
|
||||
## Covered local issue clusters
|
||||
|
||||
| Issue | Theme | Bucket | Lifecycle | Board anchor |
|
||||
|---:|---|---|---|---|
|
||||
| #3007 | security/path-scope | `alpha_blocker` | `active` | Policy engine + green-ness contract |
|
||||
| #3020 | provider/model-routing | `beta_adoption` | `open` | Provider routing/model source status |
|
||||
| #3006 | windows/install | `beta_adoption` | `open` | Immediate backlog / install readiness |
|
||||
| #3005 | provider/response-shape | `beta_adoption` | `open` | Failure taxonomy / provider diagnostics |
|
||||
| #3004 | ide/acp | `ga_ecosystem` | `deferred_with_rationale` | Plugin/MCP lifecycle maturity |
|
||||
| #3003 | session-hygiene/gitignore | `beta_adoption` | `open` | Green-ness / recovery hygiene |
|
||||
| #2997 | docs/license | `beta_adoption` | `open` | Adoption docs/license readiness |
|
||||
| #3023 | repo-hygiene/anti-slop | `beta_adoption` | `open` | Immediate backlog / PR quality gate |
|
||||
| #3028 | docs/navigation-context | `beta_adoption` | `open` | Human UX leaks into claw workflows |
|
||||
| #3029 | install/distribution | `beta_adoption` | `open` | Cross-platform release quickstart |
|
||||
| #3030 | provider/setup-profiles | `beta_adoption` | `open` | Boot preflight / structured session control |
|
||||
| #3031 | session-recovery/context-window | `beta_adoption` | `open` | Recovery recipes / compaction event |
|
||||
| #3032 | provider/diagnostics-docs | `beta_adoption` | `open` | Failure taxonomy |
|
||||
| #3033 | engine-api/control-plane | `ga_ecosystem` | `deferred_with_rationale` | Structured session control API |
|
||||
| #3034 | sdlc/evidence-handoff | `post_2_0_research` | `deferred_with_rationale` | Event/report/task contract input |
|
||||
| #3035 | session-resume/discoverability | `beta_adoption` | `open` | Recovery recipes / session enumeration |
|
||||
| #3036 | provider/local-docs | `beta_adoption` | `open` | Provider setup and diagnostics docs |
|
||||
| #3037 | docs/product-positioning | `beta_adoption` | `open` | Goal / definition of clawable |
|
||||
| #3038 | plugin-marketplace/ecosystem | `ga_ecosystem` | `deferred_with_rationale` | Plugin/MCP lifecycle maturity |
|
||||
|
||||
## Parity intake rows
|
||||
|
||||
| Row | Source | Bucket | Lifecycle | Adaptation rule |
|
||||
|---|---|---|---|---|
|
||||
| `CC2-PARITY-OPENCODE-PLUGIN-ECOSYSTEM` | opencode repo + #3038 | `ga_ecosystem` | `deferred_with_rationale` | Inventory Claw extension points before marketplace work. |
|
||||
| `CC2-PARITY-OPENCODE-PERMISSION-PRESETS` | opencode #27464 | `beta_adoption` | `open` | Permission preset UX must not bypass Claw path-scope policy. |
|
||||
| `CC2-PARITY-OPENCODE-CUSTOM-PROVIDER-PARAMS` | opencode #27462 + #3030/#3032 | `beta_adoption` | `open` | Custom provider params need schema validation, redaction, and provenance. |
|
||||
| `CC2-PARITY-OPENCODE-TODOWRITE-AUTOCOMPLETE` | opencode #27453 | `ga_ecosystem` | `deferred_with_rationale` | Auto-complete task UX follows typed task lifecycle/evidence gates. |
|
||||
| `CC2-PARITY-OPENCODE-WINDOWS-DISTRIBUTION` | opencode #27476/#27459/#27470 + #3006/#3029 | `beta_adoption` | `open` | Use external pain as release-matrix and diagnostics evidence. |
|
||||
| `CC2-PARITY-CODEX-GRANULAR-PERMISSIONS` | Codex #22595 + docs | `alpha_blocker` | `active` | Adapt granular permissions only through Claw policy engine and approval tokens. |
|
||||
| `CC2-PARITY-CODEX-SESSION-RECOVERY` | Codex #22619/#22597/#22593 + #3035 | `beta_adoption` | `open` | Session discovery/recovery must avoid storage amplification. |
|
||||
| `CC2-PARITY-CODEX-PROXY-NETWORK` | Codex #22623 + #3032 | `beta_adoption` | `open` | Provider diagnostics should expose proxy behavior. |
|
||||
| `CC2-PARITY-CODEX-CLI-AGENT-FLAG` | Codex #22615 | `ga_ecosystem` | `deferred_with_rationale` | CLI agent mode waits for typed task/session control contracts. |
|
||||
|
||||
Validation command:
|
||||
|
||||
```bash
|
||||
python3 .omx/cc2/validate_issue_parity_intake.py
|
||||
```
|
||||
@@ -1,250 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Render the Claw Code 2.0 canonical board JSON as a human-readable Markdown board."""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from collections import Counter, defaultdict
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
# Human-readable meaning for each lifecycle_status enum value; used as the
# "Meaning" column in the rendered Lifecycle Enum Reference table.
STATUS_DESCRIPTIONS = {
    "context": "Context-only heading or evidence anchor; not an implementation work item.",
    "active": "Current Claw Code 2.0 implementation surface that should remain visible on the board.",
    "open": "Actionable unresolved work that needs implementation or acceptance evidence.",
    "done_verify": "Marked as done upstream but retained for verification against current CC2 behavior.",
    "stale_done": "Historically completed or merged work that may be stale and needs freshness checks before relying on it.",
    "superseded": "Replaced by a newer item; keep as traceability context only.",
    "deferred_with_rationale": "Intentionally deferred; rationale must be present in the board item.",
    "rejected_not_claw": "Excluded because it is not Claw Code product work.",
}

# Human-readable meaning for each release_bucket value; used as the
# "Meaning" column in the Release Bucket Reference table.
BUCKET_DESCRIPTIONS = {
    "alpha_blocker": "Must be resolved before alpha-quality autonomous coding lanes are dependable.",
    "beta_adoption": "Important for broader dogfood/adoption once alpha blockers are controlled.",
    "ga_ecosystem": "Required for mature plugin/MCP/provider ecosystem behavior.",
    "2.x_intake": "Post-2.0 intake or follow-up candidate retained for sequencing.",
    "post_2_0_research": "Research-oriented item not required for the CC2 board cut.",
    "context": "Non-actionable roadmap context.",
    "rejected_not_claw": "Explicit non-Claw rejection bucket.",
}

# Display titles for each owner_lane key; unknown lanes fall back to the raw key.
LANE_TITLES = {
    "stream_0_governance": "Stream 0 — Governance, intake, and cross-cutting roadmap triage",
    "stream_1_worker_boot_session_control": "Stream 1 — Worker boot and session control",
    "stream_2_event_reporting_contracts": "Stream 2 — Event/reporting contracts",
    "stream_3_branch_test_recovery": "Stream 3 — Branch/test recovery",
    "stream_4_claws_first_execution": "Stream 4 — Claws-first task execution",
    "stream_5_plugin_mcp_lifecycle": "Stream 5 — Plugin/MCP lifecycle",
    "adoption_overlay": "Adoption overlay — user-visible parity and release polish",
    "parity_overlay": "Parity overlay — opencode/codex comparison context",
}

# Fields every board item must carry; validate_board reports each missing one.
REQUIRED_ITEM_FIELDS = [
    "id",
    "title",
    "source_anchor",
    "source_type",
    "release_bucket",
    "lifecycle_status",
    "dependencies",
    "verification_required",
    "deferral_rationale",
]
|
||||
|
||||
|
||||
def load_board(path: Path) -> dict[str, Any]:
    """Read the board JSON file and check its minimal shape.

    Returns the parsed root object. Raises ``ValueError`` when the root is
    not a JSON object or when it lacks an ``items`` array; JSON syntax
    errors propagate from :func:`json.loads`.
    """
    parsed = json.loads(path.read_text())
    if not isinstance(parsed, dict):
        raise ValueError("board JSON root must be an object")
    if not isinstance(parsed.get("items"), list):
        raise ValueError("board JSON must contain an items array")
    return parsed
|
||||
|
||||
|
||||
def validate_board(board: dict[str, Any]) -> list[str]:
    """Collect coverage and per-item schema violations.

    Returns a list of human-readable error strings; an empty list means the
    board passes. Status/bucket vocabulary comes from the board's own
    ``generation_policy`` section, so those checks are skipped when the
    policy lists are absent or empty.
    """
    problems: list[str] = []

    # Roadmap coverage gates: every heading and ordered action must be mapped.
    coverage = board.get("coverage", {})
    if coverage.get("unmapped_roadmap_heading_lines"):
        problems.append(f"unmapped roadmap heading lines: {coverage['unmapped_roadmap_heading_lines']}")
    if coverage.get("roadmap_headings_mapped") != coverage.get("roadmap_headings_total"):
        problems.append("roadmap heading coverage is incomplete")
    if coverage.get("roadmap_actions_mapped") != coverage.get("roadmap_actions_total"):
        problems.append("roadmap ordered-action coverage is incomplete")

    policy = board.get("generation_policy", {})
    allowed_status = set(policy.get("status_values", []))
    allowed_buckets = set(policy.get("release_buckets", []))

    seen_ids: set[str] = set()
    for position, entry in enumerate(board["items"], 1):
        # Report every missing required field, not just the first.
        for field in REQUIRED_ITEM_FIELDS:
            if field not in entry:
                problems.append(f"item {position} missing required field {field}")

        entry_id = entry.get("id")
        if entry_id in seen_ids:
            problems.append(f"duplicate item id {entry_id}")
        seen_ids.add(entry_id)

        status = entry.get("lifecycle_status")
        bucket = entry.get("release_bucket")
        if allowed_status and status not in allowed_status:
            problems.append(f"{entry_id} has unknown lifecycle_status {status!r}")
        if allowed_buckets and bucket not in allowed_buckets:
            problems.append(f"{entry_id} has unknown release_bucket {bucket!r}")
        # Deferred items must justify themselves with a non-blank rationale.
        if status == "deferred_with_rationale" and not str(entry.get("deferral_rationale", "")).strip():
            problems.append(f"{entry_id} is deferred without deferral_rationale")
    return problems
|
||||
|
||||
|
||||
def table(headers: list[str], rows: list[list[Any]]) -> list[str]:
    """Render a Markdown table as a list of lines: header, separator, rows."""

    def fmt_row(cells: list[Any]) -> str:
        # Pipe-delimited Markdown row; cells are stringified as-is.
        return "| " + " | ".join(str(cell) for cell in cells) + " |"

    rendered = [fmt_row(list(headers)), fmt_row(["---"] * len(headers))]
    rendered.extend(fmt_row(row) for row in rows)
    return rendered
|
||||
|
||||
|
||||
def fmt_list(value: Any) -> str:
    """Format a scalar or list value for a Markdown table cell.

    Falsy values (``None``, ``[]``, ``""``, ``0``) render as ``"none"``;
    a list becomes a comma-separated run of backtick-quoted entries;
    any other scalar is backtick-quoted.
    """
    if not value:
        return "none"
    if isinstance(value, list):
        # An empty list was already caught by the falsy check above, so the
        # original trailing `... if value else "none"` was dead code.
        return ", ".join(f"`{v}`" for v in value)
    return f"`{value}`"
|
||||
|
||||
|
||||
def render(board: dict[str, Any]) -> str:
    """Render the canonical board JSON into the Markdown board document.

    Output bytes matter: ``main --check`` compares this text against the
    committed Markdown file, so any change here forces regeneration.
    Depends on sibling helpers ``table``/``fmt_list`` and the module-level
    description/title dicts.
    """
    items: list[dict[str, Any]] = board["items"]
    coverage = board.get("coverage", {})
    sources = board.get("sources", {})
    policy = board.get("generation_policy", {})
    # Distribution counters that feed the summary tables below.
    by_lane = Counter(item.get("owner_lane", "unassigned") for item in items)
    by_status = Counter(item.get("lifecycle_status", "unknown") for item in items)
    by_bucket = Counter(item.get("release_bucket", "unknown") for item in items)
    by_source = Counter(item.get("source_type", "unknown") for item in items)

    lines: list[str] = []
    lines.append("# Claw Code 2.0 Canonical Board")
    lines.append("")
    lines.append(f"Generated from board schema: `{board.get('generated_at', 'unknown')}`")
    lines.append(f"Schema version: `{board.get('schema_version', 'unknown')}`")
    lines.append("Ultragoal mutation policy: `.omx/ultragoal` is leader-owned and was not modified by this rendering task.")
    lines.append("")

    # Evidence freeze: records the exact source artifacts the board was cut from.
    lines.append("## Evidence Freeze")
    lines.append("")
    roadmap = sources.get("roadmap", {})
    research = sources.get("research", {})
    plan = sources.get("approved_plan", {})
    lines.extend(table(["Source", "Frozen evidence"], [
        ["Roadmap", f"`{roadmap.get('path', 'ROADMAP.md')}` sha256 prefix `{roadmap.get('sha256_prefix', 'unknown')}`; {roadmap.get('heading_count', '?')} headings; {roadmap.get('ordered_action_count', '?')} ordered actions"],
        ["Approved plan", f"`{plan.get('path', '.omx/plans/claw-code-2-0-adaptive-plan.md')}` sha256 prefix `{plan.get('sha256_prefix', 'unknown')}`"],
        ["Research bundle", f"root `{research.get('root', '.omx/research')}`; latest open issues {research.get('claw_open_latest_count', '?')}; issue corpus {research.get('claw_issues_count', '?')}; codex/opencode clone metadata included"],
    ]))
    lines.append("")

    # Coverage summary mirrors the validate_board gates as PASS/FAIL/WARN rows.
    lines.append("## Roadmap Coverage Summary")
    lines.append("")
    heading_total = coverage.get("roadmap_headings_total", 0)
    heading_mapped = coverage.get("roadmap_headings_mapped", 0)
    action_total = coverage.get("roadmap_actions_total", 0)
    action_mapped = coverage.get("roadmap_actions_mapped", 0)
    lines.extend(table(["Coverage gate", "Mapped", "Total", "Status"], [
        ["ROADMAP headings", heading_mapped, heading_total, "PASS" if heading_mapped == heading_total and not coverage.get("unmapped_roadmap_heading_lines") else "FAIL"],
        ["ROADMAP ordered actions", action_mapped, action_total, "PASS" if action_mapped == action_total else "FAIL"],
        ["Duplicate heading lines", len(coverage.get("duplicate_roadmap_heading_lines", [])), 0, "PASS" if not coverage.get("duplicate_roadmap_heading_lines") else "WARN"],
    ]))
    lines.append("")
    lines.append(f"Total canonical board items: **{len(items)}**")
    lines.append("")

    # Enum references: ordered by the board's own policy lists when present,
    # otherwise by sorted observed values.
    lines.append("## Lifecycle Enum Reference")
    lines.append("")
    status_rows = []
    for status in policy.get("status_values", sorted(by_status)):
        status_rows.append([f"`{status}`", by_status.get(status, 0), STATUS_DESCRIPTIONS.get(status, "Board-defined lifecycle status.")])
    lines.extend(table(["Lifecycle", "Count", "Meaning"], status_rows))
    lines.append("")

    lines.append("## Release Bucket Reference")
    lines.append("")
    bucket_rows = []
    for bucket in policy.get("release_buckets", sorted(by_bucket)):
        bucket_rows.append([f"`{bucket}`", by_bucket.get(bucket, 0), BUCKET_DESCRIPTIONS.get(bucket, "Board-defined release bucket.")])
    lines.extend(table(["Bucket", "Count", "Meaning"], bucket_rows))
    lines.append("")

    # Per-lane rollup: item count, "still needs attention" count, and the
    # full lifecycle mix for each lane.
    lines.append("## Stream Summaries")
    lines.append("")
    lane_rows = []
    for lane, count in sorted(by_lane.items()):
        lane_items = [item for item in items if item.get("owner_lane") == lane]
        lane_status = Counter(item.get("lifecycle_status") for item in lane_items)
        open_like = lane_status.get("active", 0) + lane_status.get("open", 0) + lane_status.get("done_verify", 0)
        lane_rows.append([
            LANE_TITLES.get(lane, lane),
            count,
            open_like,
            ", ".join(f"`{k}` {v}" for k, v in sorted(lane_status.items())),
        ])
    lines.extend(table(["Stream / lane", "Items", "Active+open+verify", "Lifecycle mix"], lane_rows))
    lines.append("")

    lines.append("## Source-Type Mix")
    lines.append("")
    lines.extend(table(["Source type", "Items"], [[f"`{k}`", v] for k, v in sorted(by_source.items())]))
    lines.append("")

    # Full item listing, one table per lane; pipes in free text are escaped
    # so they cannot break the Markdown table layout.
    lines.append("## Board Items by Stream")
    lines.append("")
    for lane in sorted(by_lane):
        lane_items = [item for item in items if item.get("owner_lane") == lane]
        lines.append(f"### {LANE_TITLES.get(lane, lane)}")
        lines.append("")
        lines.extend(table(
            ["ID", "Title", "Source", "Bucket", "Lifecycle", "Verification", "Dependencies", "Deferral"],
            [[
                f"`{item.get('id')}`",
                str(item.get("title", "")).replace("|", "\\|"),
                f"`{item.get('source_anchor')}` / `{item.get('source_type')}`",
                f"`{item.get('release_bucket')}`",
                f"`{item.get('lifecycle_status')}`",
                f"`{item.get('verification_required')}`",
                fmt_list(item.get("dependencies")),
                str(item.get("deferral_rationale") or "—").replace("|", "\\|"),
            ] for item in lane_items]
        ))
        lines.append("")

    # Collapse trailing blank lines to a single terminating newline.
    return "\n".join(lines).rstrip() + "\n"
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: validate the board JSON, then write or ``--check`` the Markdown.

    Returns a process exit code: 0 on success, 1 on validation failure or a
    stale Markdown file in ``--check`` mode.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("board_json", type=Path)
    parser.add_argument("board_md", type=Path)
    parser.add_argument("--check", action="store_true", help="fail if board_md is not up to date")
    args = parser.parse_args()

    board = load_board(args.board_json)
    problems = validate_board(board)
    if problems:
        for problem in problems:
            print(f"ERROR: {problem}", file=sys.stderr)
        return 1

    output = render(board)
    if args.check:
        # Read-only comparison against the committed Markdown; a missing
        # file compares as empty and therefore fails the check.
        current = args.board_md.read_text() if args.board_md.exists() else ""
        if current != output:
            print(f"ERROR: {args.board_md} is not up to date", file=sys.stderr)
            return 1
        print(f"PASS: {args.board_md} is up to date and roadmap coverage is complete")
        return 0

    args.board_md.parent.mkdir(parents=True, exist_ok=True)
    args.board_md.write_text(output)
    print(f"wrote {args.board_md}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
@@ -1,58 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Validate the worker-2 CC2 issue/parity intake fragment."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
# Repository root, resolved two levels up from this script (.omx/cc2/<script>).
ROOT = Path(__file__).resolve().parents[2]
# The intake fragment this validator checks.
INTAKE = ROOT / ".omx" / "cc2" / "issue-parity-intake.json"
# Issue numbers that must each appear exactly once: the latest open range
# 3028-3038 plus the named pre-existing issues.
REQUIRED_ISSUES = set(range(3028, 3039)) | {3007, 3006, 3020, 3005, 3003, 2997, 3023, 3004}
# Lifecycle status vocabulary allowed on intake rows.
ALLOWED_STATUS = {
    "context",
    "active",
    "open",
    "done_verify",
    "stale_done",
    "superseded",
    "deferred_with_rationale",
    "rejected_not_claw",
}
# Release buckets allowed on intake rows.
ALLOWED_BUCKETS = {"alpha_blocker", "beta_adoption", "ga_ecosystem", "post_2_0_research"}
|
||||
|
||||
|
||||
def require(condition: bool, message: str) -> None:
    """Abort the run via ``SystemExit`` with a FAIL-prefixed message unless ``condition`` holds."""
    if condition:
        return
    raise SystemExit(f"FAIL: {message}")
|
||||
|
||||
|
||||
def main() -> None:
    """Validate the intake fragment; ``require`` exits on the first failure."""
    data = json.loads(INTAKE.read_text())
    issue_rows = data.get("issue_clusters", [])
    parity_rows = data.get("parity_rows", [])

    # Exact coverage: required issue numbers, nothing missing, nothing extra,
    # no duplicate rows.
    covered = {row.get("source_number") for row in issue_rows}
    missing = sorted(REQUIRED_ISSUES - covered)
    extra = sorted(covered - REQUIRED_ISSUES)
    require(not missing, f"missing required issue rows: {missing}")
    require(not extra, f"unexpected issue rows in scoped intake: {extra}")
    require(len(issue_rows) == len(REQUIRED_ISSUES), "duplicate or missing issue row count")

    all_rows = issue_rows + parity_rows
    ids = [row.get("id") for row in all_rows]
    require(len(ids) == len(set(ids)), "duplicate ids present")

    mandatory_fields = ["source_anchor", "source_type", "release_bucket", "lifecycle_status", "dependencies", "verification_required"]
    for row in all_rows:
        row_id = row.get("id")
        for field in mandatory_fields:
            require(row.get(field) not in (None, "", []), f"{row_id} missing {field}")
        require(row["release_bucket"] in ALLOWED_BUCKETS, f"{row_id} invalid release_bucket {row['release_bucket']}")
        require(row["lifecycle_status"] in ALLOWED_STATUS, f"{row_id} invalid lifecycle_status {row['lifecycle_status']}")
        # Deferred rows must carry a truthy rationale.
        if row["lifecycle_status"] == "deferred_with_rationale":
            require(row.get("deferral_rationale"), f"{row_id} deferred without rationale")

    require(len(parity_rows) >= data["coverage"]["parity_rows_expected_minimum"], "not enough parity rows")
    print(f"PASS issue/parity intake: {len(issue_rows)} issue rows, {len(parity_rows)} parity rows")


if __name__ == "__main__":
    main()
|
||||
@@ -1,583 +0,0 @@
|
||||
G010 final leader verification rerun started 2026-05-15T02:19:36Z
|
||||
== artifact checklist ==
|
||||
PASS docs/g010-clone-disambiguation-metadata.md exists
|
||||
PASS docs/g010-session-hygiene-verification-map.md exists
|
||||
.claw/sessions/example.jsonl
|
||||
rust/.claw/sessions/example.jsonl
|
||||
.claude/sessions/example.json
|
||||
== fmt ==
|
||||
== runtime session_control retry ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.13s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 15 tests
|
||||
test session_control::tests::latest_session_prefers_semantic_updated_at_over_file_mtime ... ok
|
||||
test session_control::tests::session_store_from_cwd_canonicalizes_equivalent_paths ... ok
|
||||
test session_control::tests::session_store_fork_stays_in_same_namespace ... ok
|
||||
test session_control::tests::session_exists_and_delete_are_scoped_to_workspace_store ... ok
|
||||
test session_control::tests::resolves_latest_alias_and_loads_session_from_workspace_root ... ok
|
||||
test session_control::tests::forks_session_into_managed_storage_with_lineage ... ok
|
||||
test session_control::tests::workspace_fingerprint_is_deterministic_and_differs_per_path ... ok
|
||||
test session_control::tests::session_store_create_and_load_round_trip ... ok
|
||||
test session_control::tests::session_store_from_cwd_isolates_sessions_by_workspace ... ok
|
||||
test session_control::tests::creates_and_lists_managed_sessions ... ok
|
||||
test session_control::tests::session_store_from_data_dir_namespaces_by_workspace ... ok
|
||||
test session_control::tests::session_store_latest_and_resolve_reference ... ok
|
||||
test session_control::tests::session_store_loads_safe_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_rejects_legacy_session_from_other_workspace ... ok
|
||||
test session_control::tests::session_store_loads_unbound_legacy_session_from_same_workspace ... ok
|
||||
|
||||
test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 542 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime jsonl safeguards ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 1 test
|
||||
test session::tests::jsonl_persistence_redacts_and_truncates_oversized_payload_fields ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 556 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime compact ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 17 tests
|
||||
test compact::tests::compaction_does_not_split_tool_use_tool_result_pair ... ok
|
||||
test compact::tests::formats_compact_summary_like_upstream ... ok
|
||||
test compact::tests::ignores_existing_compacted_summary_when_deciding_to_recompact ... ok
|
||||
test compact::tests::infers_pending_work_from_recent_messages ... ok
|
||||
test compact::tests::extracts_key_files_from_message_content ... ok
|
||||
test compact::tests::leaves_small_sessions_unchanged ... ok
|
||||
test compact::tests::truncates_long_blocks_in_summary ... ok
|
||||
test conversation::tests::auto_compaction_threshold_defaults_and_parses_values ... ok
|
||||
test compact::tests::compacts_older_messages_into_a_system_summary ... ok
|
||||
test conversation::tests::compaction_health_probe_skips_empty_compacted_session ... ok
|
||||
test conversation::tests::compaction_health_probe_blocks_turn_when_tool_executor_is_broken ... ok
|
||||
test conversation::tests::auto_compacts_when_cumulative_input_threshold_is_crossed ... ok
|
||||
test conversation::tests::skips_auto_compaction_below_threshold ... ok
|
||||
test prompt::tests::displays_context_paths_compactly ... ok
|
||||
test conversation::tests::compacts_session_after_turns ... ok
|
||||
test compact::tests::keeps_previous_compacted_context_when_compacting_again ... ok
|
||||
test session::tests::persists_compaction_metadata ... ok
|
||||
|
||||
test result: ok. 17 passed; 0 failed; 0 ignored; 0 measured; 540 filtered out; finished in 0.01s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== commands parses_supported_slash_commands ==
|
||||
Compiling commands v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/commands)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 2.34s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/commands-0104b50ff2e54ccc)
|
||||
|
||||
running 1 test
|
||||
test tests::parses_supported_slash_commands ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 41 filtered out; finished in 0.00s
|
||||
|
||||
== commands compacts_sessions_via_slash_command ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/commands-0104b50ff2e54ccc)
|
||||
|
||||
running 1 test
|
||||
test tests::compacts_sessions_via_slash_command ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 41 filtered out; finished in 0.00s
|
||||
|
||||
== cli session json contracts ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 3.47s
|
||||
Running unittests src/main.rs (rust/target/debug/deps/claw-f425f0b21e915b27)
|
||||
|
||||
running 1 test
|
||||
test tests::session_exists_resume_command_reports_json_contract ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 193 filtered out; finished in 0.00s
|
||||
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 2.76s
|
||||
Running unittests src/main.rs (rust/target/debug/deps/claw-f425f0b21e915b27)
|
||||
|
||||
running 1 test
|
||||
test tests::resumed_session_exists_and_delete_have_json_contracts ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 193 filtered out; finished in 0.01s
|
||||
|
||||
== cli resume slash commands ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 4.23s
|
||||
Running tests/resume_slash_commands.rs (rust/target/debug/deps/resume_slash_commands-6c1fb347be3842ef)
|
||||
|
||||
running 12 tests
|
||||
test resumed_stub_command_emits_not_implemented_json ... ok
|
||||
test resumed_help_command_emits_structured_json ... ok
|
||||
test resumed_no_command_emits_restored_json ... ok
|
||||
test resumed_sandbox_command_emits_structured_json_when_requested ... ok
|
||||
test resumed_export_command_emits_structured_json ... ok
|
||||
test resumed_config_command_loads_settings_files_end_to_end ... ok
|
||||
test resumed_binary_accepts_slash_commands_with_arguments ... ok
|
||||
test resumed_version_command_emits_structured_json ... ok
|
||||
test resumed_status_surfaces_persisted_model ... ok
|
||||
test resume_latest_restores_the_most_recent_managed_session ... ok
|
||||
test status_command_applies_cli_flags_end_to_end ... ok
|
||||
test resumed_status_command_emits_structured_json_when_requested ... ok
|
||||
|
||||
test result: ok. 12 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 2.25s
|
||||
|
||||
== cli compact output ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 2.72s
|
||||
Running tests/compact_output.rs (rust/target/debug/deps/compact_output-988ab05f11fedc49)
|
||||
|
||||
running 4 tests
|
||||
test compact_flag_with_json_output_emits_structured_json ... ok
|
||||
test compact_flag_streaming_text_only_emits_final_message_text ... ok
|
||||
test compact_flag_prints_only_final_assistant_text_without_tool_call_details ... ok
|
||||
test text_prompt_mode_prints_final_assistant_text_after_spinner ... ok
|
||||
|
||||
test result: ok. 4 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 2.14s
|
||||
|
||||
== workspace check ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.49s
|
||||
== diff check ==
|
||||
G010 final leader verification rerun completed 2026-05-15T02:20:06Z
|
||||
@@ -1,644 +0,0 @@
|
||||
G010 final leader verification started 2026-05-15T02:17:45Z
|
||||
== artifact checklist ==
|
||||
PASS docs/g010-clone-disambiguation-metadata.md exists
|
||||
PASS docs/g010-session-hygiene-verification-map.md exists
|
||||
.gitignore:.claw/sessions/
|
||||
rust/.gitignore:.claw/sessions/
|
||||
.claw/sessions/example.jsonl
|
||||
rust/.claw/sessions/example.jsonl
|
||||
.claude/sessions/example.json
|
||||
== fmt ==
|
||||
== runtime session_control ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.14s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 15 tests
|
||||
test session_control::tests::latest_session_prefers_semantic_updated_at_over_file_mtime ... ok
|
||||
test session_control::tests::session_store_from_cwd_canonicalizes_equivalent_paths ... ok
|
||||
|
||||
thread 'session_control::tests::session_store_fork_stays_in_same_namespace' (403821665) panicked at crates/runtime/src/session_control.rs:775:14:
|
||||
session should persist: Io(Os { code: 2, kind: NotFound, message: "No such file or directory" })
|
||||
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
|
||||
test session_control::tests::session_exists_and_delete_are_scoped_to_workspace_store ... ok
|
||||
test session_control::tests::forks_session_into_managed_storage_with_lineage ... ok
|
||||
test session_control::tests::creates_and_lists_managed_sessions ... ok
|
||||
test session_control::tests::session_store_from_cwd_isolates_sessions_by_workspace ... ok
|
||||
test session_control::tests::session_store_latest_and_resolve_reference ... ok
|
||||
test session_control::tests::session_store_loads_safe_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::workspace_fingerprint_is_deterministic_and_differs_per_path ... ok
|
||||
test session_control::tests::session_store_create_and_load_round_trip ... ok
|
||||
test session_control::tests::session_store_fork_stays_in_same_namespace ... FAILED
|
||||
test session_control::tests::session_store_loads_unbound_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_from_data_dir_namespaces_by_workspace ... ok
|
||||
test session_control::tests::session_store_rejects_legacy_session_from_other_workspace ... ok
|
||||
test session_control::tests::resolves_latest_alias_and_loads_session_from_workspace_root ... ok
|
||||
|
||||
failures:
|
||||
|
||||
failures:
|
||||
session_control::tests::session_store_fork_stays_in_same_namespace
|
||||
|
||||
test result: FAILED. 14 passed; 1 failed; 0 ignored; 0 measured; 542 filtered out; finished in 0.03s
|
||||
|
||||
error: test failed, to rerun pass `-p runtime --lib`
|
||||
== runtime jsonl safeguards ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.13s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 1 test
|
||||
test session::tests::jsonl_persistence_redacts_and_truncates_oversized_payload_fields ... ok
|
||||
|
||||
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 556 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime compact ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.13s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 17 tests
|
||||
test compact::tests::formats_compact_summary_like_upstream ... ok
|
||||
test compact::tests::truncates_long_blocks_in_summary ... ok
|
||||
test compact::tests::ignores_existing_compacted_summary_when_deciding_to_recompact ... ok
|
||||
test compact::tests::infers_pending_work_from_recent_messages ... ok
|
||||
test compact::tests::compaction_does_not_split_tool_use_tool_result_pair ... ok
|
||||
test compact::tests::leaves_small_sessions_unchanged ... ok
|
||||
test conversation::tests::auto_compaction_threshold_defaults_and_parses_values ... ok
|
||||
test compact::tests::extracts_key_files_from_message_content ... ok
|
||||
test compact::tests::compacts_older_messages_into_a_system_summary ... ok
|
||||
test prompt::tests::displays_context_paths_compactly ... ok
|
||||
test conversation::tests::skips_auto_compaction_below_threshold ... ok
|
||||
test conversation::tests::compaction_health_probe_skips_empty_compacted_session ... ok
|
||||
test conversation::tests::compacts_session_after_turns ... ok
|
||||
test conversation::tests::compaction_health_probe_blocks_turn_when_tool_executor_is_broken ... ok
|
||||
test conversation::tests::auto_compacts_when_cumulative_input_threshold_is_crossed ... ok
|
||||
test compact::tests::keeps_previous_compacted_context_when_compacting_again ... ok
|
||||
test session::tests::persists_compaction_metadata ... ok
|
||||
|
||||
test result: ok. 17 passed; 0 failed; 0 ignored; 0 measured; 540 filtered out; finished in 0.01s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== commands session/compact slash ==
|
||||
error: unexpected argument 'compacts_sessions_via_slash_command' found
|
||||
|
||||
Usage: cargo test [OPTIONS] [TESTNAME] [-- [ARGS]...]
|
||||
|
||||
For more information, try '--help'.
|
||||
== cli session json contracts ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw" test) due to 1 previous error
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw" test) due to 1 previous error
|
||||
== cli resume slash commands ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== cli compact output ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== workspace check ==
|
||||
Checking runtime v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/runtime)
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Checking api v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/api)
|
||||
Checking commands v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/commands)
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Checking tools v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/tools)
|
||||
Checking mock-anthropic-service v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/mock-anthropic-service)
|
||||
Checking compat-harness v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/compat-harness)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3823:11
|
||||
|
|
||||
3823 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: Some(_), target: None }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4200 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4201 ~ &SlashCommand::Session { action: Some(_), target: None } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== diff check ==
|
||||
G010 final leader verification completed 2026-05-15T02:18:11Z
|
||||
@@ -1,321 +0,0 @@
|
||||
== fmt ==
|
||||
== runtime session_control ==
|
||||
Compiling runtime v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/runtime)
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 10.29s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 15 tests
|
||||
test session_control::tests::latest_session_prefers_semantic_updated_at_over_file_mtime ... ok
|
||||
test session_control::tests::session_store_from_cwd_canonicalizes_equivalent_paths ... ok
|
||||
test session_control::tests::session_store_create_and_load_round_trip ... ok
|
||||
test session_control::tests::session_exists_and_delete_are_scoped_to_workspace_store ... ok
|
||||
test session_control::tests::forks_session_into_managed_storage_with_lineage ... ok
|
||||
test session_control::tests::workspace_fingerprint_is_deterministic_and_differs_per_path ... ok
|
||||
test session_control::tests::session_store_from_cwd_isolates_sessions_by_workspace ... ok
|
||||
test session_control::tests::creates_and_lists_managed_sessions ... ok
|
||||
test session_control::tests::session_store_fork_stays_in_same_namespace ... ok
|
||||
test session_control::tests::session_store_from_data_dir_namespaces_by_workspace ... ok
|
||||
test session_control::tests::session_store_latest_and_resolve_reference ... ok
|
||||
test session_control::tests::session_store_loads_safe_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_loads_unbound_legacy_session_from_same_workspace ... ok
|
||||
test session_control::tests::session_store_rejects_legacy_session_from_other_workspace ... ok
|
||||
test session_control::tests::resolves_latest_alias_and_loads_session_from_workspace_root ... ok
|
||||
|
||||
test result: ok. 15 passed; 0 failed; 0 ignored; 0 measured; 542 filtered out; finished in 0.02s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime session jsonl/bloat ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.18s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 8 tests
|
||||
test session::tests::rejects_jsonl_record_with_unknown_type ... ok
|
||||
test session::tests::rejects_jsonl_message_record_without_message_payload ... ok
|
||||
test session::tests::rejects_jsonl_record_without_type ... ok
|
||||
test session::tests::persists_assistant_thinking_block_round_trip_through_jsonl ... ok
|
||||
test session::tests::persists_and_restores_session_jsonl ... ok
|
||||
test conversation::tests::persists_conversation_turn_messages_to_jsonl_session ... ok
|
||||
test session::tests::appends_messages_to_persisted_jsonl_session ... ok
|
||||
test session::tests::jsonl_persistence_redacts_and_truncates_oversized_payload_fields ... ok
|
||||
|
||||
test result: ok. 8 passed; 0 failed; 0 ignored; 0 measured; 549 filtered out; finished in 0.04s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== runtime compact ==
|
||||
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running unittests src/lib.rs (rust/target/debug/deps/runtime-0e7d3d46ae40aa07)
|
||||
|
||||
running 17 tests
|
||||
test compact::tests::formats_compact_summary_like_upstream ... ok
|
||||
test compact::tests::ignores_existing_compacted_summary_when_deciding_to_recompact ... ok
|
||||
test compact::tests::compaction_does_not_split_tool_use_tool_result_pair ... ok
|
||||
test compact::tests::leaves_small_sessions_unchanged ... ok
|
||||
test compact::tests::infers_pending_work_from_recent_messages ... ok
|
||||
test compact::tests::truncates_long_blocks_in_summary ... ok
|
||||
test conversation::tests::auto_compaction_threshold_defaults_and_parses_values ... ok
|
||||
test compact::tests::extracts_key_files_from_message_content ... ok
|
||||
test compact::tests::compacts_older_messages_into_a_system_summary ... ok
|
||||
test conversation::tests::compaction_health_probe_blocks_turn_when_tool_executor_is_broken ... ok
|
||||
test conversation::tests::skips_auto_compaction_below_threshold ... ok
|
||||
test conversation::tests::auto_compacts_when_cumulative_input_threshold_is_crossed ... ok
|
||||
test conversation::tests::compaction_health_probe_skips_empty_compacted_session ... ok
|
||||
test conversation::tests::compacts_session_after_turns ... ok
|
||||
test prompt::tests::displays_context_paths_compactly ... ok
|
||||
test compact::tests::keeps_previous_compacted_context_when_compacting_again ... ok
|
||||
test session::tests::persists_compaction_metadata ... ok
|
||||
|
||||
test result: ok. 17 passed; 0 failed; 0 ignored; 0 measured; 540 filtered out; finished in 0.01s
|
||||
|
||||
Running tests/g004_conformance.rs (rust/target/debug/deps/g004_conformance-90f36d1f871b6313)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s
|
||||
|
||||
Running tests/integration_tests.rs (rust/target/debug/deps/integration_tests-526d4f853fc590de)
|
||||
|
||||
running 0 tests
|
||||
|
||||
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s
|
||||
|
||||
== cli resume_slash_commands ==
|
||||
Compiling runtime v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/runtime)
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
Compiling api v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/api)
|
||||
Compiling commands v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/commands)
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Compiling tools v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/tools)
|
||||
Compiling mock-anthropic-service v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/mock-anthropic-service)
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling compat-harness v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/compat-harness)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: None, .. }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3794:11
|
||||
|
|
||||
3794 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: None, .. }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4197 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4198 ~ &SlashCommand::Session { action: None, .. } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== cli compact_output ==
|
||||
warning: enum `ProviderWireProtocol` is never used
|
||||
--> crates/api/src/providers/mod.rs:54:10
|
||||
|
|
||||
54 | pub enum ProviderWireProtocol {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
|
||||
= note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
|
||||
|
||||
warning: enum `ProviderFeatureSupport` is never used
|
||||
--> crates/api/src/providers/mod.rs:61:10
|
||||
|
|
||||
61 | pub enum ProviderFeatureSupport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderCapabilityReport` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:68:12
|
||||
|
|
||||
68 | pub struct ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: enum `ProviderDiagnosticSeverity` is never used
|
||||
--> crates/api/src/providers/mod.rs:88:10
|
||||
|
|
||||
88 | pub enum ProviderDiagnosticSeverity {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: struct `ProviderDiagnostic` is never constructed
|
||||
--> crates/api/src/providers/mod.rs:94:12
|
||||
|
|
||||
94 | pub struct ProviderDiagnostic {
|
||||
| ^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_capabilities_for_model` is never used
|
||||
--> crates/api/src/providers/mod.rs:384:8
|
||||
|
|
||||
384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_diagnostics_for_request` is never used
|
||||
--> crates/api/src/providers/mod.rs:452:8
|
||||
|
|
||||
452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `metadata_for_provider_kind` is never used
|
||||
--> crates/api/src/providers/mod.rs:517:4
|
||||
|
|
||||
517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `provider_label` is never used
|
||||
--> crates/api/src/providers/mod.rs:541:10
|
||||
|
|
||||
541 | const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
| ^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `has_openai_tuning_parameters` is never used
|
||||
--> crates/api/src/providers/mod.rs:550:4
|
||||
|
|
||||
550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `declares_tool` is never used
|
||||
--> crates/api/src/providers/mod.rs:558:4
|
||||
|
|
||||
558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
| ^^^^^^^^^^^^^
|
||||
|
||||
warning: function `web_passthrough_diagnostic` is never used
|
||||
--> crates/api/src/providers/mod.rs:567:4
|
||||
|
|
||||
567 | fn web_passthrough_diagnostic(
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: function `strip_routing_prefix` is never used
|
||||
--> crates/api/src/providers/openai_compat.rs:901:4
|
||||
|
|
||||
901 | fn strip_routing_prefix(model: &str) -> &str {
|
||||
| ^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
warning: `api` (lib) generated 13 warnings
|
||||
Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)
|
||||
error[E0004]: non-exhaustive patterns: `&SlashCommand::Session { action: None, .. }` not covered
|
||||
--> crates/rusty-claude-cli/src/main.rs:3794:11
|
||||
|
|
||||
3794 | match command {
|
||||
| ^^^^^^^ pattern `&SlashCommand::Session { action: None, .. }` not covered
|
||||
|
|
||||
note: `SlashCommand` defined here
|
||||
--> crates/commands/src/lib.rs:1040:1
|
||||
|
|
||||
1040 | pub enum SlashCommand {
|
||||
| ^^^^^^^^^^^^^^^^^^^^^
|
||||
...
|
||||
1089 | Session {
|
||||
| ------- not covered
|
||||
= note: the matched value is of type `&SlashCommand`
|
||||
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
||||
|
|
||||
4197 ~ | SlashCommand::AddDir { .. } => Err("unsupported resumed slash command".into()),
|
||||
4198 ~ &SlashCommand::Session { action: None, .. } => todo!(),
|
||||
|
|
||||
|
||||
For more information about this error, try `rustc --explain E0004`.
|
||||
error: could not compile `rusty-claude-cli` (bin "claw") due to 1 previous error
|
||||
== diff check ==
|
||||
@@ -1 +0,0 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4536320,"timeUsedSeconds":13975,"createdAt":1778745278,"updatedAt":1778810208},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -1 +0,0 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4747486,"timeUsedSeconds":14669,"createdAt":1778745278,"updatedAt":1778810902},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -1 +0,0 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4771357,"timeUsedSeconds":14733,"createdAt":1778745278,"updatedAt":1778810966},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -1 +0,0 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":4726793,"timeUsedSeconds":14653,"createdAt":1778745278,"updatedAt":1778810885},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -1 +0,0 @@
|
||||
{"goal":{"threadId":"019e2560-a38d-7282-bb33-58c944cdcbc9","objective":"Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan.","status":"active","tokensUsed":5024990,"timeUsedSeconds":15387,"createdAt":1778745278,"updatedAt":1778811620},"remainingTokens":null,"completionBudgetReport":null}
|
||||
@@ -1,154 +0,0 @@
|
||||
{
|
||||
"version": 1,
|
||||
"createdAt": "2026-05-14T07:53:46.061Z",
|
||||
"updatedAt": "2026-05-15T04:38:54.887Z",
|
||||
"briefPath": ".omx/ultragoal/brief.md",
|
||||
"goalsPath": ".omx/ultragoal/goals.json",
|
||||
"ledgerPath": ".omx/ultragoal/ledger.jsonl",
|
||||
"codexGoalMode": "aggregate",
|
||||
"goals": [
|
||||
{
|
||||
"id": "G001-stream0-board",
|
||||
"title": "Stream 0: Generate canonical CC2 board",
|
||||
"objective": "Generate the canonical Claw Code 2.0 board from frozen ROADMAP.md, latest issue snapshot, parity evidence, and approved plan. Classify every actionable roadmap item and context heading with source_anchor, source_type, release_bucket, lifecycle status, dependencies, verification_required, and deferral rationale. Emit machine JSON plus human markdown.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T08:14:23.206Z",
|
||||
"startedAt": "2026-05-14T07:54:26.032Z",
|
||||
"completedAt": "2026-05-14T08:14:23.206Z",
|
||||
"evidence": "G001-stream0-board complete via team ultragoal-g001-stream-e61d2271: team status phase=team-verify, tasks 5/5 completed; worker-2 produced issue/parity intake, worker-3 produced board Markdown/rendering, worker-4 recorded validation evidence, worker-1 completed initial board artifacts. Leader reconciliation commit 45b43b5 aligned scripts/generate_cc2_board.py, scripts/validate_cc2_board.py, scripts/cc2_board.py, .omx/cc2/render_board_md.py. Evidence artifacts: .omx/cc2/board.json, .omx/cc2/board.md, .omx/cc2/issue-parity-intake.json, .omx/cc2/issue-parity-intake.md; .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl remain leader-owned. Verification passed: python3 scripts/generate_cc2_board.py; python3 scripts/validate_cc2_board.py; python3 scripts/cc2_board.py validate; python3 .omx/cc2/validate_issue_parity_intake.py; python3 .omx/cc2/render_board_md.py .omx/cc2/board.json .omx/cc2/board.md --check; python3 -m py_compile scripts/generate_cc2_board.py scripts/validate_cc2_board.py scripts/cc2_board.py .omx/cc2/validate_issue_parity_intake.py .omx/cc2/render_board_md.py; cargo check --manifest-path rust/Cargo.toml --workspace."
|
||||
},
|
||||
{
|
||||
"id": "G002-alpha-security",
|
||||
"title": "Stream 6: Day-one security and permissions gate",
|
||||
"objective": "Implement/verify alpha-blocking security scope: file tools and shell enforce workspace/path scope across direct paths, symlinks, globbing, shell expansion, worktrees, and Windows path cases. Add regression fixtures for #3007 class behavior and permission-mode event/status visibility.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T08:34:04.243Z",
|
||||
"startedAt": "2026-05-14T08:14:46.422Z",
|
||||
"completedAt": "2026-05-14T08:34:04.243Z",
|
||||
"evidence": "G002-alpha-security team ultragoal-g002-alpha-e61d2271 reached phase=complete with 5/5 tasks completed and no worker .omx/ultragoal mutation. Integrated commits through 37b2b75 on main: workspace/path enforcement in rust/crates/runtime/src/file_ops.rs, rust/crates/runtime/src/lib.rs, rust/crates/tools/src/lib.rs, regressions in rust/crates/tools/tests/path_scope_enforcement.rs and rust/crates/rusty-claude-cli/tests/output_format_contract.rs, verification map docs/g002-security-verification-map.md. Fresh leader validation passed: git diff --check; cargo fmt --manifest-path rust/Cargo.toml --all -- --check; cargo test --manifest-path rust/Cargo.toml -p tools path_scope -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p tools --test path_scope_enforcement -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime workspace_ -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract -- --nocapture; python3 -m pytest tests/test_security_scope.py -q; cargo check --manifest-path rust/Cargo.toml --workspace. .omx/ultragoal artifacts retained as leader-owned durable audit trail; fresh get_goal JSON captured at .omx/ultragoal/get-goal-G002-alpha-security.json. Known unrelated non-gating gaps from worker verification: full cargo test --workspace has pre-existing session_lifecycle_prefers_running_process_over_idle_shell failure; clippy all-targets has pre-existing runtime lint warnings."
|
||||
},
|
||||
{
|
||||
"id": "G003-boot-session",
|
||||
"title": "Stream 1: Reliable worker boot/session control",
|
||||
"objective": "Implement/verify worker lifecycle, first prompt acceptance SLA, startup-no-evidence classifier, trust resolver/default trusted roots, structured session control API, and boot preflight/doctor JSON contracts.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T08:54:40.729Z",
|
||||
"startedAt": "2026-05-14T08:34:19.605Z",
|
||||
"completedAt": "2026-05-14T08:54:40.729Z",
|
||||
"evidence": "G003-boot-session team g003-boot-session-ult-e61d2271 reached phase=complete with 5/5 tasks completed and no worker .omx/ultragoal mutation. Implemented/verified Stream 1 reliable worker boot/session control: worker lifecycle/prompt SLA and path guardrails, default trusted roots merge via runtime config and WorkerCreate, startup-no-evidence evidence/classifier timestamp coverage, structured boot preflight/status/doctor JSON, and docs/g003-boot-session-verification-map.md. Integrated/pushed through origin/main aec291c. Final leader validation passed: git diff --check; cargo fmt --manifest-path rust/Cargo.toml --all -- --check; cargo test --manifest-path rust/Cargo.toml -p runtime trusted_roots -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime trust_resolver -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime startup -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p runtime worker_boot -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p tools worker_create_merges_config_trusted_roots -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p tools path_scope -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli boot_preflight -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli branch_freshness -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli status_json_surfaces_session_lifecycle_for_clawhip -- --nocapture; cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract -- --nocapture; cargo check --manifest-path rust/Cargo.toml --workspace; python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json; python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json. Fresh get_goal JSON captured at .omx/ultragoal/get-goal-G003-boot-session.complete.json and .omx/ultragoal goals/ledger remain leader-owned audit artifacts. 
Known non-gating gaps from worker clippy attempts are pre-existing unrelated runtime clippy warnings and full workspace tests remain deferred to final gates."
|
||||
},
|
||||
{
|
||||
"id": "G004-events-reports",
|
||||
"title": "Stream 2: Event/report contract families",
|
||||
"objective": "Implement/verify canonical lane events, ordering/provenance/identity/dedupe/ownership, report schema/projection/redaction/capability negotiation, approval-token chain, and pinpoint closure batches with golden fixtures.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T09:15:44.223Z",
|
||||
"startedAt": "2026-05-14T08:54:55.093Z",
|
||||
"completedAt": "2026-05-14T09:15:44.223Z",
|
||||
"evidence": "G004-events-reports complete: team g004-events-reports-u-e61d2271 phase complete with 7/7 tasks completed; pushed main through 879962b; leader verification passed cargo fmt --manifest-path rust/Cargo.toml --all -- --check, cargo check --manifest-path rust/Cargo.toml -p runtime, cargo test --manifest-path rust/Cargo.toml -p runtime -- --nocapture (535 unit + g004_conformance 2 + integration 12 + doctests), python3 .github/scripts/check_doc_source_of_truth.py; evidence recorded against .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl"
|
||||
},
|
||||
{
|
||||
"id": "G005-branch-recovery",
|
||||
"title": "Stream 3: Branch/test awareness and recovery",
|
||||
"objective": "Implement/verify stale branch detection before broad tests, recovery recipes and ledger, green-ness contract, test provenance, hung-test classification, and recovery/status reporting.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-14T12:41:48.997Z",
|
||||
"startedAt": "2026-05-14T09:16:01.781Z",
|
||||
"completedAt": "2026-05-14T12:41:48.997Z",
|
||||
"evidence": "G005-branch-recovery complete and pushed at 7426ede; team g005-branch-recovery-e61d2271 has 5/5 tasks completed; leader verification passed for branch freshness before broad tests, recovery ledger/status reporting, green-ness contract/test provenance, stale-base doctor/status consistency, hung-test classification, and docs/g005-branch-recovery-verification-map.md. Evidence recorded against .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G006-task-policy-board",
|
||||
"title": "Stream 4: Task packets, policy engine, lane board",
|
||||
"objective": "Implement/verify typed task packet schema, executable policy engine, active lane board/dashboard, running-state liveness heartbeat, and task/lane status JSON.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T00:42:05.094Z",
|
||||
"startedAt": "2026-05-14T12:41:57.815Z",
|
||||
"completedAt": "2026-05-15T00:42:05.094Z",
|
||||
"evidence": "G006-task-policy-board complete in pushed origin/main commit 65a144c; team g006-task-policy-boar-e61d2271 terminal with 5 completed/0 failed after leader reconciliation; verification map docs/g006-task-policy-board-verification-map.md plus quality gate JSON record cargo fmt/check/tests/diff/push; .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl preserved; workers did not mutate .omx/ultragoal."
|
||||
},
|
||||
{
|
||||
"id": "G007-plugin-mcp",
|
||||
"title": "Stream 5: Plugin/MCP lifecycle maturity",
|
||||
"objective": "Implement/verify plugin/MCP lifecycle states, healthy/degraded/failed startup, required vs optional behavior, malformed config consistency across status/doctor/mcp/plugins, and mock MCP/plugin tests.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T01:16:43.414Z",
|
||||
"startedAt": "2026-05-15T00:42:16.309Z",
|
||||
"completedAt": "2026-05-15T01:16:43.414Z",
|
||||
"evidence": "G007-plugin-mcp complete: team g007-plugin-mcp-ultra-e61d2271 phase complete with 13/13 tasks completed, verification passed, pushed head 2202410, and durable ultragoal artifacts updated in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G008-provider-compat",
|
||||
"title": "Stream 7: Provider/model compatibility",
|
||||
"objective": "Implement/verify OpenAI-compatible slash-containing model IDs, provider prefix routing over env sniffing, DeepSeek/reasoning diagnostics, web search/fetch behavior, proxy/custom parameter passthrough, token/cost accounting, and provider diagnostics.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T01:38:22.717Z",
|
||||
"startedAt": "2026-05-15T01:17:53.783Z",
|
||||
"completedAt": "2026-05-15T01:38:22.717Z",
|
||||
"evidence": "G008-provider-compat complete: team g008-provider-compat-e61d2271 phase complete with 5/5 tasks terminal; provider/model compatibility implemented and verified; pushed origin/main 2cac66c..8c9a05e; evidence recorded in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl plus quality gate .omx/ultragoal/quality-gate-G008-provider-compat.json."
|
||||
},
|
||||
{
|
||||
"id": "G009-windows-docs-release",
|
||||
"title": "Stream 8: Windows/install/docs/license readiness",
|
||||
"objective": "Implement/verify PowerShell-first docs, safe provider switching examples, Windows smoke CI, release artifact quickstart, license/contribution/security/support policies, and command/link validation.",
|
||||
"status": "complete",
|
||||
"attempt": 0,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T01:57:41.565Z",
|
||||
"completedAt": "2026-05-15T01:57:41.565Z",
|
||||
"evidence": "G009-windows-docs-release complete at commit 5294648 with team g009-windows-docs-rel-e61d2271 phase complete, 5/5 tasks completed; evidence in .omx/ultragoal/quality-gate-G009-windows-docs-release.json, .omx/ultragoal/get-goal-G009-windows-docs-release.complete.json, .omx/ultragoal/goals.json, and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G010-session-hygiene",
|
||||
"title": "Stream 9: Session hygiene/local state/recovery UX",
|
||||
"objective": "Implement/verify session file hygiene, .gitignore state paths, per-worktree session isolation, list/delete/exists/compact/resume, compact/provider-context recovery, JSONL payload bloat safeguards, interrupt recovery, and clone disambiguation metadata.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T02:20:46.558Z",
|
||||
"startedAt": "2026-05-15T01:59:22.219Z",
|
||||
"completedAt": "2026-05-15T02:20:46.558Z",
|
||||
"evidence": "G010-session-hygiene complete: team g010-session-hygiene-e61d2271 phase complete with 7/7 tasks completed; final verification passed in .omx/ultragoal/g010-final-quality-gate-rerun.log; durable state recorded in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G011-ecosystem-ops-ux",
|
||||
"title": "Streams 10–12: Ecosystem, issue ops, and UX laterals",
|
||||
"objective": "Implement/verify gated ACP/Zed/JSON-RPC serve plan/status, anti-slop issue/PR triage, issue templates, navigation/file-context docs, TUI/rendering/copy/paste/clickable path improvements, and defer desktop/marketplace features until contracts are stable.",
|
||||
"status": "complete",
|
||||
"attempt": 1,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T02:55:26.988Z",
|
||||
"startedAt": "2026-05-15T02:21:31.360Z",
|
||||
"completedAt": "2026-05-15T02:55:26.988Z",
|
||||
"evidence": "G011-ecosystem-ops-ux complete: team g011-ecosystem-ops-ux-e61d2271 phase=complete with 7/7 tasks completed; final pushed HEAD 1ac8ce8; verification evidence in .omx/ultragoal/g011-final-quality-gate.log and .omx/ultragoal/quality-gate-G011-ecosystem-ops-ux.json; ultragoal artifacts tracked in .omx/ultragoal/goals.json and .omx/ultragoal/ledger.jsonl."
|
||||
},
|
||||
{
|
||||
"id": "G012-final-gate",
|
||||
"title": "Final release gate: Verify Claw Code 2.0 delivery",
|
||||
"objective": "Run final cross-stream quality gate: roadmap board has no unmapped actionable items, fmt/clippy/tests and focused contract suites pass, ai-slop-cleaner on changed files passes/no-ops, code-review approves, and final alpha/beta/GA readiness report is written. Final completion is blocked until docs/pr-issue-resolution-gate.md has fresh evidence showing every open PR and issue was triaged, with correct PRs merged and resolvable correct issues fixed or closed.",
|
||||
"status": "complete",
|
||||
"attempt": 0,
|
||||
"createdAt": "2026-05-14T07:54:21.409575Z",
|
||||
"updatedAt": "2026-05-15T04:38:54.887Z",
|
||||
"evidence": "G012-final-gate complete: team g012-final-gate-ultra-e61d2271 8/8 tasks complete; final gate log /tmp/g012-final-quality-gate-pass4.log; commit 04c2abb pushed; docs/pr-triage-g012-final-gate.json docs/pr-issue-resolution-gate.md docs/g012-final-release-readiness-report.md; .omx/ultragoal/goals.json and ledger.jsonl updated; aiSlopCleaner and codeReview evidence included in quality gate JSON.",
|
||||
"completedAt": "2026-05-15T04:38:54.887Z"
|
||||
}
|
||||
],
|
||||
"codexObjective": "Complete the approved Claw Code 2.0 ultragoal delivery: implement all classified ROADMAP.md backlog work through execution-sized stream goals G001-G012, using .omx/ultragoal/ledger.jsonl as the durable audit trail and .omx/plans/claw-code-2-0-adaptive-plan.md as the source plan."
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"goal_id": "G009-windows-docs-release",
|
||||
"timestamp_utc": "2026-05-15T01:57:16Z",
|
||||
"commit": "a3af0133e0cf8d529465950ada88623e3cf3b3f2",
|
||||
"team": "g009-windows-docs-rel-e61d2271",
|
||||
"team_phase": "complete",
|
||||
"tasks": "5/5 completed",
|
||||
"verification": {
|
||||
"release_readiness": "passed",
|
||||
"doc_source_of_truth": "passed",
|
||||
"cargo_fmt": "passed",
|
||||
"targeted_windows_no_credentials_smoke_test": "passed",
|
||||
"cargo_check_workspace": "passed with existing api dead_code warnings",
|
||||
"git_diff_check": "passed",
|
||||
"coverage_check": "passed"
|
||||
},
|
||||
"known_gaps": [
|
||||
{
|
||||
"scope": "actual GitHub windows-latest execution",
|
||||
"status": "not run locally"
|
||||
},
|
||||
{
|
||||
"scope": "full cargo test --workspace",
|
||||
"status": "known pre-existing unrelated CLI failures reported by workers; targeted changed-surface tests pass"
|
||||
}
|
||||
],
|
||||
"artifacts": [
|
||||
".github/workflows/rust-ci.yml",
|
||||
".github/workflows/release.yml",
|
||||
"docs/windows-install-release.md",
|
||||
"docs/g009-windows-docs-release-verification-map.md",
|
||||
"LICENSE",
|
||||
"CONTRIBUTING.md",
|
||||
"SECURITY.md",
|
||||
"SUPPORT.md",
|
||||
"CODE_OF_CONDUCT.md",
|
||||
".github/scripts/check_release_readiness.py",
|
||||
"/tmp/g009-final-verify.log"
|
||||
],
|
||||
"git_status": "## main...origin/main [ahead 13]",
|
||||
"log_tail": " | ^^^^^^^^^^^^^^^^^^^^\n |\n = note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default\n\nwarning: enum `ProviderFeatureSupport` is never used\n --> crates/api/src/providers/mod.rs:61:10\n |\n61 | pub enum ProviderFeatureSupport {\n | ^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: struct `ProviderCapabilityReport` is never constructed\n --> crates/api/src/providers/mod.rs:68:12\n |\n68 | pub struct ProviderCapabilityReport {\n | ^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: enum `ProviderDiagnosticSeverity` is never used\n --> crates/api/src/providers/mod.rs:88:10\n |\n88 | pub enum ProviderDiagnosticSeverity {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: struct `ProviderDiagnostic` is never constructed\n --> crates/api/src/providers/mod.rs:94:12\n |\n94 | pub struct ProviderDiagnostic {\n | ^^^^^^^^^^^^^^^^^^\n\nwarning: function `provider_capabilities_for_model` is never used\n --> crates/api/src/providers/mod.rs:384:8\n |\n384 | pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `provider_diagnostics_for_request` is never used\n --> crates/api/src/providers/mod.rs:452:8\n |\n452 | pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `metadata_for_provider_kind` is never used\n --> crates/api/src/providers/mod.rs:517:4\n |\n517 | fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `provider_label` is never used\n --> crates/api/src/providers/mod.rs:541:10\n |\n541 | const fn provider_label(provider: ProviderKind) -> &'static str {\n | ^^^^^^^^^^^^^^\n\nwarning: function `has_openai_tuning_parameters` is never used\n --> crates/api/src/providers/mod.rs:550:4\n |\n550 | fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function 
`declares_tool` is never used\n --> crates/api/src/providers/mod.rs:558:4\n |\n558 | fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {\n | ^^^^^^^^^^^^^\n\nwarning: function `web_passthrough_diagnostic` is never used\n --> crates/api/src/providers/mod.rs:567:4\n |\n567 | fn web_passthrough_diagnostic(\n | ^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nwarning: function `strip_routing_prefix` is never used\n --> crates/api/src/providers/openai_compat.rs:901:4\n |\n901 | fn strip_routing_prefix(model: &str) -> &str {\n | ^^^^^^^^^^^^^^^^^^^^\n\nwarning: `api` (lib) generated 13 warnings\n Compiling rusty-claude-cli v0.1.0 (/Users/bellman/Documents/Workspace/claw-code/rust/crates/rusty-claude-cli)\n Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.35s\nG009 coverage check passed"
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
{
|
||||
"goal_id": "G010-session-hygiene",
|
||||
"status": "passed",
|
||||
"team": "g010-session-hygiene-e61d2271",
|
||||
"team_phase": "complete",
|
||||
"tasks": {"completed": 7, "failed": 0, "blocked": 0, "pending": 0, "in_progress": 0},
|
||||
"evidence": [
|
||||
".omx/ultragoal/g010-final-quality-gate-rerun.log",
|
||||
"docs/g010-clone-disambiguation-metadata.md",
|
||||
"docs/g010-session-hygiene-verification-map.md",
|
||||
".omx/ultragoal/goals.json",
|
||||
".omx/ultragoal/ledger.jsonl"
|
||||
],
|
||||
"verification_passed": [
|
||||
"cargo fmt --manifest-path rust/Cargo.toml --all -- --check",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p runtime session_control -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p runtime jsonl_persistence_redacts_and_truncates_oversized_payload_fields -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p runtime compact -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p commands parses_supported_slash_commands -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p commands compacts_sessions_via_slash_command -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --bin claw session_exists_resume_command_reports_json_contract -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --bin claw resumed_session_exists_and_delete_have_json_contracts -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test resume_slash_commands -- --nocapture",
|
||||
"cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test compact_output -- --nocapture",
|
||||
"cargo check --manifest-path rust/Cargo.toml --workspace",
|
||||
"git diff --check"
|
||||
],
|
||||
"known_gaps": [
|
||||
"full cargo test --workspace not run for G010",
|
||||
"clippy -D warnings remains blocked by pre-existing unrelated lint debt noted in task 5/task 7 results"
|
||||
]
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"session_id": "b035f648d5b549aa836ea01f6727ec62",
|
||||
"messages": [
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 3,
|
||||
"output_tokens": 13
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"session_id": "b234acb1eb8c486e80544ddc7e13e6d8",
|
||||
"messages": [
|
||||
"review MCP tool",
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 6,
|
||||
"output_tokens": 32
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"session_id": "b67e062748f04e10ac5770df9285e4bd",
|
||||
"messages": [
|
||||
"review MCP tool",
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 6,
|
||||
"output_tokens": 32
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"session_id": "bb88fd20433840a8b19237e3f306c6e3",
|
||||
"messages": [
|
||||
"review MCP tool",
|
||||
"review MCP tool"
|
||||
],
|
||||
"input_tokens": 6,
|
||||
"output_tokens": 32
|
||||
}
|
||||
204
CLAUDE.md
204
CLAUDE.md
@@ -1,21 +1,195 @@
|
||||
# CLAUDE.md
|
||||
# CLAUDE.md — Python Reference Implementation
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
**This file guides work on `src/` and `tests/` — the Python reference harness for claw-code protocol.**
|
||||
|
||||
## Detected stack
|
||||
- Languages: Rust.
|
||||
- Frameworks: none detected from the supported starter markers.
|
||||
The production CLI lives in `rust/`; this directory (`src/`, `tests/`, `.py` files) is a **protocol validation and dogfood surface**.
|
||||
|
||||
## Verification
|
||||
- Run Rust verification from repo root: `scripts/fmt.sh --check`; for formatting use `scripts/fmt.sh`. Run Rust clippy/tests from `rust/`: `cargo clippy --workspace --all-targets -- -D warnings`, `cargo test --workspace`
|
||||
- `src/` and `tests/` are both present; update both surfaces together when behavior changes.
|
||||
## What this Python harness does
|
||||
|
||||
**Machine-first orchestration layer** — proves that the claw-code JSON protocol is:
|
||||
- Deterministic and recoverable (every output is reproducible)
|
||||
- Self-describing (SCHEMAS.md documents every field)
|
||||
- Clawable (external agents can build ONE error handler for all commands)
|
||||
|
||||
## Stack
|
||||
- **Language:** Python 3.13+
|
||||
- **Dependencies:** minimal (no frameworks; pure stdlibs + attrs/dataclasses)
|
||||
- **Test runner:** pytest
|
||||
- **Protocol contract:** SCHEMAS.md (machine-readable JSON envelope)
|
||||
|
||||
## Quick start
|
||||
|
||||
```bash
|
||||
# 1. Install dependencies (if not already in venv)
|
||||
python3 -m venv .venv && source .venv/bin/activate
|
||||
# (dependencies minimal; standard library mostly)
|
||||
|
||||
# 2. Run tests
|
||||
python3 -m pytest tests/ -q
|
||||
|
||||
# 3. Try a command
|
||||
python3 -m src.main bootstrap "hello" --output-format json | python3 -m json.tool
|
||||
```
|
||||
|
||||
## Verification workflow
|
||||
|
||||
```bash
|
||||
# Unit tests (fast)
|
||||
python3 -m pytest tests/ -q 2>&1 | tail -3
|
||||
|
||||
# Type checking (optional but recommended)
|
||||
python3 -m mypy src/ --ignore-missing-imports 2>&1 | tail -5
|
||||
```
|
||||
|
||||
## Repository shape
|
||||
- `rust/` contains the Rust workspace and active CLI/runtime implementation.
|
||||
- `src/` contains source files that should stay consistent with generated guidance and tests.
|
||||
- `tests/` contains validation surfaces that should be reviewed alongside code changes.
|
||||
|
||||
## Working agreement
|
||||
- Prefer small, reviewable changes and keep generated bootstrap files aligned with actual repo workflows.
|
||||
- Keep shared defaults in `.claude.json`; reserve `.claude/settings.local.json` for machine-local overrides.
|
||||
- Do not overwrite existing `CLAUDE.md` content automatically; update it intentionally when repo workflows change.
|
||||
- **`src/`** — Python reference harness implementing SCHEMAS.md protocol
|
||||
- `main.py` — CLI entry point; all 14 clawable commands
|
||||
- `query_engine.py` — core TurnResult / QueryEngineConfig
|
||||
- `runtime.py` — PortRuntime; turn loop + cancellation (#164 Stage A/B)
|
||||
- `session_store.py` — session persistence
|
||||
- `transcript.py` — turn transcript assembly
|
||||
- `commands.py`, `tools.py` — simulated command/tool trees
|
||||
- `models.py` — PermissionDenial, UsageSummary, etc.
|
||||
|
||||
- **`tests/`** — comprehensive protocol validation (22 baseline → 192 passing as of 2026-04-22)
|
||||
- `test_cli_parity_audit.py` — proves all 14 clawable commands accept --output-format
|
||||
- `test_json_envelope_field_consistency.py` — validates SCHEMAS.md contract
|
||||
- `test_cancel_observed_field.py` — #164 Stage B: cancellation observability + safe-to-reuse semantics
|
||||
- `test_run_turn_loop_*.py` — turn loop behavior (timeout, cancellation, continuation, permissions)
|
||||
- `test_submit_message_*.py` — budget, cancellation contracts
|
||||
- `test_*_cli.py` — command-specific JSON output validation
|
||||
|
||||
- **`SCHEMAS.md`** — canonical JSON contract
|
||||
- Common fields (all envelopes): timestamp, command, exit_code, output_format, schema_version
|
||||
- Error envelope shape
|
||||
- Not-found envelope shape
|
||||
- Per-command success schemas (14 commands documented)
|
||||
- Turn Result fields (including cancel_observed as of #164 Stage B)
|
||||
|
||||
- **`.gitignore`** — excludes `.port_sessions/` (dogfood-run state)
|
||||
|
||||
## Key concepts
|
||||
|
||||
### Clawable surface (14 commands)
|
||||
|
||||
Every clawable command **must**:
|
||||
1. Accept `--output-format {text,json}`
|
||||
2. Return JSON envelopes matching SCHEMAS.md
|
||||
3. Use common fields (timestamp, command, exit_code, output_format, schema_version)
|
||||
4. Exit 0 on success, 1 on error/not-found, 2 on timeout
|
||||
|
||||
**Commands:** list-sessions, delete-session, load-session, flush-transcript, show-command, show-tool, exec-command, exec-tool, route, bootstrap, command-graph, tool-pool, bootstrap-graph, turn-loop
|
||||
|
||||
**Validation:** `test_cli_parity_audit.py` auto-tests all 14 for --output-format acceptance.
|
||||
|
||||
### OPT_OUT surfaces (12 commands)
|
||||
|
||||
Explicitly exempt from --output-format requirement (for now):
|
||||
- Rich-Markdown reports: summary, manifest, parity-audit, setup-report
|
||||
- List commands with query filters: subsystems, commands, tools
|
||||
- Simulation/debug: remote-mode, ssh-mode, teleport-mode, direct-connect-mode, deep-link-mode
|
||||
|
||||
**Future work:** audit OPT_OUT surfaces for JSON promotion (post-#164).
|
||||
|
||||
### Protocol layers
|
||||
|
||||
**Coverage (#167–#170):** All clawable commands emit JSON
|
||||
**Enforcement (#171):** Parity CI prevents new commands skipping JSON
|
||||
**Documentation (#172):** SCHEMAS.md locks field contract
|
||||
**Alignment (#173):** Test framework validates docs ↔ code match
|
||||
**Field evolution (#164 Stage B):** cancel_observed proves protocol extensibility
|
||||
|
||||
## Testing & coverage
|
||||
|
||||
### Run full suite
|
||||
```bash
|
||||
python3 -m pytest tests/ -q
|
||||
```
|
||||
|
||||
### Run one test file
|
||||
```bash
|
||||
python3 -m pytest tests/test_cancel_observed_field.py -v
|
||||
```
|
||||
|
||||
### Run one test
|
||||
```bash
|
||||
python3 -m pytest tests/test_cancel_observed_field.py::TestCancelObservedField::test_default_value_is_false -v
|
||||
```
|
||||
|
||||
### Check coverage (optional)
|
||||
```bash
|
||||
python3 -m pip install coverage # if not already installed
|
||||
python3 -m coverage run -m pytest tests/
|
||||
python3 -m coverage report --skip-covered
|
||||
```
|
||||
|
||||
Target: >90% line coverage for src/ (currently ~85%).
|
||||
|
||||
## Common workflows
|
||||
|
||||
### Add a new clawable command
|
||||
|
||||
1. Add parser in `main.py` (argparse)
|
||||
2. Add `--output-format` flag
|
||||
3. Emit JSON envelope using `wrap_json_envelope(data, command_name)`
|
||||
4. Add command to CLAWABLE_SURFACES in test_cli_parity_audit.py
|
||||
5. Document in SCHEMAS.md (schema + example)
|
||||
6. Write test in tests/test_*_cli.py or tests/test_json_envelope_field_consistency.py
|
||||
7. Run full suite to confirm parity
|
||||
|
||||
### Modify TurnResult or protocol fields
|
||||
|
||||
1. Update dataclass in `query_engine.py`
|
||||
2. Update SCHEMAS.md with new field + rationale
|
||||
3. Write test in `tests/test_json_envelope_field_consistency.py` that validates field presence
|
||||
4. Update all places that construct TurnResult (grep for `TurnResult(`)
|
||||
5. Update bootstrap/turn-loop JSON builders in main.py
|
||||
6. Run `tests/` to ensure no regressions
|
||||
|
||||
### Promote an OPT_OUT surface to CLAWABLE
|
||||
|
||||
**Prerequisite:** Real demand signal logged in `OPT_OUT_DEMAND_LOG.md` (threshold: 2+ independent signals per surface). Speculative promotions are not allowed.
|
||||
|
||||
Once demand is evidenced:
|
||||
1. Add --output-format flag to argparse
|
||||
2. Emit wrap_json_envelope() output in JSON path
|
||||
3. Move command from OPT_OUT_SURFACES to CLAWABLE_SURFACES
|
||||
4. Document in SCHEMAS.md
|
||||
5. Write test for JSON output
|
||||
6. Run parity audit to confirm no regressions
|
||||
7. Update `OPT_OUT_DEMAND_LOG.md` to mark signal as resolved
|
||||
|
||||
### File a demand signal (when a claw actually needs JSON from an OPT_OUT surface)
|
||||
|
||||
1. Open `OPT_OUT_DEMAND_LOG.md`
|
||||
2. Find the surface's entry under Group A/B/C
|
||||
3. Append a dated entry with Source, Use Case, and Markdown-alternative-checked explanation
|
||||
4. If this is the 2nd signal for the same surface, file a promotion pinpoint in ROADMAP.md
|
||||
|
||||
## Dogfood principles
|
||||
|
||||
The Python harness is continuously dogfood-tested:
|
||||
- Every cycle ships to `main` with detailed commit messages
|
||||
- New tests are written before/alongside implementation
|
||||
- Test suite must pass before pushing (zero-regression principle)
|
||||
- Commits grouped by pinpoint (#159, #160, ..., #174)
|
||||
- Failure modes classified per exit code: 0=success, 1=error, 2=timeout
|
||||
|
||||
## Protocol governance
|
||||
|
||||
- **SCHEMAS.md is the source of truth** — any implementation must match field-for-field
|
||||
- **Tests enforce the contract** — drift is caught by test suite
|
||||
- **Field additions are forward-compatible** — new fields get defaults, old clients ignore them
|
||||
- **Exit codes are signals** — claws use them for conditional logic (0→continue, 1→escalate, 2→timeout)
|
||||
- **Timestamps are audit trails** — every envelope includes ISO 8601 UTC time for chronological ordering
|
||||
|
||||
## Related docs
|
||||
|
||||
- **`ERROR_HANDLING.md`** — Unified error-handling pattern for claws (one handler for all 14 clawable commands)
|
||||
- **`SCHEMAS.md`** — JSON protocol specification (read before implementing)
|
||||
- **`OPT_OUT_AUDIT.md`** — Governance for the 12 non-clawable surfaces
|
||||
- **`OPT_OUT_DEMAND_LOG.md`** — Active survey recording real demand signals (evidence base for decisions)
|
||||
- **`ROADMAP.md`** — macro roadmap and macro pain points
|
||||
- **`PHILOSOPHY.md`** — system design intent
|
||||
- **`PARITY.md`** — status of Python ↔ Rust protocol equivalence
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
# Code of Conduct
|
||||
|
||||
## Our pledge
|
||||
|
||||
We aim to make Claw Code a practical, respectful, and evidence-oriented
|
||||
community. Contributors and maintainers are expected to communicate with
|
||||
patience, assume good intent, and focus critique on the work rather than the
|
||||
person.
|
||||
|
||||
## Expected behavior
|
||||
|
||||
- Be respectful and direct.
|
||||
- Welcome newcomers and explain project-specific context when it matters.
|
||||
- Give actionable feedback with evidence, commands, logs, or links.
|
||||
- Respect privacy and do not pressure others to disclose credentials, private
|
||||
prompts, employer information, or personal details.
|
||||
|
||||
## Unacceptable behavior
|
||||
|
||||
- Harassment, threats, insults, or discriminatory language.
|
||||
- Publishing another person's private information without permission.
|
||||
- Sharing secrets, exploit payloads, or private vulnerability details in public
|
||||
channels.
|
||||
- Repeated off-topic disruption after maintainers ask for a thread to stop or
|
||||
move.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Maintainers may remove comments, close threads, restrict participation, or ban
|
||||
accounts that violate this code of conduct. Report concerns through the support
|
||||
or security paths described in [SUPPORT.md](./SUPPORT.md) and
|
||||
[SECURITY.md](./SECURITY.md).
|
||||
@@ -1,66 +0,0 @@
|
||||
# Contributing to Claw Code
|
||||
|
||||
Thanks for helping improve Claw Code. This repository is a Rust-first CLI
|
||||
workspace with supporting docs and compatibility fixtures.
|
||||
|
||||
## Ground rules
|
||||
|
||||
- Keep changes small, reviewable, and tied to a concrete issue or behavior.
|
||||
- Do not commit secrets, API keys, session transcripts with credentials, or
|
||||
generated build output.
|
||||
- Prefer existing crate boundaries and utilities before adding dependencies.
|
||||
- Update documentation when a user-facing command, config key, or provider
|
||||
behavior changes.
|
||||
- Keep examples copy/paste safe. Use placeholder keys such as `sk-ant-...` and
|
||||
avoid commands that require live credentials unless the text explicitly says
|
||||
so.
|
||||
|
||||
## Local setup
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ultraworkers/claw-code
|
||||
cd claw-code/rust
|
||||
cargo build --workspace
|
||||
cargo test --workspace
|
||||
```
|
||||
|
||||
On Windows PowerShell, build from the same `rust` workspace and run the binary
|
||||
with the `.exe` suffix:
|
||||
|
||||
```powershell
|
||||
cd claw-code\rust
|
||||
cargo build --workspace
|
||||
.\target\debug\claw.exe --help
|
||||
```
|
||||
|
||||
## Checks before opening a pull request
|
||||
|
||||
Run the smallest relevant tests for your change, then the broader checks when
|
||||
you touch shared runtime, CLI, or docs surfaces:
|
||||
|
||||
```bash
|
||||
cd rust
|
||||
cargo fmt --all --check
|
||||
cargo test --workspace
|
||||
cargo clippy --workspace
|
||||
```
|
||||
|
||||
For documentation and release-readiness changes, also run:
|
||||
|
||||
```bash
|
||||
python .github/scripts/check_doc_source_of_truth.py
|
||||
python .github/scripts/check_release_readiness.py
|
||||
```
|
||||
|
||||
## Pull request guidance
|
||||
|
||||
- Describe the user-visible reason for the change.
|
||||
- List the commands you ran and any known gaps.
|
||||
- Call out compatibility risks for CLI output, JSON schemas, plugin contracts,
|
||||
provider behavior, or Windows/PowerShell examples.
|
||||
- Keep unrelated cleanup out of feature or fix pull requests.
|
||||
|
||||
## License
|
||||
|
||||
By contributing, you agree that your contributions are licensed under the
|
||||
project's [MIT License](./LICENSE).
|
||||
489
ERROR_HANDLING.md
Normal file
489
ERROR_HANDLING.md
Normal file
@@ -0,0 +1,489 @@
|
||||
# Error Handling for Claw Code Claws
|
||||
|
||||
**Purpose:** Build a unified error handler for orchestration code using claw-code as a library or subprocess.
|
||||
|
||||
After cycles #178–#179 (parser-front-door hole closure), claw-code's error interface is deterministic, machine-readable, and clawable: **one error handler for all 14 clawable commands.**
|
||||
|
||||
---
|
||||
|
||||
## Quick Reference: Exit Codes and Envelopes
|
||||
|
||||
Every clawable command returns JSON on stdout when `--output-format json` is requested.
|
||||
|
||||
**IMPORTANT:** The exit code contract below applies **only when `--output-format json` is explicitly set**. Text mode follows argparse conventions and may return different exit codes (e.g., `2` for argparse parse errors). Claws consuming claw-code as a subprocess MUST always pass `--output-format json` to get the documented contract.
|
||||
|
||||
| Exit Code | Meaning | Response Format | Example |
|
||||
|---|---|---|---|
|
||||
| **0** | Success | `{success fields}` | `{"session_id": "...", "loaded": true}` |
|
||||
| **1** | Error / Not Found | `{error: {kind, message, ...}}` | `{"error": {"kind": "session_not_found", ...}}` |
|
||||
| **2** | Timeout | `{final_stop_reason: "timeout", final_cancel_observed: ...}` | `{"final_stop_reason": "timeout", ...}` |
|
||||
|
||||
### Text mode vs JSON mode exit codes
|
||||
|
||||
| Scenario | Text mode exit | JSON mode exit | Why |
|
||||
|---|---|---|---|
|
||||
| Unknown subcommand | 2 (argparse default) | 1 (parse error envelope) | argparse defaults to 2; JSON mode normalizes to contract |
|
||||
| Missing required arg | 2 (argparse default) | 1 (parse error envelope) | Same reason |
|
||||
| Session not found | 1 | 1 | Application-level error, same in both |
|
||||
| Command executed OK | 0 | 0 | Success path, identical |
|
||||
| Turn-loop timeout | 2 | 2 | Identical (#161 implementation) |
|
||||
|
||||
**Practical rule for claws:** always pass `--output-format json`. This eliminates text-mode surprises and gives you the documented exit-code contract for every error path.
|
||||
|
||||
---
|
||||
|
||||
## One-Handler Pattern
|
||||
|
||||
Build a single error-recovery function that works for all 14 clawable commands:
|
||||
|
||||
```python
|
||||
import subprocess
|
||||
import json
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
def run_claw_command(command: list[str], timeout_seconds: float = 30.0) -> dict[str, Any]:
|
||||
"""
|
||||
Run a clawable claw-code command and handle errors uniformly.
|
||||
|
||||
Args:
|
||||
command: Full command list, e.g. ["claw", "load-session", "id", "--output-format", "json"]
|
||||
timeout_seconds: Wall-clock timeout
|
||||
|
||||
Returns:
|
||||
Parsed JSON result from stdout
|
||||
|
||||
Raises:
|
||||
ClawError: Classified by error.kind (parse, session_not_found, runtime, timeout, etc.)
|
||||
"""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
command,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=timeout_seconds,
|
||||
)
|
||||
except subprocess.TimeoutExpired:
|
||||
raise ClawError(
|
||||
kind='subprocess_timeout',
|
||||
message=f'Command exceeded {timeout_seconds}s wall-clock timeout',
|
||||
retryable=True, # Caller's decision; subprocess timeout != engine timeout
|
||||
)
|
||||
|
||||
# Parse JSON (valid for all success/error/timeout paths in claw-code)
|
||||
try:
|
||||
envelope = json.loads(result.stdout)
|
||||
except json.JSONDecodeError as err:
|
||||
raise ClawError(
|
||||
kind='parse_failure',
|
||||
message=f'Command output is not JSON: {err}',
|
||||
hint='Check that --output-format json is being passed',
|
||||
retryable=False,
|
||||
)
|
||||
|
||||
# Classify by exit code and error.kind
|
||||
match (result.returncode, envelope.get('error', {}).get('kind')):
|
||||
case (0, _):
|
||||
# Success
|
||||
return envelope
|
||||
|
||||
case (1, 'parse'):
|
||||
# #179: argparse error — typically a typo or missing required argument
|
||||
raise ClawError(
|
||||
kind='parse',
|
||||
message=envelope['error']['message'],
|
||||
hint=envelope['error'].get('hint'),
|
||||
retryable=False, # Typos don't fix themselves
|
||||
)
|
||||
|
||||
case (1, 'session_not_found'):
|
||||
# Common: load-session on nonexistent ID
|
||||
raise ClawError(
|
||||
kind='session_not_found',
|
||||
message=envelope['error']['message'],
|
||||
session_id=envelope.get('session_id'),
|
||||
retryable=False, # Session won't appear on retry
|
||||
)
|
||||
|
||||
case (1, 'filesystem'):
|
||||
# Directory missing, permission denied, disk full
|
||||
raise ClawError(
|
||||
kind='filesystem',
|
||||
message=envelope['error']['message'],
|
||||
retryable=True, # Might be transient (disk space, NFS flake)
|
||||
)
|
||||
|
||||
case (1, 'runtime'):
|
||||
# Generic engine error (unexpected exception, malformed input, etc.)
|
||||
raise ClawError(
|
||||
kind='runtime',
|
||||
message=envelope['error']['message'],
|
||||
retryable=envelope['error'].get('retryable', False),
|
||||
)
|
||||
|
||||
case (1, _):
|
||||
# Catch-all for any new error.kind values
|
||||
raise ClawError(
|
||||
kind=envelope['error']['kind'],
|
||||
message=envelope['error']['message'],
|
||||
retryable=envelope['error'].get('retryable', False),
|
||||
)
|
||||
|
||||
case (2, _):
|
||||
# Timeout (engine was asked to cancel and had fair chance to observe)
|
||||
cancel_observed = envelope.get('final_cancel_observed', False)
|
||||
raise ClawError(
|
||||
kind='timeout',
|
||||
message=f'Turn exceeded timeout (cancel_observed={cancel_observed})',
|
||||
cancel_observed=cancel_observed,
|
||||
retryable=True, # Caller can retry with a fresh session
|
||||
safe_to_reuse_session=(cancel_observed is True),
|
||||
)
|
||||
|
||||
case (exit_code, _):
|
||||
# Unexpected exit code
|
||||
raise ClawError(
|
||||
kind='unexpected_exit_code',
|
||||
message=f'Unexpected exit code {exit_code}',
|
||||
retryable=False,
|
||||
)
|
||||
|
||||
|
||||
class ClawError(Exception):
|
||||
"""Unified error type for claw-code commands."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
kind: str,
|
||||
message: str,
|
||||
hint: str | None = None,
|
||||
retryable: bool = False,
|
||||
cancel_observed: bool = False,
|
||||
safe_to_reuse_session: bool = False,
|
||||
session_id: str | None = None,
|
||||
):
|
||||
self.kind = kind
|
||||
self.message = message
|
||||
self.hint = hint
|
||||
self.retryable = retryable
|
||||
self.cancel_observed = cancel_observed
|
||||
self.safe_to_reuse_session = safe_to_reuse_session
|
||||
self.session_id = session_id
|
||||
super().__init__(self.message)
|
||||
|
||||
def __str__(self) -> str:
|
||||
parts = [f"{self.kind}: {self.message}"]
|
||||
if self.hint:
|
||||
parts.append(f"Hint: {self.hint}")
|
||||
if self.retryable:
|
||||
parts.append("(retryable)")
|
||||
if self.cancel_observed:
|
||||
parts.append(f"(safe_to_reuse_session={self.safe_to_reuse_session})")
|
||||
return "\n".join(parts)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Practical Recovery Patterns
|
||||
|
||||
### Pattern 1: Retry on transient errors
|
||||
|
||||
```python
|
||||
from time import sleep
|
||||
|
||||
def run_with_retry(
|
||||
command: list[str],
|
||||
max_attempts: int = 3,
|
||||
backoff_seconds: float = 0.5,
|
||||
) -> dict:
|
||||
"""Retry on transient errors (filesystem, timeout)."""
|
||||
for attempt in range(1, max_attempts + 1):
|
||||
try:
|
||||
return run_claw_command(command)
|
||||
except ClawError as err:
|
||||
if not err.retryable:
|
||||
raise # Non-transient; fail fast
|
||||
|
||||
if attempt == max_attempts:
|
||||
raise # Last attempt; propagate
|
||||
|
||||
print(f"Attempt {attempt} failed ({err.kind}); retrying in {backoff_seconds}s...", file=sys.stderr)
|
||||
sleep(backoff_seconds)
|
||||
backoff_seconds *= 1.5 # exponential backoff
|
||||
|
||||
raise RuntimeError("Unreachable")
|
||||
```
|
||||
|
||||
### Pattern 2: Reuse session after timeout (if safe)
|
||||
|
||||
```python
|
||||
def run_with_timeout_recovery(
|
||||
command: list[str],
|
||||
timeout_seconds: float = 30.0,
|
||||
fallback_timeout: float = 60.0,
|
||||
) -> dict:
|
||||
"""
|
||||
On timeout, check cancel_observed. If True, the session is safe for retry.
|
||||
If False, the session is potentially wedged; use a fresh one.
|
||||
"""
|
||||
try:
|
||||
return run_claw_command(command, timeout_seconds=timeout_seconds)
|
||||
except ClawError as err:
|
||||
if err.kind != 'timeout':
|
||||
raise
|
||||
|
||||
if err.safe_to_reuse_session:
|
||||
# Engine saw the cancel signal; safe to reuse this session with a larger timeout
|
||||
print(f"Timeout observed (cancel_observed=true); retrying with {fallback_timeout}s...", file=sys.stderr)
|
||||
return run_claw_command(command, timeout_seconds=fallback_timeout)
|
||||
else:
|
||||
# Engine didn't see the cancel signal; session may be wedged
|
||||
print(f"Timeout not observed (cancel_observed=false); session is potentially wedged", file=sys.stderr)
|
||||
raise # Caller should allocate a fresh session
|
||||
```
|
||||
|
||||
### Pattern 3: Detect parse errors (typos in command-line construction)
|
||||
|
||||
```python
|
||||
def validate_command_before_dispatch(command: list[str]) -> None:
|
||||
"""
|
||||
Dry-run with --help to detect obvious syntax errors before dispatching work.
|
||||
|
||||
This is cheap (no API call) and catches typos like:
|
||||
- Unknown subcommand: `claw typo-command`
|
||||
- Unknown flag: `claw bootstrap --invalid-flag`
|
||||
- Missing required argument: `claw load-session` (no session_id)
|
||||
"""
|
||||
help_cmd = command + ['--help']
|
||||
try:
|
||||
result = subprocess.run(help_cmd, capture_output=True, timeout=2.0)
|
||||
if result.returncode != 0:
|
||||
print(f"Warning: {' '.join(help_cmd)} returned {result.returncode}", file=sys.stderr)
|
||||
print("(This doesn't prove the command is invalid, just that --help failed)", file=sys.stderr)
|
||||
except subprocess.TimeoutExpired:
|
||||
pass # --help shouldn't hang, but don't block on it
|
||||
```
|
||||
|
||||
### Pattern 4: Log and forward errors to observability
|
||||
|
||||
```python
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def run_claw_with_logging(command: list[str]) -> dict:
|
||||
"""Run command and log errors for observability."""
|
||||
try:
|
||||
result = run_claw_command(command)
|
||||
logger.info(f"Claw command succeeded: {' '.join(command)}")
|
||||
return result
|
||||
except ClawError as err:
|
||||
logger.error(
|
||||
"Claw command failed",
|
||||
extra={
|
||||
'command': ' '.join(command),
|
||||
'error_kind': err.kind,
|
||||
'error_message': err.message,
|
||||
'retryable': err.retryable,
|
||||
'cancel_observed': err.cancel_observed,
|
||||
},
|
||||
)
|
||||
raise
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Error Kinds (Enumeration)
|
||||
|
||||
After cycles #178–#179, the complete set of `error.kind` values is:
|
||||
|
||||
| Kind | Exit Code | Meaning | Retryable | Notes |
|
||||
|---|---|---|---|---|
|
||||
| **parse** | 1 | Argparse error (unknown command, missing arg, invalid flag) | No | Real error message included (#179); valid choices list for discoverability |
|
||||
| **session_not_found** | 1 | load-session target doesn't exist | No | session_id and directory included in envelope |
|
||||
| **filesystem** | 1 | Directory missing, permission denied, disk full | Yes | Transient issues (disk space, NFS flake) can be retried |
|
||||
| **runtime** | 1 | Engine error (unexpected exception, malformed input) | Depends | `error.retryable` field in envelope specifies |
|
||||
| **timeout** | 2 | Engine timeout with cooperative cancellation | Yes* | `cancel_observed` field signals session safety (#164) |
|
||||
|
||||
*Retry safety depends on `cancel_observed`:
|
||||
- `cancel_observed=true` → session is safe to reuse
|
||||
- `cancel_observed=false` → session may be wedged; allocate fresh one
|
||||
|
||||
---
|
||||
|
||||
## What We Did to Make This Work
|
||||
|
||||
### Cycle #178: Parse-Error Envelope
|
||||
|
||||
**Problem:** `claw nonexistent --output-format json` returned argparse help text on stderr instead of an envelope.
|
||||
**Solution:** Catch argparse `SystemExit` in JSON mode and emit a structured error envelope.
|
||||
**Benefit:** Claws no longer need to parse human help text to understand parse errors.
|
||||
|
||||
### Cycle #179: Stderr Hygiene + Real Error Message
|
||||
|
||||
**Problem:** Even after #178, argparse usage was leaking to stderr AND the envelope message was generic ("invalid command or argument").
|
||||
**Solution:** Monkey-patch `parser.error()` in JSON mode to raise an internal exception, preserving argparse's real message verbatim. Suppress stderr entirely in JSON mode.
|
||||
**Benefit:** Claws see one stream (stdout), one envelope, and real error context (e.g., "invalid choice: typo (choose from ...)") for discoverability.
|
||||
|
||||
### Contract: #164 Stage B (`cancel_observed` field)
|
||||
|
||||
**Problem:** Timeout results didn't signal whether the engine actually observed the cancellation request.
|
||||
**Solution:** Add `cancel_observed: bool` field to timeout TurnResult; signal true iff the engine had a fair chance to observe the cancel event.
|
||||
**Benefit:** Claws can decide "retry with fresh session" vs "reuse this session with larger timeout" based on a single boolean.
|
||||
|
||||
---
|
||||
|
||||
## Common Mistakes to Avoid
|
||||
|
||||
❌ **Don't parse exit code alone**
|
||||
```python
|
||||
# BAD: Exit code 1 could mean parse error, not-found, filesystem, or runtime
|
||||
if result.returncode == 1:
|
||||
# What should I do? Unclear.
|
||||
pass
|
||||
```
|
||||
|
||||
✅ **Do parse error.kind**
|
||||
```python
|
||||
# GOOD: error.kind tells you exactly how to recover
|
||||
match envelope['error']['kind']:
|
||||
case 'parse': ...
|
||||
case 'session_not_found': ...
|
||||
case 'filesystem': ...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
❌ **Don't capture both stdout and stderr and assume they're separate concerns**
|
||||
```python
|
||||
# BAD (pre-#179): Capture stdout + stderr, then parse stdout as JSON
|
||||
# But stderr might contain argparse noise that you have to string-match
|
||||
result = subprocess.run(..., capture_output=True, text=True)
|
||||
if "invalid choice" in result.stderr:
|
||||
# ... custom error handling
|
||||
```
|
||||
|
||||
✅ **Do silence stderr in JSON mode**
|
||||
```python
|
||||
# GOOD (post-#179): In JSON mode, stderr is guaranteed silent
|
||||
# Envelope on stdout is your single source of truth
|
||||
result = subprocess.run(..., capture_output=True, text=True)
|
||||
envelope = json.loads(result.stdout) # Always valid in JSON mode
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
❌ **Don't retry on parse errors**
|
||||
```python
|
||||
# BAD: Typos don't fix themselves
|
||||
error_kind = envelope['error']['kind']
|
||||
if error_kind == 'parse':
|
||||
retry() # Will fail again
|
||||
```
|
||||
|
||||
✅ **Do check retryable before retrying**
|
||||
```python
|
||||
# GOOD: Let the error tell you
|
||||
error = envelope['error']
|
||||
if error.get('retryable', False):
|
||||
retry()
|
||||
else:
|
||||
raise
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
❌ **Don't reuse a session after timeout without checking cancel_observed**
|
||||
```python
|
||||
# BAD: Reuse session = potential wedge
|
||||
result = run_claw_command(...) # times out
|
||||
# ... later, reuse same session
|
||||
result = run_claw_command(...) # might be stuck in the previous turn
|
||||
```
|
||||
|
||||
✅ **Do allocate a fresh session if cancel_observed=false**
|
||||
```python
|
||||
# GOOD: Allocate fresh session if wedge is suspected
|
||||
try:
|
||||
result = run_claw_command(...)
|
||||
except ClawError as err:
|
||||
if err.cancel_observed:
|
||||
# Safe to reuse
|
||||
result = run_claw_command(...)
|
||||
else:
|
||||
# Allocate fresh session
|
||||
fresh_session = create_session()
|
||||
result = run_claw_command_in_session(fresh_session, ...)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Your Error Handler
|
||||
|
||||
```python
|
||||
def test_error_handler_parse_error():
|
||||
"""Verify parse errors are caught and classified."""
|
||||
try:
|
||||
run_claw_command(['claw', 'nonexistent', '--output-format', 'json'])
|
||||
assert False, "Should have raised ClawError"
|
||||
except ClawError as err:
|
||||
assert err.kind == 'parse'
|
||||
assert 'invalid choice' in err.message.lower()
|
||||
assert err.retryable is False
|
||||
|
||||
def test_error_handler_timeout_safe():
|
||||
"""Verify timeout with cancel_observed=true marks session as safe."""
|
||||
# Requires a live claw-code server; mock this test
|
||||
try:
|
||||
run_claw_command(
|
||||
['claw', 'turn-loop', '"x"', '--timeout-seconds', '0.0001'],
|
||||
timeout_seconds=2.0,
|
||||
)
|
||||
assert False, "Should have raised ClawError"
|
||||
except ClawError as err:
|
||||
assert err.kind == 'timeout'
|
||||
assert err.safe_to_reuse_session is True # cancel_observed=true
|
||||
|
||||
def test_error_handler_not_found():
|
||||
"""Verify session_not_found is clearly classified."""
|
||||
try:
|
||||
run_claw_command(['claw', 'load-session', 'nonexistent', '--output-format', 'json'])
|
||||
assert False, "Should have raised ClawError"
|
||||
except ClawError as err:
|
||||
assert err.kind == 'session_not_found'
|
||||
assert err.retryable is False
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Appendix: SCHEMAS.md Error Shape
|
||||
|
||||
For reference, the canonical JSON error envelope shape (SCHEMAS.md):
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T11:40:00Z",
|
||||
"command": "load-session",
|
||||
"exit_code": 1,
|
||||
"output_format": "json",
|
||||
"schema_version": "1.0",
|
||||
"error": {
|
||||
"kind": "session_not_found",
|
||||
"operation": "session_store.load_session",
|
||||
"target": "nonexistent",
|
||||
"retryable": false,
|
||||
"message": "session 'nonexistent' not found in .port_sessions",
|
||||
"hint": "use 'list-sessions' to see available sessions"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
All commands that emit errors follow this shape (with error.kind varying). See `SCHEMAS.md` for the complete contract.
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
After cycles #178–#179, **one error handler works for all 14 clawable commands.** No more string-matching, no more stderr parsing, no more exit-code ambiguity. Just parse the JSON, check `error.kind`, and decide: retry, escalate, or reuse session (if safe).
|
||||
|
||||
The handler itself is ~80 lines of Python; the patterns are reusable across any language that can speak JSON.
|
||||
21
LICENSE
21
LICENSE
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2026 UltraWorkers and Claw Code contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
151
OPT_OUT_AUDIT.md
Normal file
151
OPT_OUT_AUDIT.md
Normal file
@@ -0,0 +1,151 @@
|
||||
# OPT_OUT Surface Audit Roadmap
|
||||
|
||||
**Status:** Pre-audit (decision table ready, survey pending)
|
||||
|
||||
This document governs the audit and potential promotion of 12 OPT_OUT surfaces (commands that currently do **not** support `--output-format json`).
|
||||
|
||||
## OPT_OUT Classification Rationale
|
||||
|
||||
A surface is classified as OPT_OUT when:
|
||||
1. **Human-first by nature:** Rich Markdown prose / diagrams / structured text where JSON would be information loss
|
||||
2. **Query-filtered alternative exists:** Commands with internal `--query` / `--limit` don't need JSON (users already have escape hatch)
|
||||
3. **Simulation/debug only:** Not meant for production orchestration (e.g., mode simulators)
|
||||
4. **Future JSON work is planned:** Documented in ROADMAP with clear upgrade path
|
||||
|
||||
---
|
||||
|
||||
## OPT_OUT Surfaces (12 Total)
|
||||
|
||||
### Group A: Rich-Markdown Reports (4 commands)
|
||||
|
||||
**Rationale:** These emit structured narrative prose. JSON would require lossy serialization.
|
||||
|
||||
| Command | Output | Current use | JSON case |
|
||||
|---|---|---|---|
|
||||
| `summary` | Multi-section workspace summary (Markdown) | Human readability | Not applicable; Markdown is the output |
|
||||
| `manifest` | Workspace manifest with project tree (Markdown) | Human readability | Not applicable; Markdown is the output |
|
||||
| `parity-audit` | TypeScript/Python port comparison report (Markdown) | Human readability | Not applicable; Markdown is the output |
|
||||
| `setup-report` | Preflight + startup diagnostics (Markdown) | Human readability | Not applicable; Markdown is the output |
|
||||
|
||||
**Audit decision:** These likely remain OPT_OUT long-term (Markdown-as-output is intentional). If JSON version needed in future, would be a separate `--output-format json` path generating structured data (project summary object, manifest array, audit deltas, setup checklist) — but that's a **new contract**, not an addition to existing Markdown surfaces.
|
||||
|
||||
**Pinpoint:** #175 (deferred) — audit whether `summary`/`manifest` should emit JSON structured versions *in parallel* with Markdown, or if Markdown-only is the right UX.
|
||||
|
||||
---
|
||||
|
||||
### Group B: List Commands with Query Filters (3 commands)
|
||||
|
||||
**Rationale:** These already support `--query` and `--limit` for filtering. JSON output would be redundant; users can pipe to `jq`.
|
||||
|
||||
| Command | Filtering | Current output | JSON case |
|
||||
|---|---|---|---|
|
||||
| `subsystems` | `--limit` | Human-readable list | Use `--query` to filter, users can parse if needed |
|
||||
| `commands` | `--query`, `--limit`, `--no-plugin-commands`, `--no-skill-commands` | Human-readable list | Use `--query` to filter, users can parse if needed |
|
||||
| `tools` | `--query`, `--limit`, `--simple-mode` | Human-readable list | Use `--query` to filter, users can parse if needed |
|
||||
|
||||
**Audit decision:** `--query` / `--limit` are already the machine-friendly escape hatch. These commands are **intentionally** list-filter-based (not orchestration-primary). Promoting to CLAWABLE would require:
|
||||
1. Formalizing what the structured output *is* (command array? tool array?)
|
||||
2. Versioning the schema per command
|
||||
3. Updating tests to validate per-command schemas
|
||||
|
||||
**Cost-benefit:** Low. Users who need structured data can already use `--query` to narrow results, then parse. Effort to promote > value.
|
||||
|
||||
**Pinpoint:** #176 (backlog) — audit `--query` UX; consider if a `--query-json` escape hatch (output JSON of matching items) is worth the schema tax.
|
||||
|
||||
---
|
||||
|
||||
### Group C: Simulation / Debug Surfaces (5 commands)
|
||||
|
||||
**Rationale:** These are intentionally **not production-orchestrated**. They simulate behavior, test modes, or debug scenarios. JSON output doesn't add value.
|
||||
|
||||
| Command | Purpose | Output | Use case |
|
||||
|---|---|---|---|
|
||||
| `remote-mode` | Simulate remote execution | Text (mock session) | Testing harness behavior under remote constraints |
|
||||
| `ssh-mode` | Simulate SSH execution | Text (mock SSH session) | Testing harness behavior over SSH-like transport |
|
||||
| `teleport-mode` | Simulate teleport hop | Text (mock hop session) | Testing harness behavior with teleport bouncing |
|
||||
| `direct-connect-mode` | Simulate direct network | Text (mock session) | Testing harness behavior with direct connectivity |
|
||||
| `deep-link-mode` | Simulate deep-link invocation | Text (mock deep-link) | Testing harness behavior from URL/deeplink |
|
||||
|
||||
**Audit decision:** These are **intentionally simulation-only**. Promoting to CLAWABLE means:
|
||||
1. "This simulated mode is now a valid orchestration surface"
|
||||
2. Need to define what JSON output *means* (mock session state? simulation log?)
|
||||
3. Need versioning + test coverage
|
||||
|
||||
**Cost-benefit:** Very low. These are debugging tools, not orchestration endpoints. Effort to promote >> value.
|
||||
|
||||
**Pinpoint:** #177 (backlog) — decide if mode simulators should ever be CLAWABLE (probably no).
|
||||
|
||||
---
|
||||
|
||||
## Audit Workflow (Future Cycles)
|
||||
|
||||
### For each surface:
|
||||
1. **Survey:** Check if any external claw actually uses --output-format with this surface
|
||||
2. **Cost estimate:** How much schema work + testing?
|
||||
3. **Value estimate:** How much demand for JSON version?
|
||||
4. **Decision:** CLAWABLE, remain OPT_OUT, or new pinpoint?
|
||||
|
||||
### Promotion criteria (if promoting to CLAWABLE):
|
||||
|
||||
A surface moves from OPT_OUT → CLAWABLE **only if**:
|
||||
- ✅ Clear use case for JSON (not just "hypothetically could be JSON")
|
||||
- ✅ Schema is simple and stable (not 20+ fields)
|
||||
- ✅ At least one external claw has requested it
|
||||
- ✅ Tests can be added without major refactor
|
||||
- ✅ Maintainability burden is worth the value
|
||||
|
||||
### Demote criteria (if staying OPT_OUT):
|
||||
|
||||
A surface stays OPT_OUT **if**:
|
||||
- ✅ JSON would be information loss (Markdown reports)
|
||||
- ✅ Equivalent filtering already exists (`--query` / `--limit`)
|
||||
- ✅ Use case is simulation/debug, not production
|
||||
- ✅ Promotion effort > value to users
|
||||
|
||||
---
|
||||
|
||||
## Post-Audit Outcomes
|
||||
|
||||
### Likely scenario (high confidence)
|
||||
|
||||
**Group A (Markdown reports):** Remain OPT_OUT
|
||||
- `summary`, `manifest`, `parity-audit`, `setup-report` are **intentionally** human-first
|
||||
- If JSON-like structure is needed in future, would be separate `*-json` commands or distinct `--output-format`, not added to Markdown surfaces
|
||||
|
||||
**Group B (List filters):** Remain OPT_OUT
|
||||
- `subsystems`, `commands`, `tools` have `--query` / `--limit` as query layer
|
||||
- Users who need structured data already have escape hatch
|
||||
|
||||
**Group C (Mode simulators):** Remain OPT_OUT
|
||||
- `remote-mode`, `ssh-mode`, etc. are debug tools, not orchestration endpoints
|
||||
- No demand for JSON version; promotion would be forced, not driven
|
||||
|
||||
**Result:** OPT_OUT audit concludes that 12/12 surfaces should **remain OPT_OUT** (no promotions).
|
||||
|
||||
### If demand emerges
|
||||
|
||||
If external claws report needing JSON from any OPT_OUT surface:
|
||||
1. File pinpoint with use case + rationale
|
||||
2. Estimate cost + value
|
||||
3. If value > cost, promote to CLAWABLE with full test coverage
|
||||
4. Update SCHEMAS.md
|
||||
5. Update CLAUDE.md
|
||||
|
||||
---
|
||||
|
||||
## Timeline
|
||||
|
||||
- **Post-#174 (now):** OPT_OUT audit documented (this file)
|
||||
- **Cycles #19–#21 (deferred):** Survey period — collect data on external demand
|
||||
- **Cycle #22 (deferred):** Final audit decision + any promotions
|
||||
- **Post-audit:** Move to protocol maintenance mode (new commands/fields/surfaces)
|
||||
|
||||
---
|
||||
|
||||
## Related
|
||||
|
||||
- **OPT_OUT_DEMAND_LOG.md** — Active survey recording real demand signals (evidentiary base for any promotion decision)
|
||||
- **SCHEMAS.md** — Clawable surface contracts
|
||||
- **CLAUDE.md** — Development guidance
|
||||
- **test_cli_parity_audit.py** — Parametrized tests for CLAWABLE_SURFACES enforcement
|
||||
- **ROADMAP.md** — Macro phases (this audit is Phase 3 before Phase 2 closure)
|
||||
167
OPT_OUT_DEMAND_LOG.md
Normal file
167
OPT_OUT_DEMAND_LOG.md
Normal file
@@ -0,0 +1,167 @@
|
||||
# OPT_OUT Demand Log
|
||||
|
||||
**Purpose:** Record real demand signals for promoting OPT_OUT surfaces to CLAWABLE. Without this log, the audit criteria in `OPT_OUT_AUDIT.md` have no evidentiary base.
|
||||
|
||||
**Status:** Active survey window (post-#178/#179, cycles #21+)
|
||||
|
||||
## How to file a demand signal
|
||||
|
||||
When any external claw, operator, or downstream consumer actually needs JSON output from one of the 12 OPT_OUT surfaces, add an entry below. **Speculation, "could be useful someday," and internal hypotheticals do NOT count.**
|
||||
|
||||
A valid signal requires:
|
||||
- **Source:** Who/what asked (human, automation, agent session, external tool)
|
||||
- **Surface:** Which OPT_OUT command (from the 12)
|
||||
- **Use case:** The concrete orchestration problem they're trying to solve
|
||||
- **Markdown-parsing alternative checked:** Why parsing the existing OPT_OUT output (Markdown/text) is insufficient for the use case
|
||||
- **Date:** When the signal was received
|
||||
|
||||
## Promotion thresholds
|
||||
|
||||
Per `OPT_OUT_AUDIT.md` criteria:
|
||||
- **2+ independent signals** for the same surface within a survey window → file promotion pinpoint
|
||||
- **1 signal + existing stable schema** → file pinpoint for discussion
|
||||
- **0 signals** → surface stays OPT_OUT (documented rationale in audit file)
|
||||
|
||||
The threshold is intentionally high. Single-use hacks can be served via one-off Markdown parsing; schema promotion is expensive (docs, tests, maintenance).
|
||||
|
||||
---
|
||||
|
||||
## Demand Signals Received
|
||||
|
||||
### Group A: Rich-Markdown Reports
|
||||
|
||||
#### `summary`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: No demand recorded. Markdown output is intentional and useful for human review.
|
||||
|
||||
#### `manifest`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: No demand recorded.
|
||||
|
||||
#### `parity-audit`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: No demand recorded. Report consumers are humans reviewing porting progress, not automation.
|
||||
|
||||
#### `setup-report`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: No demand recorded.
|
||||
|
||||
---
|
||||
|
||||
### Group B: List Commands with Query Filters
|
||||
|
||||
#### `subsystems`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: `--limit` already provides filtering. No claws requesting JSON.
|
||||
|
||||
#### `commands`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: `--query`, `--limit`, `--no-plugin-commands`, `--no-skill-commands` already allow filtering. No demand recorded.
|
||||
|
||||
#### `tools`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: `--query`, `--limit`, `--simple-mode` provide filtering. No demand recorded.
|
||||
|
||||
---
|
||||
|
||||
### Group C: Simulation / Debug Surfaces
|
||||
|
||||
#### `remote-mode`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: Simulation-only. No production orchestration need.
|
||||
|
||||
#### `ssh-mode`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: Simulation-only.
|
||||
|
||||
#### `teleport-mode`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: Simulation-only.
|
||||
|
||||
#### `direct-connect-mode`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: Simulation-only.
|
||||
|
||||
#### `deep-link-mode`
|
||||
**Signals received: 0**
|
||||
|
||||
Notes: Simulation-only.
|
||||
|
||||
---
|
||||
|
||||
## Survey Window Status
|
||||
|
||||
| Cycle | Date | New Signals | Running Total | Action |
|
||||
|---|---|---|---|---|
|
||||
| #21 | 2026-04-22 | 0 | 0 | Survey opened; log established |
|
||||
|
||||
**Current assessment:** Zero demand for any OPT_OUT surface promotion. This is consistent with `OPT_OUT_AUDIT.md` prediction that all 12 likely stay OPT_OUT long-term.
|
||||
|
||||
---
|
||||
|
||||
## Signal Entry Template
|
||||
|
||||
```
|
||||
### <surface-name>
|
||||
**Signals received: [N]**
|
||||
|
||||
Entry N (YYYY-MM-DD):
|
||||
- Source: <who/what>
|
||||
- Use case: <concrete orchestration problem>
|
||||
- Markdown-alternative-checked: <yes/no + why insufficient>
|
||||
- Follow-up: <filed pinpoint / discussion thread / closed>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Decision Framework
|
||||
|
||||
At cycle #22 (or whenever survey window closes):
|
||||
|
||||
### If 0 signals total (likely):
|
||||
- Move all 12 surfaces to `PERMANENTLY_OPT_OUT` or similar
|
||||
- Remove `OPT_OUT_SURFACES` from `test_cli_parity_audit.py` (everything is explicitly non-goal)
|
||||
- Update `CLAUDE.md` to reflect maintainership mode
|
||||
- Close `OPT_OUT_AUDIT.md` with "audit complete, no promotions"
|
||||
|
||||
### If 1–2 signals on isolated surfaces:
|
||||
- File individual promotion pinpoints per surface with demand evidence
|
||||
- Each goes through standard #171/#172/#173 loop (parity audit, SCHEMAS.md, consistency test)
|
||||
|
||||
### If high demand (3+ signals):
|
||||
- Reopen audit: is the OPT_OUT classification actually correct?
|
||||
- Review whether protocol expansion is warranted
|
||||
|
||||
---
|
||||
|
||||
## Related Files
|
||||
|
||||
- **`OPT_OUT_AUDIT.md`** — Audit criteria, decision table, rationale by group
|
||||
- **`SCHEMAS.md`** — JSON contract for the 14 CLAWABLE surfaces
|
||||
- **`tests/test_cli_parity_audit.py`** — Machine enforcement of CLAWABLE/OPT_OUT classification
|
||||
- **`CLAUDE.md`** — Development posture (maintainership mode)
|
||||
|
||||
---
|
||||
|
||||
## Philosophy
|
||||
|
||||
**Prevent speculative expansion.** The discipline of requiring real signals before promotion protects the protocol from schema bloat. Every new CLAWABLE surface adds:
|
||||
- A SCHEMAS.md section (maintenance burden)
|
||||
- Test coverage (test suite tax)
|
||||
- Documentation (cognitive load for new developers)
|
||||
- Version compatibility (schema_version bump risk)
|
||||
|
||||
If a claw can't articulate *why* it needs JSON for `summary` beyond "it would be nice," then JSON for `summary` is not needed. The Markdown output is a feature, not a gap.
|
||||
|
||||
The audit log closes the loop on "governed non-goals": OPT_OUT surfaces are intentionally not clawable until proven otherwise by evidence.
|
||||
@@ -8,7 +8,7 @@ Last updated: 2026-04-03
|
||||
- Requested 9-lane checkpoint: **All 9 lanes merged on `main`.**
|
||||
- Current `main` HEAD: `ee31e00` (stub implementations replaced with real AskUserQuestion + RemoteTrigger).
|
||||
- Repository stats at this checkpoint: **292 commits on `main` / 293 across all branches**, **9 crates**, **48,599 tracked Rust LOC**, **2,568 test LOC**, **3 authors**, date range **2026-03-31 → 2026-04-03**.
|
||||
- Mock parity harness stats: **12 scripted scenarios**, **21 captured `/v1/messages` requests** in `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs`.
|
||||
- Mock parity harness stats: **10 scripted scenarios**, **19 captured `/v1/messages` requests** in `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs`.
|
||||
|
||||
## Mock parity harness — milestone 1
|
||||
|
||||
@@ -23,8 +23,6 @@ Last updated: 2026-04-03
|
||||
- [x] Scripted permission prompt coverage: `bash_permission_prompt_approved`, `bash_permission_prompt_denied`
|
||||
- [x] Scripted plugin-path coverage: `plugin_tool_roundtrip`
|
||||
- [x] Behavioral diff/checklist runner: `rust/scripts/run_mock_parity_diff.py`
|
||||
- [x] Scripted session-compaction metadata coverage: `auto_compact_triggered`
|
||||
- [x] Scripted token/cost JSON coverage: `token_cost_reporting`
|
||||
|
||||
## Harness v2 behavioral checklist
|
||||
|
||||
@@ -174,9 +172,8 @@ Canonical scenario map: `rust/mock_parity_scenarios.json`
|
||||
|
||||
- [ ] End-to-end MCP runtime lifecycle beyond the registry bridge now on `main`
|
||||
- [x] Output truncation (large stdout/file content)
|
||||
- [x] Session compaction behavior matching
|
||||
- auto_compaction threshold from env
|
||||
- [x] Token counting / cost tracking accuracy
|
||||
- [ ] Session compaction behavior matching
|
||||
- [ ] Token counting / cost tracking accuracy
|
||||
- [x] Bash validation lane merged onto `main`
|
||||
- [ ] CI green on every commit
|
||||
|
||||
|
||||
32
README.md
32
README.md
@@ -5,16 +5,14 @@
|
||||
·
|
||||
<a href="./USAGE.md">Usage</a>
|
||||
·
|
||||
<a href="./ERROR_HANDLING.md">Error Handling</a>
|
||||
·
|
||||
<a href="./rust/README.md">Rust workspace</a>
|
||||
·
|
||||
<a href="./PARITY.md">Parity</a>
|
||||
·
|
||||
<a href="./ROADMAP.md">Roadmap</a>
|
||||
·
|
||||
<a href="./CONTRIBUTING.md">Contributing</a>
|
||||
·
|
||||
<a href="./SECURITY.md">Security</a>
|
||||
·
|
||||
<a href="https://discord.gg/5TUQKqFWd">UltraWorkers Discord</a>
|
||||
</p>
|
||||
|
||||
@@ -36,17 +34,19 @@ Claw Code is the public Rust implementation of the `claw` CLI agent harness.
|
||||
The canonical implementation lives in [`rust/`](./rust), and the current source of truth for this repository is **ultraworkers/claw-code**.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Start with [`USAGE.md`](./USAGE.md) for build, auth, CLI, session, and parity-harness workflows. For file submission/navigation questions, see [Navigation and file context](./docs/navigation-file-context.md). For local OpenAI-compatible models and offline skill installs, see [Local OpenAI-compatible providers and skills setup](./docs/local-openai-compatible-providers.md). Windows users can jump to the PowerShell-first [Windows install and release quickstart](./docs/windows-install-release.md). Make `claw doctor` your first health check after building, use [`rust/README.md`](./rust/README.md) for crate-level details, read [`PARITY.md`](./PARITY.md) for the current Rust-port checkpoint, and see [`docs/container.md`](./docs/container.md) for the container-first workflow.
|
||||
> Start with [`USAGE.md`](./USAGE.md) for build, auth, CLI, session, and parity-harness workflows. Make `claw doctor` your first health check after building, use [`rust/README.md`](./rust/README.md) for crate-level details, read [`PARITY.md`](./PARITY.md) for the current Rust-port checkpoint, and see [`docs/container.md`](./docs/container.md) for the container-first workflow.
|
||||
>
|
||||
> **ACP / Zed status:** `claw-code` does not ship an ACP/Zed daemon or JSON-RPC entrypoint yet. Run `claw acp` (or `claw --acp`) for the current status instead of guessing from source layout; `claw acp serve` is currently a discoverability alias only, returns status with exit code 0, and real ACP support remains tracked separately in `ROADMAP.md`. For the public JSON contract, see [`docs/g011-acp-json-rpc-status-contract.md`](./docs/g011-acp-json-rpc-status-contract.md).
|
||||
> **ACP / Zed status:** `claw-code` does not ship an ACP/Zed daemon entrypoint yet. Run `claw acp` (or `claw --acp`) for the current status instead of guessing from source layout; `claw acp serve` is currently a discoverability alias only, and real ACP support remains tracked separately in `ROADMAP.md`.
|
||||
|
||||
## Current repository shape
|
||||
|
||||
- **`rust/`** — canonical Rust workspace and the `claw` CLI binary
|
||||
- **`USAGE.md`** — task-oriented usage guide for the current product surface
|
||||
- **`ERROR_HANDLING.md`** — unified error-handling pattern for orchestration code
|
||||
- **`PARITY.md`** — Rust-port parity status and migration notes
|
||||
- **`ROADMAP.md`** — active roadmap and cleanup backlog
|
||||
- **`PHILOSOPHY.md`** — project intent and system-design framing
|
||||
- **`SCHEMAS.md`** — JSON protocol contract (Python harness reference)
|
||||
- **`src/` + `tests/`** — companion Python/reference workspace and audit helpers; not the primary runtime surface
|
||||
|
||||
## Quick start
|
||||
@@ -100,8 +100,6 @@ export ANTHROPIC_API_KEY="sk-ant-..."
|
||||
.\target\debug\claw.exe prompt "say hello"
|
||||
```
|
||||
|
||||
For release ZIPs, PATH setup, provider switching, and notification smoke checks, see [`docs/windows-install-release.md`](./docs/windows-install-release.md).
|
||||
|
||||
**Git Bash / WSL** are optional alternatives, not requirements. If you prefer bash-style paths (`/c/Users/you/...` instead of `C:\Users\you\...`), Git Bash (ships with Git for Windows) works well. In Git Bash, the `MINGW64` prompt is expected and normal — not a broken install.
|
||||
|
||||
## Post-build: locate the binary and verify
|
||||
@@ -136,18 +134,6 @@ Test the binary directly using its path:
|
||||
.\rust\target\debug\claw.exe doctor
|
||||
```
|
||||
|
||||
PowerShell smoke commands that do not require live credentials:
|
||||
|
||||
```powershell
|
||||
$env:CLAW_CONFIG_HOME = Join-Path $env:TEMP "claw config home"
|
||||
New-Item -ItemType Directory -Force -Path $env:CLAW_CONFIG_HOME | Out-Null
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY, Env:\ANTHROPIC_AUTH_TOKEN, Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
.\rust\target\debug\claw.exe help
|
||||
.\rust\target\debug\claw.exe status
|
||||
.\rust\target\debug\claw.exe config env
|
||||
.\rust\target\debug\claw.exe doctor
|
||||
```
|
||||
|
||||
If these commands succeed, the build is working. `claw doctor` is your first health check — it validates your API key, model access, and tool configuration.
|
||||
|
||||
### Optional: Add to PATH
|
||||
@@ -206,17 +192,11 @@ cargo test --workspace
|
||||
## Documentation map
|
||||
|
||||
- [`USAGE.md`](./USAGE.md) — quick commands, auth, sessions, config, parity harness
|
||||
- [`docs/navigation-file-context.md`](./docs/navigation-file-context.md) — terminal navigation, scrollback, `@path` file context, attachments, and secret-safety guidance
|
||||
- [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md) — Ollama/llama.cpp/vLLM setup, Claw multi-provider positioning, and local skills install checks
|
||||
- [`docs/windows-install-release.md`](./docs/windows-install-release.md) — PowerShell-first install, release artifact, provider switching, and Windows/WSL notification smoke paths
|
||||
- [`rust/README.md`](./rust/README.md) — crate map, CLI surface, features, workspace layout
|
||||
- [`PARITY.md`](./PARITY.md) — parity status for the Rust port
|
||||
- [`rust/MOCK_PARITY_HARNESS.md`](./rust/MOCK_PARITY_HARNESS.md) — deterministic mock-service harness details
|
||||
- [`ROADMAP.md`](./ROADMAP.md) — active roadmap and open cleanup work
|
||||
- [`docs/g004-events-reports-contract.md`](./docs/g004-events-reports-contract.md) — Stream 2 lane event/report contract guidance for consumers
|
||||
- [`PHILOSOPHY.md`](./PHILOSOPHY.md) — why the project exists and how it is operated
|
||||
- [`CONTRIBUTING.md`](./CONTRIBUTING.md), [`SECURITY.md`](./SECURITY.md), [`SUPPORT.md`](./SUPPORT.md), and [`CODE_OF_CONDUCT.md`](./CODE_OF_CONDUCT.md) — contribution, vulnerability-reporting, support, and community policies
|
||||
- [`LICENSE`](./LICENSE) — MIT license for this repository
|
||||
|
||||
## Ecosystem
|
||||
|
||||
|
||||
837
ROADMAP.md
837
ROADMAP.md
File diff suppressed because one or more lines are too long
377
SCHEMAS.md
Normal file
377
SCHEMAS.md
Normal file
@@ -0,0 +1,377 @@
|
||||
# JSON Envelope Schemas — Clawable CLI Contract
|
||||
|
||||
This document locks the field-level contract for all clawable-surface commands. Every command accepting `--output-format json` must conform to the envelope shapes below.
|
||||
|
||||
**Target audience:** Claws building orchestrators, automation, or monitoring against claw-code's JSON output.
|
||||
|
||||
---
|
||||
|
||||
## Common Fields (All Envelopes)
|
||||
|
||||
Every command response, success or error, carries:
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "list-sessions",
|
||||
"exit_code": 0,
|
||||
"output_format": "json",
|
||||
"schema_version": "1.0"
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
|---|---|---|---|
|
||||
| `timestamp` | ISO 8601 UTC | Yes | Time command completed |
|
||||
| `command` | string | Yes | argv[1] (e.g. "list-sessions") |
|
||||
| `exit_code` | int (0/1/2) | Yes | 0=success, 1=error/not-found, 2=timeout |
|
||||
| `output_format` | string | Yes | Always "json" (for symmetry with text mode) |
|
||||
| `schema_version` | string | Yes | "1.0" (bump for breaking changes) |
|
||||
|
||||
---
|
||||
|
||||
## Turn Result Fields (Multi-Turn Sessions)
|
||||
|
||||
When a command's response includes a `turn` object (e.g., in `bootstrap` or `turn-loop`), it carries:
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
|---|---|---|---|
|
||||
| `prompt` | string | Yes | User input for this turn |
|
||||
| `output` | string | Yes | Assistant response |
|
||||
| `stop_reason` | enum | Yes | One of: `completed`, `timeout`, `cancelled`, `max_budget_reached`, `max_turns_reached` |
|
||||
| `cancel_observed` | bool | Yes | #164 Stage B: cancellation was signaled and observed (#161/#164) |
|
||||
|
||||
---
|
||||
|
||||
## Error Envelope
|
||||
|
||||
When a command fails (exit code 1), responses carry:
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "exec-command",
|
||||
"exit_code": 1,
|
||||
"error": {
|
||||
"kind": "filesystem",
|
||||
"operation": "write",
|
||||
"target": "/tmp/nonexistent/out.md",
|
||||
"retryable": true,
|
||||
"message": "No such file or directory",
|
||||
"hint": "intermediate directory does not exist; try mkdir -p /tmp/nonexistent"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
|---|---|---|---|
|
||||
| `error.kind` | enum | Yes | One of: `filesystem`, `auth`, `session`, `parse`, `runtime`, `mcp`, `delivery`, `usage`, `policy`, `unknown` |
|
||||
| `error.operation` | string | Yes | Syscall/method that failed (e.g. "write", "open", "resolve_session") |
|
||||
| `error.target` | string | Yes | Resource that failed (path, session-id, server-name, etc.) |
|
||||
| `error.retryable` | bool | Yes | Whether caller can safely retry without intervention |
|
||||
| `error.message` | string | Yes | Platform error message (e.g. errno text) |
|
||||
| `error.hint` | string | No | Optional actionable next step |
|
||||
|
||||
---
|
||||
|
||||
## Not-Found Envelope
|
||||
|
||||
When an entity does not exist (exit code 1, but not a failure):
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "load-session",
|
||||
"exit_code": 1,
|
||||
"name": "does-not-exist",
|
||||
"found": false,
|
||||
"error": {
|
||||
"kind": "session_not_found",
|
||||
"message": "session 'does-not-exist' not found in .claw/sessions/",
|
||||
"retryable": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
|---|---|---|---|
|
||||
| `name` | string | Yes | Entity name/id that was looked up |
|
||||
| `found` | bool | Yes | Always `false` for not-found |
|
||||
| `error.kind` | enum | Yes | One of: `command_not_found`, `tool_not_found`, `session_not_found` |
|
||||
| `error.message` | string | Yes | User-visible explanation |
|
||||
| `error.retryable` | bool | Yes | Usually `false` (entity will not magically appear) |
|
||||
|
||||
---
|
||||
|
||||
## Per-Command Success Schemas
|
||||
|
||||
### `list-sessions`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "list-sessions",
|
||||
"exit_code": 0,
|
||||
"output_format": "json",
|
||||
"schema_version": "1.0",
|
||||
"directory": ".claw/sessions",
|
||||
"sessions_count": 2,
|
||||
"sessions": [
|
||||
{
|
||||
"session_id": "sess_abc123",
|
||||
"created_at": "2026-04-21T15:30:00Z",
|
||||
"last_modified": "2026-04-22T09:45:00Z",
|
||||
"prompt_count": 5,
|
||||
"stopped": false
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### `delete-session`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "delete-session",
|
||||
"exit_code": 0,
|
||||
"session_id": "sess_abc123",
|
||||
"deleted": true,
|
||||
"directory": ".claw/sessions"
|
||||
}
|
||||
```
|
||||
|
||||
### `load-session`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "load-session",
|
||||
"exit_code": 0,
|
||||
"session_id": "sess_abc123",
|
||||
"loaded": true,
|
||||
"directory": ".claw/sessions",
|
||||
"path": ".claw/sessions/sess_abc123.jsonl"
|
||||
}
|
||||
```
|
||||
|
||||
### `flush-transcript`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "flush-transcript",
|
||||
"exit_code": 0,
|
||||
"session_id": "sess_abc123",
|
||||
"path": ".claw/sessions/sess_abc123.jsonl",
|
||||
"flushed": true,
|
||||
"messages_count": 12,
|
||||
"input_tokens": 4500,
|
||||
"output_tokens": 1200
|
||||
}
|
||||
```
|
||||
|
||||
### `show-command`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "show-command",
|
||||
"exit_code": 0,
|
||||
"name": "add-dir",
|
||||
"found": true,
|
||||
"source_hint": "commands/add-dir/add-dir.tsx",
|
||||
"responsibility": "creates a new directory in the worktree"
|
||||
}
|
||||
```
|
||||
|
||||
### `show-tool`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "show-tool",
|
||||
"exit_code": 0,
|
||||
"name": "BashTool",
|
||||
"found": true,
|
||||
"source_hint": "tools/BashTool/BashTool.tsx"
|
||||
}
|
||||
```
|
||||
|
||||
### `exec-command`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "exec-command",
|
||||
"exit_code": 0,
|
||||
"name": "add-dir",
|
||||
"prompt": "create src/util/",
|
||||
"handled": true,
|
||||
"message": "created directory",
|
||||
"source_hint": "commands/add-dir/add-dir.tsx"
|
||||
}
|
||||
```
|
||||
|
||||
### `exec-tool`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "exec-tool",
|
||||
"exit_code": 0,
|
||||
"name": "BashTool",
|
||||
"payload": "cargo build",
|
||||
"handled": true,
|
||||
"message": "exit code 0",
|
||||
"source_hint": "tools/BashTool/BashTool.tsx"
|
||||
}
|
||||
```
|
||||
|
||||
### `route`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "route",
|
||||
"exit_code": 0,
|
||||
"prompt": "add a test",
|
||||
"limit": 10,
|
||||
"match_count": 3,
|
||||
"matches": [
|
||||
{
|
||||
"kind": "command",
|
||||
"name": "add-file",
|
||||
"score": 0.92,
|
||||
"source_hint": "commands/add-file/add-file.tsx"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### `bootstrap`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "bootstrap",
|
||||
"exit_code": 0,
|
||||
"prompt": "hello",
|
||||
"setup": {
|
||||
"python_version": "3.13.12",
|
||||
"implementation": "CPython",
|
||||
"platform_name": "darwin",
|
||||
"test_command": "pytest"
|
||||
},
|
||||
"routed_matches": [
|
||||
{"kind": "command", "name": "init", "score": 0.85, "source_hint": "..."}
|
||||
],
|
||||
"turn": {
|
||||
"prompt": "hello",
|
||||
"output": "...",
|
||||
"stop_reason": "completed"
|
||||
},
|
||||
"persisted_session_path": ".claw/sessions/sess_abc.jsonl"
|
||||
}
|
||||
```
|
||||
|
||||
### `command-graph`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "command-graph",
|
||||
"exit_code": 0,
|
||||
"builtins_count": 185,
|
||||
"plugin_like_count": 20,
|
||||
"skill_like_count": 2,
|
||||
"total_count": 207,
|
||||
"builtins": [
|
||||
{"name": "add-dir", "source_hint": "commands/add-dir/add-dir.tsx"}
|
||||
],
|
||||
"plugin_like": [],
|
||||
"skill_like": []
|
||||
}
|
||||
```
|
||||
|
||||
### `tool-pool`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "tool-pool",
|
||||
"exit_code": 0,
|
||||
"simple_mode": false,
|
||||
"include_mcp": true,
|
||||
"tool_count": 184,
|
||||
"tools": [
|
||||
{"name": "BashTool", "source_hint": "tools/BashTool/BashTool.tsx"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### `bootstrap-graph`
|
||||
|
||||
```json
|
||||
{
|
||||
"timestamp": "2026-04-22T10:10:00Z",
|
||||
"command": "bootstrap-graph",
|
||||
"exit_code": 0,
|
||||
"stages": ["stage 1", "stage 2", "..."],
|
||||
"note": "bootstrap-graph is markdown-only in this version"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Versioning & Compatibility
|
||||
|
||||
- **schema_version = "1.0":** Current as of 2026-04-22. Covers all 13 clawable commands.
|
||||
- **Breaking changes** (e.g. renaming a field) bump schema_version to "2.0".
|
||||
- **Additive changes** (e.g. new optional field) stay at "1.0" and are backward compatible.
|
||||
- Downstream claws **must** check `schema_version` before relying on field presence.
|
||||
|
||||
---
|
||||
|
||||
## Regression Testing
|
||||
|
||||
Each command is covered by:
|
||||
1. **Fixture file** (golden JSON snapshot under `tests/fixtures/json/<command>.json`)
|
||||
2. **Parametrised test** in `test_cli_parity_audit.py::TestJsonOutputContractEndToEnd`
|
||||
3. **Field consistency test** (new, tracked as ROADMAP #172)
|
||||
|
||||
To update a fixture after an intentional schema change:
|
||||
```bash
|
||||
claw <command> --output-format json <args> > tests/fixtures/json/<command>.json
|
||||
# Review the diff, commit
|
||||
git add tests/fixtures/json/<command>.json
|
||||
```
|
||||
|
||||
To verify no regressions:
|
||||
```bash
|
||||
cargo test --release test_json_envelope_field_consistency
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Design Notes
|
||||
|
||||
**Why common fields on every response?**
|
||||
- Downstream claws can build one error handler that works for all commands
|
||||
- Timestamp + command + exit_code give context without scraping argv or timestamps from command output
|
||||
- `schema_version` signals compatibility for future upgrades
|
||||
|
||||
**Why both "found" and "error" on not-found?**
|
||||
- Exit code 1 covers both "entity missing" and "operation failed"
|
||||
- `found=false` distinguishes not-found from error without string matching
|
||||
- `error.kind` and `error.retryable` let automation decide: retry a temporary miss vs escalate a permanent refusal
|
||||
|
||||
**Why "operation" and "target" in error?**
|
||||
- Claws can aggregate failures by operation type (e.g. "how many `write` ops failed?")
|
||||
- Claws can implement per-target retry policy (e.g. "skip missing files, retry networking")
|
||||
- Pure text errors ("No such file") do not provide enough structure for pattern matching
|
||||
|
||||
**Why "handled" vs "found"?**
|
||||
- `show-command` reports `found: bool` (inventory signal: "does this exist?")
|
||||
- `exec-command` reports `handled: bool` (operational signal: "was this work performed?")
|
||||
- The names matter: a command can be found but not handled (e.g. too large for context window), or handled silently (no output message)
|
||||
49
SECURITY.md
49
SECURITY.md
@@ -1,49 +0,0 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported versions
|
||||
|
||||
Security fixes target the current `main` branch and the latest published
|
||||
release artifacts when available. Older experimental branches are not supported
|
||||
unless a maintainer explicitly marks them as supported.
|
||||
|
||||
## Reporting a vulnerability
|
||||
|
||||
Please do **not** open a public issue for a suspected vulnerability. Use GitHub
|
||||
private vulnerability reporting for `ultraworkers/claw-code` when available, or
|
||||
contact a maintainer through the repository's published support channel with a
|
||||
minimal, non-destructive reproduction.
|
||||
|
||||
Include:
|
||||
|
||||
- affected command, crate, or workflow;
|
||||
- operating system and shell, especially for Windows/PowerShell path issues;
|
||||
- whether live credentials, MCP servers, plugins, or workspace filesystem
|
||||
access are involved;
|
||||
- expected impact and any safe proof-of-concept steps.
|
||||
|
||||
Do not include real API keys, private prompts, session transcripts with secrets,
|
||||
or exploit payloads that modify third-party systems.
|
||||
|
||||
## Scope
|
||||
|
||||
In scope:
|
||||
|
||||
- workspace path traversal or symlink escapes;
|
||||
- permission bypasses, sandbox misreporting, or unsafe tool execution;
|
||||
- credential disclosure in logs, JSON output, telemetry, docs, or examples;
|
||||
- plugin, hook, MCP, provider, or config behavior that can unexpectedly execute
|
||||
code or leak secrets.
|
||||
|
||||
Out of scope:
|
||||
|
||||
- social engineering;
|
||||
- denial-of-service without a practical security impact;
|
||||
- issues that require already-compromised local developer credentials;
|
||||
- reports against third-party providers or upstream tools without a Claw Code
|
||||
integration issue.
|
||||
|
||||
## Handling expectations
|
||||
|
||||
Maintainers will acknowledge valid private reports as soon as practical, keep
|
||||
discussion private until a fix or mitigation is available, and credit reporters
|
||||
when requested and appropriate.
|
||||
24
SUPPORT.md
24
SUPPORT.md
@@ -1,24 +0,0 @@
|
||||
# Support
|
||||
|
||||
Use the lightest support path that fits the request:
|
||||
|
||||
- **Usage questions:** start with [USAGE.md](./USAGE.md) and
|
||||
[rust/README.md](./rust/README.md).
|
||||
- **Bugs or regressions:** open a GitHub issue with the command, OS/shell,
|
||||
expected behavior, actual behavior, and relevant non-secret output.
|
||||
- **Security issues:** follow [SECURITY.md](./SECURITY.md) instead of opening a
|
||||
public issue.
|
||||
- **Community discussion:** use the UltraWorkers Discord linked from
|
||||
[README.md](./README.md).
|
||||
|
||||
When asking for help, include:
|
||||
|
||||
```text
|
||||
claw --version
|
||||
claw doctor
|
||||
operating system and shell
|
||||
command you ran
|
||||
```
|
||||
|
||||
Redact API keys, bearer tokens, private prompts, session transcripts, and local
|
||||
paths that reveal sensitive information before sharing output.
|
||||
82
USAGE.md
82
USAGE.md
@@ -2,6 +2,9 @@
|
||||
|
||||
This guide covers the current Rust workspace under `rust/` and the `claw` CLI binary. If you are brand new, make the doctor health check your first run: start `claw`, then run `/doctor`.
|
||||
|
||||
> [!TIP]
|
||||
> **Building orchestration code that calls `claw` as a subprocess?** See [`ERROR_HANDLING.md`](./ERROR_HANDLING.md) for the unified error-handling pattern (one handler for all 14 clawable commands, exit codes, JSON envelope contract, and recovery strategies).
|
||||
|
||||
## Quick-start health check
|
||||
|
||||
Run this before prompts, sessions, or automation:
|
||||
@@ -31,7 +34,7 @@ cd rust
|
||||
cargo build --workspace
|
||||
```
|
||||
|
||||
The CLI binary is available at `rust/target/debug/claw` after a debug build (`rust\target\debug\claw.exe` on Windows). Make the doctor check above your first post-build step. For PowerShell-first install, release ZIP, PATH, provider-switching, and Windows/WSL notification examples, see [`docs/windows-install-release.md`](./docs/windows-install-release.md).
|
||||
The CLI binary is available at `rust/target/debug/claw` after a debug build. Make the doctor check above your first post-build step.
|
||||
|
||||
## Quick start
|
||||
|
||||
@@ -95,11 +98,17 @@ cd rust
|
||||
|
||||
### JSON output for scripting
|
||||
|
||||
All clawable commands support `--output-format json` for machine-readable output. Every invocation returns a consistent JSON envelope with `exit_code`, `command`, `timestamp`, and either `{success fields}` or `{error: {kind, message, ...}}`.
|
||||
|
||||
```bash
|
||||
cd rust
|
||||
./target/debug/claw --output-format json prompt "status"
|
||||
./target/debug/claw --output-format json load-session my-session-id
|
||||
./target/debug/claw --output-format json turn-loop "analyze logs" --max-turns 1
|
||||
```
|
||||
|
||||
**Building a dispatcher or orchestration script?** See [`ERROR_HANDLING.md`](./ERROR_HANDLING.md) for the unified error-handling pattern. One code example works for all 14 clawable commands: parse the exit code, classify by `error.kind`, apply recovery strategies (retry, timeout recovery, validation, logging). Use that pattern instead of reimplementing error handling per command.
|
||||
|
||||
### Inspect worker state
|
||||
|
||||
The `claw state` command reads `.claw/worker-state.json`, which is written by the interactive REPL or a one-shot prompt when a worker executes a task. This file contains the worker ID, session reference, model, and permission mode.
|
||||
@@ -230,37 +239,9 @@ export ANTHROPIC_AUTH_TOKEN="anthropic-oauth-or-proxy-bearer-token"
|
||||
|
||||
**If you meant a different provider:** if `claw` reports missing Anthropic credentials but you already have `OPENAI_API_KEY`, `XAI_API_KEY`, or `DASHSCOPE_API_KEY` exported, you most likely forgot to prefix the model name with the provider's routing prefix. Use `--model openai/gpt-4.1-mini` (OpenAI-compat / OpenRouter / Ollama), `--model grok` (xAI), or `--model qwen-plus` (DashScope) and the prefix router will select the right backend regardless of the ambient credentials. The error message now includes a hint that names the detected env var.
|
||||
|
||||
|
||||
### Windows PowerShell provider switching
|
||||
|
||||
The same provider rules work in PowerShell. Use placeholder values in docs and tests; put real keys only in your private environment. Remove unrelated provider env vars when validating a switch so failures are easy to diagnose.
|
||||
|
||||
`CLAUDE_CODE_PROVIDER` is not required for normal Claw routing; prefer explicit model prefixes such as `openai/` and provider-specific env vars so PowerShell examples stay portable.
|
||||
|
||||
```powershell
|
||||
# Anthropic direct
|
||||
$env:ANTHROPIC_API_KEY = "sk-ant-REPLACE_ME"
|
||||
Remove-Item Env:\OPENAI_BASE_URL -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
.\target\debug\claw.exe --model "sonnet" prompt "reply with ready"
|
||||
|
||||
# OpenAI-compatible gateway / OpenRouter
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:OPENAI_BASE_URL = "https://openrouter.ai/api/v1"
|
||||
$env:OPENAI_API_KEY = "sk-or-v1-REPLACE_ME"
|
||||
.\target\debug\claw.exe --model "openai/gpt-4.1-mini" prompt "reply with ready"
|
||||
|
||||
# Local OpenAI-compatible server
|
||||
$env:OPENAI_BASE_URL = "http://127.0.0.1:11434/v1"
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
.\target\debug\claw.exe --model "llama3.2" prompt "reply with ready"
|
||||
```
|
||||
|
||||
See the full [Windows install and release quickstart](./docs/windows-install-release.md) for release artifact setup, persistent `setx` usage, and WSL notes.
|
||||
|
||||
## Local Models
|
||||
|
||||
`claw` can talk to local servers and provider gateways through either Anthropic-compatible or OpenAI-compatible endpoints. Use `ANTHROPIC_BASE_URL` with `ANTHROPIC_AUTH_TOKEN` for Anthropic-compatible services, or `OPENAI_BASE_URL` with `OPENAI_API_KEY` for OpenAI-compatible services. For copyable Ollama, llama.cpp, vLLM, raw `/v1/chat/completions`, and local skills install examples, see [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md).
|
||||
`claw` can talk to local servers and provider gateways through either Anthropic-compatible or OpenAI-compatible endpoints. Use `ANTHROPIC_BASE_URL` with `ANTHROPIC_AUTH_TOKEN` for Anthropic-compatible services, or `OPENAI_BASE_URL` with `OPENAI_API_KEY` for OpenAI-compatible services.
|
||||
|
||||
### Anthropic-compatible endpoint
|
||||
|
||||
@@ -334,7 +315,7 @@ Reasoning variants (`qwen-qwq-*`, `qwq-*`, `*-thinking`) automatically strip `te
|
||||
|
||||
The OpenAI-compatible backend also serves as the gateway for **OpenRouter**, **Ollama**, and any other service that speaks the OpenAI `/v1/chat/completions` wire format — just point `OPENAI_BASE_URL` at the service.
|
||||
|
||||
**Model-name prefix routing:** If a model name starts with `openai/`, `gpt-`, `qwen/`, `qwen-`, `kimi/`, or `kimi-`, the provider is selected by the prefix regardless of which env vars are set. This prevents accidental misrouting to Anthropic when multiple credentials exist in the environment. For the default OpenAI API, `openai/` is a routing prefix and is stripped before the request hits the wire. For a custom `OPENAI_BASE_URL`, slash-containing OpenAI-compatible slugs (for example OpenRouter-style `openai/gpt-4.1-mini`) are preserved so the gateway receives the model ID it expects.
|
||||
**Model-name prefix routing:** If a model name starts with `openai/`, `gpt-`, `qwen/`, or `qwen-`, the provider is selected by the prefix regardless of which env vars are set. This prevents accidental misrouting to Anthropic when multiple credentials exist in the environment.
|
||||
|
||||
### Tested models and aliases
|
||||
|
||||
@@ -348,11 +329,8 @@ These are the models registered in the built-in alias table with known token lim
|
||||
| `grok` / `grok-3` | `grok-3` | xAI | 64 000 | 131 072 |
|
||||
| `grok-mini` / `grok-3-mini` | `grok-3-mini` | xAI | 64 000 | 131 072 |
|
||||
| `grok-2` | `grok-2` | xAI | — | — |
|
||||
| `kimi` | `kimi-k2.5` | DashScope | 16 384 | 256 000 |
|
||||
| `gpt-4.1` / `gpt-4.1-mini` / `gpt-4.1-nano` | same | OpenAI-compatible | 32 768 | 1 047 576 |
|
||||
| `gpt-5.4` / `gpt-5.4-mini` / `gpt-5.4-nano` | same | OpenAI-compatible | 128 000 | 1 000 000 / 400 000 |
|
||||
|
||||
Any model name that does not match an alias is passed through verbatim after provider routing is resolved. This is how you use OpenRouter model slugs (`openai/gpt-4.1-mini` with a custom `OPENAI_BASE_URL`), Ollama tags (`llama3.2`), or full Anthropic model IDs (`claude-sonnet-4-20250514`).
|
||||
Any model name that does not match an alias is passed through verbatim. This is how you use OpenRouter model slugs (`openai/gpt-4.1-mini`), Ollama tags (`llama3.2`), or full Anthropic model IDs (`claude-sonnet-4-20250514`).
|
||||
|
||||
### User-defined aliases
|
||||
|
||||
@@ -374,29 +352,11 @@ Local project settings override user-level settings. Aliases resolve through the
|
||||
|
||||
1. If the resolved model name starts with `claude` → Anthropic.
|
||||
2. If it starts with `grok` → xAI.
|
||||
3. If it starts with `openai/` or `gpt-` → OpenAI-compatible.
|
||||
4. If it starts with `qwen/`, `qwen-`, `kimi/`, or `kimi-` → DashScope-compatible OpenAI wire format.
|
||||
5. If `OPENAI_BASE_URL` and `OPENAI_API_KEY` are set, unknown model names route to the OpenAI-compatible client for local/gateway servers.
|
||||
6. Otherwise, `claw` checks which credential is set: Anthropic first, then OpenAI, then xAI. If only `OPENAI_BASE_URL` is set, it still routes to OpenAI-compatible for authless local servers.
|
||||
7. If nothing matches, it defaults to Anthropic.
|
||||
|
||||
|
||||
### Provider diagnostics and custom OpenAI-compatible parameters
|
||||
|
||||
The API layer exposes a provider diagnostics snapshot via `api::provider_diagnostics_for_model(model)`. It reports the resolved provider, auth/base-url environment variables, default base URL, whether the provider uses the OpenAI-compatible wire format, whether reasoning tuning parameters are stripped, whether DeepSeek V4 reasoning history is preserved, proxy support, extra-body support, and whether slash-containing model IDs are preserved for custom OpenAI-compatible gateways.
|
||||
|
||||
For gateway features that are not first-class request fields yet, `MessageRequest::extra_body` passes through provider-specific JSON parameters such as `web_search_options` or `parallel_tool_calls`. Core protocol fields (`model`, `messages`, `stream`, `tools`, `tool_choice`, `max_tokens`, and `max_completion_tokens`) are protected and cannot be overridden through `extra_body`.
|
||||
|
||||
## File context and navigation
|
||||
|
||||
Use `@path/to/file` in prompts to submit repository files as context, for example `Read @src/app.ts and explain the bug`, `Compare @old.md and @new.md`, or `Use @logs/error.txt as context and suggest a fix`. Prompt history, `Ctrl-r`, and long-output scrolling come from your shell, terminal, or tmux rather than from Claw itself. See [`docs/navigation-file-context.md`](./docs/navigation-file-context.md) for scrollback, attachment, and secret-redaction guidance.
|
||||
3. Otherwise, `claw` checks which credential is set: `ANTHROPIC_API_KEY`/`ANTHROPIC_AUTH_TOKEN` first, then `OPENAI_API_KEY`, then `XAI_API_KEY`.
|
||||
4. If nothing matches, it defaults to Anthropic.
|
||||
|
||||
## FAQ
|
||||
|
||||
### Is Claw Code Claude-only?
|
||||
|
||||
No. Claw Code is a Claude-Code-shaped workflow/runtime, not a Claude-only product. It can target Anthropic and OpenAI-compatible/provider-routed/local models depending on config. Non-Claude providers may require stricter response-shape and tool-call compatibility, so some workflows can be rougher than first-party Anthropic/OpenAI paths; provider-specific identity leaks are bugs, not product intent. See [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md) for local provider examples.
|
||||
|
||||
### What about Codex?
|
||||
|
||||
The name "codex" appears in the Claw Code ecosystem but it does **not** refer to OpenAI Codex (the code-generation model). Here is what it means in this project:
|
||||
@@ -450,18 +410,6 @@ let client = build_http_client_with(&config).expect("proxy client");
|
||||
- Empty values are treated as unset, so leaving `HTTPS_PROXY=""` in your shell will not enable a proxy.
|
||||
- If a proxy URL cannot be parsed, `claw` falls back to a direct (no-proxy) client so existing workflows keep working; double-check the URL if you expected the request to be tunnelled.
|
||||
|
||||
## Skills
|
||||
|
||||
Use `/skills list` in the interactive REPL or `claw skills --output-format json` from the direct CLI to inspect installed skills. For offline/local installs, install the directory that contains `SKILL.md`, then verify the discovered name before invoking it:
|
||||
|
||||
```text
|
||||
/skills install /absolute/path/to/my-skill
|
||||
/skills list
|
||||
/skills my-skill
|
||||
```
|
||||
|
||||
If install succeeds but invocation fails with a provider HTTP error, treat provider setup separately: run `claw doctor` and a one-shot prompt smoke test before reinstalling the skill. See [`docs/local-openai-compatible-providers.md`](./docs/local-openai-compatible-providers.md#local-skills-install-from-disk) for the full checklist.
|
||||
|
||||
## Common operational commands
|
||||
|
||||
```bash
|
||||
|
||||
@@ -9,8 +9,7 @@ This document describes model-specific handling in the OpenAI-compatible provide
|
||||
- [Kimi Models (is_error Exclusion)](#kimi-models-is_error-exclusion)
|
||||
- [Reasoning Models (Tuning Parameter Stripping)](#reasoning-models-tuning-parameter-stripping)
|
||||
- [GPT-5 (max_completion_tokens)](#gpt-5-max_completion_tokens)
|
||||
- [Qwen and Kimi Models (DashScope Routing)](#qwen-and-kimi-models-dashscope-routing)
|
||||
- [Custom Gateway Slugs and Extra Body Parameters](#custom-gateway-slugs-and-extra-body-parameters)
|
||||
- [Qwen Models (DashScope Routing)](#qwen-models-dashscope-routing)
|
||||
- [Implementation Details](#implementation-details)
|
||||
- [Adding New Models](#adding-new-models)
|
||||
- [Testing](#testing)
|
||||
@@ -23,8 +22,6 @@ The `openai_compat.rs` provider translates Claude Code's internal message format
|
||||
- Sampling parameters (temperature, top_p, etc.)
|
||||
- Token limit fields (`max_tokens` vs `max_completion_tokens`)
|
||||
- Base URL routing
|
||||
- Provider-specific extra body parameters (`web_search_options`, `parallel_tool_calls`, local-server switches, etc.)
|
||||
- Provider diagnostics for status/doctor-style surfaces
|
||||
|
||||
## Model-Specific Handling
|
||||
|
||||
@@ -49,7 +46,7 @@ The `openai_compat.rs` provider translates Claude Code's internal message format
|
||||
fn model_rejects_is_error_field(model: &str) -> bool {
|
||||
let lowered = model.to_ascii_lowercase();
|
||||
let canonical = lowered.rsplit('/').next().unwrap_or(lowered.as_str());
|
||||
canonical.starts_with("kimi")
|
||||
canonical.starts_with("kimi-")
|
||||
}
|
||||
```
|
||||
|
||||
@@ -123,13 +120,13 @@ let max_tokens_key = if wire_model.starts_with("gpt-5") {
|
||||
|
||||
---
|
||||
|
||||
### Qwen and Kimi Models (DashScope Routing)
|
||||
### Qwen Models (DashScope Routing)
|
||||
|
||||
**Affected models:** All models with `qwen` or `kimi` prefixes, including `qwen/`, `qwen-`, `kimi/`, and `kimi-` forms.
|
||||
**Affected models:** All models with `qwen` prefix
|
||||
|
||||
**Behavior:** Routed to DashScope (`https://dashscope.aliyuncs.com/compatible-mode/v1`) rather than ambient-credential fallback providers. Known routing prefixes are stripped before sending the wire model.
|
||||
**Behavior:** Routed to DashScope (`https://dashscope.aliyuncs.com/compatible-mode/v1`) rather than default providers.
|
||||
|
||||
**Rationale:** Qwen and Kimi compatible-mode models are hosted through Alibaba Cloud's DashScope service, not OpenAI or Anthropic.
|
||||
**Rationale:** Qwen models are hosted by Alibaba Cloud's DashScope service, not OpenAI or Anthropic.
|
||||
|
||||
**Configuration:**
|
||||
```rust
|
||||
@@ -140,21 +137,6 @@ pub const DEFAULT_DASHSCOPE_BASE_URL: &str = "https://dashscope.aliyuncs.com/com
|
||||
|
||||
**Note:** Some Qwen models are also reasoning models (see [Reasoning Models](#reasoning-models-tuning-parameter-stripping) above) and receive both treatments.
|
||||
|
||||
|
||||
---
|
||||
|
||||
### Custom Gateway Slugs and Extra Body Parameters
|
||||
|
||||
**Affected models:** Slash-containing model IDs routed through the OpenAI-compatible provider, especially custom gateways configured with `OPENAI_BASE_URL` such as OpenRouter, local routers, or other `/v1/chat/completions` services.
|
||||
|
||||
**Behavior:**
|
||||
- The default OpenAI API treats `openai/` as a routing prefix and sends the bare model name on the wire.
|
||||
- Custom OpenAI-compatible base URLs preserve slash-containing slugs such as `openai/gpt-4.1-mini` so the gateway receives the exact model ID it expects.
|
||||
- `MessageRequest::extra_body` passes through custom request JSON after core fields are populated. This supports provider-specific options such as `web_search_options` and `parallel_tool_calls`.
|
||||
- Protected core fields (`model`, `messages`, `stream`, `tools`, `tool_choice`, `max_tokens`, `max_completion_tokens`) cannot be overridden through `extra_body`.
|
||||
|
||||
**Testing:** See `custom_openai_gateway_preserves_slash_model_ids_and_extra_body_params` in `openai_compat_integration.rs` and `extra_body_params_are_passed_through_without_overriding_core_fields` in `openai_compat.rs`.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### File Location
|
||||
@@ -170,8 +152,7 @@ rust/crates/api/src/providers/openai_compat.rs
|
||||
| `model_rejects_is_error_field()` | Detects models that don't support `is_error` in tool results |
|
||||
| `is_reasoning_model()` | Detects reasoning models that need tuning param stripping |
|
||||
| `translate_message()` | Converts internal messages to OpenAI format (applies `is_error` logic) |
|
||||
| `build_chat_completion_request()` | Constructs full request payload (applies all model-specific logic and safe `extra_body` passthrough) |
|
||||
| `provider_diagnostics_for_model()` | Produces provider/status diagnostics including auth/base-url vars, reasoning behavior, proxy support, extra-body support, and slash-model preservation |
|
||||
| `build_chat_completion_request()` | Constructs full request payload (applies all model-specific logic) |
|
||||
|
||||
### Provider Prefix Handling
|
||||
|
||||
@@ -184,7 +165,7 @@ let canonical = model.to_ascii_lowercase()
|
||||
.unwrap_or(model);
|
||||
```
|
||||
|
||||
This ensures consistent detection regardless of whether models are referenced with or without provider prefixes. Wire-model handling is more specific: known routing prefixes are stripped for provider-native defaults, while custom OpenAI-compatible base URLs preserve slash-containing gateway slugs.
|
||||
This ensures consistent detection regardless of whether models are referenced with or without provider prefixes.
|
||||
|
||||
## Adding New Models
|
||||
|
||||
@@ -202,15 +183,11 @@ When adding support for new models:
|
||||
- Does it require `max_completion_tokens` instead of `max_tokens`?
|
||||
- Update the `max_tokens_key` logic
|
||||
|
||||
4. **Check custom gateway behavior**
|
||||
- Should slash-containing IDs be preserved for custom `OPENAI_BASE_URL` gateways?
|
||||
- Does the feature belong in a typed request field or `extra_body` passthrough?
|
||||
|
||||
5. **Add tests**
|
||||
4. **Add tests**
|
||||
- Unit test for detection function
|
||||
- Integration test in `build_chat_completion_request`
|
||||
|
||||
6. **Update this documentation**
|
||||
5. **Update this documentation**
|
||||
- Add the model to the affected lists
|
||||
- Document any special behavior
|
||||
|
||||
@@ -227,8 +204,6 @@ cargo test --package api model_rejects_is_error_field
|
||||
cargo test --package api reasoning_model
|
||||
cargo test --package api gpt5
|
||||
cargo test --package api qwen
|
||||
cargo test --package api custom_openai_gateway_preserves_slash_model_ids_and_extra_body_params
|
||||
cargo test --package api provider_diagnostics_explain_openai_compatible_capabilities
|
||||
```
|
||||
|
||||
### Test Files
|
||||
@@ -256,6 +231,6 @@ fn my_new_model_is_detected() {
|
||||
|
||||
---
|
||||
|
||||
*Last updated: 2026-05-15*
|
||||
*Last updated: 2026-04-16*
|
||||
|
||||
For questions or updates, see the implementation in `rust/crates/api/src/providers/openai_compat.rs`.
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
# Anti-slop issue and PR triage
|
||||
|
||||
Use this checklist before spending engineering time on low-signal issues, generated PRs, duplicate fixes, or broad unsolicited changes. The goal is not to reject community work by default; it is to make each merge, defer, or close recommendation evidence-backed and safe.
|
||||
|
||||
## Classifications
|
||||
|
||||
| Classification | Use when | Required evidence | Safe action |
|
||||
| --- | --- | --- | --- |
|
||||
| `actionable-bug` | The report has a reproducible product failure. | Repro steps, failing test, logs with secrets removed, or matching roadmap item. | Fix, assign, or link to an existing fix. |
|
||||
| `actionable-docs` | The report identifies missing, stale, or confusing documentation. | Current doc path plus desired corrected source of truth. | Patch docs or link to the owning docs lane. |
|
||||
| `actionable-feature` | The request matches Claw Code direction and has a concrete acceptance shape. | Issue/PR link plus roadmap or maintainer rationale. | Defer to planning or implement if already scoped. |
|
||||
| `duplicate` | Another issue/PR already covers the same user-visible outcome. | Link the canonical issue/PR and note any extra evidence worth preserving. | Cross-link; close only with maintainer/owner policy. |
|
||||
| `spam-or-promotion` | The content is promotional, irrelevant, or abusive. | URL/title/body excerpt summary, not a full repost. | Label/close per repository policy. |
|
||||
| `generated-slop-or-hallucinated` | The change is broad, mechanically generated, unreviewable, or names APIs/files that do not exist. | Diff/path examples, missing symbols, or unverifiable claims. | Request a narrow repro or reject/defer with rationale. |
|
||||
| `unsafe-or-security-sensitive` | The report includes secrets, exploit detail, or risky operational instructions. | Redacted summary and security policy link. | Move to the private/security path; do not expand public details. |
|
||||
| `not-reproducible-yet` | The claim might be valid but lacks enough evidence to act. | Missing command, environment, expected/actual behavior, or version. | Ask for repro details; do not implement speculative fixes. |
|
||||
| `externally-blocked` | Progress depends on upstream services, credentials, policy, or unavailable owner approval. | Blocking dependency and owner/gate. | Defer with a concrete unblock condition. |
|
||||
|
||||
## PR review gate
|
||||
|
||||
Every PR triage note should answer:
|
||||
|
||||
1. Is the PR a merge candidate, a request-changes candidate, a duplicate, unsafe, out-of-scope, or generated slop?
|
||||
2. What exact evidence supports that classification?
|
||||
3. Which tests/docs checks were run or intentionally skipped?
|
||||
4. Which issue, roadmap row, or user problem does it resolve?
|
||||
5. If it should not merge now, what is the minimal non-destructive next action?
|
||||
|
||||
Automation lanes must not merge or close remote PRs/issues. They may produce a ledger row, add local documentation/templates, and report recommended actions for a maintainer-owned final gate.
|
||||
|
||||
## Issue intake gate
|
||||
|
||||
Every issue triage note should answer:
|
||||
|
||||
1. Is the issue correct, duplicate, spam, invalid, externally blocked, or not reproducible yet?
|
||||
2. If correct and resolvable, what fix path or already-merged commit resolves it?
|
||||
3. If not currently resolvable, what evidence would change the classification?
|
||||
4. Are secrets, private data, or security details present that require a private path?
|
||||
|
||||
## Template locations
|
||||
|
||||
- Issue intake form: `.github/ISSUE_TEMPLATE/anti_slop_triage.yml`
|
||||
- PR review checklist: `.github/PULL_REQUEST_TEMPLATE.md`
|
||||
- Final aggregate gate: `docs/pr-issue-resolution-gate.md`
|
||||
@@ -1,185 +0,0 @@
|
||||
# G002 alpha security map and verification plan
|
||||
|
||||
Generated by `worker-4` for OMX team task 5 on 2026-05-14.
|
||||
|
||||
## Scope and coordination
|
||||
|
||||
- Active goal context: `G002-alpha-security` / Stream 6 day-one security and permissions gate.
|
||||
- Worker ownership: `worker-1` owns minimal implementation changes for workspace/path enforcement. `worker-4` owns this repository map, integration verification plan, changed-file/commit report, and exact verification evidence.
|
||||
- Boundary: this report does not mutate `.omx/ultragoal` and does not edit shared security/path tests.
|
||||
- Parallel probe status: three native subagents were spawned for repository map, test probe, and change-slice probe, but all failed with `429 Too Many Requests` before returning any findings; local mapping below is based on direct repository inspection.
|
||||
|
||||
## Current permission and path enforcement map
|
||||
|
||||
### Runtime permission policy and enforcer
|
||||
|
||||
- `rust/crates/runtime/src/permissions.rs`
|
||||
- Owns the `PermissionMode` ordering and `PermissionPolicy` authorization contract.
|
||||
- Existing tests cover read-only denial, workspace-write escalation, prompt approvals/denials, danger-full-access allowance, override recording, and required-mode reporting.
|
||||
- Integration risk: any new dynamic file/path rule must preserve the existing `PermissionPolicy::authorize` semantics so prompt/override audit events remain stable.
|
||||
|
||||
- `rust/crates/runtime/src/permission_enforcer.rs`
|
||||
- `PermissionEnforcer::check`, `check_with_required_mode`, `check_file_write`, and `check_bash` convert policy outcomes into structured `EnforcementResult` payloads.
|
||||
- `check_file_write` currently has the direct write gate for workspace-write mode.
|
||||
- `is_within_workspace` is a string-prefix boundary check after simple relative-path joining; it does not canonicalize symlinks, `..`, Windows drive prefixes, or case variants.
|
||||
- Existing tests cover read-only denial, workspace-write inside/outside paths, trailing slashes, root equality, bash read-only heuristics, prompt-mode denial payloads, and structured denied fields.
|
||||
|
||||
### File tool path handling
|
||||
|
||||
- `rust/crates/runtime/src/file_ops.rs`
|
||||
- `read_file`, `write_file`, and `edit_file` normalize paths before filesystem operations but do not themselves require a workspace root.
|
||||
- `read_file_in_workspace`, `write_file_in_workspace`, and `edit_file_in_workspace` exist as boundary-enforced wrappers.
|
||||
- `validate_workspace_boundary` canonicalizes through the caller-provided resolved path and checks `starts_with(workspace_root)`.
|
||||
- `is_symlink_escape` detects direct symlink escapes by comparing canonical target to canonical workspace root.
|
||||
- Search tools (`glob_search`, `grep_search`) derive walk roots and prune heavy directories, but they are separate from the write enforcement path.
|
||||
- Existing tests cover oversized/binary reads, workspace-boundary read rejection, symlink escape detection, glob brace expansion, ignored directories, and grep/glob behavior.
|
||||
|
||||
### Bash command validation
|
||||
|
||||
- `rust/crates/runtime/src/bash_validation.rs`
|
||||
- `validate_command` runs mode validation, sed validation, destructive warning checks, then path validation.
|
||||
- `validate_read_only` blocks write-like commands, state-modifying commands, write redirects, and mutating git subcommands in read-only mode.
|
||||
- `validate_mode` warns when workspace-write commands appear to target hard-coded system paths.
|
||||
- `validate_paths` warns for `../`, `~/`, and `$HOME` references; it is intentionally heuristic and does not resolve shell expansion or canonical targets.
|
||||
- Existing tests cover read-only blockers, destructive warnings, sed in-place blocking, path traversal/home warnings, command classification, and full pipeline allow/block/warn outcomes.
|
||||
|
||||
### Sandbox and diagnostics surfaces
|
||||
|
||||
- `rust/crates/runtime/src/sandbox.rs`
|
||||
- Owns container/sandbox status detection and workspace-only sandbox command construction.
|
||||
- Relevant for day-one security because sandbox status must not overstate filesystem isolation.
|
||||
|
||||
- `rust/crates/rusty-claude-cli/src/main.rs`
|
||||
- Owns CLI permission-mode parsing, direct JSON/text diagnostic output, `/permissions`, `/status`, `/doctor`, and command dispatch paths.
|
||||
- Existing CLI integration tests under `rust/crates/rusty-claude-cli/tests/` cover permission prompt scenarios and output-format contracts.
|
||||
|
||||
- `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs`
|
||||
- End-to-end harness includes `bash_permission_prompt_approved`, `bash_permission_prompt_denied`, read/write file allow/deny, and plugin workspace-write scenarios.
|
||||
|
||||
## Existing G002-adjacent coverage
|
||||
|
||||
- Unit-level permission coverage:
|
||||
- `cargo test -p runtime permissions::tests`
|
||||
- `cargo test -p runtime permission_enforcer::tests`
|
||||
- `cargo test -p runtime bash_validation::tests`
|
||||
- `cargo test -p runtime file_ops::tests`
|
||||
|
||||
- CLI and integration coverage:
|
||||
- `cargo test -p rusty-claude-cli --test mock_parity_harness`
|
||||
- `cargo test -p rusty-claude-cli --test output_format_contract`
|
||||
- `cargo test -p rusty-claude-cli --test cli_flags_and_config_defaults`
|
||||
|
||||
- Board/report validation coverage:
|
||||
- `python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json`
|
||||
- `python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json`
|
||||
|
||||
## Recommended safe work slices
|
||||
|
||||
### Implementation lane (owned by worker-1 unless re-scoped)
|
||||
|
||||
1. Replace string-prefix workspace boundary checks with canonical path comparison in the runtime enforcement path.
|
||||
- Primary files: `rust/crates/runtime/src/permission_enforcer.rs`, possibly shared helper extraction from `rust/crates/runtime/src/file_ops.rs`.
|
||||
- Regression cases: `../` traversal, symlink escape, root prefix collision (`/workspace` vs `/workspacex`), relative paths, trailing slash root equality.
|
||||
|
||||
2. Ensure direct file tools call workspace-aware wrappers when active permission mode is `workspace-write`.
|
||||
- Primary files: likely `rust/crates/runtime/src/mcp_tool_bridge.rs` and/or the runtime tool execution bridge that calls `file_ops`.
|
||||
- Regression cases: direct read/write paths, missing parent creation, symlink parent escape, and error payload stability.
|
||||
|
||||
3. Keep bash validation as a warning/classification layer unless a real shell-expansion resolver is introduced.
|
||||
- Primary files: `rust/crates/runtime/src/bash_validation.rs`, `rust/crates/runtime/src/bash.rs`.
|
||||
- Risk: heuristic parsing cannot faithfully resolve shell expansion, globs, aliases, or platform-specific path rules; avoid claiming hard enforcement unless execution sandbox or command resolver proves it.
|
||||
|
||||
### Test lane (coordinate with worker-3/worker-1 before editing)
|
||||
|
||||
1. Add unit regressions close to each enforcement function before changing behavior.
|
||||
- `permission_enforcer.rs`: canonical path boundary and Windows-shaped path cases.
|
||||
- `file_ops.rs`: write/edit workspace wrappers with symlink parent escapes and missing file parent canonicalization.
|
||||
- `bash_validation.rs`: shell expansion/glob/path warnings remain warnings unless a resolver is introduced.
|
||||
|
||||
2. Add at least one integration test proving the runtime bridge actually routes file tools through workspace enforcement, not only helper functions.
|
||||
- Candidate: `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs` for direct write denial and no file created outside workspace.
|
||||
|
||||
3. Preserve existing prompt/event visibility tests.
|
||||
- Candidate surfaces: permission prompt scenarios in `mock_parity_harness.rs`, status/doctor JSON in `output_format_contract.rs`.
|
||||
|
||||
### Docs/reporting lane (owned by worker-4)
|
||||
|
||||
1. Keep this file as the integration handoff artifact for G002 mapping and verification.
|
||||
2. Report changed files and commits relative to `origin/main` so the leader can integrate worker branches deterministically.
|
||||
3. Include exact command evidence in the task lifecycle result.
|
||||
|
||||
## Changed files relative to `origin/main` at map time
|
||||
|
||||
The worktree currently contains these files added relative to `origin/main` before this task report:
|
||||
|
||||
- `.omx/cc2/board.json`
|
||||
- `.omx/cc2/board.md`
|
||||
- `.omx/cc2/issue-parity-intake.json`
|
||||
- `.omx/cc2/issue-parity-intake.md`
|
||||
- `.omx/cc2/render_board_md.py`
|
||||
- `.omx/cc2/validate_issue_parity_intake.py`
|
||||
- `scripts/cc2_board.py`
|
||||
- `scripts/generate_cc2_board.py`
|
||||
- `scripts/validate_cc2_board.py`
|
||||
|
||||
This task adds:
|
||||
|
||||
- `docs/g002-security-verification-map.md`
|
||||
|
||||
## Commits relative to `origin/main` at map time
|
||||
|
||||
- `8311655` — `omx(team): auto-checkpoint worker-1 [1]`
|
||||
- `c6e2a7d` — `omx(team): merge worker-1`
|
||||
- `481585f` — `omx(team): auto-checkpoint worker-1 [1]`
|
||||
- `74bbf4b` — `omx(team): auto-checkpoint worker-4 [unknown]`
|
||||
- `5c77896` — `omx(team): auto-checkpoint worker-1 [1]`
|
||||
- `07dad88` — `Classify issue and parity intake for CC2 board integration`
|
||||
- `424825f` — `task: G001 human board and docs rendering`
|
||||
- `d15268e` — `Create a canonical CC2 board so every frozen ROADMAP heading is verifiably mapped`
|
||||
- `45b43b5` — `Make the CC2 board schema executable for G001`
|
||||
|
||||
## Verification checklist for leader integration
|
||||
|
||||
Run these from the repository root unless noted:
|
||||
|
||||
1. Python board/schema validation:
|
||||
- `python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json`
|
||||
- `python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json`
|
||||
|
||||
2. Rust formatting and lint/type checks:
|
||||
- `scripts/fmt.sh --check`
|
||||
- `(cd rust && cargo check --workspace)`
|
||||
- `(cd rust && cargo clippy --workspace --all-targets -- -D warnings)`
|
||||
|
||||
3. Targeted G002 security tests:
|
||||
- `(cd rust && cargo test -p runtime permissions::tests permission_enforcer::tests bash_validation::tests file_ops::tests)`
|
||||
- `(cd rust && cargo test -p rusty-claude-cli --test mock_parity_harness)`
|
||||
|
||||
4. Full regression:
|
||||
- `(cd rust && cargo test --workspace)`
|
||||
|
||||
|
||||
## Worker-4 verification evidence (2026-05-14)
|
||||
|
||||
PASS:
|
||||
|
||||
- `python3 scripts/validate_cc2_board.py --board .omx/cc2/board.json` → `PASS cc2 board validation`; 729 items; ROADMAP headings `124/124`; ROADMAP actions `542/542`.
|
||||
- `python3 .omx/cc2/validate_issue_parity_intake.py .omx/cc2/issue-parity-intake.json` → `PASS issue/parity intake: 19 issue rows, 9 parity rows`.
|
||||
- `scripts/fmt.sh --check` → produced no output and exited zero, so the Rust checks continued.
|
||||
- `(cd rust && cargo check --workspace)` → `Finished dev profile` successfully.
|
||||
- `(cd rust && cargo test -p runtime permissions::tests)` → 9 passed.
|
||||
- `(cd rust && cargo test -p runtime permission_enforcer::tests)` → 21 passed.
|
||||
- `(cd rust && cargo test -p runtime bash_validation::tests)` → 32 passed.
|
||||
- `(cd rust && cargo test -p runtime file_ops::tests)` → 14 passed.
|
||||
- `(cd rust && cargo test -p rusty-claude-cli --test mock_parity_harness)` → 1 passed.
|
||||
|
||||
FAIL / integration blockers observed on this worktree:
|
||||
|
||||
- `(cd rust && cargo clippy --workspace --all-targets -- -D warnings)` failed in existing runtime code, not this docs-only task:
|
||||
- `rust/crates/runtime/src/compact.rs:215` / `:216`: `clippy::match_same_arms`.
|
||||
- `rust/crates/runtime/src/policy_engine.rs:5`: `clippy::duration_suboptimal_units`.
|
||||
- `rust/crates/runtime/src/sandbox.rs:295-302`: `clippy::map_unwrap_or`.
|
||||
- `(cd rust && cargo test --workspace)` failed after broad success in API/commands/plugins/runtime tests because `rusty-claude-cli` unit test `tests::session_lifecycle_prefers_running_process_over_idle_shell` asserted `RunningProcess` but observed `IdleShell`.
|
||||
- Rerun of the specific failing test confirmed deterministic failure: `(cd rust && cargo test -p rusty-claude-cli --bin claw tests::session_lifecycle_prefers_running_process_over_idle_shell -- --exact --nocapture)` → 0 passed, 1 failed with the same `IdleShell` vs `RunningProcess` assertion.
|
||||
|
||||
Recommended owner for failures: not `worker-4` unless re-scoped. These failures are outside the docs/report artifact and touch shared runtime/CLI implementation files.
|
||||
@@ -1,96 +0,0 @@
|
||||
# G003 boot/session/preflight verification map
|
||||
|
||||
Generated by `worker-1` for OMX team task 2 on 2026-05-14.
|
||||
|
||||
## Scope and coordination
|
||||
|
||||
- Active goal context: `G003-boot-session` / Stream 1 reliable worker boot and session control.
|
||||
- Boundary: this artifact is an audit/integration map only. It does not mutate `.omx/ultragoal` and it does not change shared implementation or tests.
|
||||
- Current worker split from leader mailbox:
|
||||
- `worker-1`: task 1 worker boot / prompt SLA plus this task 2 audit map.
|
||||
- `worker-2`: default trusted roots / trust resolver.
|
||||
- `worker-3`: startup-no-evidence classifier.
|
||||
- `worker-4`: session control plus preflight/doctor JSON surfaces.
|
||||
- Native subagent probes were attempted for Task 2 (`test probe` and `debug/root-cause probe`) but both failed with `429 Too Many Requests` before returning findings; the map below is based on direct repository inspection.
|
||||
|
||||
## Implementation surface map
|
||||
|
||||
### Worker boot lifecycle and prompt SLA
|
||||
|
||||
- `rust/crates/runtime/src/worker_boot.rs`
|
||||
- Core state types: `WorkerStatus`, `WorkerFailureKind`, `WorkerEventKind`, `WorkerEventPayload`, `StartupFailureClassification`, `StartupEvidenceBundle`, `WorkerTaskReceipt`, and `WorkerReadySnapshot`.
|
||||
- Control plane: `WorkerRegistry::{create,get,observe,resolve_trust,send_prompt,await_ready,restart,terminate,observe_completion,observe_startup_timeout}`.
|
||||
- Lifecycle states currently covered in code: `spawning`, `trust_required`, `tool_permission_required`, `ready_for_prompt`, `running`, `finished`, and `failed`.
|
||||
- Prompt delivery semantics currently use `Running` events and fields `prompt_in_flight`, `last_prompt`, `expected_receipt`, `replay_prompt`, and `prompt_delivery_attempts`.
|
||||
- Startup-no-evidence surface: `observe_startup_timeout` builds `StartupEvidenceBundle` and classifies trust, tool permission, prompt acceptance timeout, prompt misdelivery, transport death, worker crash, or unknown.
|
||||
- File observability surface: `emit_state_file` writes `.claw/worker-state.json` with status, readiness, trust state, prompt-in-flight flag, last event, and update age.
|
||||
|
||||
- `rust/crates/tools/src/lib.rs`
|
||||
- Tool APIs expose the worker control plane through `WorkerCreate`, `WorkerGet`, `WorkerObserve`, `WorkerResolveTrust`, `WorkerAwaitReady`, `WorkerSendPrompt`, `WorkerRestart`, `WorkerTerminate`, and `WorkerObserveCompletion`.
|
||||
- `WorkerCreate` merges `ConfigLoader::trusted_roots()` with per-call `trusted_roots` before calling `WorkerRegistry::create`.
|
||||
- Tool-level tests exercise worker create/observe/send/restart/terminate/completion and state-file transitions.
|
||||
|
||||
### Trust resolver and default trusted roots
|
||||
|
||||
- `rust/crates/runtime/src/trust_resolver.rs`
|
||||
- `TrustConfig`, `TrustAllowlistEntry`, and `TrustResolver` model trust prompts, allowlist/denylist policy, auto-trust, manual approval, and emitted trust events.
|
||||
- `path_matches_trusted_root` and internal `path_matches` canonicalize paths when possible.
|
||||
- Hazard: prefix matching must avoid accidental sibling matches such as `/tmp/work` matching `/tmp/work-evil`; worker-2 owns any changes here.
|
||||
|
||||
- `rust/crates/runtime/src/config.rs`
|
||||
- `trustedRoots` is parsed by `parse_optional_trusted_roots` and exposed through `RuntimeConfig::trusted_roots()` / feature config accessors.
|
||||
- Current default is empty when unset; any project default roots work belongs to worker-2.
|
||||
|
||||
### Session control
|
||||
|
||||
- `rust/crates/runtime/src/session_control.rs`
|
||||
- `SessionStore` namespaces sessions by canonical workspace fingerprint.
|
||||
- Key API: `from_cwd`, `from_data_dir`, `create_handle`, `resolve_reference`, `resolve_managed_path`, `list_sessions`, `latest_session`, `load_session`, and `fork_session`.
|
||||
- Guardrail: `validate_loaded_session` rejects cross-workspace sessions and allows legacy sessions only when their path remains inside the current workspace.
|
||||
- Worker-4 owns changes to this lane.
|
||||
|
||||
### CLI doctor/status/preflight and bootstrap-adjacent surfaces
|
||||
|
||||
- `rust/crates/commands/src/lib.rs`
|
||||
- Slash command definitions include `/status`, `/sandbox`, and `/doctor`.
|
||||
- JSON rendering for command surfaces exists through handler functions and tests in the same module.
|
||||
|
||||
- `rust/crates/tools/src/lib.rs`
|
||||
- Bash and PowerShell tool runners include `workspace_test_branch_preflight`, which returns structured output with `return_code_interpretation: preflight_blocked:branch_divergence` for broad workspace tests on stale branches.
|
||||
- Tests around `bash_workspace_tests_are_blocked_when_branch_is_behind_main` and targeted-test skipping protect this preflight behavior.
|
||||
|
||||
## Existing focused verification commands
|
||||
|
||||
Run from `rust/` unless noted.
|
||||
|
||||
- Worker boot runtime contract:
|
||||
- `cargo test -p runtime worker_boot -- --nocapture`
|
||||
- Worker tool API contract:
|
||||
- `cargo test -p tools worker_ -- --nocapture`
|
||||
- Session control contract:
|
||||
- `cargo test -p runtime session_control -- --nocapture`
|
||||
- Trust resolver/config trusted roots:
|
||||
- `cargo test -p runtime trust_resolver -- --nocapture`
|
||||
- `cargo test -p runtime config::tests::parses_trusted_roots_from_settings config::tests::trusted_roots_default_is_empty_when_unset -- --nocapture`
|
||||
- Preflight/tool branch guardrails:
|
||||
- `cargo test -p tools bash_workspace_tests_are_blocked_when_branch_is_behind_main bash_targeted_tests_skip_branch_preflight -- --nocapture`
|
||||
- Formatting/type/lint baseline:
|
||||
- `../scripts/fmt.sh --check`
|
||||
- `cargo check -p runtime -p tools -p commands`
|
||||
- `cargo clippy -p runtime -p tools -p commands --all-targets --no-deps -- -D warnings`
|
||||
|
||||
## Gaps and hazards for leader integration
|
||||
|
||||
- Prompt SLA event naming is partially implicit: `send_prompt` emits `WorkerEventKind::Running`; it does not expose separate `prompt.sent`, `prompt.accepted`, `prompt.acceptance_delayed`, or `prompt.acceptance_timeout` event names. The current equivalent evidence is `prompt_in_flight`, `Running`, `observe_completion`, and startup-timeout classification.
|
||||
- `StartupFailureClassification::PromptAcceptanceTimeout` is covered in `worker_boot` tests; full terminal/transport integration should still be verified by the leader or worker-3 if a real pane watcher exists outside the in-memory registry.
|
||||
- Default trusted roots are parsed and merged into `WorkerCreate`, but unset config currently means no default roots. Worker-2 owns any change to default root selection.
|
||||
- Session control protects workspace fingerprints at load/fork time; worker-4 owns CLI/doctor/preflight JSON contract changes.
|
||||
- Full-workspace clippy currently has known unrelated runtime findings observed during task 1 verification; do not block this docs-only map on those unless leader re-scopes cleanup.
|
||||
|
||||
## Recommended safe integration order
|
||||
|
||||
1. Integrate worker boot / prompt SLA changes first and run `cargo test -p runtime worker_boot -- --nocapture` plus `cargo test -p tools worker_ -- --nocapture`.
|
||||
2. Integrate trust-root changes and rerun trust/config tests plus the worker create config merge test.
|
||||
3. Integrate startup-no-evidence classifier changes and rerun `cargo test -p runtime worker_boot -- --nocapture`.
|
||||
4. Integrate session control / preflight / doctor JSON changes and rerun session-control, commands JSON, and preflight tests.
|
||||
5. Run final formatting, targeted cargo check/clippy, then broader workspace tests with known full-workspace failures documented separately.
|
||||
@@ -1,67 +0,0 @@
|
||||
# G004 event and report contract guidance
|
||||
|
||||
Captured: 2026-05-14 during the Stream 2 `G004-events-reports` team run.
|
||||
|
||||
Purpose: keep the user/developer-facing contract guidance for ROADMAP Phase 2 in one tracked source that points back to the code and roadmap anchors. This document is intentionally not the implementation map for task 5; it describes the interoperability contract consumers should rely on as the lane-event, report-schema, approval-token, and capability-negotiation lanes land.
|
||||
|
||||
## Source-of-truth anchors
|
||||
|
||||
| Contract family | Roadmap anchor | Current implementation / owner-facing anchor | Consumer guidance |
|
||||
| --- | --- | --- | --- |
|
||||
| Canonical lane events | `ROADMAP.md` Phase 2 §4, §4.5, §4.6, §4.7 | `rust/crates/runtime/src/lane_events.rs` (`LaneEventName`, `LaneEventStatus`, `LaneEventMetadata`, terminal reconciliation helpers) | Consume `event`, `status`, `emittedAt`, and `metadata` fields as the canonical state stream; do not infer lane state from terminal text when a structured event is present. |
|
||||
| Report schema v1 and projections | `ROADMAP.md` §4.25-§4.34 | Stream 2 report-schema lane / fixtures as they land | Treat a report as a versioned canonical payload plus derived projections. A projection may omit or transform fields only with explicit provenance: compatibility downgrade, redaction policy, truncation, or source absence. |
|
||||
| Policy-blocked handoff and approval-token chain | `ROADMAP.md` §4.37-§4.39 | Stream 2 approval-token lane as it lands | Treat policy blocks and owner approvals as typed artifacts, not prose. Execute an exception only when the approval token matches actor, policy, action, repo/branch/commit scope, expiry, and one-time-use state. |
|
||||
| Capability negotiation | `ROADMAP.md` §4.25, §4.26, §4.32, §4.34 | Report-schema/projection fixtures and consumer conformance cases as they land | Consumers must advertise supported schema versions, optional field families, projection views, redaction semantics, and downgrade handling before relying on reduced payloads. |
|
||||
|
||||
## Lane event contract
|
||||
|
||||
The lane-event stream is the first machine-trustworthy surface for Stream 2. Consumers should expect these invariants when reading `LaneEvent` payloads:
|
||||
|
||||
- `event` is a typed event name, currently including the core lane lifecycle (`lane.started`, `lane.ready`, `lane.blocked`, `lane.red`, `lane.green`, `lane.finished`, `lane.failed`), branch health (`branch.stale_against_main`, `branch.workspace_mismatch`), reconciliation (`lane.reconciled`, `lane.superseded`, `lane.closed`), and ship provenance (`ship.prepared`, `ship.commits_selected`, `ship.merged`, `ship.pushed_main`).
|
||||
- `status` is the normalized state for the event; consumers should prefer it over freeform `detail` text for automation.
|
||||
- `metadata.seq`, `metadata.timestamp_ms`, and terminal fingerprints are the ordering/deduplication hooks. Consumers should use terminal reconciliation output rather than double-reporting contradictory terminal bursts.
|
||||
- `metadata.provenance`, `metadata.environment_label`, `metadata.emitter_identity`, and `metadata.confidence_level` tell consumers whether an event is live lane truth, test traffic, healthcheck/replay output, or transport-layer evidence.
|
||||
- `metadata.session_identity` and `metadata.ownership` bind a lane event to the session, workspace, workflow scope, owner, and watcher action. A watcher should not act on events whose ownership says `observe` or `ignore`.
|
||||
|
||||
Minimal consumer rule: if a structured event exists, pane text is supporting evidence only. Pane scraping must not override a higher-confidence typed event with matching session/workflow ownership.
|
||||
|
||||
## Report schema v1 contract
|
||||
|
||||
A Stream 2 report should be treated as a canonical fact record with optional projections. Consumers should preserve these semantics even when they receive only a downgraded view:
|
||||
|
||||
- Every report payload declares a schema version and a stable report identity/content hash for the full-fidelity canonical payload.
|
||||
- Assertions are labeled as `fact`, `hypothesis`, or another declared evidence class, with confidence and source references. Negative evidence is first-class: `not observed`, `checked and absent`, and `redacted` are distinct states.
|
||||
- Field deltas name the field, previous value/state, new value/state, attribution, and whether the delta came from source content, projection, downgrade, or redaction policy.
|
||||
- Projections carry lineage back to the canonical report id/content hash and name the projection view, capability set, schema version, redaction policy, and deterministic rendering inputs.
|
||||
- Redaction provenance is explicit. A missing field without a redaction/downgrade/source-absence reason is not enough evidence for an automated consumer to conclude the underlying fact is absent.
|
||||
|
||||
Minimal consumer rule: store the canonical identity and projection metadata together. Do not compare two projections as state changes unless their canonical content hash or declared projection inputs differ.
|
||||
|
||||
## Approval-token and policy-blocked contract
|
||||
|
||||
Policy-blocked actions and owner-approved exceptions belong in the same structured event/report family:
|
||||
|
||||
- A policy block names the typed reason, policy source, actor scope, blocked action, and safe fallback path.
|
||||
- An approval token names the approving actor, policy exception, action, repository/worktree/branch/commit scope, expiry, and allowed use count.
|
||||
- Token consumption records the exact action and scope that spent the token. Replays, scope expansion, expired tokens, and revoked tokens should surface typed policy errors.
|
||||
- Delegation traceability stays attached when another worker/lane executes the approved action; the executor must be able to prove which approval artifact authorized the exception.
|
||||
|
||||
Minimal consumer rule: prose such as "approved" is not an executable approval. Require the structured token and verify that it is unconsumed and scoped to the exact action before proceeding.
|
||||
|
||||
## Capability negotiation and conformance
|
||||
|
||||
Mixed-version consumers are expected during Stream 2 rollout. Producers and consumers should negotiate instead of silently dropping fields:
|
||||
|
||||
- Consumers advertise supported report schema versions, field families, projection views, redaction states, downgrade semantics, and fixture/conformance suite version.
|
||||
- Producers preserve one canonical full-fidelity report and emit downgraded projections only with `downgraded_for_compatibility` metadata.
|
||||
- Deterministic projection inputs include schema version, consumer capability set, projection policy version, redaction policy version, and canonical content hash.
|
||||
- Consumer conformance should distinguish syntax acceptance from semantic correctness, especially for `redacted` vs `missing`, stale vs current projections, negative evidence, and approval-token replay states.
|
||||
|
||||
Minimal consumer rule: an older consumer may accept a downgraded projection, but it must surface the downgrade as a capability limitation rather than treating omitted fields as canonical absence.
|
||||
|
||||
## Documentation maintenance rules
|
||||
|
||||
- Keep ROADMAP Phase 2 as the product requirement source and this file as the contract-reading guide.
|
||||
- Keep Rust type names and event names aligned with `rust/crates/runtime/src/lane_events.rs`; update this document in the same change when public event names or metadata semantics change.
|
||||
- Keep report-schema examples/fixtures aligned with this guide once the schema lane lands; fixture updates should explain intentional schema or projection changes.
|
||||
- Do not mutate `.omx/ultragoal` from worker lanes. Leader-owned Ultragoal checkpointing consumes commits and verification evidence from task results.
|
||||
@@ -1,57 +0,0 @@
|
||||
# G004 events/reports verification map
|
||||
|
||||
Scope source: OMX team `g004-events-reports-u-e61d2271`, worker-1 tasks 1, 2, 4, 5. Workers must not mutate `.omx/ultragoal`; leader owns aggregate checkpoints.
|
||||
|
||||
## Ownership boundaries
|
||||
|
||||
- **Lane events / event identity / terminal reconciliation** — `rust/crates/runtime/src/lane_events.rs`, exported through `rust/crates/runtime/src/lib.rs`; tool-manifest consumers in `rust/crates/tools/src/lib.rs` write `LaneEvent` vectors.
|
||||
- **Report schema v1 / projection / redaction / capability negotiation** — `rust/crates/runtime/src/report_schema.rs`, exported through `rust/crates/runtime/src/lib.rs`; fixture note at `rust/crates/runtime/tests/fixtures/report_schema_v1/README.md`.
|
||||
- **Approval-token chain** — ROADMAP §§4.38-4.40; owned by worker-2 for this team split. Worker-1 did not edit it.
|
||||
- **Pinpoint closure batch** — runtime hygiene across compact/search-parser/policy/sandbox/integration-test surfaces: `rust/crates/runtime/src/compact.rs`, `rust/crates/runtime/src/file_ops.rs`, `rust/crates/runtime/src/policy_engine.rs`, `rust/crates/runtime/src/sandbox.rs`, `rust/crates/runtime/tests/integration_tests.rs`.
|
||||
- **Regression harness / docs alignment** — worker-3/worker-4 lanes per leader split. Coordinate before editing shared docs/tests.
|
||||
|
||||
## Relevant symbols and files
|
||||
|
||||
- `LaneEventName`, `LaneEventStatus`, `LaneEventMetadata`, `LaneEventBuilder`, `compute_event_fingerprint`, `dedupe_terminal_events`, `reconcile_terminal_events` in `runtime/src/lane_events.rs`.
|
||||
- `CanonicalReportV1`, `ReportClaim`, `NegativeEvidence`, `FieldDelta`, `ConsumerCapabilities`, `ReportProjectionV1`, `canonicalize_report`, `project_report`, `report_schema_v1_registry` in `runtime/src/report_schema.rs`.
|
||||
- `AgentOutput.lane_events`, `persist_agent_terminal_state`, `write_agent_manifest`, `maybe_commit_provenance` in `tools/src/lib.rs`.
|
||||
- Search/parser closure helpers: `summarize_messages` in `compact.rs`, `grep_search_impl` / `build_grep_content_output` in `file_ops.rs`.
|
||||
|
||||
## Completed worker-1 commits
|
||||
|
||||
- `f45f05e` / task 1 auto-checkpoint — terminal event fingerprints use stable SHA-256-derived canonical JSON, and production convenience terminal events attach/refresh fingerprints after payload changes.
|
||||
- `3989fc0` — report schema v1 contract, deterministic projection/redaction provenance, capability negotiation, and fixture note.
|
||||
- `7fff4c4` / task 4 auto-checkpoint — strict runtime clippy closure batch across compact/file_ops/policy/sandbox/integration tests.
|
||||
|
||||
## Current verification evidence
|
||||
|
||||
Run from `rust/` unless noted:
|
||||
|
||||
- `cargo test -p runtime lane_events -- --nocapture` — PASS, 46 lane-event tests.
|
||||
- `cargo test -p runtime report_schema -- --nocapture` — PASS, 4 report-schema tests.
|
||||
- `cargo check -p runtime` — PASS.
|
||||
- `cargo clippy -p runtime --all-targets -- -D warnings` — PASS after task 4 closure batch.
|
||||
- `cargo test -p runtime -- --nocapture` — PASS, 531 unit tests, 12 integration tests, doc-tests pass.
|
||||
- `cargo test -p tools lane_event_schema_serializes_to_canonical_names -- --nocapture` — PASS, 1 targeted tools contract test.
|
||||
|
||||
## Leader integration verification plan
|
||||
|
||||
1. Inspect worker commits: `git log --oneline --decorate --max-count=8`.
|
||||
2. Re-run focused contracts:
|
||||
- `cd rust && cargo test -p runtime lane_events -- --nocapture`
|
||||
- `cd rust && cargo test -p runtime report_schema -- --nocapture`
|
||||
- `cd rust && cargo test -p tools lane_event_schema_serializes_to_canonical_names -- --nocapture`
|
||||
3. Re-run runtime quality gate:
|
||||
- `cd rust && cargo check -p runtime`
|
||||
- `cd rust && cargo clippy -p runtime --all-targets -- -D warnings`
|
||||
- `cd rust && cargo test -p runtime -- --nocapture`
|
||||
4. If merging with worker-2 approval-token work, additionally run the worker-2 focused approval-token tests and check for export conflicts in `runtime/src/lib.rs`.
|
||||
5. If merging with worker-3/4 docs or harness work, re-run their named regression harnesses plus `git diff --check`.
|
||||
|
||||
## Integration hazards
|
||||
|
||||
- `runtime/src/lib.rs` export blocks are shared; resolve conflicts by keeping both lane-event and report-schema exports sorted enough to remain readable.
|
||||
- `tools/src/lib.rs` serializes lane events into agent manifests; terminal fingerprint changes intentionally affect `metadata.event_fingerprint` for finished/failed/superseded/merged/closed events with payloads.
|
||||
- `report_schema.rs` currently defines the reusable contract and in-code deterministic fixtures; it does not yet wire report emission into CLI/status surfaces.
|
||||
- ROADMAP approval-token §§4.38-4.40 remain a separate lane; do not treat worker-1 report schema as an approval artifact.
|
||||
- Full workspace checks may include unrelated slow/provider-dependent tests; the verified local gate for this stream is runtime + targeted tools tests above.
|
||||
@@ -1,40 +0,0 @@
|
||||
# G005 Branch Recovery Verification Map
|
||||
|
||||
Scope: worker-1 follow-up map for G005 branch/test awareness and recovery. This file intentionally does not mutate leader-owned `.omx/ultragoal` state.
|
||||
|
||||
## Covered ROADMAP / PRD pinpoints
|
||||
|
||||
- `ROADMAP.md:912-921` — Phase 3 §7 stale-branch detection before broad verification: broad workspace test commands are preflighted before execution, stale/diverged branches emit `branch.stale_against_main`, and targeted tests bypass the broad-test gate.
|
||||
- `ROADMAP.md:922-933` — Phase 3 §8 recovery recipes: stale-branch recovery remains represented by the `stale_branch` recipe, with one automatic attempt before escalation.
|
||||
- `ROADMAP.md:935-949` — Phase 3 §8.5 recovery attempt ledger: `RecoveryContext` now exposes ledger entries with recipe id, attempt count, state, started/finished markers, last failure summary, and escalation reason.
|
||||
- `ROADMAP.md:951-970` — Phase 3 §9 green-ness / hung-test reporting: timed-out test commands now classify as `test.hung` with structured provenance instead of generic timeout.
|
||||
- `prd.json:37-44` — US-003 stale-branch detection before broad verification: verified through the `workspace_test_branch_preflight` broad-test block and targeted-test bypass tests.
|
||||
- `prd.json:50-57` — US-004 recovery recipes with ledger: verified through recovery ledger unit coverage and serialization-compatible recovery structs.
|
||||
|
||||
## Implementation anchors
|
||||
|
||||
- `rust/crates/runtime/src/stale_branch.rs` — existing branch freshness model and policy actions for fresh, stale, and diverged branches.
|
||||
- `rust/crates/tools/src/lib.rs` — `workspace_test_branch_preflight`, `branch_divergence_output`, Bash/PowerShell broad-test gating, and `test.hung` structured timeout provenance on tool-shell timeouts.
|
||||
- `rust/crates/runtime/src/recovery_recipes.rs` — recovery recipes plus `RecoveryLedgerEntry` / `RecoveryAttemptState` ledger surface.
|
||||
- `rust/crates/runtime/src/bash.rs` — runtime Bash timeout classification and structured provenance for hung test commands.
|
||||
- `rust/crates/runtime/src/lib.rs` — public exports for the recovery ledger types.
|
||||
|
||||
## Verification evidence
|
||||
|
||||
- `cargo test -p runtime` → PASS: 538 unit tests, 2 G004 conformance tests, 12 integration tests, and doctests passed.
|
||||
- `cargo test -p tools bash_tool_classifies_test_timeout_as_hung_with_provenance -- --nocapture` → PASS.
|
||||
- `cargo test -p tools bash_workspace_tests_are_blocked_when_branch_is_behind_main -- --nocapture` → PASS.
|
||||
- `cargo test -p tools bash_targeted_tests_skip_branch_preflight -- --nocapture` → PASS.
|
||||
- `cargo check -p runtime -p tools` → PASS.
|
||||
- `cargo clippy -p runtime --all-targets -- -D warnings` → PASS.
|
||||
- `cargo clippy -p tools --lib --no-deps -- -D warnings` → PASS.
|
||||
|
||||
## Known unresolved / out-of-scope items
|
||||
|
||||
- Full `cargo test -p tools` is still red on six permission-enforcer expectation tests unrelated to G005 branch freshness, recovery ledger, or hung-test classification. The failing tests assert old permission wording/read-only behavior and pre-existed this follow-up scope.
|
||||
- ROADMAP stale-base JSON/doctor/status pinpoints remain broader CLI diagnostic-surface work, especially `ROADMAP.md:2425-2489`, `ROADMAP.md:4346-4431`, and `ROADMAP.md:5061-5086`. They are related to branch freshness, but task 1 only required the broad-test freshness gate and narrow reporting surfaces.
|
||||
- No `.omx/ultragoal` files were changed; leader-owned Ultragoal checkpointing remains outside worker scope.
|
||||
|
||||
## Delegation evidence
|
||||
|
||||
Subagent spawn evidence: one subagent was spawned — repository map probe `019e25d5-9be9-7193-8a33-f21450beb62c` — before further serial task-2 mapping, per contract. It errored with 429 Too Many Requests, so direct repository evidence was integrated instead.
|
||||
@@ -1,34 +0,0 @@
|
||||
# G006 Task Policy Board Verification Map
|
||||
|
||||
Goal: `G006-task-policy-board` — Stream 4 task packets, executable policy engine, lane board/status JSON, and running-state liveness heartbeat.
|
||||
|
||||
## Prompt-to-artifact checklist
|
||||
|
||||
| Requirement | Artifact/evidence |
|
||||
| --- | --- |
|
||||
| Typed task packet schema with objective, scope, files/resources, acceptance criteria, model/provider, permission profile, recovery policy, verification plan, reporting targets | `rust/crates/runtime/src/task_packet.rs` extends `TaskPacket` with `acceptance_criteria`, `resources`, `model`, `provider`, `permission_profile`, `recovery_policy`, `verification_plan`, and `reporting_targets`; tests cover legacy defaulted JSON and rich CC2 roundtrip. |
|
||||
| Backwards compatibility for existing task packets and tool callers | `serde(default)`/optional fields in `task_packet.rs`; `rust/crates/tools/src/lib.rs` `run_task_packet_creates_packet_backed_task` updated for rich schema; legacy packet test keeps old JSON accepted. |
|
||||
| Executable policy decisions for retry/rebase/merge/escalate/stale cleanup/approval token | `rust/crates/runtime/src/policy_engine.rs` adds `RetryAvailable`, `RebaseRequired`, `StaleCleanupRequired`, approval-token conditions/actions, `PolicyEvaluation`, `PolicyDecisionEvent`, and decision-table tests. |
|
||||
| Policy decisions explainable and typed-event logged/emittable | `PolicyDecisionEvent` serializable typed event with `rule_name`, `priority`, `kind`, `explanation`, `approval_token_id`; `evaluate_with_events` emits event per flattened action. |
|
||||
| Active lane board/dashboard/status JSON over canonical state | `rust/crates/runtime/src/task_registry.rs` adds `LaneBoard`, `LaneBoardEntry`, `LaneFreshness`, `lane_board_at`, and `lane_status_json_at`; CLI status JSON advertises lane board contract in `rust/crates/rusty-claude-cli/src/main.rs`. |
|
||||
| Heartbeats independent of terminal rendering with healthy/stalled/transport-dead cases | `rust/crates/runtime/src/session.rs` adds `SessionHeartbeat`/`SessionLiveness` from persisted session health state; `task_registry.rs` heartbeat freshness is computed from canonical heartbeat timestamps and transport state. |
|
||||
| Task/lane status JSON shows active/blocked/finished lanes with heartbeat freshness | `task_registry::tests::lane_board_groups_active_blocked_finished_and_reports_freshness`; `status_json_surfaces_session_lifecycle_for_clawhip`/status JSON surfaces lane board metadata. |
|
||||
| Leader-owned ultragoal audit remains separate from workers | No worker changed `.omx/ultragoal`; leader will checkpoint with fresh `get_goal` only after terminal verification. |
|
||||
|
||||
## Verification run
|
||||
|
||||
- `git diff --check` — PASS
|
||||
- `cargo fmt --manifest-path rust/Cargo.toml --all -- --check` — PASS
|
||||
- `cargo check --manifest-path rust/Cargo.toml -p runtime -p tools -p rusty-claude-cli` — PASS
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime task_packet -- --nocapture` — PASS (5 task packet tests)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime policy_engine -- --nocapture` — PASS (12 unit + 1 integration match)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime task_registry -- --nocapture` — PASS (17 task registry tests)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p runtime session_heartbeat -- --nocapture` — PASS (1 heartbeat test)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p tools run_task_packet_creates_packet_backed_task -- --nocapture` — PASS
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p tools lane_completion -- --nocapture` — PASS (6 tests)
|
||||
- `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli status_json_surfaces -- --nocapture` — PASS
|
||||
|
||||
## Remaining gates
|
||||
|
||||
- G006 can be checkpointed after team lifecycle is reconciled terminal and this commit is pushed.
|
||||
- Open PR/issue reconciliation remains explicitly deferred to G011/G012 via `docs/pr-issue-resolution-gate.md`.
|
||||
@@ -1,55 +0,0 @@
|
||||
# G007 MCP Lifecycle Mapping
|
||||
|
||||
This map captures the current MCP/plugin lifecycle implementation surfaces for the
|
||||
G007 plugin/MCP maturity lane. It is intentionally evidence-oriented: each row
|
||||
names the runtime surface, the code owner boundary, and the current gap when the
|
||||
surface is metadata-only.
|
||||
|
||||
## Degraded MCP startup
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Best-effort discovery | `rust/crates/runtime/src/mcp_stdio.rs` (`McpServerManager::discover_tools_best_effort`) | Discovers every configured stdio server, keeps tools from working servers, and records per-server failures without aborting the whole startup. |
|
||||
| Failure payload | `rust/crates/runtime/src/mcp_stdio.rs` (`McpDiscoveryFailure`, `UnsupportedMcpServer`) | Failure records include `server_name`, lifecycle `phase`, `required`, `error`, `recoverable`, and structured `context`. Unsupported non-stdio servers keep `transport`, `required`, and `reason`. |
|
||||
| Degraded report model | `rust/crates/runtime/src/mcp_lifecycle_hardened.rs` (`McpDegradedReport`, `McpFailedServer`, `McpErrorSurface`) | Normalizes degraded startup into working servers, failed servers, available tools, and missing tools. `McpErrorSurface` carries phase, server, message, context, and recoverability. |
|
||||
| CLI runtime handoff | `rust/crates/rusty-claude-cli/src/main.rs` (`RuntimeMcpState::new`) | Converts discovery failures and unsupported servers into a runtime degraded report, including `required` in the error context. |
|
||||
|
||||
## Required vs. optional MCP servers
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Config contract | `rust/crates/runtime/src/config.rs` (`ScopedMcpServerConfig.required`) | `mcpServers.<name>.required` parses as a boolean and defaults to `false`; invalid non-boolean values are rejected by the shared optional-bool parser. |
|
||||
| Scope merge | `rust/crates/runtime/src/config.rs` (`merge_mcp_servers`) | Requiredness is stored beside the scope and transport-specific config after normal user/project/local merging. |
|
||||
| Inventory/reporting | `rust/crates/commands/src/lib.rs` (`mcp_server_json`, `render_mcp_server_report`) | JSON reports expose `server.required`; text `show` reports include `Required`. |
|
||||
| Discovery propagation | `rust/crates/runtime/src/mcp_stdio.rs` | Requiredness is copied into managed stdio servers, unsupported server records, discovery failures, and degraded startup context. |
|
||||
| Cache/signature identity | `rust/crates/runtime/src/mcp.rs` (`scoped_mcp_config_hash`) | The hash includes `required:<bool>` so required/optional changes affect MCP config identity. |
|
||||
| Remaining policy gap | runtime behavior | The flag is currently surfaced and propagated as lifecycle metadata. It does not yet fail the whole runtime/session solely because a required server failed; consumers must inspect the degraded report context today. |
|
||||
|
||||
## Config interpolation and redaction surfaces
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Raw config parsing | `rust/crates/runtime/src/config.rs` (`parse_mcp_server_config`, `parse_mcp_remote_server_config`) | `command`, `args`, `url`, `headers`, and `headersHelper` are loaded as literal strings. No dedicated environment, tilde, or workspace-root interpolation pass is present in this parser. |
|
||||
| Redacted key reporting | `rust/crates/commands/src/lib.rs` (`mcp_server_details_json`, `render_mcp_server_report`) | Stdio env and remote/websocket header values are not printed; only `env_keys` / `Header keys` are surfaced. |
|
||||
| Unredacted reporting risk | `rust/crates/commands/src/lib.rs` (`mcp_server_summary`, `mcp_server_details_json`, text `show`) | Command, args, URL, `headers_helper`, OAuth metadata URL/client id, and managed proxy URL/id are currently emitted verbatim. Treat these fields as not-redacted unless a future policy layer classifies them safe. |
|
||||
| OAuth exposure | `rust/crates/commands/src/lib.rs` (`mcp_oauth_json`, `format_mcp_oauth`) | OAuth secret-like values are mostly absent from the current config model, but client id and metadata URL are still reported directly. |
|
||||
|
||||
## Plugin lifecycle contract adjacency
|
||||
|
||||
| Concern | Current surface | Notes |
|
||||
| --- | --- | --- |
|
||||
| Manifest lifecycle | `rust/crates/plugins/src/lib.rs` (`PluginLifecycle`) | Plugin manifests support `lifecycle.Init` and `lifecycle.Shutdown` command arrays. |
|
||||
| Registry summary | `rust/crates/plugins/src/lib.rs` (`PluginSummary::lifecycle_state`) | Installed summaries include enabled state, lifecycle commands, and derived lifecycle state (`ready` or `disabled`). Load failures remain first-class in registry reports. |
|
||||
| CLI JSON output | `rust/crates/rusty-claude-cli/src/main.rs` (`plugin_command_json`) | Plugin command JSON emits top-level `status`, per-plugin `lifecycle_state` and lifecycle command counts, plus `load_failures` with `lifecycle_state: load_failed`. |
|
||||
|
||||
## Verification anchors
|
||||
|
||||
The current regression anchors for this map are:
|
||||
|
||||
- `cargo test -p runtime parses_typed_mcp_and_oauth_config -- --nocapture`
|
||||
- `cargo test -p runtime manager_discovery_report_keeps_healthy_servers_when_one_server_fails -- --nocapture`
|
||||
- `cargo test -p runtime manager_records_unsupported_non_stdio_servers_without_panicking -- --nocapture`
|
||||
- `cargo test -p commands renders_mcp_reports -- --nocapture`
|
||||
- `cargo test -p plugins installed_plugin_registry_report_collects_load_failures_from_install_root -- --nocapture`
|
||||
- `cargo test -p rusty-claude-cli --test output_format_contract plugins_json_surfaces_lifecycle_contract_when_plugin_is_installed -- --nocapture`
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
# G007 Plugin/MCP Lifecycle Verification Map
|
||||
|
||||
Goal: `G007-plugin-mcp` — Stream 5 plugin/MCP lifecycle maturity from ROADMAP Phase 5.
|
||||
|
||||
Scope: worker-2 follow-up map for W4 mock integration and regression verification. This file intentionally does not mutate leader-owned `.omx/ultragoal` state.
|
||||
|
||||
## Covered ROADMAP / CC2 anchors
|
||||
|
||||
- `ROADMAP.md:55-57` — Current pain point §6: plugin/MCP startup failures, handshake failures, config errors, partial startup, and degraded mode need clean classification.
|
||||
- `ROADMAP.md:67` — Product principle §5: MCP partial success must be first-class and structurally report successful and failed servers.
|
||||
- `ROADMAP.md:1033-1059` — Phase 5: first-class plugin/MCP lifecycle contract and MCP end-to-end lifecycle parity.
|
||||
- `.omx/cc2/board.md` Stream 5 active headings: `CC2-RM-H0010`, `CC2-RM-H0080`, `CC2-RM-H0081`, and `CC2-RM-H0082` remain the goal-level source-of-truth anchors for plugin/MCP lifecycle maturity.
|
||||
- `PARITY.md` harness checklist: mock parity scenarios are the executable regression surface for streamed model turns, plugin tool roundtrips, permissions, compaction metadata, and token/cost output.
|
||||
|
||||
## Mock integration anchors
|
||||
|
||||
| Area | Artifact/evidence |
|
||||
| --- | --- |
|
||||
| Deterministic model server | `rust/crates/mock-anthropic-service/src/lib.rs` implements the Anthropic-compatible mock server and scenario router used by CLI parity tests. |
|
||||
| End-to-end CLI mock harness | `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs` starts the mock server, runs clean-environment `claw` commands, asserts JSON output, and optionally writes a machine-readable report via `MOCK_PARITY_REPORT_PATH`. |
|
||||
| Scenario manifest / docs parity guard | `rust/mock_parity_scenarios.json` is required to stay ordered with harness cases; `rust/scripts/run_mock_parity_diff.py --no-run` verifies every manifest `parity_refs[]` string exists in `PARITY.md`. |
|
||||
| Convenience runner | `rust/scripts/run_mock_parity_harness.sh` runs `cargo test -p rusty-claude-cli --test mock_parity_harness -- --nocapture`. |
|
||||
| Plugin-path regression | `plugin_tool_roundtrip` loads an external plugin fixture from isolated settings and executes `plugin_echo` through the runtime tool registry. |
|
||||
| Lifecycle-adjacent regression | `auto_compact_triggered` and `token_cost_reporting` prove runtime JSON keeps compaction and usage/cost fields parseable under mock responses, preventing parity drift in machine-readable output. |
|
||||
| MCP degraded-startup regression | `rust/crates/runtime/src/mcp_stdio.rs::manager_discovery_report_keeps_healthy_servers_when_one_server_fails` proves a healthy MCP server remains callable while a broken peer is surfaced in a structured degraded report. |
|
||||
| Plugin lifecycle state regression | `rust/crates/runtime/src/plugin_lifecycle.rs` unit tests cover healthy, degraded, failed, and shutdown states plus startup-event mapping. |
|
||||
|
||||
## Regression verification commands
|
||||
|
||||
Use the smallest command that proves the changed or audited surface, then broaden only when integration risk requires it.
|
||||
|
||||
- Mock scenario/docs map only:
|
||||
- `cd rust && python3 scripts/run_mock_parity_diff.py --no-run`
|
||||
- Full mock integration:
|
||||
- `cd rust && cargo test -p rusty-claude-cli --test mock_parity_harness -- --nocapture`
|
||||
- `cd rust && python3 scripts/run_mock_parity_diff.py`
|
||||
- Plugin/MCP lifecycle contract:
|
||||
- `cd rust && cargo test -p runtime plugin_lifecycle -- --nocapture`
|
||||
- `cd rust && cargo test -p runtime mcp_stdio::tests::manager_discovery_report_keeps_healthy_servers_when_one_server_fails -- --exact --nocapture`
|
||||
- Standard Rust gates for implementation changes touching these surfaces:
|
||||
- `cd rust && cargo fmt --all -- --check`
|
||||
- `cd rust && cargo check -p runtime -p rusty-claude-cli -p mock-anthropic-service`
|
||||
- `cd rust && cargo clippy -p runtime --all-targets -- -D warnings`
|
||||
|
||||
## Known gaps / follow-ups
|
||||
|
||||
- The mock parity harness validates plugin tool execution but does not yet spin up a real MCP stdio server through the CLI prompt path; MCP degraded-startup remains covered by runtime manager tests.
|
||||
- Worker-4 owns the plugin command fallthrough regression implementation lane (`task-10`); this map records the verification/docs boundary and should not duplicate that parser work.
|
||||
- Full `cargo clippy -p runtime --all-targets -- -D warnings` can be blocked by unrelated `policy_engine.rs` clippy violations in this worktree; when that happens, report the exact pre-existing diagnostics and keep focused lifecycle tests green.
|
||||
- No `.omx/ultragoal` files were changed; leader-owned Ultragoal checkpointing remains outside worker scope.
|
||||
|
||||
## Delegation evidence
|
||||
|
||||
Subagent spawn evidence: Task 9 spawned three probes before deeper local inspection — repository map probe `019e291d-e700-7171-b7bc-27ec0f6c850f`, debug/root-cause probe `019e291d-e86f-78d0-a137-214ede03285c`, and test/docs probe `019e291e-135c-79e1-80d0-9fd82866bd6e`. The repository-map probe errored with 429 Too Many Requests, and the remaining probes had not returned by the time the local verification map was grounded in direct repository evidence, so the findings above were integrated from that direct evidence instead.
|
||||
@@ -1,89 +0,0 @@
|
||||
# G009 Windows docs/release readiness verification map
|
||||
|
||||
## Scope and source
|
||||
|
||||
This map ties the Stream 8 acceptance target from `.omx/plans/claw-code-2-0-adaptive-plan.md` to repository artifacts and local verification. It is the worker-1 integration lane artifact; it does not mutate `.omx/ultragoal` and avoids duplicating peer implementation lanes for Windows CI, install/provider docs, and policy/link work.
|
||||
|
||||
Stream 8 source requirement summary:
|
||||
|
||||
- PowerShell-first docs and CLI examples.
|
||||
- Safe provider switching examples.
|
||||
- Staged packaging path: source-only alpha first, binary release matrix next, package managers later.
|
||||
- Windows smoke CI for help/doctor/config/status without live credentials.
|
||||
- License, contribution, security, and support policies.
|
||||
- Command/link validation for adoption docs.
|
||||
|
||||
## Acceptance-to-evidence matrix
|
||||
|
||||
| Acceptance area | Repository artifact(s) | Verification command(s) | Notes |
|
||||
|---|---|---|---|
|
||||
| PowerShell-first Windows install/run path | `README.md` (`Windows setup`, post-build binary location, PowerShell `.exe` examples); `install.sh` (Unix/WSL installer guard) | `python3 .github/scripts/check_doc_source_of_truth.py`; `cargo run -p rusty-claude-cli -- --help` | Current docs explicitly present Windows as a supported PowerShell path for source builds and `claw.exe`; `install.sh` is Linux/macOS/WSL-oriented, so native PowerShell binary usage and WSL installer usage must stay clearly separated. |
|
||||
| Safe provider switching examples | `USAGE.md` (`Auth`, `Local Models`, `Supported Providers & Models`); `docs/MODEL_COMPATIBILITY.md` | `cargo test -p api providers::`; `cargo test -p rusty-claude-cli --test output_format_contract provider_diagnostics_explain_openai_compatible_capabilities -- --nocapture` | Provider docs cover Anthropic API-key vs bearer-token shape, OpenAI-compatible routing, Ollama/OpenRouter/DashScope examples, and prefix routing to avoid ambient credential misrouting. |
|
||||
| Release artifact quickstart and staged packaging path | `README.md` (`Quick start`, `Post-build: locate the binary and verify`); `.github/workflows/release.yml`; `docs/windows-install-release.md` | `cargo build --release -p rusty-claude-cli`; `cargo run -p rusty-claude-cli -- version --output-format json`; `python3 .github/scripts/check_release_readiness.py` (release-readiness gate) | Release workflow packages Linux, macOS, and `claw-windows-x64.exe` assets with `.sha256` checksum files. README remains source-build-first, and the Windows quickstart names the checksum verification path. |
|
||||
| Windows smoke CI without live credentials | `.github/workflows/rust-ci.yml`; CLI local-only surfaces in `rust/crates/rusty-claude-cli/src/main.rs` (`help`, `doctor`, resumed `/config`, `status`) | `cargo run -p rusty-claude-cli -- --help`; `cargo run -p rusty-claude-cli -- doctor --output-format json`; `cargo run -p rusty-claude-cli -- status --output-format json`; `cargo run -p rusty-claude-cli -- config --output-format json` | The smoke target is local-only command execution with isolated config and no real provider credentials. If the Windows CI lane is not present in a branch, this map is the integration checklist for that lane. |
|
||||
| License metadata | `rust/Cargo.toml` (`workspace.package.license = "MIT"`) | `grep -n '^license = "MIT"' rust/Cargo.toml` | Cargo metadata declares MIT. A root `LICENSE` file remains the user-facing policy artifact to add if not already present in the policy lane. |
|
||||
| Contribution/security/support policies | Expected root policy docs: `CONTRIBUTING.md`, `SECURITY.md`, `SUPPORT.md`; existing support links in `README.md` | `test -f CONTRIBUTING.md`; `test -f SECURITY.md`; `test -f SUPPORT.md`; `python3 .github/scripts/check_doc_source_of_truth.py` | These files are policy-lane outputs. This map records the exact release gate so missing files fail visibly instead of being inferred from README links. |
|
||||
| Command/link validation | `.github/scripts/check_doc_source_of_truth.py`; `README.md`; `USAGE.md`; `docs/**` | `python3 .github/scripts/check_doc_source_of_truth.py`; `python3 - <<'PY' ...` link/reference check listed below | Existing validation catches stale branding/assets/invites across adoption docs. The lightweight reference check below catches broken relative Markdown links without network access. |
|
||||
|
||||
## Windows/local smoke command contract
|
||||
|
||||
Use isolated config and no live credentials. These commands must not require `ANTHROPIC_API_KEY`, `OPENAI_API_KEY`, `XAI_API_KEY`, or `DASHSCOPE_API_KEY`:
|
||||
|
||||
```powershell
|
||||
# From repository root on Windows PowerShell
|
||||
$env:CLAW_CONFIG_HOME = Join-Path $env:TEMP "claw-smoke-config"
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\ANTHROPIC_AUTH_TOKEN -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\XAI_API_KEY -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\DASHSCOPE_API_KEY -ErrorAction SilentlyContinue
|
||||
cd rust
|
||||
cargo run -p rusty-claude-cli -- --help
|
||||
cargo run -p rusty-claude-cli -- doctor --output-format json
|
||||
cargo run -p rusty-claude-cli -- status --output-format json
|
||||
cargo run -p rusty-claude-cli -- config --output-format json
|
||||
```
|
||||
|
||||
Equivalent Unix smoke used by this worker:
|
||||
|
||||
```bash
|
||||
env -u ANTHROPIC_API_KEY -u ANTHROPIC_AUTH_TOKEN -u OPENAI_API_KEY -u XAI_API_KEY -u DASHSCOPE_API_KEY \
|
||||
CLAW_CONFIG_HOME="$(mktemp -d)" cargo run -p rusty-claude-cli -- --help
|
||||
```
|
||||
|
||||
## Offline Markdown reference check
|
||||
|
||||
```bash
|
||||
python3 - <<'PY'
|
||||
from pathlib import Path
|
||||
import re, sys
|
||||
root = Path.cwd()
|
||||
errors = []
|
||||
for path in [Path('README.md'), Path('USAGE.md'), Path('PARITY.md'), Path('PHILOSOPHY.md'), *Path('docs').glob('*.md')]:
|
||||
if not path.exists():
|
||||
continue
|
||||
text = path.read_text(encoding='utf-8')
|
||||
for match in re.finditer(r'\[[^\]]+\]\(([^)]+)\)', text):
|
||||
target = match.group(1).split('#', 1)[0]
|
||||
if not target or '://' in target or target.startswith('mailto:'):
|
||||
continue
|
||||
if not (root / path.parent / target).resolve().exists():
|
||||
line = text.count('\n', 0, match.start()) + 1
|
||||
errors.append(f'{path}:{line}: missing relative link target {match.group(1)}')
|
||||
if errors:
|
||||
print('\n'.join(errors))
|
||||
sys.exit(1)
|
||||
print('offline markdown reference check passed')
|
||||
PY
|
||||
```
|
||||
|
||||
## Release gate
|
||||
|
||||
A Stream 8 release candidate is ready when all of the following are true:
|
||||
|
||||
1. PowerShell examples in `README.md` build and run `claw.exe` from a clean Windows checkout.
|
||||
2. Provider examples in `USAGE.md` show session-local/shell-local switching, include cleanup for conflicting ambient credentials (`unset` / `Remove-Item Env:`), and never instruct users to paste secrets into persistent config by default.
|
||||
3. Windows smoke CI runs help/doctor/config/status without live credentials, separates native PowerShell `claw.exe` smoke from WSL `install.sh` smoke, and archives JSON output on failure.
|
||||
4. Release artifacts include the documented platform matrix or the docs clearly state source-only alpha status.
|
||||
5. `LICENSE`, `CONTRIBUTING.md`, `SECURITY.md`, and `SUPPORT.md` exist or the policy lane records an explicit release-blocking exception.
|
||||
6. Doc source-of-truth and offline relative-link validation pass.
|
||||
@@ -1,62 +0,0 @@
|
||||
# G010 clone disambiguation metadata and verification map
|
||||
|
||||
Scope: worker-2 task 5 for `G010-session-hygiene` / Stream 9 session hygiene, local state, and recovery UX. This artifact maps the clone/worktree disambiguation contract and the focused verification surface without mutating leader-owned `.omx/ultragoal` state.
|
||||
|
||||
## Contract summary
|
||||
|
||||
Claw session state is intentionally scoped to the current workspace clone/worktree. Operators and automation should treat the **session partition**, not a bare session id or the flat `.claw/sessions/` directory, as the identity boundary.
|
||||
|
||||
Required metadata and behaviors:
|
||||
|
||||
- **Workspace-bound partition**: managed sessions live under `.claw/sessions/<workspace_fingerprint>/`, where the fingerprint is a stable 16-character FNV-1a digest of the canonical workspace path.
|
||||
- **Canonical path input**: `SessionStore::from_cwd` and `SessionStore::from_data_dir` canonicalize their workspace path before computing the partition, preventing `/tmp/foo` vs `/private/tmp/foo` and relative-vs-absolute spelling from creating two stores for the same clone.
|
||||
- **Clone/worktree isolation**: two distinct clones or worktrees must get different session partitions, even if session ids collide.
|
||||
- **Legacy safety**: flat legacy sessions under `.claw/sessions/` remain readable only when they are bound to the same workspace or are unbound but physically inside the current workspace; sessions whose persisted `workspace_root` points at another clone are rejected as `WorkspaceMismatch`.
|
||||
- **Fork lineage stays local**: `/session fork` / managed session forking keeps the forked session in the same workspace partition and records parent id plus optional branch name.
|
||||
- **User-facing disambiguation**: empty-session copy names the actual fingerprint directory and explains that sessions from other CWDs are intentionally invisible.
|
||||
|
||||
## Implementation anchors
|
||||
|
||||
| Contract area | Repo anchor | Evidence role |
|
||||
| --- | --- | --- |
|
||||
| Partition layout and canonical workspace root | `rust/crates/runtime/src/session_control.rs:10-18`, `:32-47`, `:54-71` | Documents and implements `.claw/sessions/<workspace_hash>/` for `from_cwd` and explicit data-dir stores. |
|
||||
| Fingerprint algorithm | `rust/crates/runtime/src/session_control.rs:300-312` | Defines the 16-character FNV-1a workspace fingerprint used as the clone disambiguator. |
|
||||
| Managed create/resolve/list/load/fork APIs | `rust/crates/runtime/src/session_control.rs:86-204` | Ensures handles, `latest`, load, and fork resolve inside the active partition. |
|
||||
| Legacy/cross-workspace guard | `rust/crates/runtime/src/session_control.rs:213-233`, `:557-567` | Rejects mismatched persisted `workspace_root` and allows only same-workspace legacy files. |
|
||||
| Empty partition copy | `rust/crates/runtime/src/session_control.rs:535-543` | Reports `.claw/sessions/<fingerprint>/` plus the workspace-partition note. |
|
||||
| CLI wrapper | `rust/crates/rusty-claude-cli/src/main.rs:5952-6040` | Routes session CLI helpers through `current_session_store()`, so CLI list/latest/load uses the same partition. |
|
||||
| CLI session-list lifecycle context | `rust/crates/rusty-claude-cli/src/main.rs:5991-6027`, `:12960-12990` | Renders saved-only/dirty/abandoned lifecycle context for the current partition. |
|
||||
| CLI session resolution regression | `rust/crates/rusty-claude-cli/src/main.rs:13470-13579` | Covers JSONL default, legacy flat resolution, latest selection, and workspace mismatch rejection from CLI wrappers. |
|
||||
|
||||
## Covered roadmap and dogfood anchors
|
||||
|
||||
- `ROADMAP.md:1125-1129` — session files are namespaced by workspace fingerprint, and wrong-workspace session access is rejected.
|
||||
- `ROADMAP.md:1419-1441` — empty/missing session messages must expose the fingerprint directory instead of implying a flat `.claw/sessions/` search.
|
||||
- `ROADMAP.md:1453-1476` — the session partition boundary must be visible or shared deliberately; current contract is visible CWD/workspace partitioning.
|
||||
- `ROADMAP.md:5797-5902` — canonicalization closes the symlink/path-equivalence split in workspace fingerprints.
|
||||
- `ROADMAP.md:6342-6366` and `ROADMAP.md:6384-6411` — remaining Stream 9 risks around reported CWD form, failed-resume filesystem side effects, and broad-CWD resume guards are related UX/recovery lanes, not clone identity itself.
|
||||
|
||||
## Focused verification map
|
||||
|
||||
| Claim | Focused check |
|
||||
| --- | --- |
|
||||
| Same canonical workspace spellings share one partition | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_from_cwd_canonicalizes_equivalent_paths -- --nocapture` |
|
||||
| Distinct clones/worktrees do not see each other's sessions | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_from_cwd_isolates_sessions_by_workspace -- --nocapture` |
|
||||
| Explicit data-dir stores still namespace by workspace | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_from_data_dir_namespaces_by_workspace -- --nocapture` |
|
||||
| Same-workspace legacy sessions are readable; cross-workspace ones are rejected | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_rejects_legacy_session_from_other_workspace session_store_loads_safe_legacy_session_from_same_workspace session_store_loads_unbound_legacy_session_from_same_workspace -- --nocapture` |
|
||||
| `latest` and managed reference resolution stay inside the active partition | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_latest_and_resolve_reference -- --nocapture` and `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli latest_session_alias_resolves_most_recent_managed_session -- --nocapture` |
|
||||
| Forks retain partition and lineage metadata | `cargo test --manifest-path rust/Cargo.toml -p runtime session_store_fork_stays_in_same_namespace -- --nocapture` |
|
||||
| CLI wrapper rejects wrong-workspace files | `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli load_session_reference_rejects_workspace_mismatch -- --nocapture` |
|
||||
| Docs-only map is syntactically clean | `git diff --check` |
|
||||
| Broader type/test gate for the touched domain | `cargo check --manifest-path rust/Cargo.toml -p runtime -p rusty-claude-cli` plus `cargo test --manifest-path rust/Cargo.toml -p runtime session_control -- --nocapture` |
|
||||
|
||||
## Known boundaries and integration notes
|
||||
|
||||
- This worker intentionally did **not** edit `docs/g010-session-hygiene-verification-map.md` because worker-4 task 7 also names that final aggregate map. This file is the worker-2 clone-disambiguation map that worker-4/leader can link or merge into the aggregate map.
|
||||
- The current `SessionStore::from_cwd` contract keys on the canonical current directory, not necessarily the git top-level. That is acceptable only if status/help surfaces keep the partition boundary visible; `ROADMAP.md:1453-1476` remains the product tradeoff record.
|
||||
- Failed-resume directory creation and broad-CWD guards are related session hygiene hazards but are owned by the Stream 9 CLI/recovery lanes, not this docs-only clone-disambiguation task.
|
||||
- No `.omx/ultragoal` files were changed; leader-owned aggregate checkpointing consumes this commit and task lifecycle evidence.
|
||||
|
||||
## Delegation evidence
|
||||
|
||||
Subagent spawn evidence: one repository map probe, `019e295d-a3dc-7041-bc96-30ee52b95698`, spawned before deeper serial mapping per the task contract; it errored with `429 Too Many Requests`, so the direct repository evidence above was integrated instead.
|
||||
@@ -1,21 +0,0 @@
|
||||
# G010 Session Hygiene Verification Map
|
||||
|
||||
Stream 9 session hygiene is implemented in the Rust runtime/CLI as workspace-scoped session storage plus resume-safe recovery commands.
|
||||
|
||||
## Acceptance mapping
|
||||
|
||||
| Acceptance area | Code surface | Evidence |
|
||||
| --- | --- | --- |
|
||||
| Generated session files are not accidentally committed | `.gitignore`, `rust/.gitignore` ignore `.claw/sessions/` and `.claude/sessions/` | `git check-ignore .claw/sessions/example.jsonl rust/.claw/sessions/example.jsonl .claude/sessions/example.json` |
|
||||
| Per-worktree session isolation | `rust/crates/runtime/src/session_control.rs` (`SessionStore`, `workspace_fingerprint`, workspace validation) | `cargo test -p runtime session_store_from_cwd_isolates_sessions_by_workspace` |
|
||||
| List/resume/delete/exists contracts | `rust/crates/commands/src/lib.rs` parses `/session list`, `/session exists`, `/session delete`, `/resume`; `rust/crates/rusty-claude-cli/src/main.rs` renders text/JSON resume-safe session commands | `cargo test -p rusty-claude-cli session_exists_resume_command_reports_json_contract`; `cargo test -p rusty-claude-cli resume_report_uses_sectioned_layout` |
|
||||
| Compact and provider context-window recovery | `rust/crates/runtime/src/compact.rs`; `rust/crates/rusty-claude-cli/src/main.rs` context-window error recovery guidance and resumed `/compact` | `cargo test -p rusty-claude-cli provider_context_window_errors_are_reframed_with_same_guidance`; `cargo test -p commands compacts_sessions_via_slash_command` |
|
||||
| JSONL bloat safeguards | `rust/crates/runtime/src/session.rs` rotates oversized JSONL session files and keeps bounded rotated logs | `cargo test -p runtime rotates_and_cleans_up_large_session_logs` |
|
||||
| Interrupt/recovery path | `rust/crates/rusty-claude-cli/src/main.rs` keeps `/clear --confirm`, `/compact`, `/status`, and `/resume latest` resume-safe for unusable threads | `cargo test -p rusty-claude-cli context_window_preflight_errors_render_recovery_steps`; `cargo test -p rusty-claude-cli parses_resume_flag_with_multiple_slash_commands` |
|
||||
| Clone/session disambiguation | `Session` persists `workspace_root`; forks persist parent/branch metadata; session list shows lineage and lifecycle | `cargo test -p runtime persists_workspace_root_round_trip_and_forks_inherit_it`; `cargo test -p runtime forks_sessions_with_branch_metadata_and_persists_it` |
|
||||
|
||||
## Notes for leader audit
|
||||
|
||||
- Workers did not mutate `.omx/ultragoal`; this file is a repo-local verification map for team evidence only.
|
||||
- Runtime-owned session state remains under ignored `.claw/sessions/<workspace-fingerprint>/` paths.
|
||||
- Resume-safe JSON output uses stable `kind` fields (`restored`, `compact`, `session_list`, `session_exists`, etc.) so claws can route without scraping text.
|
||||
@@ -1,68 +0,0 @@
|
||||
# G011 ACP/Zed and JSON-RPC status contract
|
||||
|
||||
Claw Code 2.0 keeps ACP/Zed and JSON-RPC serving behind the stable task,
|
||||
session-control, and event/report contracts from the roadmap. The current public
|
||||
surface is therefore a **truthful unsupported status**, not a hidden daemon.
|
||||
|
||||
## Supported status queries
|
||||
|
||||
The following commands are status queries and exit with code `0`:
|
||||
|
||||
```bash
|
||||
claw acp
|
||||
claw acp serve
|
||||
claw --acp
|
||||
claw -acp
|
||||
claw acp --output-format json
|
||||
claw acp serve --output-format json
|
||||
```
|
||||
|
||||
`serve` is deliberately an alias for status today. It does not bind a socket,
|
||||
start a daemon, or expose a JSON-RPC endpoint.
|
||||
|
||||
## JSON envelope
|
||||
|
||||
`claw acp --output-format json` returns a stable envelope for editor probes and
|
||||
CI checks:
|
||||
|
||||
```json
|
||||
{
|
||||
"schema_version": "1.0",
|
||||
"kind": "acp",
|
||||
"status": "unsupported",
|
||||
"phase": "discoverability_only",
|
||||
"supported": false,
|
||||
"exit_code": 0,
|
||||
"serve_alias_only": true,
|
||||
"protocol": {
|
||||
"name": "ACP/Zed",
|
||||
"json_rpc": false,
|
||||
"daemon": false,
|
||||
"endpoint": null,
|
||||
"serve_starts_daemon": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Consumers should check `kind == "acp"`, `supported == false`, and
|
||||
`protocol.json_rpc == false` instead of inferring support from command presence.
|
||||
|
||||
## Unsupported invocations
|
||||
|
||||
Malformed ACP invocations, such as `claw acp start`, exit with code `1`. With
|
||||
`--output-format json`, stderr uses the normal CLI error envelope and sets:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "error",
|
||||
"kind": "unsupported_acp_invocation",
|
||||
"exit_code": 1
|
||||
}
|
||||
```
|
||||
|
||||
## Deferral gate
|
||||
|
||||
Real ACP/Zed or JSON-RPC serve work remains deferred until the roadmap contracts
|
||||
for task packets, session control, and event/report schemas are stable. This
|
||||
keeps desktop, marketplace, and editor integrations from becoming alternate
|
||||
sources of truth before the CLI/file/API contracts are ready.
|
||||
@@ -1,62 +0,0 @@
|
||||
# G011 Ecosystem/Ops/UX Verification Map
|
||||
|
||||
G011 closes the laterals that were intentionally deferred from the earlier safety,
|
||||
session, MCP, Windows, and docs streams. This map is the cross-lane gate for the
|
||||
team run: it names the surfaces that can be verified locally, the exact checks to
|
||||
rerun after worker integrations, and the UX deferrals that must remain explicit
|
||||
until their product contracts are stable.
|
||||
|
||||
## Cross-lane acceptance matrix
|
||||
|
||||
| Lane | Owned surface | Regression evidence | Gate / gap |
|
||||
| --- | --- | --- | --- |
|
||||
| ACP/Zed status and JSON contracts | `rust/crates/rusty-claude-cli/src/main.rs` parses `claw acp`, `claw acp serve`, `--acp`, and `-acp`; `README.md` and `rust/README.md` document discoverability-only status | `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract acp_guidance_emits_json_when_requested -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli local_command_help_flags_stay_on_the_local_parser_path -- --nocapture` | Real ACP/Zed daemon support remains deferred; status output must not imply a running protocol endpoint. |
|
||||
| Plugin/marketplace local routing | `rust/crates/rusty-claude-cli/src/main.rs` routes `claw plugins`, `claw plugin`, and `claw marketplace` to local plugin handling; `rust/crates/commands/src/lib.rs` keeps `/plugin` aliases in shared slash-command help | `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli removed_login_and_logout_subcommands_error_helpfully -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli direct_slash_commands_surface_shared_validation_errors -- --nocapture`; `python3 -m unittest tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_filter_excludes_plugin_sources tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_aliases_execute_as_local_commands tests.test_porting_workspace.PortingWorkspaceTests.test_route_plugin_slash_commands_match_commands tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_stream_emits_command_match tests.test_porting_workspace.PortingWorkspaceTests.test_turn_loop_plugin_commands_are_not_prompt_only` | Marketplace is an alias to local plugin management only; no remote marketplace browsing/install contract is claimed. |
|
||||
| TUI/copy/paste/clickable path UX | `rust/crates/commands/src/lib.rs` advertises `/copy`, `/paste`, `/desktop`, and path-oriented commands; `rust/crates/rusty-claude-cli/src/main.rs` renders compact file/tool paths for terminal readability | `cargo test --manifest-path rust/Cargo.toml -p commands renders_help_with_grouped_categories_and_keyboard_shortcuts -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_helpers_compact_output -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_truncates_large_read_output_for_display_only -- --nocapture` | Clipboard integration, full-screen TUI mode, and clickable terminal hyperlinks are not stable product contracts yet; keep them as roadmap/UX follow-ups unless a targeted implementation lands. |
|
||||
| Desktop integration deferral | `rust/crates/commands/src/lib.rs` includes `/desktop`; `rust/crates/rusty-claude-cli/src/main.rs` treats it as not implemented in the current build | `cargo test --manifest-path rust/Cargo.toml -p commands renders_help_from_shared_specs -- --nocapture`; `cargo test --manifest-path rust/Cargo.toml -p commands renders_per_command_help_detail -- --nocapture` | `/desktop` must stay discoverable but non-committal until a desktop launch/API contract exists. |
|
||||
| Navigation/file-context/local-provider docs | `README.md`, `USAGE.md`, `rust/README.md`, `docs/MODEL_COMPATIBILITY.md`, and worker-2 docs updates | `python3 .github/scripts/check_doc_source_of_truth.py`; `python3 .github/scripts/check_release_readiness.py`; `git diff --check` | Re-run after docs integrations; this lane should not alter Rust behavior unless docs expose a code contract gap. |
|
||||
| Issue/PR ops gate | `docs/pr-issue-resolution-gate.md`, `docs/roadmap-pr-goals.md`, and issue/PR triage templates if present | `python3 .github/scripts/check_release_readiness.py`; `git diff --check`; optional `python3 scripts/validate_cc2_board.py` only when `.omx/cc2/board.md` changes | Worker lanes must not merge/close remote PRs or issues; final reconciliation remains leader-owned. |
|
||||
|
||||
## Task 5 UX/deferral support notes
|
||||
|
||||
- `/copy`, `/paste`, and `/desktop` are parsed slash-command names, but current
|
||||
runtime handling still reports unimplemented commands rather than performing
|
||||
clipboard or desktop side effects. That is safer than pretending support exists.
|
||||
- `/marketplace` is intentionally a plugin alias; it should not be described as
|
||||
a remote marketplace until install/search/update semantics and trust policy are
|
||||
specified.
|
||||
- Path readability is covered by terminal rendering helpers that compact long
|
||||
tool outputs and preserve paths in read/write/edit summaries. Clickable OSC-8
|
||||
links, if added later, need separate tests because terminal support varies.
|
||||
- Full-screen TUI mode remains aspirational (`rust/TUI-ENHANCEMENT-PLAN.md`);
|
||||
current verification should focus on the inline REPL/help/status surfaces.
|
||||
|
||||
## Final verification sequence
|
||||
|
||||
Run these after all G011 worker commits are integrated into the leader branch:
|
||||
|
||||
```bash
|
||||
git diff --check
|
||||
python3 .github/scripts/check_doc_source_of_truth.py
|
||||
python3 .github/scripts/check_release_readiness.py
|
||||
cargo check --manifest-path rust/Cargo.toml -p commands -p rusty-claude-cli
|
||||
cargo test --manifest-path rust/Cargo.toml -p commands renders_help_from_shared_specs -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p commands renders_help_with_grouped_categories_and_keyboard_shortcuts -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p commands renders_per_command_help_detail -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli removed_login_and_logout_subcommands_error_helpfully -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli direct_slash_commands_surface_shared_validation_errors -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli local_command_help_flags_stay_on_the_local_parser_path -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_helpers_compact_output -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli tool_rendering_truncates_large_read_output_for_display_only -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract acp_guidance_emits_json_when_requested -- --nocapture
|
||||
cargo test --manifest-path rust/Cargo.toml -p rusty-claude-cli --test output_format_contract plugins_json_surfaces_lifecycle_contract_when_plugin_is_installed -- --nocapture
|
||||
python3 -m unittest tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_filter_excludes_plugin_sources tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_aliases_execute_as_local_commands tests.test_porting_workspace.PortingWorkspaceTests.test_route_plugin_slash_commands_match_commands tests.test_porting_workspace.PortingWorkspaceTests.test_plugin_command_stream_emits_command_match tests.test_porting_workspace.PortingWorkspaceTests.test_turn_loop_plugin_commands_are_not_prompt_only
|
||||
```
|
||||
|
||||
## Leader audit notes
|
||||
|
||||
- This map is repo-local evidence only; workers must not mutate `.omx/ultragoal`.
|
||||
- If a check fails because another lane is still in progress, record the failing
|
||||
command and rerun after that lane is integrated instead of weakening the gate.
|
||||
- The minimum terminal condition is: docs checks pass, Rust targeted tests pass,
|
||||
and any still-deferred UX surface is explicitly named above.
|
||||
@@ -1,73 +0,0 @@
|
||||
# G012 Final Release Readiness Report
|
||||
|
||||
Snapshot: 2026-05-15T02:59:29Z on `origin/main` / `HEAD` `2e93264919f38835410668ff6ca588606bc629f0`.
|
||||
|
||||
This is the worker-1 roadmap/board audit and release-readiness evidence map for the
|
||||
Claw Code 2.0 final gate. It is intentionally repo-local and non-destructive: it
|
||||
references `.omx/ultragoal` evidence without modifying leader-owned ultragoal
|
||||
state, and it does not merge PRs or close issues owned by the W3/W4 lanes.
|
||||
|
||||
## Release readiness summary
|
||||
|
||||
| Gate | Evidence | Result |
|
||||
| --- | --- | --- |
|
||||
| Ultragoal stream completion | `.omx/ultragoal/goals.json` shows G001-G011 complete and G012 pending at this snapshot. | PASS for pre-final stream completion; G012 remains the active final gate. |
|
||||
| Roadmap board coverage | `python3 scripts/validate_cc2_board.py` -> `PASS cc2 board validation`; 729 board items; 124/124 ROADMAP headings mapped; 542/542 ROADMAP actions mapped. | PASS |
|
||||
| Issue/parity intake coverage | `python3 .omx/cc2/validate_issue_parity_intake.py` -> `PASS issue/parity intake: 19 issue rows, 9 parity rows`. | PASS |
|
||||
| Release docs/readiness script | `python3 .github/scripts/check_release_readiness.py` -> `release-readiness check passed`. | PASS |
|
||||
| Documentation source-of-truth | `python3 .github/scripts/check_doc_source_of_truth.py` -> `doc source-of-truth check passed`. | PASS |
|
||||
| Fresh open PR snapshot | `gh pr list --state open --limit 1000 --json number,title,state,updatedAt,url,isDraft,mergeable` -> 51 open PR records; newest #3040. | PASS for snapshot capture; W3 owns reconciliation/action. |
|
||||
| Fresh open issue snapshot | `gh issue list --state open --limit 1000 --json number,title,state,updatedAt,url,labels` -> 1000 open issue records; newest returned #3036. | PASS for snapshot capture with limit caveat; W4 owns reconciliation/action. |
|
||||
|
||||
## Stream evidence index
|
||||
|
||||
| Goal | Status in local ultragoal state | Primary tracked evidence |
|
||||
| --- | --- | --- |
|
||||
| G001 Stream 0 board | complete | `.omx/cc2/board.json`, `.omx/cc2/board.md`, `scripts/validate_cc2_board.py` |
|
||||
| G002 security | complete | `docs/g002-security-verification-map.md` |
|
||||
| G003 boot/session | complete | `docs/g003-boot-session-verification-map.md` |
|
||||
| G004 events/reports | complete | `docs/g004-events-reports-verification-map.md`, `docs/g004-events-reports-contract.md` |
|
||||
| G005 branch/recovery | complete | `docs/g005-branch-recovery-verification-map.md` |
|
||||
| G006 task/policy/board | complete | `docs/g006-task-policy-board-verification-map.md` |
|
||||
| G007 plugin/MCP | complete | `docs/g007-plugin-mcp-verification-map.md`, `docs/g007-mcp-lifecycle-mapping.md` |
|
||||
| G008 provider compatibility | complete | `docs/local-openai-compatible-providers.md` plus ultragoal quality-gate artifact |
|
||||
| G009 Windows/docs/release | complete | `docs/g009-windows-docs-release-verification-map.md`, `docs/windows-install-release.md` |
|
||||
| G010 session hygiene | complete | `docs/g010-session-hygiene-verification-map.md`, `docs/g010-clone-disambiguation-metadata.md` |
|
||||
| G011 ecosystem/ops/UX | complete | `docs/g011-ecosystem-ops-ux-verification-map.md`, `docs/g011-acp-json-rpc-status-contract.md`, `docs/pr-issue-resolution-gate.md` |
|
||||
| G012 final gate | pending | This report plus W2/W3/W4 final gate reports. |
|
||||
|
||||
## Roadmap PR audit snapshot
|
||||
|
||||
`docs/roadmap-pr-goals.md` lists 17 roadmap/product-fit PRs that must be merged
|
||||
only when correct, resolvable, and safe. The fresh GitHub snapshot shows all 17
|
||||
remain open. Sixteen roadmap-doc PRs are currently `CONFLICTING`, so they are not
|
||||
safe direct-merge candidates from this worker lane. PR #2824 is `MERGEABLE`, but
|
||||
it is explicitly a product-fit review item rather than a direct roadmap merge candidate.
|
||||
|
||||
| PR | Title | Mergeable | Draft | Updated | Worker-1 final-gate disposition |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2824 | docs: personal assistant roadmap | MERGEABLE | false | 2026-04-28T13:05:03Z | Defer to product-fit/leader decision; do not auto-merge as CC2 release gate evidence. |
|
||||
| #2839 | docs(roadmap): add #330 — resume mode stats/cost always zero | CONFLICTING | false | 2026-04-29T12:36:19Z | Not mergeable without conflict resolution; mapped into completed session/status streams. |
|
||||
| #2841 | docs(roadmap): add #332 — doctor json missing top-level status field | CONFLICTING | false | 2026-04-29T13:04:12Z | Not mergeable without conflict resolution; mapped into completed boot/doctor streams. |
|
||||
| #2842 | docs(roadmap): add #334 — version json omits build_date and uses short sha only | CONFLICTING | false | 2026-04-29T13:35:01Z | Not mergeable without conflict resolution; release-readiness docs/scripts pass at HEAD. |
|
||||
| #2844 | docs(roadmap): add #336 — session subcommand resume inconsistency and type/kind error mismatch | CONFLICTING | false | 2026-04-29T14:03:19Z | Not mergeable without conflict resolution; mapped into completed session hygiene streams. |
|
||||
| #2846 | docs(roadmap): add #331 — export silently overwrites on repeated invocations | CONFLICTING | false | 2026-04-29T13:02:02Z | Not mergeable without conflict resolution; action remains W3/leader triage if still desired. |
|
||||
| #2848 | docs(roadmap): add #333 — no in-session settings inspect command | CONFLICTING | false | 2026-04-29T13:32:01Z | Not mergeable without conflict resolution; action remains W3/leader triage if still desired. |
|
||||
| #2850 | docs(roadmap): add #335 — session list omits created_at_ms field | CONFLICTING | false | 2026-04-29T14:01:29Z | Not mergeable without conflict resolution; mapped into completed session metadata streams. |
|
||||
| #2858 | docs(roadmap): add #343 — session subcommand resume-safety inconsistently enforced | CONFLICTING | false | 2026-04-29T16:02:45Z | Not mergeable without conflict resolution; mapped into completed session/recovery streams. |
|
||||
| #2862 | docs(roadmap): add #342 — status json omits active session ID, workspace counters ambiguous | CONFLICTING | false | 2026-04-29T19:04:31Z | Not mergeable without conflict resolution; mapped into completed status/session streams. |
|
||||
| #2864 | docs(roadmap): add #364 — /cost returns no cost_usd; identical to /stats | CONFLICTING | false | 2026-04-29T22:32:52Z | Not mergeable without conflict resolution; mapped into completed UX/status contract review. |
|
||||
| #2865 | docs(roadmap): add #362 — doctor auth false-positive: misses CLI session tokens | CONFLICTING | false | 2026-04-29T22:06:28Z | Not mergeable without conflict resolution; mapped into completed doctor/auth stream work. |
|
||||
| #2867 | docs(roadmap): add #368 — export always appends .txt; response.file reflects mangled path | CONFLICTING | false | 2026-04-29T23:35:35Z | Not mergeable without conflict resolution; action remains W3/leader triage if still desired. |
|
||||
| #2868 | docs(roadmap): add #356 — session list title always null; no rename command | CONFLICTING | false | 2026-04-29T20:36:43Z | Not mergeable without conflict resolution; mapped into completed session identity streams. |
|
||||
| #2869 | docs(roadmap): add #358 — history entries missing role field, no pagination | CONFLICTING | false | 2026-04-29T21:02:55Z | Not mergeable without conflict resolution; mapped into completed session/history review. |
|
||||
| #2872 | docs(roadmap): add #360 — /tokens, /stats, /cost identical output; no context-window or cost_usd | CONFLICTING | false | 2026-04-29T21:32:57Z | Not mergeable without conflict resolution; mapped into completed UX/status contract review. |
|
||||
| #2876 | docs(roadmap): add #354 — /cwd suggests itself in did-you-mean; self-referential loop | CONFLICTING | false | 2026-04-29T20:01:22Z | Not mergeable without conflict resolution; mapped into completed command UX review. |
|
||||
|
||||
## Final-gate stop condition for worker-1
|
||||
|
||||
Worker-1's release-readiness lane is complete when this report is committed and
|
||||
its checks pass. Overall G012 completion still requires the leader to integrate
|
||||
W2 quality-gate classification and W3/W4 PR/issue reconciliation evidence. This
|
||||
report does not claim the remote PR/issue backlog is resolved; it provides the
|
||||
fresh roadmap/board/readiness audit that those lanes can reference.
|
||||
@@ -1,150 +0,0 @@
|
||||
# Local OpenAI-compatible providers and skills setup
|
||||
|
||||
This guide covers two common offline/local workflows:
|
||||
|
||||
1. running Claw against an OpenAI-compatible local model server such as Ollama, llama.cpp, or vLLM; and
|
||||
2. installing local skills from disk so Claw can discover them without network access.
|
||||
|
||||
## Claw is not Claude-only
|
||||
|
||||
Claw Code is a Claude-Code-shaped workflow/runtime, not a Claude-only product. It supports Anthropic directly and can target OpenAI-compatible, provider-routed, and local models depending on configuration. Non-Claude providers are supported honestly: they may require stricter tool-call and response-shape compatibility, and some slash/tool workflows can be rougher than first-party Anthropic/OpenAI paths. Provider-specific identity leaks are bugs, not intended product positioning.
|
||||
|
||||
If you need the most polished daily-driver experience for a specific non-Claude model today, compare that provider’s native tools. If you need runtime/provider hackability, Claw’s OpenAI-compatible route is the intended extension path.
|
||||
|
||||
## OpenAI-compatible routing basics
|
||||
|
||||
Set `OPENAI_BASE_URL` to the server’s `/v1` endpoint and set `OPENAI_API_KEY` to either the required token or a harmless placeholder for local servers that expect an Authorization header. The model name must match what the server exposes.
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:11434/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "qwen3:latest" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
Routing notes:
|
||||
|
||||
- Use the `openai/` prefix for OpenAI-compatible gateways when you need prefix routing to win over ambient Anthropic credentials, for example `--model "openai/gpt-4.1-mini"` with OpenRouter.
|
||||
- For local servers, prefer the exact model ID reported by the server (`qwen3:latest`, `llama3.2`, `Qwen/Qwen2.5-Coder-7B-Instruct`, etc.). If your local gateway exposes slash-containing IDs, use that exact slug.
|
||||
- If you have multiple provider keys in your environment, remove unrelated keys while smoke-testing a local route or choose a model prefix that unambiguously selects the intended provider.
|
||||
- Tool workflows need model/server support for OpenAI-compatible tool calls. Plain prompt smoke tests can pass even when slash/tool workflows still fail because the server returns an incompatible tool-call shape.
|
||||
|
||||
## Raw `/v1/chat/completions` smoke test
|
||||
|
||||
Before debugging Claw, verify the local server speaks the expected wire format:
|
||||
|
||||
```bash
|
||||
curl -sS "$OPENAI_BASE_URL/chat/completions" \
|
||||
-H "Authorization: Bearer ${OPENAI_API_KEY:-local-dev-token}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "qwen3:latest",
|
||||
"messages": [{"role": "user", "content": "Reply exactly HELLO_WORLD_123"}],
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
Expected result: a JSON response with one assistant message containing `HELLO_WORLD_123`. If this fails, fix the local server, model name, or auth token before changing Claw settings.
|
||||
|
||||
## Ollama
|
||||
|
||||
Start Ollama and pull a model:
|
||||
|
||||
```bash
|
||||
ollama pull qwen3:latest
|
||||
ollama serve
|
||||
```
|
||||
|
||||
In another shell:
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:11434/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "qwen3:latest" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
If Ollama is running without auth and your build accepts authless local OpenAI-compatible servers, `unset OPENAI_API_KEY` is also acceptable. Use a placeholder token rather than a real cloud API key for local testing.
|
||||
|
||||
## llama.cpp server
|
||||
|
||||
Start a llama.cpp OpenAI-compatible server with the model name you want Claw to send:
|
||||
|
||||
```bash
|
||||
llama-server -m ./models/qwen2.5-coder.gguf --host 127.0.0.1 --port 8080 --alias qwen2.5-coder
|
||||
```
|
||||
|
||||
Then smoke-test through Claw:
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:8080/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "qwen2.5-coder" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
## vLLM or another OpenAI-compatible server
|
||||
|
||||
Start vLLM with an OpenAI-compatible API server:
|
||||
|
||||
```bash
|
||||
vllm serve Qwen/Qwen2.5-Coder-7B-Instruct --host 127.0.0.1 --port 8000
|
||||
```
|
||||
|
||||
Then route Claw to it:
|
||||
|
||||
```bash
|
||||
export OPENAI_BASE_URL="http://127.0.0.1:8000/v1"
|
||||
export OPENAI_API_KEY="local-dev-token"
|
||||
claw --model "Qwen/Qwen2.5-Coder-7B-Instruct" prompt "Reply exactly HELLO_WORLD_123"
|
||||
```
|
||||
|
||||
## Local skills install from disk
|
||||
|
||||
Skills are discovered from Claw skill roots such as `.claw/skills/` in a workspace and `~/.claw/skills/` for user-level installs. Legacy `.codex/skills/` roots may also be scanned for compatibility, but new local Claw projects should prefer `.claw/skills/`.
|
||||
|
||||
A skill directory should contain a `SKILL.md` file with frontmatter:
|
||||
|
||||
```text
|
||||
my-skill/
|
||||
└── SKILL.md
|
||||
```
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: my-skill
|
||||
description: Explain when this skill should be used.
|
||||
---
|
||||
|
||||
# My Skill
|
||||
|
||||
Instructions for the agent go here.
|
||||
```
|
||||
|
||||
Install a skill from a local path in the interactive REPL:
|
||||
|
||||
```text
|
||||
/skills install /absolute/path/to/my-skill
|
||||
/skills list
|
||||
/skills my-skill
|
||||
```
|
||||
|
||||
Or inspect skills from the direct CLI surface:
|
||||
|
||||
```bash
|
||||
claw skills --output-format json
|
||||
```
|
||||
|
||||
Offline install checklist:
|
||||
|
||||
- Install the specific skill directory, not only the repository root, unless that repository root itself contains `SKILL.md`.
|
||||
- Keep the frontmatter `name` aligned with the directory name users will type.
|
||||
- After installing, run `/skills list` or `claw skills --output-format json` to confirm the discovered name and source path.
|
||||
- If a skill invocation fails with an HTTP/provider error, the skill may have installed correctly but the current model/provider call failed. Run `claw doctor`, verify provider credentials, and try a simple prompt smoke test before reinstalling the skill.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
| Symptom | Check |
|
||||
|---|---|
|
||||
| Claw still asks for Anthropic credentials | Use an explicit OpenAI-compatible model route or remove unrelated Anthropic env vars during local smoke tests. |
|
||||
| `model not found` from local server | Use the exact model ID exposed by Ollama/llama.cpp/vLLM. |
|
||||
| Plain prompt works but tools fail | Confirm the model/server supports OpenAI-compatible tool calls and response shapes. |
|
||||
| Skill says installed but `/skills <name>` fails | Check `/skills list` for the discovered name and source; verify provider credentials separately with `claw doctor`. |
|
||||
| A local docs/log file contains secrets | Redact it before using `@path` file context or attaching it to an issue. |
|
||||
@@ -1,69 +0,0 @@
|
||||
# Navigation and file context guide
|
||||
|
||||
This guide answers the common “how do I browse output?” and “how do I submit a file?” questions for Claw Code. Claw is an agent CLI, not a full file manager: terminal navigation comes from your shell or terminal, while file context is passed explicitly in prompts.
|
||||
|
||||
## Prompt and terminal navigation
|
||||
|
||||
Use your terminal’s normal controls for command history and long output:
|
||||
|
||||
- `Up` / `Down` usually move through shell or REPL prompt history.
|
||||
- `Ctrl-r` searches shell history in most shells.
|
||||
- Long command output is viewed with your terminal scrollback. In tmux, enter copy mode with `Ctrl-b [` then use arrows, PageUp/PageDown, search, or your mouse depending on tmux config.
|
||||
- If output is too large to scroll comfortably, redirect it to a file and give that file to Claw as context:
|
||||
```bash
|
||||
cargo test --workspace 2>&1 | tee logs/test-output.txt
|
||||
claw prompt "Use @logs/test-output.txt as context and summarize the failing tests."
|
||||
```
|
||||
|
||||
Claw may provide slash commands that inspect workspace state, but those commands do not replace your terminal’s scrollback or shell history.
|
||||
|
||||
## Submit repository files with `@path`
|
||||
|
||||
Mention files from the current workspace with `@` paths. Use relative paths from the repository or current working directory:
|
||||
|
||||
```text
|
||||
Read @src/app.ts and explain the bug.
|
||||
Compare @old.md and @new.md.
|
||||
Use @logs/error.txt as context and suggest a fix.
|
||||
Review @README.md and @docs/navigation-file-context.md for consistency.
|
||||
```
|
||||
|
||||
Tips:
|
||||
|
||||
- Prefer the smallest useful file set. Large directories or logs can consume context quickly.
|
||||
- Use exact paths when possible (`@rust/crates/runtime/src/lib.rs`) instead of vague descriptions.
|
||||
- For generated logs, save them under a temporary or ignored directory such as `logs/` and reference the file.
|
||||
- If the file is outside the repository, copy it into a safe workspace location first or use an app/UI attachment feature if your Claw surface supports attachments.
|
||||
|
||||
## Browse or inspect files
|
||||
|
||||
Claw can answer questions about files you reference, and you can ask it to inspect likely locations:
|
||||
|
||||
```text
|
||||
Find where provider routing is implemented and summarize the relevant files.
|
||||
Read @USAGE.md and tell me where local model setup is documented.
|
||||
Search for the command that handles skills install, then explain the control flow.
|
||||
```
|
||||
|
||||
For deterministic shell-side browsing, ordinary commands still work:
|
||||
|
||||
```bash
|
||||
find docs -maxdepth 2 -type f | sort
|
||||
rg -n "OPENAI_BASE_URL|skills install" USAGE.md docs rust
|
||||
sed -n '250,340p' USAGE.md
|
||||
```
|
||||
|
||||
## Attach external files where supported
|
||||
|
||||
Some UI surfaces let you drag and drop or attach files directly. When that is available, use attachments for files that should not be committed to the repo. In terminal-only usage, copy the file into the workspace, reference it with `@path`, then remove it when finished if it was temporary.
|
||||
|
||||
## Secret and credential safety
|
||||
|
||||
Do not paste real API keys, OAuth tokens, private logs, or customer data into prompts, issue comments, screenshots, or committed docs. Before submitting a file:
|
||||
|
||||
- Replace live keys with placeholders such as `sk-ant-REPLACE_ME`, `sk-or-v1-REPLACE_ME`, or `local-dev-token`.
|
||||
- Redact bearer tokens, cookies, session IDs, and private base URLs.
|
||||
- Prefer minimal reproductions over full production logs.
|
||||
- Keep `.env`, key files, and private logs out of git.
|
||||
|
||||
If a task requires credentials, describe the variable names and expected shapes instead of sharing the values.
|
||||
@@ -1,67 +0,0 @@
|
||||
# Claw Code 2.0 PR and Issue Resolution Gate
|
||||
|
||||
This gate was added to the Claw Code 2.0 Ultragoal after the explicit requirement:
|
||||
|
||||
> all PRs should be merged and all issues should be resolved if resolvable and correct.
|
||||
|
||||
## Scope
|
||||
|
||||
Before the Claw Code 2.0 Ultragoal can be marked complete:
|
||||
|
||||
1. Every open GitHub PR at the current final-gate snapshot must be triaged.
|
||||
2. PRs that are correct, compatible with Claw Code 2.0 direction, and pass required verification must be merged.
|
||||
3. PRs that are stale, incorrect, duplicative, unsafe, spam, or outside Claw Code scope must not be merged; each needs a recorded rationale.
|
||||
4. Every open GitHub issue at the current final-gate snapshot must be triaged.
|
||||
5. Issues that are resolvable and correct must be fixed or explicitly linked to a merged fix.
|
||||
6. Issues that are spam, duplicates, incorrect, unactionable, externally blocked, or not Claw Code work must be closed or labeled/commented with rationale when repository policy allows.
|
||||
7. The final completion audit must use a fresh GitHub snapshot, not only the planning snapshot.
|
||||
|
||||
## Current live snapshot
|
||||
|
||||
A fresh non-destructive snapshot was captured locally during G011 W3 execution:
|
||||
|
||||
- Command: `gh pr list --state open --limit 1000 --json number,title,state,updatedAt,url`
|
||||
- Command: `gh issue list --state open --limit 1000 --json number,title,state,updatedAt,url,labels`
|
||||
- Captured on: 2026-05-15T02:39:41Z during the active Ultragoal run.
|
||||
- Observed counts: 51 open PR records and 1000 open issue records from GitHub CLI list calls.
|
||||
- Most recent open PR in the snapshot: #3040, `fix: recognize OPENAI_API_KEY as valid auth for OpenAI-compatible endpoints`, updated 2026-05-14T11:35:23Z.
|
||||
- Most recent open issue in the snapshot: #3039, `How to install skills?`, updated 2026-05-14T08:14:36Z.
|
||||
- The issue snapshot hit the configured `--limit 1000`, so the final gate must treat the issue count as at least 1000 unless a higher-limit export or paginated ledger is captured.
|
||||
|
||||
These command outputs are evidence inputs, not final proof. The final gate must refresh them and compare deltas before any completion claim.
|
||||
|
||||
## Anti-slop triage templates
|
||||
|
||||
Use `docs/anti-slop-triage.md` plus the repository templates before acting on the live snapshot:
|
||||
|
||||
- `.github/ISSUE_TEMPLATE/anti_slop_triage.yml` records the initial issue classification, evidence, and non-destructive next action.
|
||||
- `.github/PULL_REQUEST_TEMPLATE.md` adds PR classification, verification, and resolution-gate checklist items.
|
||||
|
||||
The anti-slop classifications are: `actionable-bug`, `actionable-docs`, `actionable-feature`, `duplicate`, `spam-or-promotion`, `generated-slop-or-hallucinated`, `unsafe-or-security-sensitive`, `not-reproducible-yet`, and `externally-blocked`.
|
||||
|
||||
Automation lanes may recommend labels, comments, defer/close rationales, or merge candidates, but must not merge or close remote PRs/issues without maintainer-owned approval.
|
||||
|
||||
|
||||
## G012 final PR reconciliation snapshot
|
||||
|
||||
Worker-3 captured a fresh PR ledger for the final Claw Code 2.0 gate in `docs/pr-triage-g012-final-gate.json`.
|
||||
|
||||
- Captured on: 2026-05-15T02:58:00Z during G012 final-gate execution.
|
||||
- Commands: `gh pr list --state open --limit 100 ...` plus `gh pr view <number> ...` for per-PR file and merge-state evidence.
|
||||
- Observed count: 51 open PR records.
|
||||
- Merge action taken by worker-3: none. The safety policy requires correct, safe, non-conflicting, resolvable PRs with evidence; this snapshot found 32 PRs in `CONFLICTING`/`DIRTY` state and 19 `MERGEABLE` PRs that GitHub reported as `UNSTABLE` with no fresh check-rollup evidence in the live snapshot.
|
||||
- Docs-only candidate-review PRs: #3021 and #2824 remain deferred until content/source-of-truth review and fresh verification are available.
|
||||
|
||||
## Required final evidence
|
||||
|
||||
The final report must include:
|
||||
|
||||
- Fresh `gh pr list --state open` and `gh issue list --state open` snapshots.
|
||||
- A PR ledger with one row per PR: merge / reject / defer, reason, verification, commit/merge reference.
|
||||
- An issue ledger with one row per issue: fixed / duplicate / spam / invalid / deferred-with-rationale / externally-blocked, reason, and linked evidence.
|
||||
- Verification that no correct, mergeable PR remains unmerged without rationale.
|
||||
- Verification that no resolvable, correct issue remains open without a fix or rationale.
|
||||
|
||||
## Non-goals
|
||||
|
||||
This gate does not require merging unsafe, unverified, incompatible, spam, or incorrect contributions. It requires explicit evidence-backed triage and action for everything that is correct and resolvable.
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,58 +0,0 @@
|
||||
# Roadmap PR goal intake
|
||||
|
||||
Captured: 2026-05-14 (Asia/Seoul) during the Claw Code 2.0 Ultragoal run.
|
||||
|
||||
Purpose: make the user's follow-up requirement durable — all roadmap PRs should be merged when correct/resolvable, and unresolved roadmap deltas should become Ultragoal work rather than being lost. This file is a tracked companion to the leader-owned `.omx/ultragoal/goals.json` and `.omx/ultragoal/ledger.jsonl` artifacts.
|
||||
|
||||
## Merge policy
|
||||
|
||||
- Merge only PRs that are still relevant to Claw Code 2.0, are non-draft, target `main`, and are conflict-free after a fresh mergeability refresh.
|
||||
- Prefer squash merges with a Lore-style body when GitHub allows a direct PR merge.
|
||||
- If a PR is documentation-only but adds a real roadmap gap, merging it is acceptable once checks/conflicts are clean.
|
||||
- If a PR is stale, duplicated by already-landed work, or not product-aligned, do not force-merge; record the rationale and map any still-correct requirement into G011/G012.
|
||||
- After merging roadmap PRs, refresh generated board artifacts (`.omx/cc2/board.json`, `.omx/cc2/board.md`) so Stream 0 coverage stays current.
|
||||
|
||||
## Open roadmap PRs with green historical checks
|
||||
|
||||
These are first-pass merge candidates, pending fresh mergeability and conflict checks against current `main`.
|
||||
|
||||
| PR | Title | Branch | Checks | Mergeable | URL |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2848 | docs(roadmap): add #333 — no in-session settings inspect command | `docs/roadmap-333-no-settings-inspect-command` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2848 |
|
||||
| #2846 | docs(roadmap): add #331 — export silently overwrites on repeated invocations | `docs/roadmap-331-export-filename-collision` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2846 |
|
||||
| #2869 | docs(roadmap): add #358 — history entries missing role field, no pagination | `docs/roadmap-348-history-entries-missing-role` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2869 |
|
||||
| #2850 | docs(roadmap): add #335 — session list omits created_at_ms field | `docs/roadmap-335-session-list-no-created-at` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2850 |
|
||||
| #2868 | docs(roadmap): add #356 — session list title always null; no rename command | `docs/roadmap-347-session-list-title-always-null` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2868 |
|
||||
| #2865 | docs(roadmap): add #362 — doctor auth false-positive: misses CLI session tokens | `docs/roadmap-345-doctor-auth-check-incomplete` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2865 |
|
||||
| #2864 | docs(roadmap): add #364 — /cost returns no cost_usd; identical to /stats | `docs/roadmap-344-cost-command-no-dollar-amount` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2864 |
|
||||
| #2867 | docs(roadmap): add #368 — export always appends .txt; response.file reflects mangled path | `docs/roadmap-346-export-forces-txt-extension` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2867 |
|
||||
| #2862 | docs(roadmap): add #342 — status json omits active session ID, workspace counters ambiguous | `docs/roadmap-342-v2` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2862 |
|
||||
| #2876 | docs(roadmap): add #354 — /cwd suggests itself in did-you-mean; self-referential loop | `docs/roadmap-354-cwd-self-referential-suggestion` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2876 |
|
||||
| #2872 | docs(roadmap): add #360 — /tokens, /stats, /cost identical output; no context-window or cost_usd | `docs/roadmap-349-tokens-stats-cost-identical` -> `main` | 4/4 checks successful | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2872 |
|
||||
|
||||
## Open roadmap PRs needing local validation or CI refresh
|
||||
|
||||
These have no check rollup in the live snapshot; validate locally or refresh CI before merging.
|
||||
|
||||
| PR | Title | Branch | Checks | Mergeable | URL |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2858 | docs(roadmap): add #343 — session subcommand resume-safety inconsistently enforced | `docs/roadmap-340-session-resume-safe-inconsistent` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2858 |
|
||||
| #2839 | docs(roadmap): add #330 — resume mode stats/cost always zero | `docs/roadmap-324-resume-stats-zero` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2839 |
|
||||
| #2841 | docs(roadmap): add #332 — doctor json missing top-level status field | `docs/roadmap-325-doctor-no-status-field` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2841 |
|
||||
| #2844 | docs(roadmap): add #336 — session subcommand resume inconsistency and type/kind error mismatch | `docs/roadmap-329-session-subcommand-resume-inconsistency` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2844 |
|
||||
| #2842 | docs(roadmap): add #334 — version json omits build_date and uses short sha only | `docs/roadmap-328-version-json-incomplete` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2842 |
|
||||
|
||||
## Product-fit review before merge
|
||||
|
||||
These may be broader than the Claw Code 2.0 roadmap scope and need a product-fit decision before merge.
|
||||
|
||||
| PR | Title | Branch | Checks | Mergeable | URL |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| #2824 | docs: personal assistant roadmap | `pr/docs-personal-assistant-roadmap` -> `main` | no checks reported | UNKNOWN | https://github.com/ultraworkers/claw-code/pull/2824 |
|
||||
|
||||
## Ultragoal mapping
|
||||
|
||||
- G003-G010: close implementation gaps that overlap a roadmap PR title if the requirement belongs to the active stream.
|
||||
- G011: reconcile ecosystem/ops/UX roadmap PRs and unresolved correct issues that do not fit earlier streams.
|
||||
- G012: final release gate must prove that every open roadmap PR was merged, closed as duplicate/obsolete, or converted into an explicit remaining goal with evidence.
|
||||
|
||||
@@ -1,195 +0,0 @@
|
||||
# Windows install and release quickstart
|
||||
|
||||
This page is the PowerShell-first path for installing, verifying, and safely switching providers on Windows. It is intentionally copyable without embedding live secrets.
|
||||
|
||||
## Choose an install path
|
||||
|
||||
### Option A: build from source in PowerShell
|
||||
|
||||
Use this when you are developing Claw Code or testing a local checkout.
|
||||
|
||||
```powershell
|
||||
git clone https://github.com/ultraworkers/claw-code
|
||||
Set-Location .\claw-code\rust
|
||||
cargo build --workspace
|
||||
.\target\debug\claw.exe --help
|
||||
.\target\debug\claw.exe doctor
|
||||
```
|
||||
|
||||
For an optimized local binary:
|
||||
|
||||
```powershell
|
||||
Set-Location .\claw-code\rust
|
||||
cargo build --workspace --release
|
||||
.\target\release\claw.exe --help
|
||||
```
|
||||
|
||||
### Option B: use a release artifact
|
||||
|
||||
Use this when a GitHub release publishes a Windows artifact. The release workflow publishes `claw-windows-x64.exe` plus `claw-windows-x64.exe.sha256`; if a future release wraps the binary in a ZIP, prefer the `windows-x86_64` / `pc-windows-msvc` asset and its matching checksum file.
|
||||
|
||||
```powershell
|
||||
$Asset = "claw-windows-x64.exe"
|
||||
$InstallRoot = "$env:LOCALAPPDATA\Programs\claw"
|
||||
New-Item -ItemType Directory -Force $InstallRoot | Out-Null
|
||||
|
||||
# Download $Asset and $Asset.sha256 from the release page, then verify them:
|
||||
$Actual = (Get-FileHash ".\$Asset" -Algorithm SHA256).Hash.ToLowerInvariant()
|
||||
$Expected = (Get-Content ".\$Asset.sha256" | Select-Object -First 1).Split()[0].ToLowerInvariant()
|
||||
if ($Actual -ne $Expected) { throw "checksum mismatch for $Asset" }
|
||||
|
||||
Copy-Item ".\$Asset" "$InstallRoot\claw.exe" -Force
|
||||
& "$InstallRoot\claw.exe" --help
|
||||
& "$InstallRoot\claw.exe" doctor
|
||||
```
|
||||
|
||||
To make that binary available in new PowerShell windows:
|
||||
|
||||
```powershell
|
||||
$InstallRoot = "$env:LOCALAPPDATA\Programs\claw"
|
||||
[Environment]::SetEnvironmentVariable(
|
||||
"Path",
|
||||
[Environment]::GetEnvironmentVariable("Path", "User") + ";$InstallRoot",
|
||||
"User"
|
||||
)
|
||||
```
|
||||
|
||||
Open a new terminal before running `claw --help` from another directory.
|
||||
|
||||
### Option C: WSL
|
||||
|
||||
The repository `install.sh` path is for Linux, macOS, and Windows via WSL. Run it from inside your WSL distribution, not from native PowerShell:
|
||||
|
||||
```powershell
|
||||
wsl --install
|
||||
wsl
|
||||
```
|
||||
|
||||
Then inside WSL:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/ultraworkers/claw-code
|
||||
cd claw-code
|
||||
./install.sh
|
||||
```
|
||||
|
||||
## First-run health checks
|
||||
|
||||
Run these before using live prompts:
|
||||
|
||||
```powershell
|
||||
Set-Location .\claw-code\rust
|
||||
.\target\debug\claw.exe --help
|
||||
.\target\debug\claw.exe doctor
|
||||
.\target\debug\claw.exe status --output-format json
|
||||
.\target\debug\claw.exe config --output-format json
|
||||
```
|
||||
|
||||
`doctor`, `status`, `config`, and `version` support `--output-format json`; do not use a separate `--json` suffix.
|
||||
|
||||
## Safe credential setup
|
||||
|
||||
Set keys only in your local environment or a private `.env` file. Do not paste real keys into shell history shared with others, issue trackers, or documentation.
|
||||
|
||||
Current PowerShell session only:
|
||||
|
||||
```powershell
|
||||
$env:ANTHROPIC_API_KEY = "sk-ant-REPLACE_ME"
|
||||
```
|
||||
|
||||
Persist for future PowerShell windows:
|
||||
|
||||
```powershell
|
||||
setx ANTHROPIC_API_KEY "sk-ant-REPLACE_ME"
|
||||
```
|
||||
|
||||
Open a new terminal after `setx`. To remove a session-local key while testing provider switching:
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
```
|
||||
|
||||
## Safe provider switching examples
|
||||
|
||||
Provider routing is model-prefix first. When multiple credentials exist, choose an explicit model prefix so `claw` does not infer the wrong backend.
|
||||
|
||||
### Anthropic direct
|
||||
|
||||
```powershell
|
||||
$env:ANTHROPIC_API_KEY = "sk-ant-REPLACE_ME"
|
||||
Remove-Item Env:\OPENAI_BASE_URL -ErrorAction SilentlyContinue
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
|
||||
.\target\debug\claw.exe --model "sonnet" prompt "reply with ready"
|
||||
```
|
||||
|
||||
### OpenAI-compatible gateway or OpenRouter
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:OPENAI_BASE_URL = "https://openrouter.ai/api/v1"
|
||||
$env:OPENAI_API_KEY = "sk-or-v1-REPLACE_ME"
|
||||
|
||||
.\target\debug\claw.exe --model "openai/gpt-4.1-mini" prompt "reply with ready"
|
||||
```
|
||||
|
||||
For the default OpenAI-compatible API, omit `OPENAI_BASE_URL` or set it to `https://api.openai.com/v1`, and keep the `openai/` or `gpt-` model prefix explicit.
|
||||
|
||||
### Local OpenAI-compatible server
|
||||
|
||||
Use a loopback URL and a placeholder token unless your local server requires a real one:
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:OPENAI_BASE_URL = "http://127.0.0.1:11434/v1"
|
||||
$env:OPENAI_API_KEY = "local-dev-token"
|
||||
|
||||
.\target\debug\claw.exe --model "llama3.2" prompt "reply with ready"
|
||||
```
|
||||
|
||||
If the local server is authless, remove `OPENAI_API_KEY` instead of putting a real cloud key into local testing:
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\OPENAI_API_KEY -ErrorAction SilentlyContinue
|
||||
```
|
||||
|
||||
### DashScope / Qwen
|
||||
|
||||
```powershell
|
||||
Remove-Item Env:\ANTHROPIC_API_KEY -ErrorAction SilentlyContinue
|
||||
$env:DASHSCOPE_API_KEY = "sk-REPLACE_ME"
|
||||
|
||||
.\target\debug\claw.exe --model "qwen-plus" prompt "reply with ready"
|
||||
```
|
||||
|
||||
## Windows and WSL notifications
|
||||
|
||||
Notification support is exposed through the `notifications` slash command in the interactive REPL. Use JSON/status commands first to confirm the CLI runs, then configure notifications from the REPL if your workflow needs them.
|
||||
|
||||
Native PowerShell smoke path:
|
||||
|
||||
```powershell
|
||||
Set-Location .\claw-code\rust
|
||||
.\target\debug\claw.exe
|
||||
# inside the REPL:
|
||||
/notifications
|
||||
```
|
||||
|
||||
WSL smoke path:
|
||||
|
||||
```bash
|
||||
cd claw-code/rust
|
||||
./target/debug/claw
|
||||
# inside the REPL:
|
||||
/notifications
|
||||
```
|
||||
|
||||
When moving between PowerShell and WSL, keep provider keys in the environment where `claw` is actually running; Windows user env vars set with `setx` are not automatically the same as WSL shell exports.
|
||||
|
||||
## Troubleshooting checklist
|
||||
|
||||
- `claw` not found: use `claw.exe` on Windows or run the binary by full path (`.\target\debug\claw.exe`).
|
||||
- `cargo` not found: reopen PowerShell after installing Rust from <https://rustup.rs/>.
|
||||
- `401 Invalid bearer token`: put `sk-ant-*` values in `ANTHROPIC_API_KEY`, not `ANTHROPIC_AUTH_TOKEN`.
|
||||
- Wrong provider selected: add an explicit model prefix such as `openai/gpt-4.1-mini`, `qwen-plus`, or `grok`.
|
||||
- Release ZIP extracted but command still fails: open a new terminal after updating the user `Path`, or call `& "$env:LOCALAPPDATA\Programs\claw\claw.exe"` directly.
|
||||
245
progress.txt
245
progress.txt
@@ -74,18 +74,6 @@ US-007 COMPLETE (Phase 5 - Plugin/MCP lifecycle maturity)
|
||||
- DegradedMode behavior
|
||||
- Tests: 11 unit tests passing
|
||||
|
||||
|
||||
Iteration 2026-04-27 - ROADMAP #200 COMPLETED
|
||||
------------------------------------------------
|
||||
- Selected next actionable backlog item because no active task was in progress.
|
||||
- ROADMAP #200: Interactive MCP/tool permission prompts are invisible blockers.
|
||||
- Files: rust/crates/runtime/src/worker_boot.rs, rust/crates/runtime/src/recovery_recipes.rs, ROADMAP.md, progress.txt.
|
||||
- Added tool_permission_required worker status and event classification for interactive MCP/tool permission gates.
|
||||
- Added structured ToolPermissionPrompt payload with server/tool identity and prompt preview.
|
||||
- Startup evidence now records tool_permission_prompt_detected and classifies timeout evidence as tool_permission_required.
|
||||
- Readiness snapshots now mark tool-permission-gated workers as blocked, not ready/idle.
|
||||
- Tests: targeted tool_permission regressions, full runtime test/clippy/fmt pending in Ralph verification loop.
|
||||
|
||||
VERIFICATION STATUS:
|
||||
------------------
|
||||
- cargo build --workspace: PASSED
|
||||
@@ -120,29 +108,6 @@ US-010 COMPLETED (Add model compatibility documentation)
|
||||
- Cross-referenced with existing code comments in openai_compat.rs
|
||||
- cargo clippy passes
|
||||
|
||||
Iteration 3: 2026-04-16
|
||||
------------------------
|
||||
|
||||
US-012 COMPLETED (Trust prompt resolver with allowlist auto-trust)
|
||||
- Files: rust/crates/runtime/src/trust_resolver.rs
|
||||
- Enhanced TrustConfig with pattern matching and serde support:
|
||||
- TrustAllowlistEntry struct with pattern, worktree_pattern, description
|
||||
- TrustResolution enum (AutoAllowlisted, ManualApproval)
|
||||
- Enhanced TrustEvent variants with serde tags and metadata
|
||||
- Glob pattern matching with * and ? wildcards
|
||||
- Support for path prefix matching and worktree patterns
|
||||
- Updated TrustResolver with new resolve() signature:
|
||||
- Added worktree parameter for worktree pattern matching
|
||||
- Proper event emission with TrustResolution
|
||||
- Manual approval detection from screen text
|
||||
- Added helper functions:
|
||||
- extract_repo_name() - extracts repo name from path
|
||||
- detect_manual_approval() - detects manual trust from screen text
|
||||
- glob_matches() - recursive backtracking glob matcher
|
||||
- Tests: 25 new tests for pattern matching, serialization, and resolver behavior
|
||||
- All 483 runtime tests pass
|
||||
- cargo clippy passes with no warnings
|
||||
|
||||
US-011 COMPLETED (Performance optimization: reduce API request serialization overhead)
|
||||
- Files:
|
||||
- rust/crates/api/Cargo.toml (added criterion dev-dependency and bench config)
|
||||
@@ -166,213 +131,3 @@ US-011 COMPLETED (Performance optimization: reduce API request serialization ove
|
||||
- is_reasoning_model detection: ~26-42ns depending on model
|
||||
- All tests pass (119 unit tests + 29 integration tests)
|
||||
- cargo clippy passes
|
||||
|
||||
VERIFICATION STATUS (Iteration 3):
|
||||
----------------------------------
|
||||
- cargo build --workspace: PASSED
|
||||
- cargo test --workspace: PASSED (891+ tests)
|
||||
- cargo clippy --workspace --all-targets -- -D warnings: PASSED
|
||||
- cargo fmt -- --check: PASSED
|
||||
|
||||
All 12 stories from prd.json now have passes: true
|
||||
- US-001 through US-007: Pre-existing implementations
|
||||
- US-008: kimi-k2.5 model API compatibility fix
|
||||
- US-009: Unit tests for kimi model compatibility
|
||||
- US-010: Model compatibility documentation
|
||||
- US-011: Performance optimization with criterion benchmarks
|
||||
- US-012: Trust prompt resolver with allowlist auto-trust
|
||||
|
||||
Iteration 4: 2026-04-16
|
||||
------------------------
|
||||
|
||||
US-013 COMPLETED (Phase 2 - Session event ordering + terminal-state reconciliation)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- Added EventTerminality enum (Terminal, Advisory, Uncertainty)
|
||||
- Added classify_event_terminality() function for event classification
|
||||
- Added reconcile_terminal_events() function for deterministic event ordering:
|
||||
- Sorts events by monotonic sequence number
|
||||
- Deduplicates terminal events by fingerprint
|
||||
- Detects transport death uncertainty (terminal + transport death)
|
||||
- Handles out-of-order event bursts
|
||||
- Added events_materially_differ() for detecting meaningful differences
|
||||
- Added 8 comprehensive tests for reconciliation logic:
|
||||
- reconcile_terminal_events_sorts_by_monotonic_sequence
|
||||
- reconcile_terminal_events_deduplicates_same_fingerprint
|
||||
- reconcile_terminal_events_detects_transport_death_uncertainty
|
||||
- reconcile_terminal_events_handles_completed_idle_error_completed_noise
|
||||
- reconcile_terminal_events_returns_none_for_empty_input
|
||||
- reconcile_terminal_events_preserves_advisory_events
|
||||
- events_materially_differ_detects_real_differences
|
||||
- classify_event_terminality_correctly_classifies
|
||||
- Fixed test compilation issues with LaneEventBuilder API
|
||||
|
||||
VERIFICATION STATUS (Iteration 4):
|
||||
----------------------------------
|
||||
- cargo build --workspace: PASSED
|
||||
- cargo test --workspace: PASSED (891+ tests)
|
||||
- cargo clippy --workspace --all-targets -- -D warnings: PASSED
|
||||
- cargo fmt -- --check: PASSED
|
||||
|
||||
US-013 marked passes: true in prd.json
|
||||
|
||||
US-014 COMPLETED (Phase 2 - Event provenance / environment labeling)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- Added ConfidenceLevel enum (High, Medium, Low, Unknown)
|
||||
- Added fields to LaneEventMetadata:
|
||||
- environment_label: Option<String> - environment/channel (production, staging, dev)
|
||||
- emitter_identity: Option<String> - emitter (clawd, plugin-name, operator-id)
|
||||
- confidence_level: Option<ConfidenceLevel> - trust level for automation
|
||||
- Added builder methods: with_environment(), with_emitter(), with_confidence()
|
||||
- Added filtering functions:
|
||||
- filter_by_provenance() - select events by source
|
||||
- filter_by_environment() - select events by environment label
|
||||
- filter_by_confidence() - select events above confidence threshold
|
||||
- is_test_event() - check if synthetic source (test, healthcheck, replay)
|
||||
- is_live_lane_event() - check if production event
|
||||
- Added 7 comprehensive tests for US-014:
|
||||
- confidence_level_round_trips_through_serialization
|
||||
- filter_by_provenance_selects_only_matching_events
|
||||
- filter_by_environment_selects_only_matching_environment
|
||||
- filter_by_confidence_selects_events_above_threshold
|
||||
- is_test_event_detects_synthetic_sources
|
||||
- is_live_lane_event_detects_production_events
|
||||
- lane_event_metadata_includes_us014_fields
|
||||
|
||||
US-016 COMPLETED (Phase 2 - Duplicate terminal-event suppression)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- Event fingerprinting already implemented via compute_event_fingerprint()
|
||||
- Fingerprint attached via LaneEventMetadata.event_fingerprint
|
||||
- Deduplication via dedupe_terminal_events() - returns first occurrence of each fingerprint
|
||||
- Raw event history preserved separately from deduplicated actionable events
|
||||
- Material difference detection via events_materially_differ():
|
||||
- Different event type (Finished vs Failed) is material
|
||||
- Different status is material
|
||||
- Different failure class is material
|
||||
- Different data payload is material
|
||||
- Reconcile function surfaces latest terminal event when materially different
|
||||
- Added 5 comprehensive tests for US-016:
|
||||
- canonical_terminal_event_fingerprint_attached_to_metadata
|
||||
- dedupe_terminal_events_suppresses_repeated_fingerprints
|
||||
- dedupe_preserves_raw_event_history_separately
|
||||
- events_materially_differ_detects_payload_differences
|
||||
- reconcile_terminal_events_surfaces_latest_when_different
|
||||
|
||||
US-017 COMPLETED (Phase 2 - Lane ownership / scope binding)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- LaneOwnership struct already existed with:
|
||||
- owner: String - owner/assignee identity
|
||||
- workflow_scope: String - workflow scope (claw-code-dogfood, etc.)
|
||||
- watcher_action: WatcherAction - Act, Observe, Ignore
|
||||
- Ownership preserved through lifecycle via with_ownership() builder method
|
||||
- All lifecycle events (Started -> Ready -> Finished) preserve ownership
|
||||
- Added 3 comprehensive tests for US-017:
|
||||
- lane_ownership_attached_to_metadata
|
||||
- lane_ownership_preserved_through_lifecycle_events
|
||||
- lane_ownership_watcher_action_variants
|
||||
|
||||
US-015 COMPLETED (Phase 2 - Session identity completeness at creation time)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- SessionIdentity struct already existed with:
|
||||
- title: String - stable title for the session
|
||||
- workspace: String - workspace/worktree path
|
||||
- purpose: String - lane/session purpose
|
||||
- placeholder_reason: Option<String> - reason for placeholder values
|
||||
- Added reconcile_enriched() method for updating session identity:
|
||||
- Updates title/workspace/purpose with newly available data
|
||||
- Clears placeholder_reason when real values are provided
|
||||
- Preserves existing values for fields not being updated
|
||||
- Allows incremental enrichment without ambiguity
|
||||
- Added 2 comprehensive tests:
|
||||
- session_identity_reconcile_enriched_updates_fields
|
||||
- session_identity_reconcile_preserves_placeholder_if_no_new_data
|
||||
|
||||
US-018 COMPLETED (Phase 2 - Nudge acknowledgment / dedupe contract)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- Added NudgeTracking struct:
|
||||
- nudge_id: String - unique nudge identifier
|
||||
- delivered_at: String - timestamp of delivery
|
||||
- acknowledged: bool - whether acknowledged
|
||||
- acknowledged_at: Option<String> - when acknowledged
|
||||
- is_retry: bool - whether this is a retry
|
||||
- original_nudge_id: Option<String> - original ID if retry
|
||||
- Added NudgeClassification enum (New, Retry, StaleDuplicate)
|
||||
- Added classify_nudge() function for deduplication logic
|
||||
- Added 6 comprehensive tests for US-018
|
||||
|
||||
US-019 COMPLETED (Phase 2 - Stable roadmap-id assignment)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- Added RoadmapId struct:
|
||||
- id: String - canonical unique identifier
|
||||
- filed_at: String - timestamp when filed
|
||||
- is_new_filing: bool - new vs update
|
||||
- supersedes: Option<String> - lineage for supersedes
|
||||
- Added builder methods: new_filing(), update(), supersedes()
|
||||
- Added 3 comprehensive tests for US-019
|
||||
|
||||
US-020 COMPLETED (Phase 2 - Roadmap item lifecycle state contract)
|
||||
- Files: rust/crates/runtime/src/lane_events.rs
|
||||
- Added RoadmapLifecycleState enum (Filed, Acknowledged, InProgress, Blocked, Done, Superseded)
|
||||
- Added RoadmapLifecycle struct:
|
||||
- state: RoadmapLifecycleState - current state
|
||||
- state_changed_at: String - last transition timestamp
|
||||
- filed_at: String - original filing timestamp
|
||||
- lineage: Vec<String> - supersession chain
|
||||
- Added methods: new_filed(), transition(), superseded_by(), is_terminal(), is_active()
|
||||
- Added 5 comprehensive tests for US-020
|
||||
|
||||
VERIFICATION STATUS (Iteration 7):
|
||||
----------------------------------
|
||||
- cargo build --workspace: PASSED
|
||||
- cargo test --workspace: PASSED (891+ tests)
|
||||
- cargo clippy --workspace --all-targets -- -D warnings: PASSED
|
||||
- cargo fmt -- --check: PASSED
|
||||
|
||||
US-013 through US-015 and US-018 through US-020 now marked passes: true
|
||||
|
||||
FINAL VERIFICATION (All 20 Stories Complete):
|
||||
------------------------------------------------
|
||||
- cargo build --workspace: PASSED
|
||||
- cargo test --workspace: PASSED (119+ API tests, 39 runtime tests, 12 integration tests)
|
||||
- cargo clippy --workspace --all-targets -- -D warnings: PASSED
|
||||
- cargo fmt -- --check: PASSED
|
||||
|
||||
ALL 20 STORIES FROM PRD COMPLETE:
|
||||
- US-001 through US-012: Pre-existing implementations (verified working)
|
||||
- US-013: Session event ordering + terminal-state reconciliation
|
||||
- US-014: Event provenance / environment labeling
|
||||
- US-015: Session identity completeness at creation time
|
||||
- US-016: Duplicate terminal-event suppression
|
||||
- US-017: Lane ownership / scope binding
|
||||
- US-018: Nudge acknowledgment / dedupe contract
|
||||
- US-019: Stable roadmap-id assignment
|
||||
- US-020: Roadmap item lifecycle state contract
|
||||
|
||||
Iteration 8: 2026-04-16
|
||||
------------------------
|
||||
|
||||
US-021 COMPLETED (Request body size pre-flight check - from dogfood findings)
|
||||
- Files:
|
||||
- rust/crates/api/src/error.rs (new error variant)
|
||||
- rust/crates/api/src/providers/openai_compat.rs
|
||||
- Added RequestBodySizeExceeded error variant with actionable message
|
||||
- Added max_request_body_bytes to OpenAiCompatConfig:
|
||||
- DashScope: 6MB (6_291_456 bytes) - from dogfood with kimi-k2.5
|
||||
- OpenAI: 100MB (104_857_600 bytes)
|
||||
- xAI: 50MB (52_428_800 bytes)
|
||||
- Added estimate_request_body_size() for pre-flight checks
|
||||
- Added check_request_body_size() for validation
|
||||
- Pre-flight check integrated in send_raw_request()
|
||||
- Tests: 5 new tests for size estimation and limit checking
|
||||
|
||||
PROJECT STATUS: COMPLETE (21/21 stories)
|
||||
|
||||
Iteration 2026-04-29 - ROADMAP #96 COMPLETED
|
||||
------------------------------------------------
|
||||
- Pulled origin/main: already up to date.
|
||||
- Selected ROADMAP #96 as a small repo-local Immediate Backlog item: the `claw --help` Resume-safe command summary leaked slash-command stubs despite the main Interactive command listing filtering them.
|
||||
- Files: rust/crates/rusty-claude-cli/src/main.rs, ROADMAP.md, progress.txt.
|
||||
- Changed help rendering to filter `resume_supported_slash_commands()` through `STUB_COMMANDS` before building the Resume-safe one-liner.
|
||||
- Added `stub_commands_absent_from_resume_safe_help` regression coverage so future stub additions cannot leak into the Resume-safe summary.
|
||||
- Targeted verification: `cargo test -p rusty-claude-cli stub_commands_absent_from_resume_safe_help -- --nocapture` passed; `cargo test -p rusty-claude-cli parses_direct_cli_actions -- --nocapture` passed.
|
||||
- Format/check verification: `cargo fmt --all --check`, `git diff --check`, and `cargo check -p rusty-claude-cli` passed.
|
||||
- Broader clippy note: `cargo clippy -p rusty-claude-cli --all-targets -- -D warnings` is blocked by pre-existing `clippy::unnecessary_wraps` failures in `rust/crates/commands/src/lib.rs` (`render_mcp_report_for`, `render_mcp_report_json_for`), outside this diff.
|
||||
|
||||
@@ -7,8 +7,7 @@ This file provides guidance to Claw Code (clawcode.dev) when working with code i
|
||||
- Frameworks: none detected from the supported starter markers.
|
||||
|
||||
## Verification
|
||||
- From the repository root, run Rust formatting with `scripts/fmt.sh` (or `scripts/fmt.sh --check` for CI-style checks). From this `rust/` directory, the equivalent command is `../scripts/fmt.sh`. Root-level `cargo fmt --manifest-path rust/Cargo.toml` is not the supported formatting command.
|
||||
- From this `rust/` directory, run Rust verification with `cargo clippy --workspace --all-targets -- -D warnings` and `cargo test --workspace`.
|
||||
- Run Rust verification from the repo root: `cargo fmt`, `cargo clippy --workspace --all-targets -- -D warnings`, `cargo test --workspace`
|
||||
|
||||
## Working agreement
|
||||
- Prefer small, reviewable changes and keep generated bootstrap files aligned with actual repo workflows.
|
||||
|
||||
@@ -16,7 +16,7 @@ unsafe_code = "forbid"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
all = { level = "warn", priority = -1 }
|
||||
pedantic = { level = "allow", priority = -1 }
|
||||
pedantic = { level = "warn", priority = -1 }
|
||||
module_name_repetitions = "allow"
|
||||
missing_panics_doc = "allow"
|
||||
missing_errors_doc = "allow"
|
||||
|
||||
@@ -22,8 +22,6 @@ The harness runs these scripted scenarios against a fresh workspace and isolated
|
||||
8. `bash_permission_prompt_approved`
|
||||
9. `bash_permission_prompt_denied`
|
||||
10. `plugin_tool_roundtrip`
|
||||
11. `auto_compact_triggered`
|
||||
12. `token_cost_reporting`
|
||||
|
||||
## Run
|
||||
|
||||
@@ -39,7 +37,7 @@ cd rust/
|
||||
python3 scripts/run_mock_parity_diff.py
|
||||
```
|
||||
|
||||
Scenario-to-PARITY mappings live in `mock_parity_scenarios.json`; keep this manifest aligned with `rust/crates/rusty-claude-cli/tests/mock_parity_harness.rs` and `PARITY.md` via `python3 scripts/run_mock_parity_diff.py --no-run`.
|
||||
Scenario-to-PARITY mappings live in `mock_parity_scenarios.json`.
|
||||
|
||||
## Manual mock server
|
||||
|
||||
|
||||
@@ -145,7 +145,7 @@ Top-level commands:
|
||||
init
|
||||
```
|
||||
|
||||
`claw acp` is a local discoverability surface for editor-first users: it reports the current ACP/Zed status without starting the runtime. As of April 16, 2026, claw-code does **not** ship an ACP/Zed daemon or JSON-RPC entrypoint yet, and `claw acp serve` is only a status alias until the real protocol surface lands. Status queries exit 0 and expose the same machine-readable contract via `--output-format json`; malformed ACP invocations exit 1 with `kind: unsupported_acp_invocation`.
|
||||
`claw acp` is a local discoverability surface for editor-first users: it reports the current ACP/Zed status without starting the runtime. As of April 16, 2026, claw-code does **not** ship an ACP/Zed daemon entrypoint yet, and `claw acp serve` is only a status alias until the real protocol surface lands.
|
||||
|
||||
The command surface is moving quickly. For the canonical live help text, run:
|
||||
|
||||
|
||||
@@ -76,7 +76,6 @@ fn create_sample_request(message_count: usize) -> MessageRequest {
|
||||
presence_penalty: None,
|
||||
stop: None,
|
||||
reasoning_effort: None,
|
||||
extra_body: std::collections::BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,11 +14,6 @@ const CONTEXT_WINDOW_ERROR_MARKERS: &[&str] = &[
|
||||
"too many tokens",
|
||||
"prompt is too long",
|
||||
"input is too long",
|
||||
"input tokens exceed",
|
||||
"configured limit",
|
||||
"messages resulted in",
|
||||
"completion tokens",
|
||||
"prompt tokens",
|
||||
"request is too large",
|
||||
];
|
||||
|
||||
@@ -547,26 +542,6 @@ mod tests {
|
||||
assert_eq!(error.request_id(), Some("req_ctx_123"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn classifies_openai_configured_limit_errors_as_context_window_failures() {
|
||||
let error = ApiError::Api {
|
||||
status: reqwest::StatusCode::BAD_REQUEST,
|
||||
error_type: Some("invalid_request_error".to_string()),
|
||||
message: Some(
|
||||
"Input tokens exceed the configured limit of 922000 tokens. Your messages resulted in 1860900 tokens. Please reduce the length of the messages."
|
||||
.to_string(),
|
||||
),
|
||||
request_id: Some("req_ctx_openai_123".to_string()),
|
||||
body: String::new(),
|
||||
retryable: false,
|
||||
suggested_action: None,
|
||||
};
|
||||
|
||||
assert!(error.is_context_window_failure());
|
||||
assert_eq!(error.safe_failure_class(), "context_window");
|
||||
assert_eq!(error.request_id(), Some("req_ctx_openai_123"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_credentials_without_hint_renders_the_canonical_message() {
|
||||
// given
|
||||
|
||||
@@ -20,15 +20,12 @@ pub use prompt_cache::{
|
||||
};
|
||||
pub use providers::anthropic::{AnthropicClient, AnthropicClient as ApiClient, AuthSource};
|
||||
pub use providers::openai_compat::{
|
||||
build_chat_completion_request, check_request_body_size, estimate_request_body_size,
|
||||
flatten_tool_result_content, is_reasoning_model, model_rejects_is_error_field,
|
||||
model_requires_reasoning_content_in_history, translate_message, OpenAiCompatClient,
|
||||
OpenAiCompatConfig,
|
||||
build_chat_completion_request, flatten_tool_result_content, is_reasoning_model,
|
||||
model_rejects_is_error_field, translate_message, OpenAiCompatClient, OpenAiCompatConfig,
|
||||
};
|
||||
pub use providers::{
|
||||
detect_provider_kind, max_tokens_for_model, max_tokens_for_model_with_override,
|
||||
model_family_identity_for, model_family_identity_for_kind, provider_diagnostics_for_model,
|
||||
resolve_model_alias, ProviderDiagnostics, ProviderKind,
|
||||
resolve_model_alias, ProviderKind,
|
||||
};
|
||||
pub use sse::{parse_frame, SseParser};
|
||||
pub use types::{
|
||||
|
||||
@@ -600,9 +600,8 @@ fn jitter_for_base(base: Duration) -> Duration {
|
||||
}
|
||||
let raw_nanos = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map_or(0, |elapsed| {
|
||||
u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX)
|
||||
});
|
||||
.map(|elapsed| u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX))
|
||||
.unwrap_or(0);
|
||||
let tick = JITTER_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
// splitmix64 finalizer — mixes the low bits so large bases still see
|
||||
// jitter across their full range instead of being clamped to subsec nanos.
|
||||
@@ -845,17 +844,19 @@ impl MessageStream {
|
||||
StreamEvent::MessageDelta(MessageDeltaEvent { usage, .. }) => {
|
||||
self.latest_usage = Some(usage.clone());
|
||||
}
|
||||
StreamEvent::MessageStop(_) if !self.usage_recorded => {
|
||||
if let (Some(prompt_cache), Some(usage)) =
|
||||
(&self.prompt_cache, self.latest_usage.as_ref())
|
||||
{
|
||||
let record = prompt_cache.record_usage(&self.request, usage);
|
||||
*self
|
||||
.last_prompt_cache_record
|
||||
.lock()
|
||||
.unwrap_or_else(std::sync::PoisonError::into_inner) = Some(record);
|
||||
StreamEvent::MessageStop(_) => {
|
||||
if !self.usage_recorded {
|
||||
if let (Some(prompt_cache), Some(usage)) =
|
||||
(&self.prompt_cache, self.latest_usage.as_ref())
|
||||
{
|
||||
let record = prompt_cache.record_usage(&self.request, usage);
|
||||
*self
|
||||
.last_prompt_cache_record
|
||||
.lock()
|
||||
.unwrap_or_else(std::sync::PoisonError::into_inner) = Some(record);
|
||||
}
|
||||
self.usage_recorded = true;
|
||||
}
|
||||
self.usage_recorded = true;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
#![allow(clippy::cast_possible_truncation)]
|
||||
#![allow(dead_code)]
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
@@ -29,7 +28,7 @@ pub trait Provider {
|
||||
) -> ProviderFuture<'a, Self::Stream>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ProviderKind {
|
||||
Anthropic,
|
||||
Xai,
|
||||
@@ -50,74 +49,6 @@ pub struct ModelTokenLimit {
|
||||
pub context_window_tokens: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ProviderWireProtocol {
|
||||
AnthropicMessages,
|
||||
OpenAiChatCompletions,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ProviderFeatureSupport {
|
||||
Supported,
|
||||
Unsupported,
|
||||
PassthroughAsTool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
pub struct ProviderCapabilityReport {
|
||||
pub provider: ProviderKind,
|
||||
pub wire_protocol: ProviderWireProtocol,
|
||||
pub auth_env: &'static str,
|
||||
pub base_url_env: &'static str,
|
||||
pub default_base_url: &'static str,
|
||||
pub tool_calls: ProviderFeatureSupport,
|
||||
pub streaming: ProviderFeatureSupport,
|
||||
pub streaming_usage: ProviderFeatureSupport,
|
||||
pub prompt_cache: ProviderFeatureSupport,
|
||||
pub custom_parameters: ProviderFeatureSupport,
|
||||
pub reasoning_effort: ProviderFeatureSupport,
|
||||
pub reasoning_content_history: ProviderFeatureSupport,
|
||||
pub fixed_sampling_reasoning_models: ProviderFeatureSupport,
|
||||
pub web_search: ProviderFeatureSupport,
|
||||
pub web_fetch: ProviderFeatureSupport,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ProviderDiagnosticSeverity {
|
||||
Info,
|
||||
Warning,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
pub struct ProviderDiagnostic {
|
||||
pub code: &'static str,
|
||||
pub severity: ProviderDiagnosticSeverity,
|
||||
pub message: String,
|
||||
pub action: String,
|
||||
}
|
||||
|
||||
#[allow(clippy::struct_excessive_bools)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
|
||||
pub struct ProviderDiagnostics {
|
||||
pub requested_model: String,
|
||||
pub resolved_model: String,
|
||||
pub provider: ProviderKind,
|
||||
pub auth_env: &'static str,
|
||||
pub base_url_env: &'static str,
|
||||
pub default_base_url: &'static str,
|
||||
pub openai_compatible: bool,
|
||||
pub reasoning_model: bool,
|
||||
pub preserves_reasoning_content_in_history: bool,
|
||||
pub strips_tuning_params: bool,
|
||||
pub supports_stream_usage: bool,
|
||||
pub honors_proxy_env: bool,
|
||||
pub supports_extra_body_params: bool,
|
||||
pub preserves_slash_model_ids_on_custom_base_url: bool,
|
||||
}
|
||||
|
||||
const MODEL_REGISTRY: &[(&str, ProviderMetadata)] = &[
|
||||
(
|
||||
"opus",
|
||||
@@ -288,55 +219,6 @@ pub fn metadata_for_model(model: &str) -> Option<ProviderMetadata> {
|
||||
None
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn provider_diagnostics_for_model(model: &str) -> ProviderDiagnostics {
|
||||
let resolved_model = resolve_model_alias(model);
|
||||
let metadata =
|
||||
metadata_for_model(&resolved_model).unwrap_or_else(|| {
|
||||
match detect_provider_kind(&resolved_model) {
|
||||
ProviderKind::Anthropic => ProviderMetadata {
|
||||
provider: ProviderKind::Anthropic,
|
||||
auth_env: "ANTHROPIC_API_KEY",
|
||||
base_url_env: "ANTHROPIC_BASE_URL",
|
||||
default_base_url: anthropic::DEFAULT_BASE_URL,
|
||||
},
|
||||
ProviderKind::Xai => ProviderMetadata {
|
||||
provider: ProviderKind::Xai,
|
||||
auth_env: "XAI_API_KEY",
|
||||
base_url_env: "XAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_XAI_BASE_URL,
|
||||
},
|
||||
ProviderKind::OpenAi => ProviderMetadata {
|
||||
provider: ProviderKind::OpenAi,
|
||||
auth_env: "OPENAI_API_KEY",
|
||||
base_url_env: "OPENAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_OPENAI_BASE_URL,
|
||||
},
|
||||
}
|
||||
});
|
||||
let openai_compatible = matches!(metadata.provider, ProviderKind::OpenAi | ProviderKind::Xai);
|
||||
let reasoning_model = openai_compatible && openai_compat::is_reasoning_model(&resolved_model);
|
||||
|
||||
ProviderDiagnostics {
|
||||
requested_model: model.to_string(),
|
||||
resolved_model: resolved_model.clone(),
|
||||
provider: metadata.provider,
|
||||
auth_env: metadata.auth_env,
|
||||
base_url_env: metadata.base_url_env,
|
||||
default_base_url: metadata.default_base_url,
|
||||
openai_compatible,
|
||||
reasoning_model,
|
||||
preserves_reasoning_content_in_history: openai_compatible
|
||||
&& openai_compat::model_requires_reasoning_content_in_history(&resolved_model),
|
||||
strips_tuning_params: reasoning_model,
|
||||
supports_stream_usage: metadata.provider == ProviderKind::OpenAi
|
||||
&& metadata.default_base_url == openai_compat::DEFAULT_OPENAI_BASE_URL,
|
||||
honors_proxy_env: true,
|
||||
supports_extra_body_params: openai_compatible,
|
||||
preserves_slash_model_ids_on_custom_base_url: metadata.provider == ProviderKind::OpenAi,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn detect_provider_kind(model: &str) -> ProviderKind {
|
||||
if let Some(metadata) = metadata_for_model(model) {
|
||||
@@ -368,231 +250,19 @@ pub fn detect_provider_kind(model: &str) -> ProviderKind {
|
||||
ProviderKind::Anthropic
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub const fn model_family_identity_for_kind(kind: ProviderKind) -> runtime::ModelFamilyIdentity {
|
||||
match kind {
|
||||
ProviderKind::Anthropic => runtime::ModelFamilyIdentity::Claude,
|
||||
ProviderKind::Xai | ProviderKind::OpenAi => runtime::ModelFamilyIdentity::Generic,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn model_family_identity_for(model: &str) -> runtime::ModelFamilyIdentity {
|
||||
model_family_identity_for_kind(detect_provider_kind(model))
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn provider_capabilities_for_model(model: &str) -> ProviderCapabilityReport {
|
||||
let metadata = metadata_for_model(model).unwrap_or_else(|| {
|
||||
let provider = detect_provider_kind(model);
|
||||
metadata_for_provider_kind(provider)
|
||||
});
|
||||
|
||||
let (
|
||||
wire_protocol,
|
||||
streaming_usage,
|
||||
prompt_cache,
|
||||
custom_parameters,
|
||||
reasoning_effort,
|
||||
reasoning_content_history,
|
||||
fixed_sampling_reasoning_models,
|
||||
) = match metadata.provider {
|
||||
ProviderKind::Anthropic => (
|
||||
ProviderWireProtocol::AnthropicMessages,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Supported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
),
|
||||
ProviderKind::Xai => (
|
||||
ProviderWireProtocol::OpenAiChatCompletions,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Supported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Supported,
|
||||
),
|
||||
ProviderKind::OpenAi => (
|
||||
ProviderWireProtocol::OpenAiChatCompletions,
|
||||
ProviderFeatureSupport::Supported,
|
||||
ProviderFeatureSupport::Unsupported,
|
||||
ProviderFeatureSupport::Supported,
|
||||
ProviderFeatureSupport::Supported,
|
||||
if openai_compat::model_requires_reasoning_content_in_history(model) {
|
||||
ProviderFeatureSupport::Supported
|
||||
} else {
|
||||
ProviderFeatureSupport::Unsupported
|
||||
},
|
||||
ProviderFeatureSupport::Supported,
|
||||
),
|
||||
};
|
||||
|
||||
ProviderCapabilityReport {
|
||||
provider: metadata.provider,
|
||||
wire_protocol,
|
||||
auth_env: metadata.auth_env,
|
||||
base_url_env: metadata.base_url_env,
|
||||
default_base_url: metadata.default_base_url,
|
||||
tool_calls: ProviderFeatureSupport::Supported,
|
||||
streaming: ProviderFeatureSupport::Supported,
|
||||
streaming_usage,
|
||||
prompt_cache,
|
||||
custom_parameters,
|
||||
reasoning_effort,
|
||||
reasoning_content_history,
|
||||
fixed_sampling_reasoning_models,
|
||||
web_search: ProviderFeatureSupport::PassthroughAsTool,
|
||||
web_fetch: ProviderFeatureSupport::PassthroughAsTool,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn provider_diagnostics_for_request(request: &MessageRequest) -> Vec<ProviderDiagnostic> {
|
||||
let capabilities = provider_capabilities_for_model(&request.model);
|
||||
let mut diagnostics = Vec::new();
|
||||
|
||||
if request.reasoning_effort.is_some()
|
||||
&& capabilities.reasoning_effort == ProviderFeatureSupport::Unsupported
|
||||
{
|
||||
diagnostics.push(ProviderDiagnostic {
|
||||
code: "reasoning_effort_unsupported",
|
||||
severity: ProviderDiagnosticSeverity::Warning,
|
||||
message: format!(
|
||||
"{} does not map `reasoning_effort` for model `{}`.",
|
||||
provider_label(capabilities.provider),
|
||||
request.model
|
||||
),
|
||||
action: "Remove `reasoning_effort` or route to an OpenAI-compatible reasoning model such as `openai/o4-mini`.".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if openai_compat::is_reasoning_model(&request.model)
|
||||
&& has_openai_tuning_parameters(request)
|
||||
&& capabilities.fixed_sampling_reasoning_models == ProviderFeatureSupport::Supported
|
||||
{
|
||||
diagnostics.push(ProviderDiagnostic {
|
||||
code: "reasoning_model_fixed_sampling",
|
||||
severity: ProviderDiagnosticSeverity::Info,
|
||||
message: format!(
|
||||
"Model `{}` is treated as a fixed-sampling reasoning model; tuning parameters are omitted before the provider call.",
|
||||
request.model
|
||||
),
|
||||
action: "Leave temperature/top_p/frequency_penalty/presence_penalty unset for reasoning models to match provider validation rules.".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if openai_compat::model_requires_reasoning_content_in_history(&request.model) {
|
||||
diagnostics.push(ProviderDiagnostic {
|
||||
code: "deepseek_v4_reasoning_history",
|
||||
severity: ProviderDiagnosticSeverity::Info,
|
||||
message: format!(
|
||||
"Model `{}` requires assistant thinking history to be echoed as `reasoning_content`.",
|
||||
request.model
|
||||
),
|
||||
action: "Keep prior assistant Thinking blocks in history; the OpenAI-compatible serializer will emit `reasoning_content` for DeepSeek V4 models.".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if declares_tool(request, "web_search") {
|
||||
diagnostics.push(web_passthrough_diagnostic(
|
||||
"web_search_passthrough_tool",
|
||||
"web_search",
|
||||
capabilities.provider,
|
||||
));
|
||||
}
|
||||
if declares_tool(request, "web_fetch") {
|
||||
diagnostics.push(web_passthrough_diagnostic(
|
||||
"web_fetch_passthrough_tool",
|
||||
"web_fetch",
|
||||
capabilities.provider,
|
||||
));
|
||||
}
|
||||
|
||||
diagnostics
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn metadata_for_provider_kind(provider: ProviderKind) -> ProviderMetadata {
|
||||
match provider {
|
||||
ProviderKind::Anthropic => ProviderMetadata {
|
||||
provider,
|
||||
auth_env: "ANTHROPIC_API_KEY",
|
||||
base_url_env: "ANTHROPIC_BASE_URL",
|
||||
default_base_url: anthropic::DEFAULT_BASE_URL,
|
||||
},
|
||||
ProviderKind::Xai => ProviderMetadata {
|
||||
provider,
|
||||
auth_env: "XAI_API_KEY",
|
||||
base_url_env: "XAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_XAI_BASE_URL,
|
||||
},
|
||||
ProviderKind::OpenAi => ProviderMetadata {
|
||||
provider,
|
||||
auth_env: "OPENAI_API_KEY",
|
||||
base_url_env: "OPENAI_BASE_URL",
|
||||
default_base_url: openai_compat::DEFAULT_OPENAI_BASE_URL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
const fn provider_label(provider: ProviderKind) -> &'static str {
|
||||
match provider {
|
||||
ProviderKind::Anthropic => "Anthropic",
|
||||
ProviderKind::Xai => "xAI",
|
||||
ProviderKind::OpenAi => "OpenAI-compatible",
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn has_openai_tuning_parameters(request: &MessageRequest) -> bool {
|
||||
request.temperature.is_some()
|
||||
|| request.top_p.is_some()
|
||||
|| request.frequency_penalty.is_some()
|
||||
|| request.presence_penalty.is_some()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn declares_tool(request: &MessageRequest, tool_name: &str) -> bool {
|
||||
request.tools.as_ref().is_some_and(|tools| {
|
||||
tools
|
||||
.iter()
|
||||
.any(|tool| tool.name.eq_ignore_ascii_case(tool_name))
|
||||
})
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn web_passthrough_diagnostic(
|
||||
code: &'static str,
|
||||
tool_name: &'static str,
|
||||
provider: ProviderKind,
|
||||
) -> ProviderDiagnostic {
|
||||
ProviderDiagnostic {
|
||||
code,
|
||||
severity: ProviderDiagnosticSeverity::Info,
|
||||
message: format!(
|
||||
"`{tool_name}` is exposed to {} as a normal function tool, not as a provider-native web capability.",
|
||||
provider_label(provider)
|
||||
),
|
||||
action: format!(
|
||||
"Provide a local `{tool_name}` tool implementation or route through a provider adapter that explicitly supports native web tools."
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn max_tokens_for_model(model: &str) -> u32 {
|
||||
let canonical = resolve_model_alias(model);
|
||||
let heuristic = if canonical.contains("opus") {
|
||||
32_000
|
||||
} else {
|
||||
64_000
|
||||
};
|
||||
|
||||
model_token_limit(model).map_or(heuristic, |limit| heuristic.min(limit.max_output_tokens))
|
||||
model_token_limit(model).map_or_else(
|
||||
|| {
|
||||
let canonical = resolve_model_alias(model);
|
||||
if canonical.contains("opus") {
|
||||
32_000
|
||||
} else {
|
||||
64_000
|
||||
}
|
||||
},
|
||||
|limit| limit.max_output_tokens,
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the effective max output tokens for a model, preferring a plugin
|
||||
@@ -606,8 +276,7 @@ pub fn max_tokens_for_model_with_override(model: &str, plugin_override: Option<u
|
||||
#[must_use]
|
||||
pub fn model_token_limit(model: &str) -> Option<ModelTokenLimit> {
|
||||
let canonical = resolve_model_alias(model);
|
||||
let base_model = canonical.rsplit('/').next().unwrap_or(canonical.as_str());
|
||||
match base_model {
|
||||
match canonical.as_str() {
|
||||
"claude-opus-4-6" => Some(ModelTokenLimit {
|
||||
max_output_tokens: 32_000,
|
||||
context_window_tokens: 200_000,
|
||||
@@ -620,20 +289,6 @@ pub fn model_token_limit(model: &str) -> Option<ModelTokenLimit> {
|
||||
max_output_tokens: 64_000,
|
||||
context_window_tokens: 131_072,
|
||||
}),
|
||||
// GPT-4.1 family via the OpenAI API.
|
||||
"gpt-4.1" | "gpt-4.1-mini" | "gpt-4.1-nano" => Some(ModelTokenLimit {
|
||||
max_output_tokens: 32_768,
|
||||
context_window_tokens: 1_047_576,
|
||||
}),
|
||||
// GPT-5.4 family via the OpenAI API.
|
||||
"gpt-5.4" => Some(ModelTokenLimit {
|
||||
max_output_tokens: 128_000,
|
||||
context_window_tokens: 1_000_000,
|
||||
}),
|
||||
"gpt-5.4-mini" | "gpt-5.4-nano" => Some(ModelTokenLimit {
|
||||
max_output_tokens: 128_000,
|
||||
context_window_tokens: 400_000,
|
||||
}),
|
||||
// Kimi models via DashScope (Moonshot AI)
|
||||
// Source: https://platform.moonshot.cn/docs/intro
|
||||
"kimi-k2.5" | "kimi-k1.5" => Some(ModelTokenLimit {
|
||||
@@ -815,10 +470,8 @@ mod tests {
|
||||
use super::{
|
||||
anthropic_missing_credentials, anthropic_missing_credentials_hint, detect_provider_kind,
|
||||
load_dotenv_file, max_tokens_for_model, max_tokens_for_model_with_override,
|
||||
model_family_identity_for, model_family_identity_for_kind, model_token_limit, parse_dotenv,
|
||||
preflight_message_request, provider_capabilities_for_model,
|
||||
provider_diagnostics_for_request, resolve_model_alias, ProviderFeatureSupport,
|
||||
ProviderKind, ProviderWireProtocol,
|
||||
model_token_limit, parse_dotenv, preflight_message_request, resolve_model_alias,
|
||||
ProviderKind,
|
||||
};
|
||||
|
||||
/// Serializes every test in this module that mutates process-wide
|
||||
@@ -877,141 +530,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn maps_provider_kind_to_model_family_identity() {
|
||||
// given: each supported provider kind
|
||||
let anthropic = ProviderKind::Anthropic;
|
||||
let openai = ProviderKind::OpenAi;
|
||||
let xai = ProviderKind::Xai;
|
||||
|
||||
// when: converting provider kinds to prompt model family identities
|
||||
let anthropic_identity = model_family_identity_for_kind(anthropic);
|
||||
let openai_identity = model_family_identity_for_kind(openai);
|
||||
let xai_identity = model_family_identity_for_kind(xai);
|
||||
|
||||
// then: Anthropic stays Claude and OpenAI-compatible providers are generic
|
||||
assert_eq!(anthropic_identity, runtime::ModelFamilyIdentity::Claude);
|
||||
assert_eq!(openai_identity, runtime::ModelFamilyIdentity::Generic);
|
||||
assert_eq!(xai_identity, runtime::ModelFamilyIdentity::Generic);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn maps_model_name_to_model_family_identity() {
|
||||
// given: Anthropic, OpenAI-compatible, and xAI model names
|
||||
let claude_model = "claude-opus-4-6";
|
||||
let openai_model = "openai/gpt-4.1-mini";
|
||||
let xai_model = "grok-3";
|
||||
|
||||
// when: detecting prompt model family identities from model names
|
||||
let claude_identity = model_family_identity_for(claude_model);
|
||||
let openai_identity = model_family_identity_for(openai_model);
|
||||
let xai_identity = model_family_identity_for(xai_model);
|
||||
|
||||
// then: Anthropic stays Claude and OpenAI-compatible providers are generic
|
||||
assert_eq!(claude_identity, runtime::ModelFamilyIdentity::Claude);
|
||||
assert_eq!(openai_identity, runtime::ModelFamilyIdentity::Generic);
|
||||
assert_eq!(xai_identity, runtime::ModelFamilyIdentity::Generic);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn provider_capability_matrix_snapshots_openai_compat_differences() {
|
||||
let openai = provider_capabilities_for_model("openai/gpt-4.1-mini");
|
||||
assert_eq!(openai.provider, ProviderKind::OpenAi);
|
||||
assert_eq!(
|
||||
openai.wire_protocol,
|
||||
ProviderWireProtocol::OpenAiChatCompletions
|
||||
);
|
||||
assert_eq!(openai.auth_env, "OPENAI_API_KEY");
|
||||
assert_eq!(openai.streaming_usage, ProviderFeatureSupport::Supported);
|
||||
assert_eq!(openai.reasoning_effort, ProviderFeatureSupport::Supported);
|
||||
assert_eq!(openai.web_search, ProviderFeatureSupport::PassthroughAsTool);
|
||||
assert_eq!(openai.web_fetch, ProviderFeatureSupport::PassthroughAsTool);
|
||||
|
||||
let deepseek = provider_capabilities_for_model("openai/deepseek-v4-pro");
|
||||
assert_eq!(
|
||||
deepseek.reasoning_content_history,
|
||||
ProviderFeatureSupport::Supported
|
||||
);
|
||||
|
||||
let xai = provider_capabilities_for_model("grok-3");
|
||||
assert_eq!(xai.provider, ProviderKind::Xai);
|
||||
assert_eq!(xai.auth_env, "XAI_API_KEY");
|
||||
assert_eq!(xai.reasoning_effort, ProviderFeatureSupport::Unsupported);
|
||||
assert_eq!(xai.streaming_usage, ProviderFeatureSupport::Unsupported);
|
||||
|
||||
let anthropic = provider_capabilities_for_model("claude-sonnet-4-6");
|
||||
assert_eq!(anthropic.provider, ProviderKind::Anthropic);
|
||||
assert_eq!(
|
||||
anthropic.wire_protocol,
|
||||
ProviderWireProtocol::AnthropicMessages
|
||||
);
|
||||
assert_eq!(anthropic.prompt_cache, ProviderFeatureSupport::Supported);
|
||||
assert_eq!(
|
||||
anthropic.custom_parameters,
|
||||
ProviderFeatureSupport::Unsupported
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn provider_diagnostics_explain_deepseek_reasoning_and_web_tool_passthrough() {
|
||||
let request = MessageRequest {
|
||||
model: "openai/deepseek-v4-pro".to_string(),
|
||||
max_tokens: 1024,
|
||||
messages: vec![InputMessage::user_text("research this")],
|
||||
tools: Some(vec![
|
||||
ToolDefinition {
|
||||
name: "web_search".to_string(),
|
||||
description: Some("Search the web".to_string()),
|
||||
input_schema: json!({"type": "object"}),
|
||||
},
|
||||
ToolDefinition {
|
||||
name: "web_fetch".to_string(),
|
||||
description: Some("Fetch a URL".to_string()),
|
||||
input_schema: json!({"type": "object"}),
|
||||
},
|
||||
]),
|
||||
stream: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let diagnostics = provider_diagnostics_for_request(&request);
|
||||
let codes = diagnostics
|
||||
.iter()
|
||||
.map(|diagnostic| diagnostic.code)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(codes.contains(&"deepseek_v4_reasoning_history"));
|
||||
assert!(codes.contains(&"web_search_passthrough_tool"));
|
||||
assert!(codes.contains(&"web_fetch_passthrough_tool"));
|
||||
assert!(diagnostics
|
||||
.iter()
|
||||
.any(|diagnostic| diagnostic.action.contains("provider adapter")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn provider_diagnostics_warn_for_unsupported_reasoning_effort() {
|
||||
let request = MessageRequest {
|
||||
model: "grok-3-mini".to_string(),
|
||||
max_tokens: 1024,
|
||||
messages: vec![InputMessage::user_text("think")],
|
||||
reasoning_effort: Some("high".to_string()),
|
||||
temperature: Some(0.7),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let diagnostics = provider_diagnostics_for_request(&request);
|
||||
let codes = diagnostics
|
||||
.iter()
|
||||
.map(|diagnostic| diagnostic.code)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(codes.contains(&"reasoning_effort_unsupported"));
|
||||
assert!(codes.contains(&"reasoning_model_fixed_sampling"));
|
||||
assert!(diagnostics.iter().any(|diagnostic| diagnostic
|
||||
.message
|
||||
.contains("does not map `reasoning_effort`")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn openai_namespaced_model_routes_to_openai_not_anthropic() {
|
||||
// Regression: "openai/gpt-4.1-mini" was misrouted to Anthropic when
|
||||
@@ -1092,32 +610,10 @@ mod tests {
|
||||
assert_eq!(super::resolve_model_alias("KIMI"), "kimi-k2.5"); // case insensitive
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn provider_diagnostics_explain_openai_compatible_capabilities() {
|
||||
let diagnostics = super::provider_diagnostics_for_model("openai/deepseek-v4-pro");
|
||||
|
||||
assert_eq!(diagnostics.provider, ProviderKind::OpenAi);
|
||||
assert_eq!(diagnostics.auth_env, "OPENAI_API_KEY");
|
||||
assert!(diagnostics.openai_compatible);
|
||||
assert!(diagnostics.preserves_reasoning_content_in_history);
|
||||
assert!(diagnostics.supports_extra_body_params);
|
||||
assert!(diagnostics.honors_proxy_env);
|
||||
assert!(diagnostics.preserves_slash_model_ids_on_custom_base_url);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn keeps_existing_max_token_heuristic() {
|
||||
assert_eq!(max_tokens_for_model("opus"), 32_000);
|
||||
assert_eq!(max_tokens_for_model("grok-3"), 64_000);
|
||||
assert_eq!(max_tokens_for_model("gpt-5.4"), 64_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn caps_default_max_tokens_to_openai_model_limits() {
|
||||
assert_eq!(max_tokens_for_model("gpt-4.1-mini"), 32_768);
|
||||
assert_eq!(max_tokens_for_model("openai/gpt-4.1-mini"), 32_768);
|
||||
assert_eq!(max_tokens_for_model("gpt-5.4"), 64_000);
|
||||
assert_eq!(max_tokens_for_model("openai/gpt-5.4"), 64_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1184,18 +680,6 @@ mod tests {
|
||||
.context_window_tokens,
|
||||
131_072
|
||||
);
|
||||
assert_eq!(
|
||||
model_token_limit("openai/gpt-4.1-mini")
|
||||
.expect("openai/gpt-4.1-mini should be registered")
|
||||
.context_window_tokens,
|
||||
1_047_576
|
||||
);
|
||||
assert_eq!(
|
||||
model_token_limit("gpt-5.4")
|
||||
.expect("gpt-5.4 should be registered")
|
||||
.context_window_tokens,
|
||||
1_000_000
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1244,42 +728,6 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn preflight_blocks_oversized_requests_for_gpt_5_4() {
|
||||
let request = MessageRequest {
|
||||
model: "gpt-5.4".to_string(),
|
||||
max_tokens: 64_000,
|
||||
messages: vec![InputMessage {
|
||||
role: "user".to_string(),
|
||||
content: vec![InputContentBlock::Text {
|
||||
text: "x".repeat(3_900_000),
|
||||
}],
|
||||
}],
|
||||
system: Some("Keep the answer short.".to_string()),
|
||||
tools: None,
|
||||
tool_choice: None,
|
||||
stream: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let error = preflight_message_request(&request)
|
||||
.expect_err("oversized gpt-5.4 request should be rejected before the provider call");
|
||||
|
||||
match error {
|
||||
ApiError::ContextWindowExceeded {
|
||||
model,
|
||||
requested_output_tokens,
|
||||
context_window_tokens,
|
||||
..
|
||||
} => {
|
||||
assert_eq!(model, "gpt-5.4");
|
||||
assert_eq!(requested_output_tokens, 64_000);
|
||||
assert_eq!(context_window_tokens, 1_000_000);
|
||||
}
|
||||
other => panic!("expected context-window preflight failure, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn preflight_skips_unknown_models() {
|
||||
let request = MessageRequest {
|
||||
@@ -1305,14 +753,14 @@ mod tests {
|
||||
#[test]
|
||||
fn returns_context_window_metadata_for_kimi_models() {
|
||||
// kimi-k2.5
|
||||
let k25_limit =
|
||||
model_token_limit("kimi-k2.5").expect("kimi-k2.5 should have token limit metadata");
|
||||
let k25_limit = model_token_limit("kimi-k2.5")
|
||||
.expect("kimi-k2.5 should have token limit metadata");
|
||||
assert_eq!(k25_limit.max_output_tokens, 16_384);
|
||||
assert_eq!(k25_limit.context_window_tokens, 256_000);
|
||||
|
||||
// kimi-k1.5
|
||||
let k15_limit =
|
||||
model_token_limit("kimi-k1.5").expect("kimi-k1.5 should have token limit metadata");
|
||||
let k15_limit = model_token_limit("kimi-k1.5")
|
||||
.expect("kimi-k1.5 should have token limit metadata");
|
||||
assert_eq!(k15_limit.max_output_tokens, 16_384);
|
||||
assert_eq!(k15_limit.context_window_tokens, 256_000);
|
||||
}
|
||||
@@ -1320,13 +768,11 @@ mod tests {
|
||||
#[test]
|
||||
fn kimi_alias_resolves_to_kimi_k25_token_limits() {
|
||||
// The "kimi" alias resolves to "kimi-k2.5" via resolve_model_alias()
|
||||
let alias_limit =
|
||||
model_token_limit("kimi").expect("kimi alias should resolve to kimi-k2.5 limits");
|
||||
let direct_limit = model_token_limit("kimi-k2.5").expect("kimi-k2.5 should have limits");
|
||||
assert_eq!(
|
||||
alias_limit.max_output_tokens,
|
||||
direct_limit.max_output_tokens
|
||||
);
|
||||
let alias_limit = model_token_limit("kimi")
|
||||
.expect("kimi alias should resolve to kimi-k2.5 limits");
|
||||
let direct_limit = model_token_limit("kimi-k2.5")
|
||||
.expect("kimi-k2.5 should have limits");
|
||||
assert_eq!(alias_limit.max_output_tokens, direct_limit.max_output_tokens);
|
||||
assert_eq!(
|
||||
alias_limit.context_window_tokens,
|
||||
direct_limit.context_window_tokens
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, VecDeque};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
@@ -146,12 +145,6 @@ impl OpenAiCompatClient {
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_http_client(mut self, http: reqwest::Client) -> Self {
|
||||
self.http = http;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_retry_policy(
|
||||
mut self,
|
||||
@@ -274,18 +267,14 @@ impl OpenAiCompatClient {
|
||||
request: &MessageRequest,
|
||||
) -> Result<reqwest::Response, ApiError> {
|
||||
// Pre-flight check: verify request body size against provider limits
|
||||
check_request_body_size_for_base_url(request, self.config(), &self.base_url)?;
|
||||
check_request_body_size(request, self.config())?;
|
||||
|
||||
let request_url = chat_completions_endpoint(&self.base_url);
|
||||
self.http
|
||||
.post(&request_url)
|
||||
.header("content-type", "application/json")
|
||||
.bearer_auth(&self.api_key)
|
||||
.json(&build_chat_completion_request_for_base_url(
|
||||
request,
|
||||
self.config(),
|
||||
&self.base_url,
|
||||
))
|
||||
.json(&build_chat_completion_request(request, self.config()))
|
||||
.send()
|
||||
.await
|
||||
.map_err(ApiError::from)
|
||||
@@ -338,9 +327,8 @@ fn jitter_for_base(base: Duration) -> Duration {
|
||||
}
|
||||
let raw_nanos = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map_or(0, |elapsed| {
|
||||
u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX)
|
||||
});
|
||||
.map(|elapsed| u64::try_from(elapsed.as_nanos()).unwrap_or(u64::MAX))
|
||||
.unwrap_or(0);
|
||||
let tick = JITTER_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
let mut mixed = raw_nanos
|
||||
.wrapping_add(tick)
|
||||
@@ -455,8 +443,6 @@ struct StreamState {
|
||||
stop_reason: Option<String>,
|
||||
usage: Option<Usage>,
|
||||
tool_calls: BTreeMap<u32, ToolCallState>,
|
||||
thinking_started: bool,
|
||||
thinking_finished: bool,
|
||||
}
|
||||
|
||||
impl StreamState {
|
||||
@@ -470,12 +456,9 @@ impl StreamState {
|
||||
stop_reason: None,
|
||||
usage: None,
|
||||
tool_calls: BTreeMap::new(),
|
||||
thinking_started: false,
|
||||
thinking_finished: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_lines)]
|
||||
fn ingest_chunk(&mut self, chunk: ChatCompletionChunk) -> Result<Vec<StreamEvent>, ApiError> {
|
||||
let mut events = Vec::new();
|
||||
if !self.message_started {
|
||||
@@ -501,65 +484,44 @@ impl StreamState {
|
||||
}
|
||||
|
||||
if let Some(usage) = chunk.usage {
|
||||
self.usage = Some(usage.normalized());
|
||||
self.usage = Some(Usage {
|
||||
input_tokens: usage.prompt_tokens,
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: 0,
|
||||
output_tokens: usage.completion_tokens,
|
||||
});
|
||||
}
|
||||
|
||||
for choice in chunk.choices {
|
||||
if let Some(reasoning) = choice
|
||||
.delta
|
||||
.reasoning_content
|
||||
.filter(|value| !value.is_empty())
|
||||
{
|
||||
if !self.thinking_started {
|
||||
self.thinking_started = true;
|
||||
events.push(StreamEvent::ContentBlockStart(ContentBlockStartEvent {
|
||||
index: 0,
|
||||
content_block: OutputContentBlock::Thinking {
|
||||
thinking: String::new(),
|
||||
signature: None,
|
||||
},
|
||||
}));
|
||||
}
|
||||
events.push(StreamEvent::ContentBlockDelta(ContentBlockDeltaEvent {
|
||||
index: 0,
|
||||
delta: ContentBlockDelta::ThinkingDelta {
|
||||
thinking: reasoning,
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
if let Some(content) = choice.delta.content.filter(|value| !value.is_empty()) {
|
||||
self.close_thinking(&mut events);
|
||||
if !self.text_started {
|
||||
self.text_started = true;
|
||||
events.push(StreamEvent::ContentBlockStart(ContentBlockStartEvent {
|
||||
index: self.text_block_index(),
|
||||
index: 0,
|
||||
content_block: OutputContentBlock::Text {
|
||||
text: String::new(),
|
||||
},
|
||||
}));
|
||||
}
|
||||
events.push(StreamEvent::ContentBlockDelta(ContentBlockDeltaEvent {
|
||||
index: self.text_block_index(),
|
||||
index: 0,
|
||||
delta: ContentBlockDelta::TextDelta { text: content },
|
||||
}));
|
||||
}
|
||||
|
||||
for tool_call in choice.delta.tool_calls {
|
||||
self.close_thinking(&mut events);
|
||||
let tool_index_offset = self.tool_index_offset();
|
||||
let state = self.tool_calls.entry(tool_call.index).or_default();
|
||||
state.apply(tool_call);
|
||||
let block_index = state.block_index(tool_index_offset);
|
||||
let block_index = state.block_index();
|
||||
if !state.started {
|
||||
if let Some(start_event) = state.start_event(tool_index_offset)? {
|
||||
if let Some(start_event) = state.start_event()? {
|
||||
state.started = true;
|
||||
events.push(StreamEvent::ContentBlockStart(start_event));
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if let Some(delta_event) = state.delta_event(tool_index_offset) {
|
||||
if let Some(delta_event) = state.delta_event() {
|
||||
events.push(StreamEvent::ContentBlockDelta(delta_event));
|
||||
}
|
||||
if choice.finish_reason.as_deref() == Some("tool_calls") && !state.stopped {
|
||||
@@ -573,12 +535,11 @@ impl StreamState {
|
||||
if let Some(finish_reason) = choice.finish_reason {
|
||||
self.stop_reason = Some(normalize_finish_reason(&finish_reason));
|
||||
if finish_reason == "tool_calls" {
|
||||
let tool_index_offset = self.tool_index_offset();
|
||||
for state in self.tool_calls.values_mut() {
|
||||
if state.started && !state.stopped {
|
||||
state.stopped = true;
|
||||
events.push(StreamEvent::ContentBlockStop(ContentBlockStopEvent {
|
||||
index: state.block_index(tool_index_offset),
|
||||
index: state.block_index(),
|
||||
}));
|
||||
}
|
||||
}
|
||||
@@ -596,21 +557,19 @@ impl StreamState {
|
||||
self.finished = true;
|
||||
|
||||
let mut events = Vec::new();
|
||||
self.close_thinking(&mut events);
|
||||
if self.text_started && !self.text_finished {
|
||||
self.text_finished = true;
|
||||
events.push(StreamEvent::ContentBlockStop(ContentBlockStopEvent {
|
||||
index: self.text_block_index(),
|
||||
index: 0,
|
||||
}));
|
||||
}
|
||||
|
||||
let tool_index_offset = self.tool_index_offset();
|
||||
for state in self.tool_calls.values_mut() {
|
||||
if !state.started {
|
||||
if let Some(start_event) = state.start_event(tool_index_offset)? {
|
||||
if let Some(start_event) = state.start_event()? {
|
||||
state.started = true;
|
||||
events.push(StreamEvent::ContentBlockStart(start_event));
|
||||
if let Some(delta_event) = state.delta_event(tool_index_offset) {
|
||||
if let Some(delta_event) = state.delta_event() {
|
||||
events.push(StreamEvent::ContentBlockDelta(delta_event));
|
||||
}
|
||||
}
|
||||
@@ -618,7 +577,7 @@ impl StreamState {
|
||||
if state.started && !state.stopped {
|
||||
state.stopped = true;
|
||||
events.push(StreamEvent::ContentBlockStop(ContentBlockStopEvent {
|
||||
index: state.block_index(tool_index_offset),
|
||||
index: state.block_index(),
|
||||
}));
|
||||
}
|
||||
}
|
||||
@@ -644,31 +603,6 @@ impl StreamState {
|
||||
}
|
||||
Ok(events)
|
||||
}
|
||||
|
||||
fn close_thinking(&mut self, events: &mut Vec<StreamEvent>) {
|
||||
if self.thinking_started && !self.thinking_finished {
|
||||
self.thinking_finished = true;
|
||||
events.push(StreamEvent::ContentBlockStop(ContentBlockStopEvent {
|
||||
index: 0,
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
const fn text_block_index(&self) -> u32 {
|
||||
if self.thinking_started {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
const fn tool_index_offset(&self) -> u32 {
|
||||
if self.thinking_started {
|
||||
2
|
||||
} else {
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
@@ -696,12 +630,12 @@ impl ToolCallState {
|
||||
}
|
||||
}
|
||||
|
||||
const fn block_index(&self, offset: u32) -> u32 {
|
||||
self.openai_index + offset
|
||||
const fn block_index(&self) -> u32 {
|
||||
self.openai_index + 1
|
||||
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn start_event(&self, offset: u32) -> Result<Option<ContentBlockStartEvent>, ApiError> {
|
||||
fn start_event(&self) -> Result<Option<ContentBlockStartEvent>, ApiError> {
|
||||
let Some(name) = self.name.clone() else {
|
||||
return Ok(None);
|
||||
};
|
||||
@@ -710,7 +644,7 @@ impl ToolCallState {
|
||||
.clone()
|
||||
.unwrap_or_else(|| format!("tool_call_{}", self.openai_index));
|
||||
Ok(Some(ContentBlockStartEvent {
|
||||
index: self.block_index(offset),
|
||||
index: self.block_index(),
|
||||
content_block: OutputContentBlock::ToolUse {
|
||||
id,
|
||||
name,
|
||||
@@ -719,14 +653,14 @@ impl ToolCallState {
|
||||
}))
|
||||
}
|
||||
|
||||
fn delta_event(&mut self, offset: u32) -> Option<ContentBlockDeltaEvent> {
|
||||
fn delta_event(&mut self) -> Option<ContentBlockDeltaEvent> {
|
||||
if self.emitted_len >= self.arguments.len() {
|
||||
return None;
|
||||
}
|
||||
let delta = self.arguments[self.emitted_len..].to_string();
|
||||
self.emitted_len = self.arguments.len();
|
||||
Some(ContentBlockDeltaEvent {
|
||||
index: self.block_index(offset),
|
||||
index: self.block_index(),
|
||||
delta: ContentBlockDelta::InputJsonDelta {
|
||||
partial_json: delta,
|
||||
},
|
||||
@@ -756,8 +690,6 @@ struct ChatMessage {
|
||||
#[serde(default)]
|
||||
content: Option<String>,
|
||||
#[serde(default)]
|
||||
reasoning_content: Option<String>,
|
||||
#[serde(default)]
|
||||
tool_calls: Vec<ResponseToolCall>,
|
||||
}
|
||||
|
||||
@@ -779,29 +711,6 @@ struct OpenAiUsage {
|
||||
prompt_tokens: u32,
|
||||
#[serde(default)]
|
||||
completion_tokens: u32,
|
||||
#[serde(default)]
|
||||
prompt_tokens_details: Option<OpenAiPromptTokensDetails>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct OpenAiPromptTokensDetails {
|
||||
#[serde(default)]
|
||||
cached_tokens: u32,
|
||||
}
|
||||
|
||||
impl OpenAiUsage {
|
||||
fn normalized(&self) -> Usage {
|
||||
let cached_tokens = self
|
||||
.prompt_tokens_details
|
||||
.as_ref()
|
||||
.map_or(0, |details| details.cached_tokens);
|
||||
Usage {
|
||||
input_tokens: self.prompt_tokens.saturating_sub(cached_tokens),
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: cached_tokens,
|
||||
output_tokens: self.completion_tokens,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -826,8 +735,6 @@ struct ChunkChoice {
|
||||
struct ChunkDelta {
|
||||
#[serde(default)]
|
||||
content: Option<String>,
|
||||
#[serde(default)]
|
||||
reasoning_content: Option<String>,
|
||||
#[serde(default, deserialize_with = "deserialize_null_as_empty_vec")]
|
||||
tool_calls: Vec<DeltaToolCall>,
|
||||
}
|
||||
@@ -886,19 +793,9 @@ pub fn is_reasoning_model(model: &str) -> bool {
|
||||
|| canonical.contains("thinking")
|
||||
}
|
||||
|
||||
/// Returns true for OpenAI-compatible `DeepSeek` V4 models that require prior
|
||||
/// assistant reasoning to be echoed back as `reasoning_content` in history.
|
||||
#[must_use]
|
||||
pub fn model_requires_reasoning_content_in_history(model: &str) -> bool {
|
||||
let lowered = model.to_ascii_lowercase();
|
||||
let canonical = lowered.rsplit('/').next().unwrap_or(lowered.as_str());
|
||||
canonical.starts_with("deepseek-v4")
|
||||
}
|
||||
|
||||
/// Strip routing prefix (e.g., "openai/gpt-4" → "gpt-4") for the wire.
|
||||
/// The prefix is used only to select transport; the backend expects the
|
||||
/// bare model id.
|
||||
#[allow(dead_code)]
|
||||
fn strip_routing_prefix(model: &str) -> &str {
|
||||
if let Some(pos) = model.find('/') {
|
||||
let prefix = &model[..pos];
|
||||
@@ -914,51 +811,10 @@ fn strip_routing_prefix(model: &str) -> &str {
|
||||
}
|
||||
}
|
||||
|
||||
fn wire_model_for_base_url<'a>(
|
||||
model: &'a str,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> Cow<'a, str> {
|
||||
let Some(pos) = model.find('/') else {
|
||||
return Cow::Borrowed(model);
|
||||
};
|
||||
let prefix = &model[..pos];
|
||||
let lowered_prefix = prefix.to_ascii_lowercase();
|
||||
|
||||
if lowered_prefix == "openai" {
|
||||
let trimmed_base_url = base_url.trim_end_matches('/');
|
||||
let default_openai = DEFAULT_OPENAI_BASE_URL.trim_end_matches('/');
|
||||
if config.provider_name == "OpenAI" && trimmed_base_url != default_openai {
|
||||
// OpenAI-compatible gateways such as OpenRouter commonly use
|
||||
// slash-containing model slugs (for example `openai/gpt-4.1-mini`).
|
||||
// Preserve the slug when the user configured a non-default OpenAI
|
||||
// base URL; the prefix still routed to the OpenAI-compatible client,
|
||||
// but the gateway owns the final model namespace.
|
||||
return Cow::Borrowed(model);
|
||||
}
|
||||
return Cow::Borrowed(&model[pos + 1..]);
|
||||
}
|
||||
|
||||
if matches!(lowered_prefix.as_str(), "xai" | "grok" | "qwen" | "kimi") {
|
||||
return Cow::Borrowed(&model[pos + 1..]);
|
||||
}
|
||||
|
||||
Cow::Borrowed(model)
|
||||
}
|
||||
|
||||
/// Estimate the serialized JSON size of a request payload in bytes.
|
||||
/// This is a pre-flight check to avoid hitting provider-specific size limits.
|
||||
#[must_use]
|
||||
pub fn estimate_request_body_size(request: &MessageRequest, config: OpenAiCompatConfig) -> usize {
|
||||
estimate_request_body_size_for_base_url(request, config, &read_base_url(config))
|
||||
}
|
||||
|
||||
fn estimate_request_body_size_for_base_url(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> usize {
|
||||
let payload = build_chat_completion_request_for_base_url(request, config, base_url);
|
||||
let payload = build_chat_completion_request(request, config);
|
||||
// serde_json::to_vec gives us the exact byte size of the serialized JSON
|
||||
serde_json::to_vec(&payload).map_or(0, |v| v.len())
|
||||
}
|
||||
@@ -970,15 +826,7 @@ pub fn check_request_body_size(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
) -> Result<(), ApiError> {
|
||||
check_request_body_size_for_base_url(request, config, &read_base_url(config))
|
||||
}
|
||||
|
||||
fn check_request_body_size_for_base_url(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> Result<(), ApiError> {
|
||||
let estimated_bytes = estimate_request_body_size_for_base_url(request, config, base_url);
|
||||
let estimated_bytes = estimate_request_body_size(request, config);
|
||||
let max_bytes = config.max_request_body_bytes;
|
||||
|
||||
if estimated_bytes > max_bytes {
|
||||
@@ -994,18 +842,9 @@ fn check_request_body_size_for_base_url(
|
||||
|
||||
/// Builds a chat completion request payload from a `MessageRequest`.
|
||||
/// Public for benchmarking purposes.
|
||||
#[must_use]
|
||||
pub fn build_chat_completion_request(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
) -> Value {
|
||||
build_chat_completion_request_for_base_url(request, config, &read_base_url(config))
|
||||
}
|
||||
|
||||
fn build_chat_completion_request_for_base_url(
|
||||
request: &MessageRequest,
|
||||
config: OpenAiCompatConfig,
|
||||
base_url: &str,
|
||||
) -> Value {
|
||||
let mut messages = Vec::new();
|
||||
if let Some(system) = request.system.as_ref().filter(|value| !value.is_empty()) {
|
||||
@@ -1014,10 +853,8 @@ fn build_chat_completion_request_for_base_url(
|
||||
"content": system,
|
||||
}));
|
||||
}
|
||||
// Resolve the transport routing prefix into the wire model. Custom
|
||||
// OpenAI-compatible gateways may require slash-containing slugs intact.
|
||||
let wire_model = wire_model_for_base_url(&request.model, config, base_url);
|
||||
let wire_model = wire_model.as_ref();
|
||||
// Strip routing prefix (e.g., "openai/gpt-4" → "gpt-4") for the wire.
|
||||
let wire_model = strip_routing_prefix(&request.model);
|
||||
for message in &request.messages {
|
||||
messages.extend(translate_message(message, wire_model));
|
||||
}
|
||||
@@ -1086,29 +923,9 @@ fn build_chat_completion_request_for_base_url(
|
||||
payload["reasoning_effort"] = json!(effort);
|
||||
}
|
||||
|
||||
for (key, value) in &request.extra_body {
|
||||
if is_protected_extra_body_key(key) {
|
||||
continue;
|
||||
}
|
||||
payload[key] = value.clone();
|
||||
}
|
||||
|
||||
payload
|
||||
}
|
||||
|
||||
fn is_protected_extra_body_key(key: &str) -> bool {
|
||||
matches!(
|
||||
key,
|
||||
"model"
|
||||
| "messages"
|
||||
| "stream"
|
||||
| "tools"
|
||||
| "tool_choice"
|
||||
| "max_tokens"
|
||||
| "max_completion_tokens"
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns true for models that do NOT support the `is_error` field in tool results.
|
||||
/// kimi models (via Moonshot AI/Dashscope) reject this field with 400 Bad Request.
|
||||
/// Returns true for models that do NOT support the `is_error` field in tool results.
|
||||
@@ -1131,14 +948,10 @@ pub fn translate_message(message: &InputMessage, model: &str) -> Vec<Value> {
|
||||
match message.role.as_str() {
|
||||
"assistant" => {
|
||||
let mut text = String::new();
|
||||
let mut reasoning = String::new();
|
||||
let mut tool_calls = Vec::new();
|
||||
for block in &message.content {
|
||||
match block {
|
||||
InputContentBlock::Text { text: value } => text.push_str(value),
|
||||
InputContentBlock::Thinking {
|
||||
thinking: value, ..
|
||||
} => reasoning.push_str(value),
|
||||
InputContentBlock::ToolUse { id, name, input } => tool_calls.push(json!({
|
||||
"id": id,
|
||||
"type": "function",
|
||||
@@ -1150,18 +963,13 @@ pub fn translate_message(message: &InputMessage, model: &str) -> Vec<Value> {
|
||||
InputContentBlock::ToolResult { .. } => {}
|
||||
}
|
||||
}
|
||||
let include_reasoning =
|
||||
model_requires_reasoning_content_in_history(model) && !reasoning.is_empty();
|
||||
if text.is_empty() && tool_calls.is_empty() && !include_reasoning {
|
||||
if text.is_empty() && tool_calls.is_empty() {
|
||||
Vec::new()
|
||||
} else {
|
||||
let mut msg = serde_json::json!({
|
||||
"role": "assistant",
|
||||
"content": (!text.is_empty()).then_some(text),
|
||||
});
|
||||
if include_reasoning {
|
||||
msg["reasoning_content"] = json!(reasoning);
|
||||
}
|
||||
// Only include tool_calls when non-empty: some providers reject
|
||||
// assistant messages with an explicit empty tool_calls array.
|
||||
if !tool_calls.is_empty() {
|
||||
@@ -1195,7 +1003,7 @@ pub fn translate_message(message: &InputMessage, model: &str) -> Vec<Value> {
|
||||
}
|
||||
Some(msg)
|
||||
}
|
||||
InputContentBlock::Thinking { .. } | InputContentBlock::ToolUse { .. } => None,
|
||||
InputContentBlock::ToolUse { .. } => None,
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
@@ -1374,16 +1182,6 @@ fn normalize_response(
|
||||
"chat completion response missing choices",
|
||||
))?;
|
||||
let mut content = Vec::new();
|
||||
if let Some(thinking) = choice
|
||||
.message
|
||||
.reasoning_content
|
||||
.filter(|value| !value.is_empty())
|
||||
{
|
||||
content.push(OutputContentBlock::Thinking {
|
||||
thinking,
|
||||
signature: None,
|
||||
});
|
||||
}
|
||||
if let Some(text) = choice.message.content.filter(|value| !value.is_empty()) {
|
||||
content.push(OutputContentBlock::Text { text });
|
||||
}
|
||||
@@ -1405,10 +1203,18 @@ fn normalize_response(
|
||||
.finish_reason
|
||||
.map(|value| normalize_finish_reason(&value)),
|
||||
stop_sequence: None,
|
||||
usage: response
|
||||
.usage
|
||||
.as_ref()
|
||||
.map_or_else(Usage::default, OpenAiUsage::normalized),
|
||||
usage: Usage {
|
||||
input_tokens: response
|
||||
.usage
|
||||
.as_ref()
|
||||
.map_or(0, |usage| usage.prompt_tokens),
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: 0,
|
||||
output_tokens: response
|
||||
.usage
|
||||
.as_ref()
|
||||
.map_or(0, |usage| usage.completion_tokens),
|
||||
},
|
||||
request_id: None,
|
||||
})
|
||||
}
|
||||
@@ -1607,18 +1413,15 @@ impl StringExt for String {
|
||||
mod tests {
|
||||
use super::{
|
||||
build_chat_completion_request, chat_completions_endpoint, is_reasoning_model,
|
||||
model_requires_reasoning_content_in_history, normalize_finish_reason, normalize_response,
|
||||
openai_tool_choice, parse_tool_arguments, OpenAiCompatClient, OpenAiCompatConfig,
|
||||
StreamState,
|
||||
normalize_finish_reason, openai_tool_choice, parse_tool_arguments, OpenAiCompatClient,
|
||||
OpenAiCompatConfig,
|
||||
};
|
||||
use crate::error::ApiError;
|
||||
use crate::types::{
|
||||
ContentBlockDelta, ContentBlockDeltaEvent, ContentBlockStartEvent, ContentBlockStopEvent,
|
||||
InputContentBlock, InputMessage, MessageRequest, OutputContentBlock, StreamEvent,
|
||||
ToolChoice, ToolDefinition, ToolResultContentBlock,
|
||||
InputContentBlock, InputMessage, MessageRequest, ToolChoice, ToolDefinition,
|
||||
ToolResultContentBlock,
|
||||
};
|
||||
use serde_json::json;
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
|
||||
#[test]
|
||||
@@ -1662,188 +1465,6 @@ mod tests {
|
||||
assert_eq!(payload["tool_choice"], json!("auto"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn model_requires_reasoning_content_in_history_detects_deepseek_v4_models() {
|
||||
// Given DeepSeek V4 and non-V4 model names.
|
||||
let positive = [
|
||||
"deepseek-v4-flash",
|
||||
"deepseek-v4-pro",
|
||||
"openai/deepseek-v4-pro",
|
||||
"deepseek/deepseek-v4-flash",
|
||||
];
|
||||
let negative = [
|
||||
"deepseek-reasoner",
|
||||
"deepseek-chat",
|
||||
"gpt-4o",
|
||||
"claude-sonnet-4-6",
|
||||
];
|
||||
|
||||
// When checking whether history reasoning_content is required.
|
||||
// Then only DeepSeek V4 variants require it.
|
||||
for model in positive {
|
||||
assert!(model_requires_reasoning_content_in_history(model));
|
||||
}
|
||||
for model in negative {
|
||||
assert!(!model_requires_reasoning_content_in_history(model));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn legacy_deepseek_reasoner_request_omits_reasoning_content_for_assistant_history() {
|
||||
// Given an assistant history turn containing thinking.
|
||||
let request = assistant_history_with_thinking_request("deepseek-reasoner");
|
||||
|
||||
// When serializing for legacy deepseek-reasoner.
|
||||
let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
|
||||
|
||||
// Then reasoning_content is omitted.
|
||||
let assistant = &payload["messages"][0];
|
||||
assert_eq!(assistant["role"], json!("assistant"));
|
||||
assert!(assistant.get("reasoning_content").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deepseek_v4_pro_request_includes_reasoning_content_for_assistant_history() {
|
||||
// Given an assistant history turn containing thinking.
|
||||
let request = assistant_history_with_thinking_request("openai/deepseek-v4-pro");
|
||||
|
||||
// When serializing for DeepSeek V4 Pro.
|
||||
let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
|
||||
|
||||
// Then reasoning_content is included on the assistant message.
|
||||
let assistant = &payload["messages"][0];
|
||||
assert_eq!(assistant["reasoning_content"], json!("prior reasoning"));
|
||||
assert_eq!(assistant["content"], json!("answer"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deepseek_v4_flash_request_includes_reasoning_content_for_assistant_history() {
|
||||
// Given an assistant history turn containing thinking.
|
||||
let request = assistant_history_with_thinking_request("deepseek-v4-flash");
|
||||
|
||||
// When serializing for DeepSeek V4 Flash.
|
||||
let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
|
||||
|
||||
// Then reasoning_content is included on the assistant message.
|
||||
let assistant = &payload["messages"][0];
|
||||
assert_eq!(assistant["reasoning_content"], json!("prior reasoning"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_streaming_response_with_reasoning_content_emits_thinking_block_first() {
|
||||
// Given a non-streaming OpenAI-compatible response with reasoning_content.
|
||||
let response = super::ChatCompletionResponse {
|
||||
id: "chatcmpl_reasoning".to_string(),
|
||||
model: "deepseek-v4-pro".to_string(),
|
||||
choices: vec![super::ChatChoice {
|
||||
message: super::ChatMessage {
|
||||
role: "assistant".to_string(),
|
||||
content: Some("final answer".to_string()),
|
||||
reasoning_content: Some("hidden thought".to_string()),
|
||||
tool_calls: Vec::new(),
|
||||
},
|
||||
finish_reason: Some("stop".to_string()),
|
||||
}],
|
||||
usage: None,
|
||||
};
|
||||
|
||||
// When normalizing the provider response.
|
||||
let normalized = normalize_response("deepseek-v4-pro", response).expect("normalized");
|
||||
|
||||
// Then Thinking is the first content block, before text.
|
||||
assert_eq!(
|
||||
normalized.content,
|
||||
vec![
|
||||
OutputContentBlock::Thinking {
|
||||
thinking: "hidden thought".to_string(),
|
||||
signature: None,
|
||||
},
|
||||
OutputContentBlock::Text {
|
||||
text: "final answer".to_string(),
|
||||
},
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn streaming_chunks_with_reasoning_content_emit_thinking_block_events_before_text() {
|
||||
// Given streaming chunks with reasoning_content followed by text.
|
||||
let mut state = StreamState::new("deepseek-v4-pro".to_string());
|
||||
let mut events = state
|
||||
.ingest_chunk(super::ChatCompletionChunk {
|
||||
id: "chatcmpl_stream_reasoning".to_string(),
|
||||
model: Some("deepseek-v4-pro".to_string()),
|
||||
choices: vec![super::ChunkChoice {
|
||||
delta: super::ChunkDelta {
|
||||
content: None,
|
||||
reasoning_content: Some("think".to_string()),
|
||||
tool_calls: Vec::new(),
|
||||
},
|
||||
finish_reason: None,
|
||||
}],
|
||||
usage: None,
|
||||
})
|
||||
.expect("reasoning chunk");
|
||||
events.extend(
|
||||
state
|
||||
.ingest_chunk(super::ChatCompletionChunk {
|
||||
id: "chatcmpl_stream_reasoning".to_string(),
|
||||
model: None,
|
||||
choices: vec![super::ChunkChoice {
|
||||
delta: super::ChunkDelta {
|
||||
content: Some(" answer".to_string()),
|
||||
reasoning_content: None,
|
||||
tool_calls: Vec::new(),
|
||||
},
|
||||
finish_reason: Some("stop".to_string()),
|
||||
}],
|
||||
usage: None,
|
||||
})
|
||||
.expect("text chunk"),
|
||||
);
|
||||
events.extend(state.finish().expect("finish"));
|
||||
|
||||
// When reading normalized stream events.
|
||||
// Then Thinking starts at index 0, text is offset to index 1.
|
||||
assert!(matches!(events[0], StreamEvent::MessageStart(_)));
|
||||
assert!(matches!(
|
||||
events[1],
|
||||
StreamEvent::ContentBlockStart(ContentBlockStartEvent {
|
||||
index: 0,
|
||||
content_block: OutputContentBlock::Thinking { .. },
|
||||
})
|
||||
));
|
||||
assert!(matches!(
|
||||
events[2],
|
||||
StreamEvent::ContentBlockDelta(ContentBlockDeltaEvent {
|
||||
index: 0,
|
||||
delta: ContentBlockDelta::ThinkingDelta { .. },
|
||||
})
|
||||
));
|
||||
assert!(matches!(
|
||||
events[3],
|
||||
StreamEvent::ContentBlockStop(ContentBlockStopEvent { index: 0 })
|
||||
));
|
||||
assert!(matches!(
|
||||
events[4],
|
||||
StreamEvent::ContentBlockStart(ContentBlockStartEvent {
|
||||
index: 1,
|
||||
content_block: OutputContentBlock::Text { .. },
|
||||
})
|
||||
));
|
||||
assert!(matches!(
|
||||
events[5],
|
||||
StreamEvent::ContentBlockDelta(ContentBlockDeltaEvent {
|
||||
index: 1,
|
||||
delta: ContentBlockDelta::TextDelta { .. },
|
||||
})
|
||||
));
|
||||
assert!(matches!(
|
||||
events[6],
|
||||
StreamEvent::ContentBlockStop(ContentBlockStopEvent { index: 1 })
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tool_schema_object_gets_strict_fields_for_responses_endpoint() {
|
||||
// OpenAI /responses endpoint rejects object schemas missing
|
||||
@@ -2003,27 +1624,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
fn assistant_history_with_thinking_request(model: &str) -> MessageRequest {
|
||||
MessageRequest {
|
||||
model: model.to_string(),
|
||||
max_tokens: 100,
|
||||
messages: vec![InputMessage {
|
||||
role: "assistant".to_string(),
|
||||
content: vec![
|
||||
InputContentBlock::Thinking {
|
||||
thinking: "prior reasoning".to_string(),
|
||||
signature: None,
|
||||
},
|
||||
InputContentBlock::Text {
|
||||
text: "answer".to_string(),
|
||||
},
|
||||
],
|
||||
}],
|
||||
stream: false,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn env_lock() -> std::sync::MutexGuard<'static, ()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(|| Mutex::new(()))
|
||||
@@ -2053,7 +1653,6 @@ mod tests {
|
||||
presence_penalty: Some(0.3),
|
||||
stop: Some(vec!["\n".to_string()]),
|
||||
reasoning_effort: None,
|
||||
extra_body: BTreeMap::new(),
|
||||
};
|
||||
let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
|
||||
assert_eq!(payload["temperature"], 0.7);
|
||||
@@ -2063,39 +1662,6 @@ mod tests {
|
||||
assert_eq!(payload["stop"], json!(["\n"]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_body_params_are_passed_through_without_overriding_core_fields() {
|
||||
let mut extra_body = BTreeMap::new();
|
||||
extra_body.insert(
|
||||
"web_search_options".to_string(),
|
||||
json!({"search_context_size": "medium"}),
|
||||
);
|
||||
extra_body.insert("parallel_tool_calls".to_string(), json!(false));
|
||||
extra_body.insert("model".to_string(), json!("bad-override"));
|
||||
extra_body.insert("messages".to_string(), json!([]));
|
||||
extra_body.insert("max_tokens".to_string(), json!(1));
|
||||
|
||||
let payload = build_chat_completion_request(
|
||||
&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
max_tokens: 1024,
|
||||
messages: vec![InputMessage::user_text("hello")],
|
||||
extra_body,
|
||||
..Default::default()
|
||||
},
|
||||
OpenAiCompatConfig::openai(),
|
||||
);
|
||||
|
||||
assert_eq!(payload["model"], json!("gpt-4o"));
|
||||
assert_eq!(payload["max_tokens"], json!(1024));
|
||||
assert_eq!(payload["messages"].as_array().map(Vec::len), Some(1));
|
||||
assert_eq!(
|
||||
payload["web_search_options"],
|
||||
json!({"search_context_size": "medium"})
|
||||
);
|
||||
assert_eq!(payload["parallel_tool_calls"], json!(false));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reasoning_model_strips_tuning_params() {
|
||||
let request = MessageRequest {
|
||||
@@ -2629,16 +2195,9 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn provider_specific_size_limits_are_correct() {
|
||||
assert_eq!(
|
||||
OpenAiCompatConfig::dashscope().max_request_body_bytes,
|
||||
6_291_456
|
||||
); // 6MB
|
||||
assert_eq!(
|
||||
OpenAiCompatConfig::openai().max_request_body_bytes,
|
||||
104_857_600
|
||||
); // 100MB
|
||||
assert_eq!(OpenAiCompatConfig::xai().max_request_body_bytes, 52_428_800);
|
||||
// 50MB
|
||||
assert_eq!(OpenAiCompatConfig::dashscope().max_request_body_bytes, 6_291_456); // 6MB
|
||||
assert_eq!(OpenAiCompatConfig::openai().max_request_body_bytes, 104_857_600); // 100MB
|
||||
assert_eq!(OpenAiCompatConfig::xai().max_request_body_bytes, 52_428_800); // 50MB
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use runtime::{pricing_for_model, TokenUsage, UsageCostEstimate};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
@@ -33,14 +31,6 @@ pub struct MessageRequest {
|
||||
/// Silently ignored by backends that do not support it.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub reasoning_effort: Option<String>,
|
||||
/// Provider-specific OpenAI-compatible request body parameters. These are
|
||||
/// copied into the final JSON payload after core fields are populated so
|
||||
/// users can opt into gateway features such as `web_search_options`,
|
||||
/// `parallel_tool_calls`, or custom local-server switches without waiting
|
||||
/// for first-class typed fields. Core protocol keys are protected and cannot
|
||||
/// be overridden through this map.
|
||||
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||
pub extra_body: BTreeMap<String, Value>,
|
||||
}
|
||||
|
||||
impl MessageRequest {
|
||||
@@ -91,11 +81,6 @@ pub enum InputContentBlock {
|
||||
Text {
|
||||
text: String,
|
||||
},
|
||||
Thinking {
|
||||
thinking: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
signature: Option<String>,
|
||||
},
|
||||
ToolUse {
|
||||
id: String,
|
||||
name: String,
|
||||
@@ -283,9 +268,8 @@ pub enum StreamEvent {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use runtime::format_usd;
|
||||
use serde_json::json;
|
||||
|
||||
use super::{InputContentBlock, MessageResponse, Usage};
|
||||
use super::{MessageResponse, Usage};
|
||||
|
||||
#[test]
|
||||
fn usage_total_tokens_includes_cache_tokens() {
|
||||
@@ -323,33 +307,4 @@ mod tests {
|
||||
assert_eq!(format_usd(cost.total_cost_usd()), "$54.6750");
|
||||
assert_eq!(response.total_tokens(), 1_800_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn input_content_block_thinking_serializes_with_snake_case_type() {
|
||||
// given
|
||||
let block = InputContentBlock::Thinking {
|
||||
thinking: "pondering".to_string(),
|
||||
signature: Some("sig_123".to_string()),
|
||||
};
|
||||
|
||||
// when
|
||||
let serialized = serde_json::to_value(&block).unwrap();
|
||||
let deserialized: InputContentBlock = serde_json::from_value(json!({
|
||||
"type": "thinking",
|
||||
"thinking": "pondering",
|
||||
"signature": "sig_123"
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
serialized,
|
||||
json!({
|
||||
"type": "thinking",
|
||||
"thinking": "pondering",
|
||||
"signature": "sig_123"
|
||||
})
|
||||
);
|
||||
assert_eq!(deserialized, block);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,13 +2,12 @@ use std::collections::HashMap;
|
||||
use std::ffi::OsString;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Mutex as StdMutex, OnceLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use api::{
|
||||
build_http_client_with, ApiError, ContentBlockDelta, ContentBlockDeltaEvent,
|
||||
ContentBlockStartEvent, ContentBlockStopEvent, InputContentBlock, InputMessage,
|
||||
MessageDeltaEvent, MessageRequest, OpenAiCompatClient, OpenAiCompatConfig, OutputContentBlock,
|
||||
ProviderClient, ProxyConfig, StreamEvent, ToolChoice, ToolDefinition,
|
||||
ApiError, ContentBlockDelta, ContentBlockDeltaEvent, ContentBlockStartEvent,
|
||||
ContentBlockStopEvent, InputContentBlock, InputMessage, MessageDeltaEvent, MessageRequest,
|
||||
OpenAiCompatClient, OpenAiCompatConfig, OutputContentBlock, ProviderClient, StreamEvent,
|
||||
ToolChoice, ToolDefinition,
|
||||
};
|
||||
use serde_json::json;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
@@ -26,7 +25,7 @@ async fn send_message_uses_openai_compatible_endpoint_and_auth() {
|
||||
"\"message\":{\"role\":\"assistant\",\"content\":\"Hello from Grok\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":11,\"completion_tokens\":5,\"prompt_tokens_details\":{\"cached_tokens\":3}}",
|
||||
"\"usage\":{\"prompt_tokens\":11,\"completion_tokens\":5}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
@@ -43,9 +42,6 @@ async fn send_message_uses_openai_compatible_endpoint_and_auth() {
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(response.model, "grok-3");
|
||||
assert_eq!(response.usage.input_tokens, 8);
|
||||
assert_eq!(response.usage.cache_read_input_tokens, 3);
|
||||
assert_eq!(response.usage.output_tokens, 5);
|
||||
assert_eq!(response.total_tokens(), 16);
|
||||
assert_eq!(
|
||||
response.content,
|
||||
@@ -67,153 +63,6 @@ async fn send_message_uses_openai_compatible_endpoint_and_auth() {
|
||||
assert_eq!(body["tools"][0]["type"], json!("function"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_message_passes_optional_openai_compatible_parameters_on_wire() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let body = concat!(
|
||||
"{",
|
||||
"\"id\":\"chatcmpl_params\",",
|
||||
"\"model\":\"gpt-4o\",",
|
||||
"\"choices\":[{",
|
||||
"\"message\":{\"role\":\"assistant\",\"content\":\"Parameters preserved\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":3,\"completion_tokens\":2}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response("200 OK", "application/json", body)],
|
||||
)
|
||||
.await;
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url());
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
temperature: Some(0.2),
|
||||
top_p: Some(0.8),
|
||||
frequency_penalty: Some(0.15),
|
||||
presence_penalty: Some(0.25),
|
||||
stop: Some(vec!["END".to_string()]),
|
||||
reasoning_effort: Some("low".to_string()),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(response.total_tokens(), 5);
|
||||
|
||||
let captured = state.lock().await;
|
||||
let request = captured.first().expect("server should capture request");
|
||||
let body: serde_json::Value = serde_json::from_str(&request.body).expect("json body");
|
||||
assert_eq!(body["model"], json!("gpt-4o"));
|
||||
assert_eq!(body["temperature"], json!(0.2));
|
||||
assert_eq!(body["top_p"], json!(0.8));
|
||||
assert_eq!(body["frequency_penalty"], json!(0.15));
|
||||
assert_eq!(body["presence_penalty"], json!(0.25));
|
||||
assert_eq!(body["stop"], json!(["END"]));
|
||||
assert_eq!(body["reasoning_effort"], json!("low"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_message_preserves_deepseek_reasoning_content_before_text() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let body = concat!(
|
||||
"{",
|
||||
"\"id\":\"chatcmpl_deepseek_reasoning\",",
|
||||
"\"model\":\"deepseek-v4-pro\",",
|
||||
"\"choices\":[{",
|
||||
"\"message\":{\"role\":\"assistant\",\"reasoning_content\":\"Think first\",\"content\":\"Answer second\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":11,\"completion_tokens\":5}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response("200 OK", "application/json", body)],
|
||||
)
|
||||
.await;
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url());
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "openai/deepseek-v4-pro".to_string(),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("request should succeed");
|
||||
|
||||
assert_eq!(
|
||||
response.content,
|
||||
vec![
|
||||
OutputContentBlock::Thinking {
|
||||
thinking: "Think first".to_string(),
|
||||
signature: None,
|
||||
},
|
||||
OutputContentBlock::Text {
|
||||
text: "Answer second".to_string(),
|
||||
},
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn custom_openai_gateway_preserves_slash_model_ids_and_extra_body_params() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let body = concat!(
|
||||
"{",
|
||||
"\"id\":\"chatcmpl_slash_model\",",
|
||||
"\"model\":\"openai/gpt-4.1-mini\",",
|
||||
"\"choices\":[{",
|
||||
"\"message\":{\"role\":\"assistant\",\"content\":\"Gateway accepted slug\",\"tool_calls\":[]},",
|
||||
"\"finish_reason\":\"stop\"",
|
||||
"}],",
|
||||
"\"usage\":{\"prompt_tokens\":3,\"completion_tokens\":2}",
|
||||
"}"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response("200 OK", "application/json", body)],
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut extra_body = std::collections::BTreeMap::new();
|
||||
extra_body.insert(
|
||||
"web_search_options".to_string(),
|
||||
json!({"search_context_size": "low"}),
|
||||
);
|
||||
extra_body.insert("parallel_tool_calls".to_string(), json!(false));
|
||||
extra_body.insert("model".to_string(), json!("malicious-override"));
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url());
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "openai/gpt-4.1-mini".to_string(),
|
||||
extra_body,
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("gateway request should succeed");
|
||||
|
||||
assert_eq!(response.model, "openai/gpt-4.1-mini");
|
||||
assert_eq!(response.total_tokens(), 5);
|
||||
|
||||
let captured = state.lock().await;
|
||||
let request = captured.first().expect("captured request");
|
||||
let body: serde_json::Value = serde_json::from_str(&request.body).expect("json body");
|
||||
assert_eq!(body["model"], json!("openai/gpt-4.1-mini"));
|
||||
assert_eq!(
|
||||
body["web_search_options"],
|
||||
json!({"search_context_size": "low"})
|
||||
);
|
||||
assert_eq!(body["parallel_tool_calls"], json!(false));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn send_message_blocks_oversized_xai_requests_before_the_http_call() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
@@ -384,65 +233,6 @@ async fn stream_message_normalizes_text_and_multiple_tool_calls() {
|
||||
assert!(request.body.contains("\"stream\":true"));
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn stream_message_retries_retryable_sse_handshake_failures() {
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let sse = concat!(
|
||||
"data: {\"id\":\"chatcmpl_stream_retry\",\"model\":\"gpt-4o\",\"choices\":[{\"delta\":{\"content\":\"Recovered\"}}]}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_stream_retry\",\"choices\":[{\"delta\":{},\"finish_reason\":\"stop\"}]}\n\n",
|
||||
"data: [DONE]\n\n"
|
||||
);
|
||||
let server = spawn_server(
|
||||
state.clone(),
|
||||
vec![
|
||||
http_response(
|
||||
"500 Internal Server Error",
|
||||
"application/json",
|
||||
"{\"error\":{\"message\":\"try again\",\"type\":\"server_error\",\"code\":500}}",
|
||||
),
|
||||
http_response_with_headers(
|
||||
"200 OK",
|
||||
"text/event-stream",
|
||||
sse,
|
||||
&[("x-request-id", "req_stream_retry")],
|
||||
),
|
||||
],
|
||||
)
|
||||
.await;
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_base_url(server.base_url())
|
||||
.with_retry_policy(1, Duration::ZERO, Duration::ZERO);
|
||||
let mut stream = client
|
||||
.stream_message(&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("stream should retry once then start");
|
||||
|
||||
assert_eq!(stream.request_id(), Some("req_stream_retry"));
|
||||
let mut events = Vec::new();
|
||||
while let Some(event) = stream.next_event().await.expect("event should parse") {
|
||||
events.push(event);
|
||||
}
|
||||
assert!(events.iter().any(|event| matches!(
|
||||
event,
|
||||
StreamEvent::ContentBlockDelta(ContentBlockDeltaEvent {
|
||||
delta: ContentBlockDelta::TextDelta { text },
|
||||
..
|
||||
}) if text == "Recovered"
|
||||
)));
|
||||
|
||||
let captured = state.lock().await;
|
||||
assert_eq!(captured.len(), 2, "one original request plus one retry");
|
||||
for request in captured.iter() {
|
||||
let body: serde_json::Value = serde_json::from_str(&request.body).expect("json body");
|
||||
assert_eq!(body["stream"], json!(true));
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
@@ -450,7 +240,7 @@ async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
let sse = concat!(
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"model\":\"gpt-5\",\"choices\":[{\"delta\":{\"content\":\"Hi\"}}]}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"choices\":[{\"delta\":{},\"finish_reason\":\"stop\"}]}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"choices\":[],\"usage\":{\"prompt_tokens\":9,\"completion_tokens\":4,\"prompt_tokens_details\":{\"cached_tokens\":2}}}\n\n",
|
||||
"data: {\"id\":\"chatcmpl_openai_stream\",\"choices\":[],\"usage\":{\"prompt_tokens\":9,\"completion_tokens\":4}}\n\n",
|
||||
"data: [DONE]\n\n"
|
||||
);
|
||||
let server = spawn_server(
|
||||
@@ -505,10 +295,8 @@ async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
|
||||
match &events[4] {
|
||||
StreamEvent::MessageDelta(MessageDeltaEvent { usage, .. }) => {
|
||||
assert_eq!(usage.input_tokens, 7);
|
||||
assert_eq!(usage.cache_read_input_tokens, 2);
|
||||
assert_eq!(usage.input_tokens, 9);
|
||||
assert_eq!(usage.output_tokens, 4);
|
||||
assert_eq!(usage.total_tokens(), 13);
|
||||
}
|
||||
other => panic!("expected message delta, got {other:?}"),
|
||||
}
|
||||
@@ -521,44 +309,6 @@ async fn openai_streaming_requests_opt_into_usage_chunks() {
|
||||
assert_eq!(body["stream_options"], json!({"include_usage": true}));
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn openai_compatible_client_honors_http_proxy_for_requests() {
|
||||
let _lock = env_lock();
|
||||
let state = Arc::new(Mutex::new(Vec::<CapturedRequest>::new()));
|
||||
let proxy = spawn_server(
|
||||
state.clone(),
|
||||
vec![http_response(
|
||||
"200 OK",
|
||||
"application/json",
|
||||
"{\"id\":\"chatcmpl_proxy\",\"model\":\"gpt-4o\",\"choices\":[{\"message\":{\"role\":\"assistant\",\"content\":\"Via proxy\",\"tool_calls\":[]},\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":4,\"completion_tokens\":3}}",
|
||||
)],
|
||||
)
|
||||
.await;
|
||||
let proxied_http = build_http_client_with(&ProxyConfig::from_proxy_url(proxy.base_url()))
|
||||
.expect("proxy client should build");
|
||||
|
||||
let client = OpenAiCompatClient::new("openai-test-key", OpenAiCompatConfig::openai())
|
||||
.with_http_client(proxied_http)
|
||||
.with_base_url("http://origin.invalid/v1");
|
||||
let response = client
|
||||
.send_message(&MessageRequest {
|
||||
model: "gpt-4o".to_string(),
|
||||
..sample_request(false)
|
||||
})
|
||||
.await
|
||||
.expect("proxy should return the OpenAI-compatible response");
|
||||
|
||||
assert_eq!(response.total_tokens(), 7);
|
||||
let captured = state.lock().await;
|
||||
let request = captured.first().expect("proxy should capture request");
|
||||
assert_eq!(request.path, "http://origin.invalid/v1/chat/completions");
|
||||
assert_eq!(
|
||||
request.headers.get("authorization").map(String::as_str),
|
||||
Some("Bearer openai-test-key")
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(clippy::await_holding_lock)]
|
||||
#[tokio::test]
|
||||
async fn provider_client_dispatches_xai_requests_from_env() {
|
||||
|
||||
@@ -221,11 +221,11 @@ const SLASH_COMMAND_SPECS: &[SlashCommandSpec] = &[
|
||||
SlashCommandSpec {
|
||||
name: "session",
|
||||
aliases: &[],
|
||||
summary: "List, check, switch, fork, or delete managed local sessions",
|
||||
summary: "List, switch, fork, or delete managed local sessions",
|
||||
argument_hint: Some(
|
||||
"[list|exists <session-id>|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
"[list|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
),
|
||||
resume_supported: true,
|
||||
resume_supported: false,
|
||||
},
|
||||
SlashCommandSpec {
|
||||
name: "plugin",
|
||||
@@ -1590,17 +1590,7 @@ fn parse_session_command(args: &[&str]) -> Result<SlashCommand, SlashCommandPars
|
||||
action: Some("list".to_string()),
|
||||
target: None,
|
||||
}),
|
||||
["list", ..] => Err(usage_error("session", "[list|exists <session-id>|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]")),
|
||||
["exists"] => Err(usage_error("session exists", "<session-id>")),
|
||||
["exists", target] => Ok(SlashCommand::Session {
|
||||
action: Some("exists".to_string()),
|
||||
target: Some((*target).to_string()),
|
||||
}),
|
||||
["exists", ..] => Err(command_error(
|
||||
"Unexpected arguments for /session exists.",
|
||||
"session",
|
||||
"/session exists <session-id>",
|
||||
)),
|
||||
["list", ..] => Err(usage_error("session", "[list|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]")),
|
||||
["switch"] => Err(usage_error("session switch", "<session-id>")),
|
||||
["switch", target] => Ok(SlashCommand::Session {
|
||||
action: Some("switch".to_string()),
|
||||
@@ -1647,10 +1637,10 @@ fn parse_session_command(args: &[&str]) -> Result<SlashCommand, SlashCommandPars
|
||||
)),
|
||||
[action, ..] => Err(command_error(
|
||||
&format!(
|
||||
"Unknown /session action '{action}'. Use list, exists <session-id>, switch <session-id>, fork [branch-name], or delete <session-id> [--force]."
|
||||
"Unknown /session action '{action}'. Use list, switch <session-id>, fork [branch-name], or delete <session-id> [--force]."
|
||||
),
|
||||
"session",
|
||||
"/session [list|exists <session-id>|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
"/session [list|switch <session-id>|fork [branch-name]|delete <session-id> [--force]]",
|
||||
)),
|
||||
}
|
||||
}
|
||||
@@ -2381,40 +2371,6 @@ pub fn handle_skills_slash_command(args: Option<&str>, cwd: &Path) -> std::io::R
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
Ok(render_skills_report(&skills))
|
||||
}
|
||||
Some(args) if args.starts_with("list ") => {
|
||||
let filter = args["list ".len()..].trim().to_lowercase();
|
||||
let roots = discover_skill_roots(cwd);
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
let filtered: Vec<_> = skills
|
||||
.into_iter()
|
||||
.filter(|s| s.name.to_lowercase().contains(&filter))
|
||||
.collect();
|
||||
Ok(render_skills_report(&filtered))
|
||||
}
|
||||
Some("show" | "info" | "describe") => {
|
||||
let roots = discover_skill_roots(cwd);
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
Ok(render_skills_report(&skills))
|
||||
}
|
||||
Some(args)
|
||||
if args.starts_with("show ")
|
||||
|| args.starts_with("info ")
|
||||
|| args.starts_with("describe ") =>
|
||||
{
|
||||
let name = args
|
||||
.split_once(' ')
|
||||
.map(|(_, name)| name)
|
||||
.unwrap_or_default()
|
||||
.trim()
|
||||
.to_lowercase();
|
||||
let roots = discover_skill_roots(cwd);
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
let matched: Vec<_> = skills
|
||||
.into_iter()
|
||||
.filter(|s| s.name.to_lowercase() == name)
|
||||
.collect();
|
||||
Ok(render_skills_report(&matched))
|
||||
}
|
||||
Some("install") => Ok(render_skills_usage(Some("install"))),
|
||||
Some(args) if args.starts_with("install ") => {
|
||||
let target = args["install ".len()..].trim();
|
||||
@@ -2446,40 +2402,6 @@ pub fn handle_skills_slash_command_json(args: Option<&str>, cwd: &Path) -> std::
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
Ok(render_skills_report_json(&skills))
|
||||
}
|
||||
Some(args) if args.starts_with("list ") => {
|
||||
let filter = args["list ".len()..].trim().to_lowercase();
|
||||
let roots = discover_skill_roots(cwd);
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
let filtered: Vec<_> = skills
|
||||
.into_iter()
|
||||
.filter(|s| s.name.to_lowercase().contains(&filter))
|
||||
.collect();
|
||||
Ok(render_skills_report_json(&filtered))
|
||||
}
|
||||
Some("show" | "info" | "describe") => {
|
||||
let roots = discover_skill_roots(cwd);
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
Ok(render_skills_report_json(&skills))
|
||||
}
|
||||
Some(args)
|
||||
if args.starts_with("show ")
|
||||
|| args.starts_with("info ")
|
||||
|| args.starts_with("describe ") =>
|
||||
{
|
||||
let name = args
|
||||
.split_once(' ')
|
||||
.map(|(_, name)| name)
|
||||
.unwrap_or_default()
|
||||
.trim()
|
||||
.to_lowercase();
|
||||
let roots = discover_skill_roots(cwd);
|
||||
let skills = load_skills_from_roots(&roots)?;
|
||||
let matched: Vec<_> = skills
|
||||
.into_iter()
|
||||
.filter(|s| s.name.to_lowercase() == name)
|
||||
.collect();
|
||||
Ok(render_skills_report_json(&matched))
|
||||
}
|
||||
Some("install") => Ok(render_skills_usage_json(Some("install"))),
|
||||
Some(args) if args.starts_with("install ") => {
|
||||
let target = args["install ".len()..].trim();
|
||||
@@ -2497,27 +2419,10 @@ pub fn handle_skills_slash_command_json(args: Option<&str>, cwd: &Path) -> std::
|
||||
#[must_use]
|
||||
pub fn classify_skills_slash_command(args: Option<&str>) -> SkillSlashDispatch {
|
||||
match normalize_optional_args(args) {
|
||||
None | Some("list" | "help" | "-h" | "--help" | "show" | "info" | "describe") => {
|
||||
SkillSlashDispatch::Local
|
||||
}
|
||||
Some(args)
|
||||
if args
|
||||
.split_whitespace()
|
||||
.any(|part| matches!(part, "-h" | "--help")) =>
|
||||
{
|
||||
SkillSlashDispatch::Local
|
||||
}
|
||||
None | Some("list" | "help" | "-h" | "--help") => SkillSlashDispatch::Local,
|
||||
Some(args) if args == "install" || args.starts_with("install ") => {
|
||||
SkillSlashDispatch::Local
|
||||
}
|
||||
Some(args)
|
||||
if args.starts_with("list ")
|
||||
|| args.starts_with("show ")
|
||||
|| args.starts_with("info ")
|
||||
|| args.starts_with("describe ") =>
|
||||
{
|
||||
SkillSlashDispatch::Local
|
||||
}
|
||||
Some(args) => SkillSlashDispatch::Invoke(format!("${}", args.trim_start_matches('/'))),
|
||||
}
|
||||
}
|
||||
@@ -2632,7 +2537,6 @@ pub fn resolve_skill_path(cwd: &Path, skill: &str) -> std::io::Result<PathBuf> {
|
||||
))
|
||||
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn render_mcp_report_for(
|
||||
loader: &ConfigLoader,
|
||||
cwd: &Path,
|
||||
@@ -2692,45 +2596,10 @@ fn render_mcp_report_for(
|
||||
)),
|
||||
}
|
||||
}
|
||||
Some(args) if args.split_whitespace().next() == Some("list") && args.contains(' ') => {
|
||||
// `mcp list <filter>` — list does not accept arguments; treat as unsupported action.
|
||||
Ok(render_mcp_unsupported_action_text(
|
||||
args,
|
||||
"list accepts no filter argument; use `claw mcp list`",
|
||||
))
|
||||
}
|
||||
Some(args) if matches!(args.split_whitespace().next(), Some("info" | "describe")) => {
|
||||
Ok(render_mcp_unsupported_action_text(
|
||||
args,
|
||||
"use `claw mcp show <server>` to inspect a server",
|
||||
))
|
||||
}
|
||||
Some(args) => Ok(render_mcp_usage(Some(args))),
|
||||
}
|
||||
}
|
||||
|
||||
fn render_mcp_unsupported_action_text(action: &str, hint: &str) -> String {
|
||||
format!(
|
||||
"MCP\n Error unsupported action '{action}'\n Hint {hint}\n Usage /mcp [list|show <server>|help]"
|
||||
)
|
||||
}
|
||||
|
||||
fn render_mcp_unsupported_action_json(action: &str, hint: &str) -> Value {
|
||||
json!({
|
||||
"kind": "mcp",
|
||||
"action": "error",
|
||||
"ok": false,
|
||||
"error_kind": "unsupported_action",
|
||||
"requested_action": action,
|
||||
"hint": hint,
|
||||
"usage": {
|
||||
"slash_command": "/mcp [list|show <server>|help]",
|
||||
"direct_cli": "claw mcp [list|show <server>|help]",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn render_mcp_report_json_for(
|
||||
loader: &ConfigLoader,
|
||||
cwd: &Path,
|
||||
@@ -2754,8 +2623,10 @@ fn render_mcp_report_json_for(
|
||||
// runs, the existing serializer adds `status: "ok"` below.
|
||||
match loader.load() {
|
||||
Ok(runtime_config) => {
|
||||
let mut value =
|
||||
render_mcp_summary_report_json(cwd, runtime_config.mcp().servers());
|
||||
let mut value = render_mcp_summary_report_json(
|
||||
cwd,
|
||||
runtime_config.mcp().servers(),
|
||||
);
|
||||
if let Some(map) = value.as_object_mut() {
|
||||
map.insert("status".to_string(), Value::String("ok".to_string()));
|
||||
map.insert("config_load_error".to_string(), Value::Null);
|
||||
@@ -2811,18 +2682,6 @@ fn render_mcp_report_json_for(
|
||||
})),
|
||||
}
|
||||
}
|
||||
Some(args) if args.split_whitespace().next() == Some("list") && args.contains(' ') => {
|
||||
Ok(render_mcp_unsupported_action_json(
|
||||
args,
|
||||
"list accepts no filter argument; use `claw mcp list`",
|
||||
))
|
||||
}
|
||||
Some(args) if matches!(args.split_whitespace().next(), Some("info" | "describe")) => {
|
||||
Ok(render_mcp_unsupported_action_json(
|
||||
args,
|
||||
"use `claw mcp show <server>` to inspect a server",
|
||||
))
|
||||
}
|
||||
Some(args) => Ok(render_mcp_usage_json(Some(args))),
|
||||
}
|
||||
}
|
||||
@@ -3802,7 +3661,6 @@ fn render_mcp_server_report(
|
||||
format!(" Working directory {}", cwd.display()),
|
||||
format!(" Name {server_name}"),
|
||||
format!(" Scope {}", config_source_label(server.scope)),
|
||||
format!(" Required {}", server.required),
|
||||
format!(
|
||||
" Transport {}",
|
||||
mcp_transport_label(&server.config)
|
||||
@@ -4201,7 +4059,6 @@ fn mcp_server_details_json(config: &McpServerConfig) -> Value {
|
||||
fn mcp_server_json(name: &str, server: &ScopedMcpServerConfig) -> Value {
|
||||
json!({
|
||||
"name": name,
|
||||
"required": server.required,
|
||||
"scope": config_source_json(server.scope),
|
||||
"transport": mcp_transport_json(&server.config),
|
||||
"summary": mcp_server_summary(&server.config),
|
||||
@@ -4329,8 +4186,8 @@ mod tests {
|
||||
DefinitionSource, SkillOrigin, SkillRoot, SkillSlashDispatch, SlashCommand,
|
||||
};
|
||||
use plugins::{
|
||||
PluginError, PluginKind, PluginLifecycle, PluginLoadFailure, PluginManager,
|
||||
PluginManagerConfig, PluginMetadata, PluginSummary,
|
||||
PluginError, PluginKind, PluginLoadFailure, PluginManager, PluginManagerConfig,
|
||||
PluginMetadata, PluginSummary,
|
||||
};
|
||||
use runtime::{
|
||||
CompactionConfig, ConfigLoader, ContentBlock, ConversationMessage, MessageRole, Session,
|
||||
@@ -4604,13 +4461,6 @@ mod tests {
|
||||
target: Some("abc123".to_string())
|
||||
}))
|
||||
);
|
||||
assert_eq!(
|
||||
SlashCommand::parse("/session exists abc123"),
|
||||
Ok(Some(SlashCommand::Session {
|
||||
action: Some("exists".to_string()),
|
||||
target: Some("abc123".to_string())
|
||||
}))
|
||||
);
|
||||
assert_eq!(
|
||||
SlashCommand::parse("/plugins install demo"),
|
||||
Ok(Some(SlashCommand::Plugins {
|
||||
@@ -4771,32 +4621,6 @@ mod tests {
|
||||
assert!(agents_error.contains(" Usage /agents [list|help]"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn skills_show_and_list_filter_do_not_invoke_model() {
|
||||
// `show`, `info`, `list <filter>` must route to Local, not Invoke.
|
||||
// Regression for: `claw skills show plan` unexpectedly spawned a model session.
|
||||
for token in &["show", "info", "describe"] {
|
||||
assert_eq!(
|
||||
classify_skills_slash_command(Some(token)),
|
||||
SkillSlashDispatch::Local,
|
||||
"`skills {token}` alone must be Local"
|
||||
);
|
||||
}
|
||||
for prefix in &["show ", "info ", "list ", "describe "] {
|
||||
let arg = format!("{prefix}plan");
|
||||
assert_eq!(
|
||||
classify_skills_slash_command(Some(&arg)),
|
||||
SkillSlashDispatch::Local,
|
||||
"`skills {arg}` must be Local, not Invoke"
|
||||
);
|
||||
}
|
||||
// Bare invocable tokens still dispatch to Invoke.
|
||||
assert_eq!(
|
||||
classify_skills_slash_command(Some("plan")),
|
||||
SkillSlashDispatch::Invoke("$plan".to_string()),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn accepts_skills_invocation_arguments_for_prompt_dispatch() {
|
||||
assert_eq!(
|
||||
@@ -4819,38 +4643,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mcp_unsupported_actions_return_typed_error_not_generic_help() {
|
||||
// `mcp info <name>` and `mcp list <filter>` must return typed errors, not raw help.
|
||||
// Regression for #504: these previously fell through to render_mcp_usage with
|
||||
// unexpected=arg, giving no machine-readable error_kind.
|
||||
use crate::handle_mcp_slash_command_json;
|
||||
use std::path::PathBuf;
|
||||
let cwd = PathBuf::from("/tmp");
|
||||
|
||||
let info_json = handle_mcp_slash_command_json(Some("info nonexistent"), &cwd)
|
||||
.expect("info nonexistent should not error at IO level");
|
||||
assert_eq!(info_json["kind"], "mcp");
|
||||
assert_eq!(info_json["ok"], false);
|
||||
assert_eq!(info_json["error_kind"], "unsupported_action");
|
||||
assert!(info_json["hint"]
|
||||
.as_str()
|
||||
.unwrap_or_default()
|
||||
.contains("show"));
|
||||
|
||||
let list_filter_json = handle_mcp_slash_command_json(Some("list nonexistent"), &cwd)
|
||||
.expect("list nonexistent should not error at IO level");
|
||||
assert_eq!(list_filter_json["kind"], "mcp");
|
||||
assert_eq!(list_filter_json["ok"], false);
|
||||
assert_eq!(list_filter_json["error_kind"], "unsupported_action");
|
||||
|
||||
let describe_json = handle_mcp_slash_command_json(Some("describe myserver"), &cwd)
|
||||
.expect("describe myserver should not error at IO level");
|
||||
assert_eq!(describe_json["kind"], "mcp");
|
||||
assert_eq!(describe_json["ok"], false);
|
||||
assert_eq!(describe_json["error_kind"], "unsupported_action");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_invalid_mcp_arguments() {
|
||||
let show_error = parse_error_message("/mcp show alpha beta");
|
||||
@@ -5148,7 +4940,6 @@ mod tests {
|
||||
root: None,
|
||||
},
|
||||
enabled: true,
|
||||
lifecycle: PluginLifecycle::default(),
|
||||
},
|
||||
PluginSummary {
|
||||
metadata: PluginMetadata {
|
||||
@@ -5162,7 +4953,6 @@ mod tests {
|
||||
root: None,
|
||||
},
|
||||
enabled: false,
|
||||
lifecycle: PluginLifecycle::default(),
|
||||
},
|
||||
]);
|
||||
|
||||
@@ -5189,7 +4979,6 @@ mod tests {
|
||||
root: None,
|
||||
},
|
||||
enabled: true,
|
||||
lifecycle: PluginLifecycle::default(),
|
||||
}],
|
||||
&[PluginLoadFailure::new(
|
||||
PathBuf::from("/tmp/broken-plugin"),
|
||||
@@ -5604,7 +5393,6 @@ mod tests {
|
||||
"command": "uvx",
|
||||
"args": ["alpha-server"],
|
||||
"env": {"ALPHA_TOKEN": "secret"},
|
||||
"required": true,
|
||||
"toolCallTimeoutMs": 1200
|
||||
},
|
||||
"remote": {
|
||||
@@ -5650,7 +5438,6 @@ mod tests {
|
||||
let show = super::render_mcp_report_for(&loader, &workspace, Some("show alpha"))
|
||||
.expect("mcp show report should render");
|
||||
assert!(show.contains("Name alpha"));
|
||||
assert!(show.contains("Required true"));
|
||||
assert!(show.contains("Command uvx"));
|
||||
assert!(show.contains("Args alpha-server"));
|
||||
assert!(show.contains("Env keys ALPHA_TOKEN"));
|
||||
@@ -5683,7 +5470,6 @@ mod tests {
|
||||
"command": "uvx",
|
||||
"args": ["alpha-server"],
|
||||
"env": {"ALPHA_TOKEN": "secret"},
|
||||
"required": true,
|
||||
"toolCallTimeoutMs": 1200
|
||||
},
|
||||
"remote": {
|
||||
@@ -5720,7 +5506,6 @@ mod tests {
|
||||
assert_eq!(list["action"], "list");
|
||||
assert_eq!(list["configured_servers"], 2);
|
||||
assert_eq!(list["servers"][0]["name"], "alpha");
|
||||
assert_eq!(list["servers"][0]["required"], true);
|
||||
assert_eq!(list["servers"][0]["transport"]["id"], "stdio");
|
||||
assert_eq!(list["servers"][0]["details"]["command"], "uvx");
|
||||
assert_eq!(list["servers"][1]["name"], "remote");
|
||||
@@ -5736,7 +5521,6 @@ mod tests {
|
||||
assert_eq!(show["action"], "show");
|
||||
assert_eq!(show["found"], true);
|
||||
assert_eq!(show["server"]["name"], "alpha");
|
||||
assert_eq!(show["server"]["required"], true);
|
||||
assert_eq!(show["server"]["details"]["env_keys"][0], "ALPHA_TOKEN");
|
||||
assert_eq!(show["server"]["details"]["tool_call_timeout_ms"], 1200);
|
||||
|
||||
|
||||
@@ -648,7 +648,6 @@ impl RegisteredPlugin {
|
||||
PluginSummary {
|
||||
metadata: self.metadata().clone(),
|
||||
enabled: self.enabled,
|
||||
lifecycle: self.definition.lifecycle().clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -657,18 +656,6 @@ impl RegisteredPlugin {
|
||||
pub struct PluginSummary {
|
||||
pub metadata: PluginMetadata,
|
||||
pub enabled: bool,
|
||||
pub lifecycle: PluginLifecycle,
|
||||
}
|
||||
|
||||
impl PluginSummary {
|
||||
#[must_use]
|
||||
pub fn lifecycle_state(&self) -> &'static str {
|
||||
if self.enabled {
|
||||
"ready"
|
||||
} else {
|
||||
"disabled"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -3332,7 +3319,7 @@ mod tests {
|
||||
let config_home = temp_dir("installed-report-home");
|
||||
let bundled_root = temp_dir("installed-report-bundled");
|
||||
let install_root = config_home.join("plugins").join("installed");
|
||||
write_lifecycle_plugin(&install_root.join("valid"), "installed-valid", "1.0.0");
|
||||
write_external_plugin(&install_root.join("valid"), "installed-valid", "1.0.0");
|
||||
write_broken_plugin(&install_root.join("broken"), "installed-broken");
|
||||
|
||||
let mut config = PluginManagerConfig::new(&config_home);
|
||||
@@ -3347,14 +3334,6 @@ mod tests {
|
||||
|
||||
// then
|
||||
assert!(report.registry().contains("installed-valid@external"));
|
||||
let summaries = report.summaries();
|
||||
let valid = summaries
|
||||
.iter()
|
||||
.find(|summary| summary.metadata.id == "installed-valid@external")
|
||||
.expect("valid plugin summary should be present");
|
||||
assert_eq!(valid.lifecycle_state(), "disabled");
|
||||
assert_eq!(valid.lifecycle.init.len(), 1);
|
||||
assert_eq!(valid.lifecycle.shutdown.len(), 1);
|
||||
assert_eq!(report.failures().len(), 1);
|
||||
assert!(report.failures()[0]
|
||||
.plugin_root
|
||||
|
||||
@@ -1,502 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
/// Machine-readable policy exception scope that an approval token may override.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalScope {
|
||||
pub policy: String,
|
||||
pub action: String,
|
||||
pub repository: Option<String>,
|
||||
pub branch: Option<String>,
|
||||
}
|
||||
|
||||
impl ApprovalScope {
|
||||
#[must_use]
|
||||
pub fn new(policy: impl Into<String>, action: impl Into<String>) -> Self {
|
||||
Self {
|
||||
policy: policy.into(),
|
||||
action: action.into(),
|
||||
repository: None,
|
||||
branch: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_repository(mut self, repository: impl Into<String>) -> Self {
|
||||
self.repository = Some(repository.into());
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_branch(mut self, branch: impl Into<String>) -> Self {
|
||||
self.branch = Some(branch.into());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor/session hop recorded when an approval is delegated or consumed.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalDelegationHop {
|
||||
pub actor: String,
|
||||
pub session_id: Option<String>,
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
impl ApprovalDelegationHop {
|
||||
#[must_use]
|
||||
pub fn new(actor: impl Into<String>, reason: impl Into<String>) -> Self {
|
||||
Self {
|
||||
actor: actor.into(),
|
||||
session_id: None,
|
||||
reason: reason.into(),
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_session_id(mut self, session_id: impl Into<String>) -> Self {
|
||||
self.session_id = Some(session_id.into());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Current lifecycle state for a policy-exception approval token.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ApprovalTokenStatus {
|
||||
Pending,
|
||||
Granted,
|
||||
Consumed,
|
||||
Expired,
|
||||
Revoked,
|
||||
}
|
||||
|
||||
impl ApprovalTokenStatus {
|
||||
#[must_use]
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
Self::Pending => "approval_pending",
|
||||
Self::Granted => "approval_granted",
|
||||
Self::Consumed => "approval_consumed",
|
||||
Self::Expired => "approval_expired",
|
||||
Self::Revoked => "approval_revoked",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Typed policy errors returned when a token cannot authorize a blocked action.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ApprovalTokenError {
|
||||
NoApproval,
|
||||
ApprovalPending,
|
||||
ApprovalExpired,
|
||||
ApprovalRevoked,
|
||||
ApprovalAlreadyConsumed,
|
||||
ScopeMismatch {
|
||||
expected: Box<ApprovalScope>,
|
||||
actual: Box<ApprovalScope>,
|
||||
},
|
||||
UnauthorizedDelegate {
|
||||
expected: String,
|
||||
actual: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl ApprovalTokenError {
|
||||
#[must_use]
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
Self::NoApproval => "no_approval",
|
||||
Self::ApprovalPending => "approval_pending",
|
||||
Self::ApprovalExpired => "approval_expired",
|
||||
Self::ApprovalRevoked => "approval_revoked",
|
||||
Self::ApprovalAlreadyConsumed => "approval_already_consumed",
|
||||
Self::ScopeMismatch { .. } => "approval_scope_mismatch",
|
||||
Self::UnauthorizedDelegate { .. } => "approval_unauthorized_delegate",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Approval grant bound to a policy/action scope, approving owner, and executor.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ApprovalTokenGrant {
|
||||
pub token: String,
|
||||
pub scope: ApprovalScope,
|
||||
pub approving_actor: String,
|
||||
pub approved_executor: String,
|
||||
pub status: ApprovalTokenStatus,
|
||||
pub expires_at_epoch_seconds: Option<u64>,
|
||||
pub max_uses: u32,
|
||||
pub uses: u32,
|
||||
delegation_chain: Vec<ApprovalDelegationHop>,
|
||||
}
|
||||
|
||||
impl ApprovalTokenGrant {
    /// Create a grant in `Pending` status: recorded, but unusable until
    /// [`approve`](Self::approve) flips it to `Granted`.
    ///
    /// Defaults: no expiry, single use (`max_uses = 1`), empty delegation chain.
    #[must_use]
    pub fn pending(
        token: impl Into<String>,
        scope: ApprovalScope,
        approving_actor: impl Into<String>,
        approved_executor: impl Into<String>,
    ) -> Self {
        Self {
            token: token.into(),
            scope,
            approving_actor: approving_actor.into(),
            approved_executor: approved_executor.into(),
            status: ApprovalTokenStatus::Pending,
            expires_at_epoch_seconds: None,
            max_uses: 1,
            uses: 0,
            delegation_chain: Vec::new(),
        }
    }

    /// Convenience constructor: a pending grant that is immediately approved.
    #[must_use]
    pub fn granted(
        token: impl Into<String>,
        scope: ApprovalScope,
        approving_actor: impl Into<String>,
        approved_executor: impl Into<String>,
    ) -> Self {
        Self::pending(token, scope, approving_actor, approved_executor).approve()
    }

    /// Builder-style approval: consumes `self` and returns it with status
    /// set to `Granted`.
    #[must_use]
    pub fn approve(mut self) -> Self {
        self.status = ApprovalTokenStatus::Granted;
        self
    }

    /// Builder: set the expiry timestamp in epoch seconds.
    #[must_use]
    pub fn expires_at(mut self, epoch_seconds: u64) -> Self {
        self.expires_at_epoch_seconds = Some(epoch_seconds);
        self
    }

    /// Builder: set the use budget. Clamped to at least 1 so a grant can
    /// never be created in an already-consumed state.
    #[must_use]
    pub fn with_max_uses(mut self, max_uses: u32) -> Self {
        self.max_uses = max_uses.max(1);
        self
    }

    /// Builder: append one hop to the delegation audit trail.
    #[must_use]
    pub fn with_delegation_hop(mut self, hop: ApprovalDelegationHop) -> Self {
        self.delegation_chain.push(hop);
        self
    }

    /// Read-only view of the recorded delegation hops, in insertion order.
    #[must_use]
    pub fn delegation_chain(&self) -> &[ApprovalDelegationHop] {
        &self.delegation_chain
    }
}
|
||||
|
||||
/// Auditable result of verifying or consuming an approval token.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ApprovalTokenAudit {
    /// Token the audit refers to.
    pub token: String,
    /// Scope the grant authorizes.
    pub scope: ApprovalScope,
    /// Actor who approved the grant.
    pub approving_actor: String,
    /// Actor that performed (or attempted) the action.
    pub executing_actor: String,
    /// Grant status at the moment the audit was produced.
    pub status: ApprovalTokenStatus,
    /// True when the executing actor differs from the approving actor.
    pub delegated_execution: bool,
    /// Hop-by-hop trail from approver to executor (synthesized if the grant
    /// recorded no explicit hops).
    pub delegation_chain: Vec<ApprovalDelegationHop>,
    /// Uses spent so far on the grant.
    pub uses: u32,
    /// Maximum uses the grant allows.
    pub max_uses: u32,
}
|
||||
|
||||
/// In-memory approval-token ledger with one-time-use and replay protection.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct ApprovalTokenLedger {
    // Keyed by token string. BTreeMap gives deterministic ordering for the
    // derived Debug/PartialEq output.
    grants: BTreeMap<String, ApprovalTokenGrant>,
}
|
||||
|
||||
impl ApprovalTokenLedger {
    /// Create an empty ledger.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Insert a grant, keyed by its token string. An existing grant with the
    /// same token is replaced.
    pub fn insert(&mut self, grant: ApprovalTokenGrant) {
        self.grants.insert(grant.token.clone(), grant);
    }

    /// Look up a grant by token.
    #[must_use]
    pub fn get(&self, token: &str) -> Option<&ApprovalTokenGrant> {
        self.grants.get(token)
    }

    /// Revoke a grant unconditionally (whatever its current status) and
    /// return an audit record. Fails only when the token is unknown.
    ///
    /// The audit lists the grant's approved executor as the executing actor,
    /// since revocation itself has no external executor.
    pub fn revoke(&mut self, token: &str) -> Result<ApprovalTokenAudit, ApprovalTokenError> {
        let grant = self
            .grants
            .get_mut(token)
            .ok_or(ApprovalTokenError::NoApproval)?;
        grant.status = ApprovalTokenStatus::Revoked;
        Ok(Self::audit_for(grant, &grant.approved_executor))
    }

    /// Read-only check that `token` authorizes `scope` for `executing_actor`
    /// at `now_epoch_seconds`. Does NOT spend a use — see [`consume`](Self::consume).
    pub fn verify(
        &self,
        token: &str,
        scope: &ApprovalScope,
        executing_actor: &str,
        now_epoch_seconds: u64,
    ) -> Result<ApprovalTokenAudit, ApprovalTokenError> {
        let grant = self
            .grants
            .get(token)
            .ok_or(ApprovalTokenError::NoApproval)?;
        Self::validate_grant(grant, scope, executing_actor, now_epoch_seconds)?;
        Ok(Self::audit_for(grant, executing_actor))
    }

    /// Validate and then spend one use of the grant. Once `uses` reaches
    /// `max_uses` the grant flips to `Consumed`, so replays fail with
    /// `ApprovalAlreadyConsumed`.
    pub fn consume(
        &mut self,
        token: &str,
        scope: &ApprovalScope,
        executing_actor: &str,
        now_epoch_seconds: u64,
    ) -> Result<ApprovalTokenAudit, ApprovalTokenError> {
        let grant = self
            .grants
            .get_mut(token)
            .ok_or(ApprovalTokenError::NoApproval)?;
        Self::validate_grant(grant, scope, executing_actor, now_epoch_seconds)?;
        grant.uses += 1;
        if grant.uses >= grant.max_uses {
            grant.status = ApprovalTokenStatus::Consumed;
        }
        Ok(Self::audit_for(grant, executing_actor))
    }

    /// Shared validation for `verify`/`consume`.
    ///
    /// Check order is deliberate and determines which error is surfaced when
    /// several apply: status, then expiry, then use budget, then scope, then
    /// executor identity. The unit tests assert this precedence.
    fn validate_grant(
        grant: &ApprovalTokenGrant,
        scope: &ApprovalScope,
        executing_actor: &str,
        now_epoch_seconds: u64,
    ) -> Result<(), ApprovalTokenError> {
        match grant.status {
            ApprovalTokenStatus::Pending => return Err(ApprovalTokenError::ApprovalPending),
            ApprovalTokenStatus::Consumed => {
                return Err(ApprovalTokenError::ApprovalAlreadyConsumed)
            }
            ApprovalTokenStatus::Expired => return Err(ApprovalTokenError::ApprovalExpired),
            ApprovalTokenStatus::Revoked => return Err(ApprovalTokenError::ApprovalRevoked),
            ApprovalTokenStatus::Granted => {}
        }

        // Expiry is inclusive: a grant is still valid AT expires_at; only
        // `now > expires_at` is rejected. Status is not rewritten here —
        // expiry is evaluated lazily against the caller-supplied clock.
        if grant
            .expires_at_epoch_seconds
            .is_some_and(|expires_at| now_epoch_seconds > expires_at)
        {
            return Err(ApprovalTokenError::ApprovalExpired);
        }

        // Defensive replay guard: even if status were still Granted, an
        // exhausted use budget blocks further use.
        if grant.uses >= grant.max_uses {
            return Err(ApprovalTokenError::ApprovalAlreadyConsumed);
        }

        // Scope must match exactly — no widening or narrowing is tolerated.
        if grant.scope != *scope {
            return Err(ApprovalTokenError::ScopeMismatch {
                expected: Box::new(grant.scope.clone()),
                actual: Box::new(scope.clone()),
            });
        }

        // Only the named executor may act under this grant.
        if grant.approved_executor != executing_actor {
            return Err(ApprovalTokenError::UnauthorizedDelegate {
                expected: grant.approved_executor.clone(),
                actual: executing_actor.to_string(),
            });
        }

        Ok(())
    }

    /// Build the audit record for a grant/executor pair.
    ///
    /// If the grant recorded no hops, a synthetic "approval granted" hop for
    /// the approver is added; if the executor differs from the approver and
    /// is not already in the chain, a "delegated execution" hop is appended,
    /// so the trail always ends at the actual executor.
    fn audit_for(grant: &ApprovalTokenGrant, executing_actor: &str) -> ApprovalTokenAudit {
        let mut delegation_chain = grant.delegation_chain.clone();
        if delegation_chain.is_empty() {
            delegation_chain.push(ApprovalDelegationHop::new(
                grant.approving_actor.clone(),
                "approval granted",
            ));
        }
        if grant.approving_actor != executing_actor
            && !delegation_chain
                .iter()
                .any(|hop| hop.actor == executing_actor)
        {
            delegation_chain.push(ApprovalDelegationHop::new(
                executing_actor.to_string(),
                "delegated execution",
            ));
        }

        ApprovalTokenAudit {
            token: grant.token.clone(),
            scope: grant.scope.clone(),
            approving_actor: grant.approving_actor.clone(),
            executing_actor: executing_actor.to_string(),
            status: grant.status,
            delegated_execution: grant.approving_actor != executing_actor,
            delegation_chain,
            uses: grant.uses,
            max_uses: grant.max_uses,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{
        ApprovalDelegationHop, ApprovalScope, ApprovalTokenError, ApprovalTokenGrant,
        ApprovalTokenLedger, ApprovalTokenStatus,
    };

    // Unknown tokens and pending (not-yet-approved) tokens must both fail
    // verification; only an owner-granted token verifies, and the resulting
    // audit records the approver/executor split as delegated execution.
    #[test]
    fn approval_token_blocks_until_owner_grants_policy_exception() {
        let mut ledger = ApprovalTokenLedger::new();
        let scope = ApprovalScope::new("main_push_forbidden", "git push")
            .with_repository("sisyphus/claw-code")
            .with_branch("main");
        ledger.insert(ApprovalTokenGrant::pending(
            "tok-pending",
            scope.clone(),
            "repo-owner",
            "release-bot",
        ));

        assert!(matches!(
            ledger.verify("tok-missing", &scope, "release-bot", 10),
            Err(ApprovalTokenError::NoApproval)
        ));
        assert!(matches!(
            ledger.verify("tok-pending", &scope, "release-bot", 10),
            Err(ApprovalTokenError::ApprovalPending)
        ));

        ledger.insert(ApprovalTokenGrant::granted(
            "tok-granted",
            scope.clone(),
            "repo-owner",
            "release-bot",
        ));
        let audit = ledger
            .verify("tok-granted", &scope, "release-bot", 10)
            .expect("owner approval should verify");

        assert_eq!(audit.status, ApprovalTokenStatus::Granted);
        assert_eq!(audit.approving_actor, "repo-owner");
        assert_eq!(audit.executing_actor, "release-bot");
        assert!(audit.delegated_execution);
    }

    // A default (max_uses = 1) grant flips to Consumed on the first consume
    // and rejects any replay attempt afterwards.
    #[test]
    fn approval_token_is_one_time_use_and_rejects_replay() {
        let mut ledger = ApprovalTokenLedger::new();
        let scope = ApprovalScope::new("release_requires_owner", "release publish")
            .with_repository("sisyphus/claw-code");
        ledger.insert(ApprovalTokenGrant::granted(
            "tok-once",
            scope.clone(),
            "owner",
            "release-bot",
        ));

        let first = ledger
            .consume("tok-once", &scope, "release-bot", 10)
            .expect("first use should consume token");
        assert_eq!(first.status, ApprovalTokenStatus::Consumed);
        assert_eq!(first.uses, 1);

        assert!(matches!(
            ledger.consume("tok-once", &scope, "release-bot", 11),
            Err(ApprovalTokenError::ApprovalAlreadyConsumed)
        ));
        assert_eq!(
            ledger.get("tok-once").map(|grant| grant.status),
            Some(ApprovalTokenStatus::Consumed)
        );
    }

    // Exact-scope matching, clock-based expiry (expires_at is inclusive:
    // valid at 20, expired at 21), and revocation must all block use.
    #[test]
    fn approval_token_rejects_scope_expansion_expiry_and_revocation() {
        let mut ledger = ApprovalTokenLedger::new();
        let scope = ApprovalScope::new("main_push_forbidden", "git push")
            .with_repository("sisyphus/claw-code")
            .with_branch("main");
        let dev_scope = ApprovalScope::new("main_push_forbidden", "git push")
            .with_repository("sisyphus/claw-code")
            .with_branch("dev");

        ledger.insert(
            ApprovalTokenGrant::granted("tok-expiring", scope.clone(), "owner", "bot")
                .expires_at(20),
        );

        assert!(matches!(
            ledger.verify("tok-expiring", &dev_scope, "bot", 10),
            Err(ApprovalTokenError::ScopeMismatch { .. })
        ));
        assert!(matches!(
            ledger.verify("tok-expiring", &scope, "bot", 21),
            Err(ApprovalTokenError::ApprovalExpired)
        ));

        ledger.insert(ApprovalTokenGrant::granted(
            "tok-revoked",
            scope.clone(),
            "owner",
            "bot",
        ));
        let revoked = ledger
            .revoke("tok-revoked")
            .expect("revocation should be audited");
        assert_eq!(revoked.status, ApprovalTokenStatus::Revoked);
        assert!(matches!(
            ledger.verify("tok-revoked", &scope, "bot", 10),
            Err(ApprovalTokenError::ApprovalRevoked)
        ));
    }

    // Recorded delegation hops (with session ids) must survive into the
    // audit; only the approved executor may consume, and the audit chain ends
    // at the executor that actually ran.
    #[test]
    fn approval_token_preserves_delegation_traceability() {
        let mut ledger = ApprovalTokenLedger::new();
        let scope = ApprovalScope::new("deploy_requires_owner", "deploy prod");
        ledger.insert(
            ApprovalTokenGrant::granted("tok-delegated", scope.clone(), "owner", "deploy-bot")
                .with_delegation_hop(
                    ApprovalDelegationHop::new("owner", "owner approval")
                        .with_session_id("session-owner"),
                )
                .with_delegation_hop(
                    ApprovalDelegationHop::new("lead-agent", "handoff to deploy bot")
                        .with_session_id("session-lead"),
                ),
        );

        assert!(matches!(
            ledger.verify("tok-delegated", &scope, "unexpected-bot", 10),
            Err(ApprovalTokenError::UnauthorizedDelegate { expected, actual })
                if expected == "deploy-bot" && actual == "unexpected-bot"
        ));

        let audit = ledger
            .consume("tok-delegated", &scope, "deploy-bot", 10)
            .expect("approved delegate should consume token");
        let actors = audit
            .delegation_chain
            .iter()
            .map(|hop| hop.actor.as_str())
            .collect::<Vec<_>>();

        assert!(audit.delegated_execution);
        assert_eq!(actors, vec!["owner", "lead-agent", "deploy-bot"]);
        assert_eq!(
            audit.delegation_chain[0].session_id.as_deref(),
            Some("session-owner")
        );
        assert_eq!(
            audit.delegation_chain[1].session_id.as_deref(),
            Some("session-lead")
        );
    }
}
|
||||
@@ -4,7 +4,6 @@ use std::process::{Command, Stdio};
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use tokio::process::Command as TokioCommand;
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::time::timeout;
|
||||
@@ -123,7 +122,7 @@ fn detect_and_emit_ship_prepared(command: &str) {
|
||||
actor: get_git_actor().unwrap_or_else(|| "unknown".to_string()),
|
||||
pr_number: None,
|
||||
};
|
||||
let _event = LaneEvent::ship_prepared(format!("{now}"), &provenance);
|
||||
let _event = LaneEvent::ship_prepared(format!("{}", now), &provenance);
|
||||
// Log to stderr as interim routing before event stream integration
|
||||
eprintln!(
|
||||
"[ship.prepared] branch={} -> main, commits={}, actor={}",
|
||||
@@ -173,14 +172,31 @@ async fn execute_bash_async(
|
||||
) -> io::Result<BashCommandOutput> {
|
||||
// Detect and emit ship provenance for git push operations
|
||||
detect_and_emit_ship_prepared(&input.command);
|
||||
|
||||
|
||||
let mut command = prepare_tokio_command(&input.command, &cwd, &sandbox_status, true);
|
||||
|
||||
let output_result = if let Some(timeout_ms) = input.timeout {
|
||||
if let Ok(result) = timeout(Duration::from_millis(timeout_ms), command.output()).await {
|
||||
(result?, false)
|
||||
} else {
|
||||
return Ok(timeout_output(&input, timeout_ms, sandbox_status));
|
||||
match timeout(Duration::from_millis(timeout_ms), command.output()).await {
|
||||
Ok(result) => (result?, false),
|
||||
Err(_) => {
|
||||
return Ok(BashCommandOutput {
|
||||
stdout: String::new(),
|
||||
stderr: format!("Command exceeded timeout of {timeout_ms} ms"),
|
||||
raw_output_path: None,
|
||||
interrupted: true,
|
||||
is_image: None,
|
||||
background_task_id: None,
|
||||
backgrounded_by_user: None,
|
||||
assistant_auto_backgrounded: None,
|
||||
dangerously_disable_sandbox: input.dangerously_disable_sandbox,
|
||||
return_code_interpretation: Some(String::from("timeout")),
|
||||
no_output_expected: Some(true),
|
||||
structured_content: None,
|
||||
persisted_output_path: None,
|
||||
persisted_output_size: None,
|
||||
sandbox_status: Some(sandbox_status),
|
||||
});
|
||||
}
|
||||
}
|
||||
} else {
|
||||
(command.output().await?, false)
|
||||
@@ -217,67 +233,6 @@ async fn execute_bash_async(
|
||||
})
|
||||
}
|
||||
|
||||
/// Build the structured `BashCommandOutput` returned when a command exceeds
/// its timeout, distinguishing hung test runs from generic timeouts.
///
/// NOTE(review): this block sits inside diff residue in the mirrored page;
/// field set assumed to match the `BashCommandOutput` declared elsewhere in
/// the crate — confirm against the real source file.
fn timeout_output(
    input: &BashCommandInput,
    timeout_ms: u64,
    sandbox_status: SandboxStatus,
) -> BashCommandOutput {
    // Test invocations (cargo test, pytest, ...) get the distinct
    // "test.hung" interpretation so downstream triage can tell a hung test
    // suite apart from an ordinary slow command.
    let is_test = is_test_command(&input.command);
    let return_code_interpretation = if is_test { "test.hung" } else { "timeout" };
    BashCommandOutput {
        stdout: String::new(),
        stderr: format!("Command exceeded timeout of {timeout_ms} ms"),
        raw_output_path: None,
        interrupted: true,
        is_image: None,
        background_task_id: None,
        backgrounded_by_user: None,
        assistant_auto_backgrounded: None,
        dangerously_disable_sandbox: input.dangerously_disable_sandbox,
        return_code_interpretation: Some(String::from(return_code_interpretation)),
        no_output_expected: Some(true),
        // Machine-readable provenance payload mirroring the classification.
        structured_content: Some(vec![test_timeout_provenance(
            &input.command,
            timeout_ms,
            is_test,
        )]),
        persisted_output_path: None,
        persisted_output_size: None,
        sandbox_status: Some(sandbox_status),
    }
}
|
||||
|
||||
/// Heuristic: does this shell command look like a test-suite invocation?
///
/// Whitespace runs are collapsed and the text lowercased before matching, so
/// `CARGO   TEST` is recognized; matching is substring-based, so a marker
/// anywhere in the command line (even in a trailing comment) counts.
fn is_test_command(command: &str) -> bool {
    const TEST_MARKERS: [&str; 6] = [
        "cargo test",
        "cargo nextest",
        "npm test",
        "pnpm test",
        "yarn test",
        "pytest",
    ];
    let haystack = command
        .split_whitespace()
        .collect::<Vec<_>>()
        .join(" ")
        .to_ascii_lowercase();
    TEST_MARKERS.iter().any(|marker| haystack.contains(marker))
}
|
||||
|
||||
/// JSON provenance payload attached to a timed-out command's structured
/// content.
///
/// Event name, failure class, and classification all pivot on
/// `classified_as_test_hang` ("test.hung"/"test_hang" vs
/// "command.timeout"/"timeout"); provenance is always "bash.timeout".
fn test_timeout_provenance(
    command: &str,
    timeout_ms: u64,
    classified_as_test_hang: bool,
) -> serde_json::Value {
    json!({
        "event": if classified_as_test_hang { "test.hung" } else { "command.timeout" },
        "failureClass": if classified_as_test_hang { "test_hang" } else { "timeout" },
        "data": {
            "command": command,
            "timeoutMs": timeout_ms,
            "provenance": "bash.timeout",
            "classification": if classified_as_test_hang { "test.hung" } else { "timeout" }
        }
    })
}
|
||||
|
||||
fn sandbox_status_for_input(input: &BashCommandInput, cwd: &std::path::Path) -> SandboxStatus {
|
||||
let config = ConfigLoader::default_for(cwd).load().map_or_else(
|
||||
|_| SandboxConfig::default(),
|
||||
@@ -394,31 +349,6 @@ mod tests {
|
||||
|
||||
assert!(!output.sandbox_status.expect("sandbox status").enabled);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn timed_out_test_command_is_classified_as_hung_test_with_provenance() {
|
||||
let output = execute_bash(BashCommandInput {
|
||||
command: String::from("sleep 1 # cargo test slow_case"),
|
||||
timeout: Some(1),
|
||||
description: None,
|
||||
run_in_background: Some(false),
|
||||
dangerously_disable_sandbox: Some(false),
|
||||
namespace_restrictions: Some(false),
|
||||
isolate_network: Some(false),
|
||||
filesystem_mode: Some(FilesystemIsolationMode::WorkspaceOnly),
|
||||
allowed_mounts: None,
|
||||
})
|
||||
.expect("bash command should return structured timeout");
|
||||
|
||||
assert!(output.interrupted);
|
||||
assert_eq!(
|
||||
output.return_code_interpretation.as_deref(),
|
||||
Some("test.hung")
|
||||
);
|
||||
let structured = output.structured_content.expect("structured content");
|
||||
assert_eq!(structured[0]["event"], "test.hung");
|
||||
assert_eq!(structured[0]["data"]["provenance"], "bash.timeout");
|
||||
}
|
||||
}
|
||||
|
||||
/// Maximum output bytes before truncation (16 KiB, matching upstream).
|
||||
|
||||
@@ -212,7 +212,7 @@ fn summarize_messages(messages: &[ConversationMessage]) -> String {
|
||||
.filter_map(|block| match block {
|
||||
ContentBlock::ToolUse { name, .. } => Some(name.as_str()),
|
||||
ContentBlock::ToolResult { tool_name, .. } => Some(tool_name.as_str()),
|
||||
ContentBlock::Text { .. } | ContentBlock::Thinking { .. } => None,
|
||||
ContentBlock::Text { .. } => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
tool_names.sort_unstable();
|
||||
@@ -317,9 +317,6 @@ fn merge_compact_summaries(existing_summary: Option<&str>, new_summary: &str) ->
|
||||
fn summarize_block(block: &ContentBlock) -> String {
|
||||
let raw = match block {
|
||||
ContentBlock::Text { text } => text.clone(),
|
||||
ContentBlock::Thinking { thinking, .. } => {
|
||||
format!("thinking ({} chars)", thinking.chars().count())
|
||||
}
|
||||
ContentBlock::ToolUse { name, input, .. } => format!("tool_use {name}({input})"),
|
||||
ContentBlock::ToolResult {
|
||||
tool_name,
|
||||
@@ -381,7 +378,6 @@ fn collect_key_files(messages: &[ConversationMessage]) -> Vec<String> {
|
||||
ContentBlock::Text { text } => text.as_str(),
|
||||
ContentBlock::ToolUse { input, .. } => input.as_str(),
|
||||
ContentBlock::ToolResult { output, .. } => output.as_str(),
|
||||
ContentBlock::Thinking { thinking, .. } => thinking.as_str(),
|
||||
})
|
||||
.flat_map(extract_file_candidates)
|
||||
.collect::<Vec<_>>();
|
||||
@@ -404,7 +400,6 @@ fn first_text_block(message: &ConversationMessage) -> Option<&str> {
|
||||
ContentBlock::Text { text } if !text.trim().is_empty() => Some(text.as_str()),
|
||||
ContentBlock::ToolUse { .. }
|
||||
| ContentBlock::ToolResult { .. }
|
||||
| ContentBlock::Thinking { .. }
|
||||
| ContentBlock::Text { .. } => None,
|
||||
})
|
||||
}
|
||||
@@ -455,10 +450,6 @@ fn estimate_message_tokens(message: &ConversationMessage) -> usize {
|
||||
ContentBlock::ToolResult {
|
||||
tool_name, output, ..
|
||||
} => (tool_name.len() + output.len()) / 4 + 1,
|
||||
ContentBlock::Thinking {
|
||||
thinking,
|
||||
signature,
|
||||
} => thinking.len() / 4 + signature.as_ref().map_or(0, |value| value.len() / 4 + 1),
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
|
||||
@@ -101,7 +101,6 @@ pub struct McpConfigCollection {
|
||||
/// MCP server config paired with the scope that defined it.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ScopedMcpServerConfig {
|
||||
pub required: bool,
|
||||
pub scope: ConfigSource,
|
||||
pub config: McpServerConfig,
|
||||
}
|
||||
@@ -415,17 +414,6 @@ impl RuntimeConfig {
|
||||
pub fn trusted_roots(&self) -> &[String] {
|
||||
&self.feature_config.trusted_roots
|
||||
}
|
||||
|
||||
/// Merge config-level default trusted roots with per-call roots.
|
||||
///
|
||||
/// Config roots are defaults and are kept first; per-call roots extend the
|
||||
/// allowlist for a specific worker/session creation request. Duplicates are
|
||||
/// removed without reordering the first occurrence so evidence remains
|
||||
/// deterministic while avoiding repeated trust checks.
|
||||
#[must_use]
|
||||
pub fn trusted_roots_with_overrides(&self, per_call_roots: &[String]) -> Vec<String> {
|
||||
merge_trusted_roots(self.trusted_roots(), per_call_roots)
|
||||
}
|
||||
}
|
||||
|
||||
impl RuntimeFeatureConfig {
|
||||
@@ -495,22 +483,6 @@ impl RuntimeFeatureConfig {
|
||||
pub fn trusted_roots(&self) -> &[String] {
|
||||
&self.trusted_roots
|
||||
}
|
||||
|
||||
/// Merge this config's default trusted roots with per-call roots.
|
||||
#[must_use]
|
||||
pub fn trusted_roots_with_overrides(&self, per_call_roots: &[String]) -> Vec<String> {
|
||||
merge_trusted_roots(self.trusted_roots(), per_call_roots)
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge config-level default roots with per-call roots into one allowlist.
///
/// Config roots come first, then per-call roots; the first occurrence of each
/// root wins, so duplicates are dropped without disturbing order.
fn merge_trusted_roots(config_roots: &[String], per_call_roots: &[String]) -> Vec<String> {
    let mut merged: Vec<String> = Vec::with_capacity(config_roots.len() + per_call_roots.len());
    for root in config_roots.iter().chain(per_call_roots) {
        let already_present = merged.iter().any(|seen| seen == root);
        if !already_present {
            merged.push(root.clone());
        }
    }
    merged
}
|
||||
|
||||
impl ProviderFallbackConfig {
|
||||
@@ -753,12 +725,6 @@ fn merge_mcp_servers(
|
||||
target.insert(
|
||||
name.clone(),
|
||||
ScopedMcpServerConfig {
|
||||
required: optional_bool(
|
||||
expect_object(value, &format!("{}: mcpServers.{name}", path.display()))?,
|
||||
"required",
|
||||
&format!("{}: mcpServers.{name}", path.display()),
|
||||
)?
|
||||
.unwrap_or(false),
|
||||
scope: source,
|
||||
config: parsed,
|
||||
},
|
||||
@@ -1279,8 +1245,8 @@ fn push_unique(target: &mut Vec<String>, value: String) {
|
||||
mod tests {
|
||||
use super::{
|
||||
deep_merge_objects, parse_permission_mode_label, ConfigLoader, ConfigSource,
|
||||
McpServerConfig, McpTransport, ResolvedPermissionMode, RuntimeFeatureConfig,
|
||||
RuntimeHookConfig, RuntimePluginConfig, CLAW_SETTINGS_SCHEMA_NAME,
|
||||
McpServerConfig, McpTransport, ResolvedPermissionMode, RuntimeHookConfig,
|
||||
RuntimePluginConfig, CLAW_SETTINGS_SCHEMA_NAME,
|
||||
};
|
||||
use crate::json::JsonValue;
|
||||
use crate::sandbox::FilesystemIsolationMode;
|
||||
@@ -1536,51 +1502,6 @@ mod tests {
|
||||
fs::remove_dir_all(root).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trusted_roots_with_overrides_preserves_config_defaults_and_adds_per_call_roots() {
|
||||
// given
|
||||
let root = temp_dir();
|
||||
let cwd = root.join("project");
|
||||
let home = root.join("home").join(".claw");
|
||||
fs::create_dir_all(&home).expect("home config dir");
|
||||
fs::create_dir_all(&cwd).expect("project dir");
|
||||
fs::write(
|
||||
home.join("settings.json"),
|
||||
r#"{"trustedRoots": ["/tmp/config-default", "/tmp/shared"]}"#,
|
||||
)
|
||||
.expect("write settings");
|
||||
|
||||
// when
|
||||
let loaded = ConfigLoader::new(&cwd, &home)
|
||||
.load()
|
||||
.expect("config should load");
|
||||
let merged = loaded.trusted_roots_with_overrides(&[
|
||||
"/tmp/per-call".to_string(),
|
||||
"/tmp/shared".to_string(),
|
||||
]);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
merged,
|
||||
["/tmp/config-default", "/tmp/shared", "/tmp/per-call"]
|
||||
);
|
||||
|
||||
fs::remove_dir_all(root).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn runtime_feature_trusted_roots_with_overrides_matches_runtime_config_merge() {
|
||||
let config = RuntimeFeatureConfig {
|
||||
trusted_roots: vec!["/tmp/config".to_string()],
|
||||
..RuntimeFeatureConfig::default()
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
config.trusted_roots_with_overrides(&["/tmp/per-call".to_string()]),
|
||||
["/tmp/config", "/tmp/per-call"]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trusted_roots_default_is_empty_when_unset() {
|
||||
// given
|
||||
@@ -1617,8 +1538,7 @@ mod tests {
|
||||
"stdio-server": {
|
||||
"command": "uvx",
|
||||
"args": ["mcp-server"],
|
||||
"env": {"TOKEN": "secret"},
|
||||
"required": true
|
||||
"env": {"TOKEN": "secret"}
|
||||
},
|
||||
"remote-server": {
|
||||
"type": "http",
|
||||
@@ -1667,7 +1587,6 @@ mod tests {
|
||||
.get("stdio-server")
|
||||
.expect("stdio server should exist");
|
||||
assert_eq!(stdio_server.scope, ConfigSource::User);
|
||||
assert!(stdio_server.required);
|
||||
assert_eq!(stdio_server.transport(), McpTransport::Stdio);
|
||||
|
||||
let remote_server = loaded
|
||||
@@ -1675,7 +1594,6 @@ mod tests {
|
||||
.get("remote-server")
|
||||
.expect("remote server should exist");
|
||||
assert_eq!(remote_server.scope, ConfigSource::Local);
|
||||
assert!(!remote_server.required);
|
||||
assert_eq!(remote_server.transport(), McpTransport::Ws);
|
||||
match &remote_server.config {
|
||||
McpServerConfig::Ws(config) => {
|
||||
|
||||
@@ -28,10 +28,6 @@ pub struct ApiRequest {
|
||||
/// Streamed events emitted while processing a single assistant turn.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum AssistantEvent {
|
||||
Thinking {
|
||||
thinking: String,
|
||||
signature: Option<String>,
|
||||
},
|
||||
TextDelta(String),
|
||||
ToolUse {
|
||||
id: String,
|
||||
@@ -725,16 +721,6 @@ fn build_assistant_message(
|
||||
|
||||
for event in events {
|
||||
match event {
|
||||
AssistantEvent::Thinking {
|
||||
thinking,
|
||||
signature,
|
||||
} => {
|
||||
flush_text_block(&mut text, &mut blocks);
|
||||
blocks.push(ContentBlock::Thinking {
|
||||
thinking,
|
||||
signature,
|
||||
});
|
||||
}
|
||||
AssistantEvent::TextDelta(delta) => text.push_str(&delta),
|
||||
AssistantEvent::ToolUse { id, name, input } => {
|
||||
flush_text_block(&mut text, &mut blocks);
|
||||
@@ -1737,47 +1723,6 @@ mod tests {
|
||||
.contains("assistant stream produced no content"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn build_assistant_message_places_thinking_block_before_text_and_tool_use() {
|
||||
// given
|
||||
let events = vec![
|
||||
AssistantEvent::Thinking {
|
||||
thinking: "pondering".to_string(),
|
||||
signature: Some("sig".to_string()),
|
||||
},
|
||||
AssistantEvent::TextDelta("hello".to_string()),
|
||||
AssistantEvent::ToolUse {
|
||||
id: "tool-1".to_string(),
|
||||
name: "echo".to_string(),
|
||||
input: "payload".to_string(),
|
||||
},
|
||||
AssistantEvent::MessageStop,
|
||||
];
|
||||
|
||||
// when
|
||||
let (message, _, _) = build_assistant_message(events)
|
||||
.expect("assistant message should preserve thinking, text, and tool blocks");
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
message.blocks,
|
||||
vec![
|
||||
ContentBlock::Thinking {
|
||||
thinking: "pondering".to_string(),
|
||||
signature: Some("sig".to_string()),
|
||||
},
|
||||
ContentBlock::Text {
|
||||
text: "hello".to_string(),
|
||||
},
|
||||
ContentBlock::ToolUse {
|
||||
id: "tool-1".to_string(),
|
||||
name: "echo".to_string(),
|
||||
input: "payload".to_string(),
|
||||
},
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn static_tool_executor_rejects_unknown_tools() {
|
||||
// given
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use std::cmp::Reverse;
|
||||
use std::collections::HashSet;
|
||||
use std::fs;
|
||||
use std::io;
|
||||
use std::path::{Path, PathBuf};
|
||||
@@ -8,7 +7,7 @@ use std::time::Instant;
|
||||
use glob::Pattern;
|
||||
use regex::RegexBuilder;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use walkdir::{DirEntry, WalkDir};
|
||||
use walkdir::WalkDir;
|
||||
|
||||
/// Maximum file size that can be read (10 MB).
|
||||
const MAX_READ_SIZE: u64 = 10 * 1024 * 1024;
|
||||
@@ -16,15 +15,6 @@ const MAX_READ_SIZE: u64 = 10 * 1024 * 1024;
|
||||
/// Maximum file size that can be written (10 MB).
|
||||
const MAX_WRITE_SIZE: usize = 10 * 1024 * 1024;
|
||||
|
||||
const GLOB_SEARCH_IGNORED_DIRS: &[&str] = &[
|
||||
".git",
|
||||
"node_modules",
|
||||
".build",
|
||||
"target",
|
||||
"dist",
|
||||
"coverage",
|
||||
];
|
||||
|
||||
/// Check whether a file appears to contain binary content by examining
|
||||
/// the first chunk for NUL bytes.
|
||||
fn is_binary_file(path: &Path) -> io::Result<bool> {
|
||||
@@ -307,23 +297,11 @@ pub fn edit_file(
|
||||
|
||||
/// Expands a glob pattern and returns matching filenames.
|
||||
pub fn glob_search(pattern: &str, path: Option<&str>) -> io::Result<GlobSearchOutput> {
|
||||
glob_search_impl(pattern, path, None)
|
||||
}
|
||||
|
||||
fn glob_search_impl(
|
||||
pattern: &str,
|
||||
path: Option<&str>,
|
||||
workspace_root: Option<&Path>,
|
||||
) -> io::Result<GlobSearchOutput> {
|
||||
let started = Instant::now();
|
||||
let base_dir = path
|
||||
.map(normalize_path)
|
||||
.transpose()?
|
||||
.unwrap_or(std::env::current_dir()?);
|
||||
let canonical_root = workspace_root.map(canonicalize_workspace_root);
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
validate_workspace_boundary(&base_dir, root)?;
|
||||
}
|
||||
let search_pattern = if Path::new(pattern).is_absolute() {
|
||||
pattern.to_owned()
|
||||
} else {
|
||||
@@ -335,32 +313,14 @@ fn glob_search_impl(
|
||||
// `Assets/**/*.{cs,uxml,uss}` work correctly.
|
||||
let expanded = expand_braces(&search_pattern);
|
||||
|
||||
let mut seen = HashSet::new();
|
||||
let mut seen = std::collections::HashSet::new();
|
||||
let mut matches = Vec::new();
|
||||
for pat in &expanded {
|
||||
let compiled = Pattern::new(pat)
|
||||
let entries = glob::glob(pat)
|
||||
.map_err(|error| io::Error::new(io::ErrorKind::InvalidInput, error.to_string()))?;
|
||||
let walk_root = derive_glob_walk_root(pat);
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
let canonical_walk_root = walk_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| walk_root.clone());
|
||||
validate_workspace_boundary(&canonical_walk_root, root)?;
|
||||
}
|
||||
let entries = WalkDir::new(&walk_root)
|
||||
.into_iter()
|
||||
.filter_entry(|entry| !should_skip_glob_dir(entry));
|
||||
for entry in entries.flatten() {
|
||||
let candidate = entry.path();
|
||||
if entry.file_type().is_file()
|
||||
&& compiled.matches_path(candidate)
|
||||
&& seen.insert(candidate.to_path_buf())
|
||||
{
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
let canonical_candidate = candidate.canonicalize()?;
|
||||
validate_workspace_boundary(&canonical_candidate, root)?;
|
||||
}
|
||||
matches.push(candidate.to_path_buf());
|
||||
if entry.is_file() && seen.insert(entry.clone()) {
|
||||
matches.push(entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -389,23 +349,12 @@ fn glob_search_impl(
|
||||
|
||||
/// Runs a regex search over workspace files with optional context lines.
|
||||
pub fn grep_search(input: &GrepSearchInput) -> io::Result<GrepSearchOutput> {
|
||||
grep_search_impl(input, None)
|
||||
}
|
||||
|
||||
fn grep_search_impl(
|
||||
input: &GrepSearchInput,
|
||||
workspace_root: Option<&Path>,
|
||||
) -> io::Result<GrepSearchOutput> {
|
||||
let base_path = input
|
||||
.path
|
||||
.as_deref()
|
||||
.map(normalize_path)
|
||||
.transpose()?
|
||||
.unwrap_or(std::env::current_dir()?);
|
||||
let canonical_root = workspace_root.map(canonicalize_workspace_root);
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
validate_workspace_boundary(&base_path, root)?;
|
||||
}
|
||||
|
||||
let regex = RegexBuilder::new(&input.pattern)
|
||||
.case_insensitive(input.case_insensitive.unwrap_or(false))
|
||||
@@ -431,10 +380,6 @@ fn grep_search_impl(
|
||||
let mut total_matches = 0usize;
|
||||
|
||||
for file_path in collect_search_files(&base_path)? {
|
||||
if let Some(root) = canonical_root.as_deref() {
|
||||
let canonical_file = file_path.canonicalize()?;
|
||||
validate_workspace_boundary(&canonical_file, root)?;
|
||||
}
|
||||
if !matches_optional_filters(&file_path, glob_filter.as_ref(), file_type) {
|
||||
continue;
|
||||
}
|
||||
@@ -484,21 +429,27 @@ fn grep_search_impl(
|
||||
|
||||
let (filenames, applied_limit, applied_offset) =
|
||||
apply_limit(filenames, input.head_limit, input.offset);
|
||||
if output_mode == "content" {
|
||||
return Ok(build_grep_content_output(
|
||||
output_mode,
|
||||
let content_output = if output_mode == "content" {
|
||||
let (lines, limit, offset) = apply_limit(content_lines, input.head_limit, input.offset);
|
||||
return Ok(GrepSearchOutput {
|
||||
mode: Some(output_mode),
|
||||
num_files: filenames.len(),
|
||||
filenames,
|
||||
content_lines,
|
||||
input.head_limit,
|
||||
input.offset,
|
||||
));
|
||||
}
|
||||
num_lines: Some(lines.len()),
|
||||
content: Some(lines.join("\n")),
|
||||
num_matches: None,
|
||||
applied_limit: limit,
|
||||
applied_offset: offset,
|
||||
});
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(GrepSearchOutput {
|
||||
mode: Some(output_mode.clone()),
|
||||
num_files: filenames.len(),
|
||||
filenames,
|
||||
content: None,
|
||||
content: content_output,
|
||||
num_lines: None,
|
||||
num_matches: (output_mode == "count").then_some(total_matches),
|
||||
applied_limit,
|
||||
@@ -506,65 +457,6 @@ fn grep_search_impl(
|
||||
})
|
||||
}
|
||||
|
||||
fn build_grep_content_output(
|
||||
output_mode: String,
|
||||
filenames: Vec<String>,
|
||||
content_lines: Vec<String>,
|
||||
head_limit: Option<usize>,
|
||||
offset: Option<usize>,
|
||||
) -> GrepSearchOutput {
|
||||
let (lines, limit, offset) = apply_limit(content_lines, head_limit, offset);
|
||||
GrepSearchOutput {
|
||||
mode: Some(output_mode),
|
||||
num_files: filenames.len(),
|
||||
filenames,
|
||||
num_lines: Some(lines.len()),
|
||||
content: Some(lines.join("\n")),
|
||||
num_matches: None,
|
||||
applied_limit: limit,
|
||||
applied_offset: offset,
|
||||
}
|
||||
}
|
||||
|
||||
fn canonicalize_workspace_root(workspace_root: &Path) -> PathBuf {
|
||||
workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf())
|
||||
}
|
||||
|
||||
fn should_skip_glob_dir(entry: &DirEntry) -> bool {
|
||||
entry.file_type().is_dir()
|
||||
&& entry
|
||||
.file_name()
|
||||
.to_str()
|
||||
.is_some_and(|name| GLOB_SEARCH_IGNORED_DIRS.contains(&name))
|
||||
}
|
||||
|
||||
fn derive_glob_walk_root(pattern: &str) -> PathBuf {
|
||||
let path = Path::new(pattern);
|
||||
let mut prefix = PathBuf::new();
|
||||
let mut saw_component = false;
|
||||
|
||||
for component in path.components() {
|
||||
let text = component.as_os_str().to_string_lossy();
|
||||
if component_contains_glob(&text) {
|
||||
break;
|
||||
}
|
||||
prefix.push(component.as_os_str());
|
||||
saw_component = true;
|
||||
}
|
||||
|
||||
if saw_component {
|
||||
prefix
|
||||
} else {
|
||||
std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."))
|
||||
}
|
||||
}
|
||||
|
||||
fn component_contains_glob(component: &str) -> bool {
|
||||
component.contains('*') || component.contains('?') || component.contains('[')
|
||||
}
|
||||
|
||||
fn collect_search_files(base_path: &Path) -> io::Result<Vec<PathBuf>> {
|
||||
if base_path.is_file() {
|
||||
return Ok(vec![base_path.to_path_buf()]);
|
||||
@@ -682,7 +574,9 @@ pub fn read_file_in_workspace(
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<ReadFileOutput> {
|
||||
let absolute_path = normalize_path(path)?;
|
||||
let canonical_root = canonicalize_workspace_root(workspace_root);
|
||||
let canonical_root = workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf());
|
||||
validate_workspace_boundary(&absolute_path, &canonical_root)?;
|
||||
read_file(path, offset, limit)
|
||||
}
|
||||
@@ -695,7 +589,9 @@ pub fn write_file_in_workspace(
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<WriteFileOutput> {
|
||||
let absolute_path = normalize_path_allow_missing(path)?;
|
||||
let canonical_root = canonicalize_workspace_root(workspace_root);
|
||||
let canonical_root = workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf());
|
||||
validate_workspace_boundary(&absolute_path, &canonical_root)?;
|
||||
write_file(path, content)
|
||||
}
|
||||
@@ -710,30 +606,13 @@ pub fn edit_file_in_workspace(
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<EditFileOutput> {
|
||||
let absolute_path = normalize_path(path)?;
|
||||
let canonical_root = canonicalize_workspace_root(workspace_root);
|
||||
let canonical_root = workspace_root
|
||||
.canonicalize()
|
||||
.unwrap_or_else(|_| workspace_root.to_path_buf());
|
||||
validate_workspace_boundary(&absolute_path, &canonical_root)?;
|
||||
edit_file(path, old_string, new_string, replace_all)
|
||||
}
|
||||
|
||||
/// Expand a glob pattern with workspace boundary enforcement.
|
||||
#[allow(dead_code)]
|
||||
pub fn glob_search_in_workspace(
|
||||
pattern: &str,
|
||||
path: Option<&str>,
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<GlobSearchOutput> {
|
||||
glob_search_impl(pattern, path, Some(workspace_root))
|
||||
}
|
||||
|
||||
/// Search file contents with workspace boundary enforcement.
|
||||
#[allow(dead_code)]
|
||||
pub fn grep_search_in_workspace(
|
||||
input: &GrepSearchInput,
|
||||
workspace_root: &Path,
|
||||
) -> io::Result<GrepSearchOutput> {
|
||||
grep_search_impl(input, Some(workspace_root))
|
||||
}
|
||||
|
||||
/// Check whether a path is a symlink that resolves outside the workspace.
|
||||
#[allow(dead_code)]
|
||||
pub fn is_symlink_escape(path: &Path, workspace_root: &Path) -> io::Result<bool> {
|
||||
@@ -772,13 +651,11 @@ fn expand_braces(pattern: &str) -> Vec<String> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use super::{
|
||||
component_contains_glob, derive_glob_walk_root, edit_file, expand_braces, glob_search,
|
||||
grep_search, is_symlink_escape, read_file, read_file_in_workspace, write_file,
|
||||
write_file_in_workspace, GrepSearchInput, MAX_WRITE_SIZE,
|
||||
edit_file, expand_braces, glob_search, grep_search, is_symlink_escape, read_file,
|
||||
read_file_in_workspace, write_file, GrepSearchInput, MAX_WRITE_SIZE,
|
||||
};
|
||||
|
||||
fn temp_path(name: &str) -> std::path::PathBuf {
|
||||
@@ -878,68 +755,6 @@ mod tests {
|
||||
assert!(!is_symlink_escape(&normal, &workspace).expect("check should succeed"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn workspace_read_rejects_symlink_escape_regression_3007_class() {
|
||||
let workspace = temp_path("workspace-read-symlink-escape");
|
||||
let outside = temp_path("workspace-read-symlink-target");
|
||||
std::fs::create_dir_all(&workspace).expect("workspace dir should be created");
|
||||
std::fs::create_dir_all(&outside).expect("outside dir should be created");
|
||||
let outside_file = outside.join("secret.txt");
|
||||
std::fs::write(&outside_file, "outside secret").expect("outside file should write");
|
||||
|
||||
let link_path = workspace.join("linked-secret.txt");
|
||||
std::os::unix::fs::symlink(&outside_file, &link_path).expect("symlink should create");
|
||||
|
||||
let result =
|
||||
read_file_in_workspace(link_path.to_string_lossy().as_ref(), None, None, &workspace);
|
||||
|
||||
assert!(result.is_err(), "symlink escape must be rejected");
|
||||
let error = result.unwrap_err();
|
||||
assert_eq!(error.kind(), std::io::ErrorKind::PermissionDenied);
|
||||
assert!(
|
||||
error.to_string().contains("escapes workspace"),
|
||||
"error should explain workspace escape: {error}"
|
||||
);
|
||||
|
||||
let _ = std::fs::remove_dir_all(&workspace);
|
||||
let _ = std::fs::remove_dir_all(&outside);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn workspace_write_rejects_parent_symlink_escape_regression_3007_class() {
|
||||
let workspace = temp_path("workspace-write-symlink-escape");
|
||||
let outside = temp_path("workspace-write-symlink-target");
|
||||
std::fs::create_dir_all(&workspace).expect("workspace dir should be created");
|
||||
std::fs::create_dir_all(&outside).expect("outside dir should be created");
|
||||
|
||||
let link_dir = workspace.join("linked-outside");
|
||||
std::os::unix::fs::symlink(&outside, &link_dir).expect("symlink dir should create");
|
||||
let escaped_child = link_dir.join("created.txt");
|
||||
|
||||
let result = write_file_in_workspace(
|
||||
escaped_child.to_string_lossy().as_ref(),
|
||||
"must not escape",
|
||||
&workspace,
|
||||
);
|
||||
|
||||
assert!(result.is_err(), "parent symlink escape must be rejected");
|
||||
let error = result.unwrap_err();
|
||||
assert_eq!(error.kind(), std::io::ErrorKind::PermissionDenied);
|
||||
assert!(
|
||||
error.to_string().contains("escapes workspace"),
|
||||
"error should explain workspace escape: {error}"
|
||||
);
|
||||
assert!(
|
||||
!outside.join("created.txt").exists(),
|
||||
"write should not create through an escaping symlink"
|
||||
);
|
||||
|
||||
let _ = std::fs::remove_dir_all(&workspace);
|
||||
let _ = std::fs::remove_dir_all(&outside);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn globs_and_greps_directory() {
|
||||
let dir = temp_path("search-dir");
|
||||
@@ -1021,50 +836,4 @@ mod tests {
|
||||
);
|
||||
let _ = std::fs::remove_dir_all(&dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn glob_search_skips_common_heavy_directories() {
|
||||
let dir = temp_path("glob-ignored-dirs");
|
||||
std::fs::create_dir_all(dir.join("src")).unwrap();
|
||||
std::fs::create_dir_all(dir.join("docs")).unwrap();
|
||||
std::fs::create_dir_all(dir.join("node_modules/pkg")).unwrap();
|
||||
std::fs::create_dir_all(dir.join(".build/checkouts/pkg")).unwrap();
|
||||
std::fs::create_dir_all(dir.join("target/debug/deps")).unwrap();
|
||||
|
||||
std::fs::write(dir.join("src/AGENTS.md"), "src").unwrap();
|
||||
std::fs::write(dir.join("docs/AGENTS.md"), "docs").unwrap();
|
||||
std::fs::write(dir.join("node_modules/pkg/AGENTS.md"), "node_modules").unwrap();
|
||||
std::fs::write(dir.join(".build/checkouts/pkg/AGENTS.md"), ".build").unwrap();
|
||||
std::fs::write(dir.join("target/debug/deps/AGENTS.md"), "target").unwrap();
|
||||
|
||||
let result =
|
||||
glob_search("**/AGENTS.md", Some(dir.to_str().unwrap())).expect("glob should succeed");
|
||||
|
||||
assert_eq!(result.num_files, 2, "ignored dirs should be pruned");
|
||||
assert!(result
|
||||
.filenames
|
||||
.iter()
|
||||
.any(|path| path.ends_with("src/AGENTS.md")));
|
||||
assert!(result
|
||||
.filenames
|
||||
.iter()
|
||||
.any(|path| path.ends_with("docs/AGENTS.md")));
|
||||
assert!(!result
|
||||
.filenames
|
||||
.iter()
|
||||
.any(|path| path.contains("node_modules")
|
||||
|| path.contains(".build")
|
||||
|| path.contains("/target/")));
|
||||
|
||||
let _ = std::fs::remove_dir_all(&dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn derive_glob_walk_root_stops_at_first_glob_component() {
|
||||
let root = derive_glob_walk_root("/tmp/demo/**/AGENTS.md");
|
||||
assert_eq!(root, PathBuf::from("/tmp/demo"));
|
||||
assert!(component_contains_glob("**"));
|
||||
assert!(component_contains_glob("*.rs"));
|
||||
assert!(!component_contains_glob("src"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,399 +0,0 @@
|
||||
//! Machine-checkable conformance helpers for G004 event/report contract bundles.
|
||||
//!
|
||||
//! The harness intentionally validates JSON-shaped artifacts instead of owning the
|
||||
//! lane-event, report, or approval-token implementations. This keeps it usable by
|
||||
//! independent implementation lanes and by golden fixtures produced outside the
|
||||
//! runtime crate.
|
||||
|
||||
use serde_json::Value;
|
||||
|
||||
const BUNDLE_SCHEMA_VERSION: &str = "g004.contract.bundle.v1";
|
||||
const REPORT_SCHEMA_VERSION: &str = "g004.report.v1";
|
||||
|
||||
/// A single conformance validation failure.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct G004ConformanceError {
|
||||
/// JSON pointer-ish path to the invalid field.
|
||||
pub path: String,
|
||||
/// Human-readable reason the field failed validation.
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl G004ConformanceError {
|
||||
fn new(path: impl Into<String>, message: impl Into<String>) -> Self {
|
||||
Self {
|
||||
path: path.into(),
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate a G004 golden contract bundle.
|
||||
///
|
||||
/// The bundle shape is deliberately small and cross-lane:
|
||||
/// - `laneEvents[]` must expose stable event identity, ordering/provenance, and
|
||||
/// terminal dedupe fingerprints.
|
||||
/// - `reports[]` must expose schema identity, content hash, projection/redaction
|
||||
/// provenance, capability negotiation, fact/hypothesis/negative-evidence
|
||||
/// labels, confidence, and field-level delta attribution.
|
||||
/// - `approvalTokens[]` must expose owner/scope, delegation chain, one-time-use,
|
||||
/// and replay-prevention fields.
|
||||
#[must_use]
|
||||
pub fn validate_g004_contract_bundle(bundle: &Value) -> Vec<G004ConformanceError> {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
require_string_eq(bundle, "/schemaVersion", BUNDLE_SCHEMA_VERSION, &mut errors);
|
||||
validate_lane_events(bundle.get("laneEvents"), "/laneEvents", &mut errors);
|
||||
validate_reports(bundle.get("reports"), "/reports", &mut errors);
|
||||
validate_approval_tokens(bundle.get("approvalTokens"), "/approvalTokens", &mut errors);
|
||||
|
||||
errors
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_g004_contract_bundle_valid(bundle: &Value) -> bool {
|
||||
validate_g004_contract_bundle(bundle).is_empty()
|
||||
}
|
||||
|
||||
fn validate_lane_events(value: Option<&Value>, path: &str, errors: &mut Vec<G004ConformanceError>) {
|
||||
let Some(events) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let mut previous_seq = None;
|
||||
for (index, event) in events.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(event, "/event", &format!("{base}/event"), errors);
|
||||
require_non_empty_string_at(event, "/status", &format!("{base}/status"), errors);
|
||||
require_non_empty_string_at(event, "/emittedAt", &format!("{base}/emittedAt"), errors);
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/provenance",
|
||||
&format!("{base}/metadata/provenance"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/emitterIdentity",
|
||||
&format!("{base}/metadata/emitterIdentity"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/environmentLabel",
|
||||
&format!("{base}/metadata/environmentLabel"),
|
||||
errors,
|
||||
);
|
||||
|
||||
match get_path(event, "/metadata/seq").and_then(Value::as_u64) {
|
||||
Some(seq) => {
|
||||
if let Some(previous) = previous_seq {
|
||||
if seq <= previous {
|
||||
errors.push(G004ConformanceError::new(
|
||||
format!("{base}/metadata/seq"),
|
||||
"sequence must be strictly increasing",
|
||||
));
|
||||
}
|
||||
}
|
||||
previous_seq = Some(seq);
|
||||
}
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
format!("{base}/metadata/seq"),
|
||||
"required u64 field missing",
|
||||
)),
|
||||
}
|
||||
|
||||
if is_terminal_event_value(event.get("event")) {
|
||||
require_non_empty_string_at(
|
||||
event,
|
||||
"/metadata/eventFingerprint",
|
||||
&format!("{base}/metadata/eventFingerprint"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_reports(value: Option<&Value>, path: &str, errors: &mut Vec<G004ConformanceError>) {
|
||||
let Some(reports) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, report) in reports.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_string_eq_at(
|
||||
report,
|
||||
"/schemaVersion",
|
||||
&format!("{base}/schemaVersion"),
|
||||
REPORT_SCHEMA_VERSION,
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(report, "/reportId", &format!("{base}/reportId"), errors);
|
||||
require_non_empty_string_at(
|
||||
report,
|
||||
"/identity/contentHash",
|
||||
&format!("{base}/identity/contentHash"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
report,
|
||||
"/projection/provenance",
|
||||
&format!("{base}/projection/provenance"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
report,
|
||||
"/redaction/provenance",
|
||||
&format!("{base}/redaction/provenance"),
|
||||
errors,
|
||||
);
|
||||
non_empty_array(
|
||||
get_path(report, "/consumerCapabilities"),
|
||||
&format!("{base}/consumerCapabilities"),
|
||||
errors,
|
||||
);
|
||||
validate_findings(
|
||||
get_path(report, "/findings"),
|
||||
&format!("{base}/findings"),
|
||||
errors,
|
||||
);
|
||||
validate_field_deltas(
|
||||
get_path(report, "/fieldDeltas"),
|
||||
&format!("{base}/fieldDeltas"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_findings(value: Option<&Value>, path: &str, errors: &mut Vec<G004ConformanceError>) {
|
||||
let Some(findings) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, finding) in findings.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_one_of_at(
|
||||
finding,
|
||||
"/kind",
|
||||
&format!("{base}/kind"),
|
||||
&["fact", "hypothesis", "negative_evidence"],
|
||||
errors,
|
||||
);
|
||||
require_one_of_at(
|
||||
finding,
|
||||
"/confidence",
|
||||
&format!("{base}/confidence"),
|
||||
&["low", "medium", "high"],
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(finding, "/statement", &format!("{base}/statement"), errors);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_field_deltas(
|
||||
value: Option<&Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
let Some(deltas) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, delta) in deltas.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(delta, "/field", &format!("{base}/field"), errors);
|
||||
require_non_empty_string_at(
|
||||
delta,
|
||||
"/previousHash",
|
||||
&format!("{base}/previousHash"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
delta,
|
||||
"/currentHash",
|
||||
&format!("{base}/currentHash"),
|
||||
errors,
|
||||
);
|
||||
require_non_empty_string_at(
|
||||
delta,
|
||||
"/attribution",
|
||||
&format!("{base}/attribution"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_approval_tokens(
|
||||
value: Option<&Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
let Some(tokens) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, token) in tokens.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(token, "/tokenId", &format!("{base}/tokenId"), errors);
|
||||
require_non_empty_string_at(token, "/owner", &format!("{base}/owner"), errors);
|
||||
require_non_empty_string_at(token, "/scope", &format!("{base}/scope"), errors);
|
||||
require_non_empty_string_at(token, "/issuedAt", &format!("{base}/issuedAt"), errors);
|
||||
require_bool_true_at(token, "/oneTimeUse", &format!("{base}/oneTimeUse"), errors);
|
||||
require_non_empty_string_at(
|
||||
token,
|
||||
"/replayPreventionNonce",
|
||||
&format!("{base}/replayPreventionNonce"),
|
||||
errors,
|
||||
);
|
||||
validate_delegation_chain(
|
||||
get_path(token, "/delegationChain"),
|
||||
&format!("{base}/delegationChain"),
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_delegation_chain(
|
||||
value: Option<&Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
let Some(chain) = non_empty_array(value, path, errors) else {
|
||||
return;
|
||||
};
|
||||
|
||||
for (index, hop) in chain.iter().enumerate() {
|
||||
let base = format!("{path}/{index}");
|
||||
require_non_empty_string_at(hop, "/from", &format!("{base}/from"), errors);
|
||||
require_non_empty_string_at(hop, "/to", &format!("{base}/to"), errors);
|
||||
require_non_empty_string_at(hop, "/action", &format!("{base}/action"), errors);
|
||||
require_non_empty_string_at(hop, "/at", &format!("{base}/at"), errors);
|
||||
}
|
||||
}
|
||||
|
||||
fn non_empty_array<'a>(
|
||||
value: Option<&'a Value>,
|
||||
path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) -> Option<&'a Vec<Value>> {
|
||||
match value.and_then(Value::as_array) {
|
||||
Some(array) if !array.is_empty() => Some(array),
|
||||
Some(_) => {
|
||||
errors.push(G004ConformanceError::new(path, "array must not be empty"));
|
||||
None
|
||||
}
|
||||
None => {
|
||||
errors.push(G004ConformanceError::new(
|
||||
path,
|
||||
"required array field missing",
|
||||
));
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn require_string_eq(
|
||||
root: &Value,
|
||||
path: &str,
|
||||
expected: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
require_string_eq_at(root, path, path, expected, errors);
|
||||
}
|
||||
|
||||
fn require_string_eq_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
expected: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_str) {
|
||||
Some(actual) if actual == expected => {}
|
||||
Some(actual) => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
format!("expected '{expected}', got '{actual}'"),
|
||||
)),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required string field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_non_empty_string_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_str) {
|
||||
Some(value) if !value.trim().is_empty() => {}
|
||||
Some(_) => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"string must not be empty",
|
||||
)),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required string field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_one_of_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
allowed: &[&str],
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_str) {
|
||||
Some(value) if allowed.contains(&value) => {}
|
||||
Some(value) => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
format!("'{value}' is not one of {}", allowed.join(", ")),
|
||||
)),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required string field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn require_bool_true_at(
|
||||
root: &Value,
|
||||
pointer: &str,
|
||||
error_path: &str,
|
||||
errors: &mut Vec<G004ConformanceError>,
|
||||
) {
|
||||
match get_path(root, pointer).and_then(Value::as_bool) {
|
||||
Some(true) => {}
|
||||
Some(false) => errors.push(G004ConformanceError::new(error_path, "must be true")),
|
||||
None => errors.push(G004ConformanceError::new(
|
||||
error_path,
|
||||
"required boolean field missing",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_terminal_event_value(value: Option<&Value>) -> bool {
|
||||
matches!(
|
||||
value.and_then(Value::as_str),
|
||||
Some("lane.finished" | "lane.failed" | "lane.merged" | "lane.superseded" | "lane.closed")
|
||||
)
|
||||
}
|
||||
|
||||
fn get_path<'a>(root: &'a Value, path: &str) -> Option<&'a Value> {
|
||||
if let Some(value) = root.pointer(path) {
|
||||
return Some(value);
|
||||
}
|
||||
|
||||
let segments = path.trim_start_matches('/').split('/').collect::<Vec<_>>();
|
||||
for index in 1..segments.len() {
|
||||
let relative = format!("/{}", segments[index..].join("/"));
|
||||
if let Some(value) = root.pointer(&relative) {
|
||||
return Some(value);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
@@ -27,38 +27,19 @@ impl std::fmt::Display for GreenLevel {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct GreenContract {
|
||||
pub required_level: GreenLevel,
|
||||
pub requirements: Vec<GreenContractRequirement>,
|
||||
pub block_known_flakes: bool,
|
||||
}
|
||||
|
||||
impl GreenContract {
|
||||
#[must_use]
|
||||
pub fn new(required_level: GreenLevel) -> Self {
|
||||
Self {
|
||||
required_level,
|
||||
requirements: Vec::new(),
|
||||
block_known_flakes: false,
|
||||
}
|
||||
Self { required_level }
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn merge_ready(required_level: GreenLevel) -> Self {
|
||||
Self {
|
||||
required_level,
|
||||
requirements: vec![
|
||||
GreenContractRequirement::TestCommandProvenance,
|
||||
GreenContractRequirement::BaseBranchFreshness,
|
||||
GreenContractRequirement::RecoveryAttemptContext,
|
||||
],
|
||||
block_known_flakes: true,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate(&self, observed_level: Option<GreenLevel>) -> GreenContractOutcome {
|
||||
pub fn evaluate(self, observed_level: Option<GreenLevel>) -> GreenContractOutcome {
|
||||
match observed_level {
|
||||
Some(level) if level >= self.required_level => GreenContractOutcome::Satisfied {
|
||||
required_level: self.required_level,
|
||||
@@ -72,170 +53,11 @@ impl GreenContract {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate_evidence(&self, evidence: &GreenEvidence) -> GreenEvidenceOutcome {
|
||||
let mut missing = Vec::new();
|
||||
let mut blocking_flakes = Vec::new();
|
||||
|
||||
if evidence.observed_level < self.required_level {
|
||||
missing.push(GreenContractRequirement::RequiredLevel);
|
||||
}
|
||||
|
||||
for requirement in &self.requirements {
|
||||
match requirement {
|
||||
GreenContractRequirement::TestCommandProvenance
|
||||
if !evidence.has_passing_test_command() =>
|
||||
{
|
||||
missing.push(*requirement);
|
||||
}
|
||||
GreenContractRequirement::BaseBranchFreshness if !evidence.base_branch_fresh => {
|
||||
missing.push(*requirement);
|
||||
}
|
||||
GreenContractRequirement::RecoveryAttemptContext
|
||||
if !evidence.recovery_attempt_context_recorded =>
|
||||
{
|
||||
missing.push(*requirement);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
if self.block_known_flakes {
|
||||
blocking_flakes = evidence
|
||||
.known_flakes
|
||||
.iter()
|
||||
.filter(|flake| flake.blocks_green)
|
||||
.cloned()
|
||||
.collect();
|
||||
}
|
||||
|
||||
if missing.is_empty() && blocking_flakes.is_empty() {
|
||||
GreenEvidenceOutcome::Satisfied {
|
||||
required_level: self.required_level,
|
||||
observed_level: evidence.observed_level,
|
||||
}
|
||||
} else {
|
||||
GreenEvidenceOutcome::Unsatisfied {
|
||||
required_level: self.required_level,
|
||||
observed_level: evidence.observed_level,
|
||||
missing,
|
||||
blocking_flakes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_satisfied_by(&self, observed_level: GreenLevel) -> bool {
|
||||
pub fn is_satisfied_by(self, observed_level: GreenLevel) -> bool {
|
||||
observed_level >= self.required_level
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct GreenEvidence {
|
||||
pub observed_level: GreenLevel,
|
||||
pub test_commands: Vec<TestCommandProvenance>,
|
||||
pub base_branch_fresh: bool,
|
||||
pub known_flakes: Vec<KnownFlake>,
|
||||
pub recovery_attempt_context_recorded: bool,
|
||||
}
|
||||
|
||||
impl GreenEvidence {
|
||||
#[must_use]
|
||||
pub fn new(observed_level: GreenLevel) -> Self {
|
||||
Self {
|
||||
observed_level,
|
||||
test_commands: Vec::new(),
|
||||
base_branch_fresh: false,
|
||||
known_flakes: Vec::new(),
|
||||
recovery_attempt_context_recorded: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_test_command(mut self, command: impl Into<String>, exit_code: i32) -> Self {
|
||||
self.test_commands.push(TestCommandProvenance {
|
||||
command: command.into(),
|
||||
exit_code,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_base_branch_fresh(mut self, is_fresh: bool) -> Self {
|
||||
self.base_branch_fresh = is_fresh;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_known_flake(mut self, test_name: impl Into<String>, blocks_green: bool) -> Self {
|
||||
self.known_flakes.push(KnownFlake {
|
||||
test_name: test_name.into(),
|
||||
blocks_green,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_recovery_attempt_context(mut self, recorded: bool) -> Self {
|
||||
self.recovery_attempt_context_recorded = recorded;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn has_passing_test_command(&self) -> bool {
|
||||
self.test_commands.iter().any(TestCommandProvenance::passed)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct TestCommandProvenance {
|
||||
pub command: String,
|
||||
pub exit_code: i32,
|
||||
}
|
||||
|
||||
impl TestCommandProvenance {
|
||||
#[must_use]
|
||||
pub fn passed(&self) -> bool {
|
||||
self.exit_code == 0 && !self.command.trim().is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct KnownFlake {
|
||||
pub test_name: String,
|
||||
pub blocks_green: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum GreenContractRequirement {
|
||||
RequiredLevel,
|
||||
TestCommandProvenance,
|
||||
BaseBranchFreshness,
|
||||
RecoveryAttemptContext,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(tag = "outcome", rename_all = "snake_case")]
|
||||
pub enum GreenEvidenceOutcome {
|
||||
Satisfied {
|
||||
required_level: GreenLevel,
|
||||
observed_level: GreenLevel,
|
||||
},
|
||||
Unsatisfied {
|
||||
required_level: GreenLevel,
|
||||
observed_level: GreenLevel,
|
||||
missing: Vec<GreenContractRequirement>,
|
||||
blocking_flakes: Vec<KnownFlake>,
|
||||
},
|
||||
}
|
||||
|
||||
impl GreenEvidenceOutcome {
|
||||
#[must_use]
|
||||
pub fn is_satisfied(&self) -> bool {
|
||||
matches!(self, Self::Satisfied { .. })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(tag = "outcome", rename_all = "snake_case")]
|
||||
pub enum GreenContractOutcome {
|
||||
@@ -327,83 +149,4 @@ mod tests {
|
||||
}
|
||||
);
|
||||
}
|
||||
#[test]
fn merge_ready_contract_requires_provenance_beyond_test_level() {
    // given: workspace-level evidence with test provenance but neither
    // base-branch freshness nor recovery-attempt context attached
    let contract = GreenContract::merge_ready(GreenLevel::Workspace);
    let evidence = GreenEvidence::new(GreenLevel::Workspace)
        .with_test_command("cargo test --manifest-path rust/Cargo.toml", 0);

    // when
    let outcome = contract.evaluate_evidence(&evidence);

    // then: the contract names exactly the requirements that are missing
    let expected = GreenEvidenceOutcome::Unsatisfied {
        required_level: GreenLevel::Workspace,
        observed_level: GreenLevel::Workspace,
        missing: vec![
            GreenContractRequirement::BaseBranchFreshness,
            GreenContractRequirement::RecoveryAttemptContext,
        ],
        blocking_flakes: vec![],
    };
    assert_eq!(outcome, expected);
    assert!(!outcome.is_satisfied());
}
|
||||
|
||||
#[test]
fn merge_ready_contract_accepts_complete_test_provenance_context() {
    // given: merge-ready evidence carrying every input the contract checks
    let contract = GreenContract::merge_ready(GreenLevel::Workspace);
    let evidence = GreenEvidence::new(GreenLevel::MergeReady)
        .with_test_command("cargo test --manifest-path rust/Cargo.toml", 0)
        .with_base_branch_fresh(true)
        .with_recovery_attempt_context(true);

    // when
    let outcome = contract.evaluate_evidence(&evidence);

    // then: satisfied, recording both the required and the observed level
    let expected = GreenEvidenceOutcome::Satisfied {
        required_level: GreenLevel::Workspace,
        observed_level: GreenLevel::MergeReady,
    };
    assert_eq!(outcome, expected);
}
|
||||
|
||||
#[test]
fn known_blocking_flake_prevents_green_contract_satisfaction() {
    // given: otherwise-complete evidence plus one flake marked as
    // green-blocking
    let flaky_test = "session_lifecycle_prefers_running_process_over_idle_shell";
    let contract = GreenContract::merge_ready(GreenLevel::Workspace);
    let evidence = GreenEvidence::new(GreenLevel::MergeReady)
        .with_test_command("cargo test --manifest-path rust/Cargo.toml", 0)
        .with_base_branch_fresh(true)
        .with_recovery_attempt_context(true)
        .with_known_flake(flaky_test, true);

    // when
    let outcome = contract.evaluate_evidence(&evidence);

    // then: no requirement is missing — the blocking flake alone vetoes green
    let expected = GreenEvidenceOutcome::Unsatisfied {
        required_level: GreenLevel::Workspace,
        observed_level: GreenLevel::MergeReady,
        missing: vec![],
        blocking_flakes: vec![KnownFlake {
            test_name: flaky_test.to_string(),
            blocks_green: true,
        }],
    };
    assert_eq!(outcome, expected);
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -4,7 +4,6 @@
|
||||
//! MCP plumbing, tool-facing file operations, and the core conversation loop
|
||||
//! that drives interactive and one-shot turns.
|
||||
|
||||
mod approval_tokens;
|
||||
mod bash;
|
||||
pub mod bash_validation;
|
||||
mod bootstrap;
|
||||
@@ -14,7 +13,6 @@ mod config;
|
||||
pub mod config_validate;
|
||||
mod conversation;
|
||||
mod file_ops;
|
||||
pub mod g004_conformance;
|
||||
mod git_context;
|
||||
pub mod green_contract;
|
||||
mod hooks;
|
||||
@@ -35,7 +33,6 @@ mod policy_engine;
|
||||
mod prompt;
|
||||
pub mod recovery_recipes;
|
||||
mod remote;
|
||||
mod report_schema;
|
||||
pub mod sandbox;
|
||||
mod session;
|
||||
pub mod session_control;
|
||||
@@ -52,10 +49,6 @@ mod trust_resolver;
|
||||
mod usage;
|
||||
pub mod worker_boot;
|
||||
|
||||
pub use approval_tokens::{
|
||||
ApprovalDelegationHop, ApprovalScope, ApprovalTokenAudit, ApprovalTokenError,
|
||||
ApprovalTokenGrant, ApprovalTokenLedger, ApprovalTokenStatus,
|
||||
};
|
||||
pub use bash::{execute_bash, BashCommandInput, BashCommandOutput};
|
||||
pub use bootstrap::{BootstrapPhase, BootstrapPlan};
|
||||
pub use branch_lock::{detect_branch_lock_collisions, BranchLockCollision, BranchLockIntent};
|
||||
@@ -81,10 +74,9 @@ pub use conversation::{
|
||||
ToolExecutor, TurnSummary,
|
||||
};
|
||||
pub use file_ops::{
|
||||
edit_file, edit_file_in_workspace, glob_search, glob_search_in_workspace, grep_search,
|
||||
grep_search_in_workspace, read_file, read_file_in_workspace, write_file,
|
||||
write_file_in_workspace, EditFileOutput, GlobSearchOutput, GrepSearchInput, GrepSearchOutput,
|
||||
ReadFileOutput, StructuredPatchHunk, TextFilePayload, WriteFileOutput,
|
||||
edit_file, glob_search, grep_search, read_file, write_file, EditFileOutput, GlobSearchOutput,
|
||||
GrepSearchInput, GrepSearchOutput, ReadFileOutput, StructuredPatchHunk, TextFilePayload,
|
||||
WriteFileOutput,
|
||||
};
|
||||
pub use git_context::{GitCommitEntry, GitContext};
|
||||
pub use hooks::{
|
||||
@@ -135,31 +127,22 @@ pub use plugin_lifecycle::{
|
||||
PluginState, ResourceInfo, ServerHealth, ServerStatus, ToolInfo,
|
||||
};
|
||||
pub use policy_engine::{
|
||||
evaluate, evaluate_with_events, ApprovalToken, DiffScope, GreenLevel, LaneBlocker, LaneContext,
|
||||
PolicyAction, PolicyCondition, PolicyDecisionEvent, PolicyDecisionKind, PolicyEngine,
|
||||
PolicyEvaluation, PolicyRule, ReconcileReason, ReviewStatus,
|
||||
evaluate, DiffScope, GreenLevel, LaneBlocker, LaneContext, PolicyAction, PolicyCondition,
|
||||
PolicyEngine, PolicyRule, ReconcileReason, ReviewStatus,
|
||||
};
|
||||
pub use prompt::{
|
||||
load_system_prompt, prepend_bullets, ContextFile, ModelFamilyIdentity, ProjectContext,
|
||||
PromptBuildError, SystemPromptBuilder, FRONTIER_MODEL_NAME, SYSTEM_PROMPT_DYNAMIC_BOUNDARY,
|
||||
load_system_prompt, prepend_bullets, ContextFile, ProjectContext, PromptBuildError,
|
||||
SystemPromptBuilder, FRONTIER_MODEL_NAME, SYSTEM_PROMPT_DYNAMIC_BOUNDARY,
|
||||
};
|
||||
pub use recovery_recipes::{
|
||||
attempt_recovery, recipe_for, EscalationPolicy, FailureScenario, RecoveryAttemptState,
|
||||
RecoveryAttemptType, RecoveryCommandResult, RecoveryContext, RecoveryEvent,
|
||||
RecoveryLedgerEntry, RecoveryRecipe, RecoveryResult, RecoveryStatusReport, RecoveryStep,
|
||||
attempt_recovery, recipe_for, EscalationPolicy, FailureScenario, RecoveryContext,
|
||||
RecoveryEvent, RecoveryRecipe, RecoveryResult, RecoveryStep,
|
||||
};
|
||||
pub use remote::{
|
||||
inherited_upstream_proxy_env, no_proxy_list, read_token, upstream_proxy_ws_url,
|
||||
RemoteSessionContext, UpstreamProxyBootstrap, UpstreamProxyState, DEFAULT_REMOTE_BASE_URL,
|
||||
DEFAULT_SESSION_TOKEN_PATH, DEFAULT_SYSTEM_CA_BUNDLE, NO_PROXY_HOSTS, UPSTREAM_PROXY_ENV_KEYS,
|
||||
};
|
||||
pub use report_schema::{
|
||||
canonicalize_report, project_report, report_content_hash, report_schema_v1_registry,
|
||||
CanonicalReportV1, ClaimKind, ConsumerCapabilities, FieldDelta, FieldDeltaState,
|
||||
NegativeEvidence, NegativeFindingStatus, ProjectionProvenance, RedactionProvenance,
|
||||
ReportClaim, ReportConfidence, ReportIdentity, ReportProjectionV1, ReportSchemaField,
|
||||
ReportSchemaRegistry, SensitivityClass, DEFAULT_PROJECTION_POLICY_V1, REPORT_SCHEMA_V1,
|
||||
};
|
||||
pub use sandbox::{
|
||||
build_linux_sandbox_command, detect_container_environment, detect_container_environment_from,
|
||||
resolve_sandbox_status, resolve_sandbox_status_for_request, ContainerEnvironment,
|
||||
@@ -168,7 +151,7 @@ pub use sandbox::{
|
||||
};
|
||||
pub use session::{
|
||||
ContentBlock, ConversationMessage, MessageRole, Session, SessionCompaction, SessionError,
|
||||
SessionFork, SessionHeartbeat, SessionLiveness, SessionPromptEntry,
|
||||
SessionFork, SessionPromptEntry,
|
||||
};
|
||||
pub use sse::{IncrementalSseParser, SseEvent};
|
||||
pub use stale_base::{
|
||||
@@ -179,10 +162,7 @@ pub use stale_branch::{
|
||||
apply_policy, check_freshness, BranchFreshness, StaleBranchAction, StaleBranchEvent,
|
||||
StaleBranchPolicy,
|
||||
};
|
||||
pub use task_packet::{
|
||||
validate_packet, TaskPacket, TaskPacketValidationError, TaskResource, ValidatedPacket,
|
||||
};
|
||||
pub use task_registry::{LaneBoard, LaneBoardEntry, LaneFreshness, LaneHeartbeat};
|
||||
pub use task_packet::{validate_packet, TaskPacket, TaskPacketValidationError, ValidatedPacket};
|
||||
#[cfg(test)]
|
||||
pub use trust_resolver::{TrustConfig, TrustDecision, TrustEvent, TrustPolicy, TrustResolver};
|
||||
pub use usage::{
|
||||
|
||||
@@ -117,7 +117,7 @@ pub fn scoped_mcp_config_hash(config: &ScopedMcpServerConfig) -> String {
|
||||
format!("claudeai-proxy|{}|{}", proxy.url, proxy.id)
|
||||
}
|
||||
};
|
||||
stable_hex_hash(&format!("required:{}|{rendered}", config.required))
|
||||
stable_hex_hash(&rendered)
|
||||
}
|
||||
|
||||
fn render_command_signature(command: &[String]) -> String {
|
||||
@@ -275,12 +275,10 @@ mod tests {
|
||||
oauth: None,
|
||||
});
|
||||
let user = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::User,
|
||||
config: base_config.clone(),
|
||||
};
|
||||
let local = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: base_config,
|
||||
};
|
||||
@@ -290,7 +288,6 @@ mod tests {
|
||||
);
|
||||
|
||||
let changed = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Http(McpRemoteServerConfig {
|
||||
url: "https://vendor.example/v2/mcp".to_string(),
|
||||
|
||||
@@ -143,7 +143,6 @@ mod tests {
|
||||
#[test]
|
||||
fn bootstraps_stdio_servers_into_transport_targets() {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::User,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "uvx".to_string(),
|
||||
@@ -177,7 +176,6 @@ mod tests {
|
||||
#[test]
|
||||
fn bootstraps_remote_servers_with_oauth_auth() {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Project,
|
||||
config: McpServerConfig::Http(McpRemoteServerConfig {
|
||||
url: "https://vendor.example/mcp".to_string(),
|
||||
@@ -215,7 +213,6 @@ mod tests {
|
||||
#[test]
|
||||
fn bootstraps_websocket_and_sdk_transports_without_oauth() {
|
||||
let ws = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Ws(McpWebSocketServerConfig {
|
||||
url: "wss://vendor.example/mcp".to_string(),
|
||||
@@ -224,7 +221,6 @@ mod tests {
|
||||
}),
|
||||
};
|
||||
let sdk = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Sdk(McpSdkServerConfig {
|
||||
name: "sdk-server".to_string(),
|
||||
|
||||
@@ -230,7 +230,6 @@ pub struct ManagedMcpTool {
|
||||
pub struct UnsupportedMcpServer {
|
||||
pub server_name: String,
|
||||
pub transport: McpTransport,
|
||||
pub required: bool,
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
@@ -238,7 +237,6 @@ pub struct UnsupportedMcpServer {
|
||||
pub struct McpDiscoveryFailure {
|
||||
pub server_name: String,
|
||||
pub phase: McpLifecyclePhase,
|
||||
pub required: bool,
|
||||
pub error: String,
|
||||
pub recoverable: bool,
|
||||
pub context: BTreeMap<String, String>,
|
||||
@@ -368,7 +366,7 @@ impl McpServerManagerError {
|
||||
) && matches!(self, Self::Transport { .. } | Self::Timeout { .. })
|
||||
}
|
||||
|
||||
fn discovery_failure(&self, server_name: &str, required: bool) -> McpDiscoveryFailure {
|
||||
fn discovery_failure(&self, server_name: &str) -> McpDiscoveryFailure {
|
||||
let phase = self.lifecycle_phase();
|
||||
let recoverable = self.recoverable();
|
||||
let context = self.error_context();
|
||||
@@ -376,7 +374,6 @@ impl McpServerManagerError {
|
||||
McpDiscoveryFailure {
|
||||
server_name: server_name.to_string(),
|
||||
phase,
|
||||
required,
|
||||
error: self.to_string(),
|
||||
recoverable,
|
||||
context,
|
||||
@@ -450,10 +447,7 @@ fn unsupported_server_failed_server(server: &UnsupportedMcpServer) -> McpFailedS
|
||||
McpLifecyclePhase::ServerRegistration,
|
||||
Some(server.server_name.clone()),
|
||||
server.reason.clone(),
|
||||
BTreeMap::from([
|
||||
("transport".to_string(), format!("{:?}", server.transport)),
|
||||
("required".to_string(), server.required.to_string()),
|
||||
]),
|
||||
BTreeMap::from([("transport".to_string(), format!("{:?}", server.transport))]),
|
||||
false,
|
||||
),
|
||||
}
|
||||
@@ -470,16 +464,14 @@ struct ManagedMcpServer {
|
||||
bootstrap: McpClientBootstrap,
|
||||
process: Option<McpStdioProcess>,
|
||||
initialized: bool,
|
||||
required: bool,
|
||||
}
|
||||
|
||||
impl ManagedMcpServer {
|
||||
fn new(bootstrap: McpClientBootstrap, required: bool) -> Self {
|
||||
fn new(bootstrap: McpClientBootstrap) -> Self {
|
||||
Self {
|
||||
bootstrap,
|
||||
process: None,
|
||||
initialized: false,
|
||||
required,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -506,15 +498,11 @@ impl McpServerManager {
|
||||
for (server_name, server_config) in servers {
|
||||
if server_config.transport() == McpTransport::Stdio {
|
||||
let bootstrap = McpClientBootstrap::from_scoped_config(server_name, server_config);
|
||||
managed_servers.insert(
|
||||
server_name.clone(),
|
||||
ManagedMcpServer::new(bootstrap, server_config.required),
|
||||
);
|
||||
managed_servers.insert(server_name.clone(), ManagedMcpServer::new(bootstrap));
|
||||
} else {
|
||||
unsupported_servers.push(UnsupportedMcpServer {
|
||||
server_name: server_name.clone(),
|
||||
transport: server_config.transport(),
|
||||
required: server_config.required,
|
||||
reason: format!(
|
||||
"transport {:?} is not supported by McpServerManager",
|
||||
server_config.transport()
|
||||
@@ -588,11 +576,7 @@ impl McpServerManager {
|
||||
}
|
||||
Err(error) => {
|
||||
self.clear_routes_for_server(&server_name);
|
||||
let required = self
|
||||
.servers
|
||||
.get(&server_name)
|
||||
.is_some_and(|server| server.required);
|
||||
failed_servers.push(error.discovery_failure(&server_name, required));
|
||||
failed_servers.push(error.discovery_failure(&server_name));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -606,11 +590,7 @@ impl McpServerManager {
|
||||
failure.phase,
|
||||
Some(failure.server_name.clone()),
|
||||
failure.error.clone(),
|
||||
{
|
||||
let mut context = failure.context.clone();
|
||||
context.insert("required".to_string(), failure.required.to_string());
|
||||
context
|
||||
},
|
||||
failure.context.clone(),
|
||||
failure.recoverable,
|
||||
),
|
||||
})
|
||||
@@ -1785,7 +1765,6 @@ mod tests {
|
||||
|
||||
fn sample_bootstrap(script_path: &Path) -> McpClientBootstrap {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "/bin/sh".to_string(),
|
||||
@@ -1853,7 +1832,6 @@ mod tests {
|
||||
]);
|
||||
env.extend(extra_env);
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
@@ -1896,7 +1874,6 @@ mod tests {
|
||||
#[test]
|
||||
fn rejects_non_stdio_bootstrap() {
|
||||
let config = ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Sdk(crate::config::McpSdkServerConfig {
|
||||
name: "sdk-server".to_string(),
|
||||
@@ -2333,7 +2310,6 @@ mod tests {
|
||||
let servers = BTreeMap::from([(
|
||||
"slow".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
@@ -2387,7 +2363,6 @@ mod tests {
|
||||
let servers = BTreeMap::from([(
|
||||
"broken".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
@@ -2726,7 +2701,6 @@ mod tests {
|
||||
(
|
||||
"broken".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: true,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: broken_script_path.display().to_string(),
|
||||
@@ -2748,7 +2722,6 @@ mod tests {
|
||||
);
|
||||
assert_eq!(report.failed_servers.len(), 1);
|
||||
assert_eq!(report.failed_servers[0].server_name, "broken");
|
||||
assert!(report.failed_servers[0].required);
|
||||
assert_eq!(
|
||||
report.failed_servers[0].phase,
|
||||
McpLifecyclePhase::InitializeHandshake
|
||||
@@ -2769,14 +2742,6 @@ mod tests {
|
||||
assert_eq!(degraded.working_servers, vec!["alpha".to_string()]);
|
||||
assert_eq!(degraded.failed_servers.len(), 1);
|
||||
assert_eq!(degraded.failed_servers[0].server_name, "broken");
|
||||
assert_eq!(
|
||||
degraded.failed_servers[0]
|
||||
.error
|
||||
.context
|
||||
.get("required")
|
||||
.map(String::as_str),
|
||||
Some("true")
|
||||
);
|
||||
assert_eq!(
|
||||
degraded.failed_servers[0].phase,
|
||||
McpLifecyclePhase::InitializeHandshake
|
||||
@@ -2812,7 +2777,6 @@ mod tests {
|
||||
(
|
||||
"http".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: true,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Http(McpRemoteServerConfig {
|
||||
url: "https://example.test/mcp".to_string(),
|
||||
@@ -2825,7 +2789,6 @@ mod tests {
|
||||
(
|
||||
"sdk".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Sdk(McpSdkServerConfig {
|
||||
name: "sdk-server".to_string(),
|
||||
@@ -2835,7 +2798,6 @@ mod tests {
|
||||
(
|
||||
"ws".to_string(),
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Ws(McpWebSocketServerConfig {
|
||||
url: "wss://example.test/mcp".to_string(),
|
||||
@@ -2851,14 +2813,11 @@ mod tests {
|
||||
|
||||
assert_eq!(unsupported.len(), 3);
|
||||
assert_eq!(unsupported[0].server_name, "http");
|
||||
assert!(unsupported[0].required);
|
||||
assert_eq!(unsupported[1].server_name, "sdk");
|
||||
assert_eq!(unsupported[2].server_name, "ws");
|
||||
let failed = unsupported_server_failed_server(&unsupported[0]);
|
||||
assert_eq!(failed.phase, McpLifecyclePhase::ServerRegistration);
|
||||
assert_eq!(
|
||||
failed.error.context.get("required").map(String::as_str),
|
||||
Some("true")
|
||||
unsupported_server_failed_server(&unsupported[0]).phase,
|
||||
McpLifecyclePhase::ServerRegistration
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -442,7 +442,6 @@ mod tests {
|
||||
log_path: &Path,
|
||||
) -> ScopedMcpServerConfig {
|
||||
ScopedMcpServerConfig {
|
||||
required: false,
|
||||
scope: ConfigSource::Local,
|
||||
config: McpServerConfig::Stdio(McpStdioServerConfig {
|
||||
command: "python3".to_string(),
|
||||
|
||||
@@ -61,25 +61,6 @@ pub enum PluginState {
|
||||
}
|
||||
|
||||
impl PluginState {
|
||||
#[must_use]
|
||||
pub fn startup_event(&self) -> Option<PluginLifecycleEvent> {
|
||||
match self {
|
||||
Self::Healthy => Some(PluginLifecycleEvent::StartupHealthy),
|
||||
Self::Degraded { .. } => Some(PluginLifecycleEvent::StartupDegraded),
|
||||
Self::Failed { .. } => Some(PluginLifecycleEvent::StartupFailed),
|
||||
Self::Unconfigured
|
||||
| Self::Validated
|
||||
| Self::Starting
|
||||
| Self::ShuttingDown
|
||||
| Self::Stopped => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn is_startup_terminal(&self) -> bool {
|
||||
self.startup_event().is_some()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn from_servers(servers: &[ServerHealth]) -> Self {
|
||||
if servers.is_empty() {
|
||||
@@ -141,11 +122,6 @@ pub struct PluginHealthcheck {
|
||||
}
|
||||
|
||||
impl PluginHealthcheck {
|
||||
#[must_use]
|
||||
pub fn startup_event(&self) -> Option<PluginLifecycleEvent> {
|
||||
self.state.startup_event()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn new(plugin_name: impl Into<String>, servers: Vec<ServerHealth>) -> Self {
|
||||
let state = PluginState::from_servers(&servers);
|
||||
@@ -367,41 +343,6 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn startup_event_maps_terminal_health_states() {
|
||||
// given
|
||||
let healthy =
|
||||
PluginHealthcheck::new("healthy-plugin", vec![healthy_server("alpha", &["search"])]);
|
||||
let degraded = PluginHealthcheck::new(
|
||||
"degraded-plugin",
|
||||
vec![
|
||||
healthy_server("alpha", &["search"]),
|
||||
failed_server("beta", &["write"], "connection refused"),
|
||||
],
|
||||
);
|
||||
let failed = PluginHealthcheck::new(
|
||||
"failed-plugin",
|
||||
vec![failed_server("beta", &["write"], "connection refused")],
|
||||
);
|
||||
|
||||
// then
|
||||
assert_eq!(
|
||||
healthy.startup_event(),
|
||||
Some(PluginLifecycleEvent::StartupHealthy)
|
||||
);
|
||||
assert_eq!(
|
||||
degraded.startup_event(),
|
||||
Some(PluginLifecycleEvent::StartupDegraded)
|
||||
);
|
||||
assert_eq!(
|
||||
failed.startup_event(),
|
||||
Some(PluginLifecycleEvent::StartupFailed)
|
||||
);
|
||||
assert!(healthy.state.is_startup_terminal());
|
||||
assert_eq!(PluginState::Starting.startup_event(), None);
|
||||
assert!(!PluginState::Starting.is_startup_terminal());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn full_lifecycle_happy_path() {
|
||||
// given
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub type GreenLevel = u8;
|
||||
|
||||
const STALE_BRANCH_THRESHOLD: Duration = Duration::from_hours(1);
|
||||
const STALE_BRANCH_THRESHOLD: Duration = Duration::from_secs(60 * 60);
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct PolicyRule {
|
||||
@@ -48,11 +46,6 @@ pub enum PolicyCondition {
|
||||
ReviewPassed,
|
||||
ScopedDiff,
|
||||
TimedOut { duration: Duration },
|
||||
RetryAvailable,
|
||||
RebaseRequired,
|
||||
StaleCleanupRequired,
|
||||
ApprovalTokenPresent,
|
||||
ApprovalTokenMissing,
|
||||
}
|
||||
|
||||
impl PolicyCondition {
|
||||
@@ -65,9 +58,7 @@ impl PolicyCondition {
|
||||
Self::Or(conditions) => conditions
|
||||
.iter()
|
||||
.any(|condition| condition.matches(context)),
|
||||
Self::GreenAt { level } => {
|
||||
context.green_contract_satisfied && context.green_level >= *level
|
||||
}
|
||||
Self::GreenAt { level } => context.green_level >= *level,
|
||||
Self::StaleBranch => context.branch_freshness >= STALE_BRANCH_THRESHOLD,
|
||||
Self::StartupBlocked => context.blocker == LaneBlocker::Startup,
|
||||
Self::LaneCompleted => context.completed,
|
||||
@@ -75,11 +66,6 @@ impl PolicyCondition {
|
||||
Self::ReviewPassed => context.review_status == ReviewStatus::Approved,
|
||||
Self::ScopedDiff => context.diff_scope == DiffScope::Scoped,
|
||||
Self::TimedOut { duration } => context.branch_freshness >= *duration,
|
||||
Self::RetryAvailable => context.retry_count < context.retry_limit,
|
||||
Self::RebaseRequired => context.rebase_required,
|
||||
Self::StaleCleanupRequired => context.stale_cleanup_required,
|
||||
Self::ApprovalTokenPresent => context.approval_token.is_some(),
|
||||
Self::ApprovalTokenMissing => context.approval_token.is_none(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -89,15 +75,11 @@ pub enum PolicyAction {
|
||||
MergeToDev,
|
||||
MergeForward,
|
||||
RecoverOnce,
|
||||
Retry { reason: String },
|
||||
Rebase { reason: String },
|
||||
Escalate { reason: String },
|
||||
CloseoutLane,
|
||||
CleanupSession,
|
||||
CleanupStale { reason: String },
|
||||
Reconcile { reason: ReconcileReason },
|
||||
Notify { channel: String },
|
||||
RequireApprovalToken { operation: String },
|
||||
Block { reason: String },
|
||||
Chain(Vec<PolicyAction>),
|
||||
}
|
||||
@@ -148,61 +130,16 @@ pub enum DiffScope {
|
||||
Scoped,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ApprovalToken {
|
||||
pub token_id: String,
|
||||
pub operation: String,
|
||||
pub granted_by: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum PolicyDecisionKind {
|
||||
Retry,
|
||||
Rebase,
|
||||
Merge,
|
||||
Escalate,
|
||||
StaleCleanup,
|
||||
ApprovalRequired,
|
||||
Notify,
|
||||
Block,
|
||||
Closeout,
|
||||
Reconcile,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct PolicyDecisionEvent {
|
||||
pub lane_id: String,
|
||||
pub rule_name: String,
|
||||
pub priority: u32,
|
||||
pub kind: PolicyDecisionKind,
|
||||
pub explanation: String,
|
||||
pub approval_token_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct PolicyEvaluation {
|
||||
pub actions: Vec<PolicyAction>,
|
||||
pub events: Vec<PolicyDecisionEvent>,
|
||||
}
|
||||
|
||||
#[allow(clippy::struct_excessive_bools)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct LaneContext {
|
||||
pub lane_id: String,
|
||||
pub green_level: GreenLevel,
|
||||
pub green_contract_satisfied: bool,
|
||||
pub branch_freshness: Duration,
|
||||
pub blocker: LaneBlocker,
|
||||
pub review_status: ReviewStatus,
|
||||
pub diff_scope: DiffScope,
|
||||
pub completed: bool,
|
||||
pub reconciled: bool,
|
||||
pub retry_count: u32,
|
||||
pub retry_limit: u32,
|
||||
pub rebase_required: bool,
|
||||
pub stale_cleanup_required: bool,
|
||||
pub approval_token: Option<ApprovalToken>,
|
||||
}
|
||||
|
||||
impl LaneContext {
|
||||
@@ -219,18 +156,12 @@ impl LaneContext {
|
||||
Self {
|
||||
lane_id: lane_id.into(),
|
||||
green_level,
|
||||
green_contract_satisfied: false,
|
||||
branch_freshness,
|
||||
blocker,
|
||||
review_status,
|
||||
diff_scope,
|
||||
completed,
|
||||
reconciled: false,
|
||||
retry_count: 0,
|
||||
retry_limit: 1,
|
||||
rebase_required: false,
|
||||
stale_cleanup_required: false,
|
||||
approval_token: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,51 +171,14 @@ impl LaneContext {
|
||||
Self {
|
||||
lane_id: lane_id.into(),
|
||||
green_level: 0,
|
||||
green_contract_satisfied: false,
|
||||
branch_freshness: Duration::from_secs(0),
|
||||
blocker: LaneBlocker::None,
|
||||
review_status: ReviewStatus::Pending,
|
||||
diff_scope: DiffScope::Full,
|
||||
completed: true,
|
||||
reconciled: true,
|
||||
retry_count: 0,
|
||||
retry_limit: 1,
|
||||
rebase_required: false,
|
||||
stale_cleanup_required: false,
|
||||
approval_token: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_green_contract_satisfied(mut self, satisfied: bool) -> Self {
|
||||
self.green_contract_satisfied = satisfied;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_retry_state(mut self, retry_count: u32, retry_limit: u32) -> Self {
|
||||
self.retry_count = retry_count;
|
||||
self.retry_limit = retry_limit;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_rebase_required(mut self, required: bool) -> Self {
|
||||
self.rebase_required = required;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_stale_cleanup_required(mut self, required: bool) -> Self {
|
||||
self.stale_cleanup_required = required;
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_approval_token(mut self, token: ApprovalToken) -> Self {
|
||||
self.approval_token = Some(token);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
@@ -308,119 +202,17 @@ impl PolicyEngine {
|
||||
pub fn evaluate(&self, context: &LaneContext) -> Vec<PolicyAction> {
|
||||
evaluate(self, context)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate_with_events(&self, context: &LaneContext) -> PolicyEvaluation {
|
||||
evaluate_with_events(self, context)
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate(engine: &PolicyEngine, context: &LaneContext) -> Vec<PolicyAction> {
|
||||
evaluate_with_events(engine, context).actions
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn evaluate_with_events(engine: &PolicyEngine, context: &LaneContext) -> PolicyEvaluation {
|
||||
let mut actions = Vec::new();
|
||||
let mut events = Vec::new();
|
||||
for rule in &engine.rules {
|
||||
if rule.matches(context) {
|
||||
let before = actions.len();
|
||||
rule.action.flatten_into(&mut actions);
|
||||
for action in &actions[before..] {
|
||||
events.push(decision_event(rule, context, action));
|
||||
}
|
||||
}
|
||||
}
|
||||
PolicyEvaluation { actions, events }
|
||||
}
|
||||
|
||||
fn decision_event(
|
||||
rule: &PolicyRule,
|
||||
context: &LaneContext,
|
||||
action: &PolicyAction,
|
||||
) -> PolicyDecisionEvent {
|
||||
let (kind, explanation) = match action {
|
||||
PolicyAction::MergeToDev | PolicyAction::MergeForward => (
|
||||
PolicyDecisionKind::Merge,
|
||||
format!(
|
||||
"rule '{}' allows merge action for lane {}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::RecoverOnce | PolicyAction::Retry { reason: _ } => (
|
||||
PolicyDecisionKind::Retry,
|
||||
format!(
|
||||
"rule '{}' allows retry {}/{} for lane {}",
|
||||
rule.name, context.retry_count, context.retry_limit, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::Rebase { reason } => (
|
||||
PolicyDecisionKind::Rebase,
|
||||
format!("rule '{}' requires rebase: {reason}", rule.name),
|
||||
),
|
||||
PolicyAction::Escalate { reason } => (
|
||||
PolicyDecisionKind::Escalate,
|
||||
format!(
|
||||
"rule '{}' escalates lane {}: {reason}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::CleanupStale { reason } => (
|
||||
PolicyDecisionKind::StaleCleanup,
|
||||
format!("rule '{}' requests cleanup: {reason}", rule.name),
|
||||
),
|
||||
PolicyAction::CleanupSession => (
|
||||
PolicyDecisionKind::StaleCleanup,
|
||||
format!("rule '{}' requests session cleanup", rule.name),
|
||||
),
|
||||
PolicyAction::CloseoutLane => (
|
||||
PolicyDecisionKind::Closeout,
|
||||
format!("rule '{}' closes out lane {}", rule.name, context.lane_id),
|
||||
),
|
||||
PolicyAction::Reconcile { reason } => (
|
||||
PolicyDecisionKind::Reconcile,
|
||||
format!(
|
||||
"rule '{}' reconciles lane {}: {reason:?}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::Notify { channel } => (
|
||||
PolicyDecisionKind::Notify,
|
||||
format!("rule '{}' notifies {channel}", rule.name),
|
||||
),
|
||||
PolicyAction::RequireApprovalToken { operation } => (
|
||||
PolicyDecisionKind::ApprovalRequired,
|
||||
format!(
|
||||
"rule '{}' requires approval token for {operation}",
|
||||
rule.name
|
||||
),
|
||||
),
|
||||
PolicyAction::Block { reason } => (
|
||||
PolicyDecisionKind::Block,
|
||||
format!(
|
||||
"rule '{}' blocks lane {}: {reason}",
|
||||
rule.name, context.lane_id
|
||||
),
|
||||
),
|
||||
PolicyAction::Chain(_) => (
|
||||
PolicyDecisionKind::Notify,
|
||||
format!("rule '{}' expanded a chained action", rule.name),
|
||||
),
|
||||
};
|
||||
|
||||
PolicyDecisionEvent {
|
||||
lane_id: context.lane_id.clone(),
|
||||
rule_name: rule.name.clone(),
|
||||
priority: rule.priority,
|
||||
kind,
|
||||
explanation,
|
||||
approval_token_id: context
|
||||
.approval_token
|
||||
.as_ref()
|
||||
.map(|token| token.token_id.clone()),
|
||||
}
|
||||
actions
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -428,9 +220,8 @@ mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use super::{
|
||||
evaluate, ApprovalToken, DiffScope, LaneBlocker, LaneContext, PolicyAction,
|
||||
PolicyCondition, PolicyDecisionKind, PolicyEngine, PolicyRule, ReconcileReason,
|
||||
ReviewStatus, STALE_BRANCH_THRESHOLD,
|
||||
evaluate, DiffScope, LaneBlocker, LaneContext, PolicyAction, PolicyCondition, PolicyEngine,
|
||||
PolicyRule, ReconcileReason, ReviewStatus, STALE_BRANCH_THRESHOLD,
|
||||
};
|
||||
|
||||
fn default_context() -> LaneContext {
|
||||
@@ -447,37 +238,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn merge_to_dev_rule_fires_for_green_scoped_reviewed_lane() {
|
||||
// given
|
||||
let engine = PolicyEngine::new(vec![PolicyRule::new(
|
||||
"merge-to-dev",
|
||||
PolicyCondition::And(vec![
|
||||
PolicyCondition::GreenAt { level: 2 },
|
||||
PolicyCondition::ScopedDiff,
|
||||
PolicyCondition::ReviewPassed,
|
||||
]),
|
||||
PolicyAction::MergeToDev,
|
||||
20,
|
||||
)]);
|
||||
let context = LaneContext::new(
|
||||
"lane-7",
|
||||
3,
|
||||
Duration::from_secs(5),
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
)
|
||||
.with_green_contract_satisfied(true);
|
||||
|
||||
// when
|
||||
let actions = engine.evaluate(&context);
|
||||
|
||||
// then
|
||||
assert_eq!(actions, vec![PolicyAction::MergeToDev]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn merge_rule_blocks_when_green_tests_lack_contract_provenance() {
|
||||
// given
|
||||
let engine = PolicyEngine::new(vec![PolicyRule::new(
|
||||
"merge-to-dev",
|
||||
@@ -503,7 +263,7 @@ mod tests {
|
||||
let actions = engine.evaluate(&context);
|
||||
|
||||
// then
|
||||
assert!(actions.is_empty());
|
||||
assert_eq!(actions, vec![PolicyAction::MergeToDev]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -708,8 +468,7 @@ mod tests {
|
||||
ReviewStatus::Pending,
|
||||
DiffScope::Full,
|
||||
false,
|
||||
)
|
||||
.with_green_contract_satisfied(true);
|
||||
);
|
||||
|
||||
// when
|
||||
let actions = engine.evaluate(&context);
|
||||
@@ -730,121 +489,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::duration_suboptimal_units, clippy::too_many_lines)]
|
||||
fn executable_decision_table_emits_retry_rebase_merge_escalate_cleanup_and_approval_events() {
|
||||
let engine = PolicyEngine::new(vec![
|
||||
PolicyRule::new(
|
||||
"retry-available",
|
||||
PolicyCondition::RetryAvailable,
|
||||
PolicyAction::Retry {
|
||||
reason: "transient failure".to_string(),
|
||||
},
|
||||
1,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"rebase-required",
|
||||
PolicyCondition::RebaseRequired,
|
||||
PolicyAction::Rebase {
|
||||
reason: "base branch moved".to_string(),
|
||||
},
|
||||
2,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"stale-cleanup",
|
||||
PolicyCondition::StaleCleanupRequired,
|
||||
PolicyAction::CleanupStale {
|
||||
reason: "lease expired".to_string(),
|
||||
},
|
||||
3,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"approval-required",
|
||||
PolicyCondition::ApprovalTokenMissing,
|
||||
PolicyAction::RequireApprovalToken {
|
||||
operation: "merge".to_string(),
|
||||
},
|
||||
4,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"merge-approved",
|
||||
PolicyCondition::And(vec![
|
||||
PolicyCondition::ApprovalTokenPresent,
|
||||
PolicyCondition::GreenAt { level: 2 },
|
||||
PolicyCondition::ScopedDiff,
|
||||
PolicyCondition::ReviewPassed,
|
||||
]),
|
||||
PolicyAction::MergeToDev,
|
||||
5,
|
||||
),
|
||||
PolicyRule::new(
|
||||
"retry-exhausted",
|
||||
PolicyCondition::TimedOut {
|
||||
duration: Duration::from_secs(60),
|
||||
},
|
||||
PolicyAction::Escalate {
|
||||
reason: "lane timed out".to_string(),
|
||||
},
|
||||
6,
|
||||
),
|
||||
]);
|
||||
|
||||
let missing_token_context = LaneContext::new(
|
||||
"lane-cc2",
|
||||
2,
|
||||
Duration::from_secs(90),
|
||||
LaneBlocker::None,
|
||||
ReviewStatus::Approved,
|
||||
DiffScope::Scoped,
|
||||
false,
|
||||
)
|
||||
.with_green_contract_satisfied(true)
|
||||
.with_retry_state(0, 1)
|
||||
.with_rebase_required(true)
|
||||
.with_stale_cleanup_required(true);
|
||||
|
||||
let missing = engine.evaluate_with_events(&missing_token_context);
|
||||
assert!(missing.actions.contains(&PolicyAction::Retry {
|
||||
reason: "transient failure".to_string()
|
||||
}));
|
||||
assert!(missing.actions.contains(&PolicyAction::Rebase {
|
||||
reason: "base branch moved".to_string()
|
||||
}));
|
||||
assert!(missing.actions.contains(&PolicyAction::CleanupStale {
|
||||
reason: "lease expired".to_string()
|
||||
}));
|
||||
assert!(missing
|
||||
.actions
|
||||
.contains(&PolicyAction::RequireApprovalToken {
|
||||
operation: "merge".to_string()
|
||||
}));
|
||||
assert!(missing.actions.contains(&PolicyAction::Escalate {
|
||||
reason: "lane timed out".to_string()
|
||||
}));
|
||||
assert!(missing
|
||||
.events
|
||||
.iter()
|
||||
.any(|event| event.kind == PolicyDecisionKind::ApprovalRequired
|
||||
&& event.explanation.contains("approval token")));
|
||||
|
||||
let approved_context = missing_token_context.with_approval_token(ApprovalToken {
|
||||
token_id: "approval-123".to_string(),
|
||||
operation: "merge".to_string(),
|
||||
granted_by: "leader".to_string(),
|
||||
});
|
||||
let approved = engine.evaluate_with_events(&approved_context);
|
||||
assert!(approved.actions.contains(&PolicyAction::MergeToDev));
|
||||
let merge_event = approved
|
||||
.events
|
||||
.iter()
|
||||
.find(|event| event.kind == PolicyDecisionKind::Merge)
|
||||
.expect("merge event should be emitted");
|
||||
assert_eq!(
|
||||
merge_event.approval_token_id.as_deref(),
|
||||
Some("approval-123")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reconciled_lane_emits_reconcile_and_cleanup() {
|
||||
// given — a lane where branch is already merged, no PR needed, session stale
|
||||
|
||||
@@ -43,24 +43,6 @@ pub const FRONTIER_MODEL_NAME: &str = "Claude Opus 4.6";
|
||||
const MAX_INSTRUCTION_FILE_CHARS: usize = 4_000;
|
||||
const MAX_TOTAL_INSTRUCTION_CHARS: usize = 12_000;
|
||||
|
||||
/// Neutral identity for the model family line in generated prompts.
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub enum ModelFamilyIdentity {
|
||||
#[default]
|
||||
Claude,
|
||||
Generic,
|
||||
}
|
||||
|
||||
impl ModelFamilyIdentity {
|
||||
#[must_use]
|
||||
pub const fn family_label(self) -> &'static str {
|
||||
match self {
|
||||
Self::Claude => FRONTIER_MODEL_NAME,
|
||||
Self::Generic => "an AI assistant",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Contents of an instruction file included in prompt construction.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ContextFile {
|
||||
@@ -115,7 +97,6 @@ pub struct SystemPromptBuilder {
|
||||
output_style_prompt: Option<String>,
|
||||
os_name: Option<String>,
|
||||
os_version: Option<String>,
|
||||
model_family: Option<ModelFamilyIdentity>,
|
||||
append_sections: Vec<String>,
|
||||
project_context: Option<ProjectContext>,
|
||||
config: Option<RuntimeConfig>,
|
||||
@@ -141,12 +122,6 @@ impl SystemPromptBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_model_family(mut self, model_family: ModelFamilyIdentity) -> Self {
|
||||
self.model_family = Some(model_family);
|
||||
self
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_project_context(mut self, project_context: ProjectContext) -> Self {
|
||||
self.project_context = Some(project_context);
|
||||
@@ -204,10 +179,9 @@ impl SystemPromptBuilder {
|
||||
|| "unknown".to_string(),
|
||||
|context| context.current_date.clone(),
|
||||
);
|
||||
let identity = self.model_family.unwrap_or_default();
|
||||
let mut lines = vec!["# Environment context".to_string()];
|
||||
lines.extend(prepend_bullets(vec![
|
||||
format!("Model family: {}", identity.family_label()),
|
||||
format!("Model family: {FRONTIER_MODEL_NAME}"),
|
||||
format!("Working directory: {cwd}"),
|
||||
format!("Date: {date}"),
|
||||
format!(
|
||||
@@ -460,14 +434,12 @@ pub fn load_system_prompt(
|
||||
current_date: impl Into<String>,
|
||||
os_name: impl Into<String>,
|
||||
os_version: impl Into<String>,
|
||||
model_family: ModelFamilyIdentity,
|
||||
) -> Result<Vec<String>, PromptBuildError> {
|
||||
let cwd = cwd.into();
|
||||
let project_context = ProjectContext::discover_with_git(&cwd, current_date.into())?;
|
||||
let config = ConfigLoader::default_for(&cwd).load()?;
|
||||
Ok(SystemPromptBuilder::new()
|
||||
.with_os(os_name, os_version)
|
||||
.with_model_family(model_family)
|
||||
.with_project_context(project_context)
|
||||
.with_runtime_config(config)
|
||||
.build())
|
||||
@@ -550,8 +522,7 @@ mod tests {
|
||||
use super::{
|
||||
collapse_blank_lines, display_context_path, normalize_instruction_content,
|
||||
render_instruction_content, render_instruction_files, truncate_instruction_content,
|
||||
ContextFile, ModelFamilyIdentity, ProjectContext, SystemPromptBuilder,
|
||||
SYSTEM_PROMPT_DYNAMIC_BOUNDARY,
|
||||
ContextFile, ProjectContext, SystemPromptBuilder, SYSTEM_PROMPT_DYNAMIC_BOUNDARY,
|
||||
};
|
||||
use crate::config::ConfigLoader;
|
||||
use std::fs;
|
||||
@@ -833,19 +804,13 @@ mod tests {
|
||||
std::env::set_var("HOME", &root);
|
||||
std::env::set_var("CLAW_CONFIG_HOME", root.join("missing-home"));
|
||||
std::env::set_current_dir(&root).expect("change cwd");
|
||||
let prompt = super::load_system_prompt(
|
||||
&root,
|
||||
"2026-03-31",
|
||||
"linux",
|
||||
"6.8",
|
||||
ModelFamilyIdentity::Claude,
|
||||
)
|
||||
.expect("system prompt should load")
|
||||
.join(
|
||||
"
|
||||
let prompt = super::load_system_prompt(&root, "2026-03-31", "linux", "6.8")
|
||||
.expect("system prompt should load")
|
||||
.join(
|
||||
"
|
||||
|
||||
",
|
||||
);
|
||||
);
|
||||
std::env::set_current_dir(previous).expect("restore cwd");
|
||||
if let Some(value) = original_home {
|
||||
std::env::set_var("HOME", value);
|
||||
@@ -863,50 +828,6 @@ mod tests {
|
||||
fs::remove_dir_all(root).expect("cleanup temp dir");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn renders_default_claude_model_family_identity() {
|
||||
// given: a prompt builder without an explicit model family override
|
||||
let project_context = ProjectContext {
|
||||
cwd: PathBuf::from("/tmp/project"),
|
||||
current_date: "2026-03-31".to_string(),
|
||||
..ProjectContext::default()
|
||||
};
|
||||
|
||||
// when: rendering the system prompt environment section
|
||||
let prompt = SystemPromptBuilder::new()
|
||||
.with_os("linux", "6.8")
|
||||
.with_project_context(project_context)
|
||||
.render();
|
||||
|
||||
// then: the Claude model family label is preserved by default
|
||||
assert!(prompt.contains("Model family: Claude Opus 4.6"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn renders_generic_model_family_identity_without_claude_label() {
|
||||
// given: a prompt builder with generic model family identity
|
||||
let project_context = ProjectContext {
|
||||
cwd: PathBuf::from("/tmp/project"),
|
||||
current_date: "2026-03-31".to_string(),
|
||||
..ProjectContext::default()
|
||||
};
|
||||
|
||||
// when: rendering the system prompt environment section
|
||||
let prompt = SystemPromptBuilder::new()
|
||||
.with_os("linux", "6.8")
|
||||
.with_model_family(ModelFamilyIdentity::Generic)
|
||||
.with_project_context(project_context)
|
||||
.render();
|
||||
let model_family_line = prompt
|
||||
.lines()
|
||||
.find(|line| line.contains("Model family:"))
|
||||
.expect("model family line should render");
|
||||
|
||||
// then: the model family line is neutral and excludes Claude Opus 4.6
|
||||
assert_eq!(model_family_line, " - Model family: an AI assistant");
|
||||
assert!(!model_family_line.contains("Claude Opus 4.6"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn renders_claude_code_style_sections_with_project_context() {
|
||||
let root = temp_dir();
|
||||
|
||||
@@ -45,9 +45,7 @@ impl FailureScenario {
|
||||
#[must_use]
|
||||
pub fn from_worker_failure_kind(kind: WorkerFailureKind) -> Self {
|
||||
match kind {
|
||||
WorkerFailureKind::TrustGate | WorkerFailureKind::ToolPermissionGate => {
|
||||
Self::TrustPromptUnresolved
|
||||
}
|
||||
WorkerFailureKind::TrustGate => Self::TrustPromptUnresolved,
|
||||
WorkerFailureKind::PromptDelivery => Self::PromptMisdelivery,
|
||||
WorkerFailureKind::Protocol => Self::McpHandshakeFailure,
|
||||
WorkerFailureKind::Provider | WorkerFailureKind::StartupNoEvidence => {
|
||||
@@ -121,21 +119,6 @@ pub enum RecoveryResult {
|
||||
},
|
||||
}
|
||||
|
||||
/// Type of recovery execution represented in the ledger.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RecoveryAttemptType {
|
||||
Automatic,
|
||||
}
|
||||
|
||||
/// Result for one executable recovery command/step.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RecoveryCommandResult {
|
||||
pub command: RecoveryStep,
|
||||
pub status: RecoveryAttemptState,
|
||||
pub result: String,
|
||||
}
|
||||
|
||||
/// Structured event emitted during recovery.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
@@ -150,59 +133,14 @@ pub enum RecoveryEvent {
|
||||
Escalated,
|
||||
}
|
||||
|
||||
/// Machine-readable recovery progress for one failure scenario.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RecoveryLedgerEntry {
|
||||
pub recipe_id: String,
|
||||
pub attempt_type: RecoveryAttemptType,
|
||||
pub trigger: FailureScenario,
|
||||
pub attempt_count: u32,
|
||||
pub retry_limit: u32,
|
||||
pub attempts_remaining: u32,
|
||||
pub state: RecoveryAttemptState,
|
||||
pub started_at: Option<String>,
|
||||
pub finished_at: Option<String>,
|
||||
pub command_results: Vec<RecoveryCommandResult>,
|
||||
pub result: Option<RecoveryResult>,
|
||||
pub last_failure_summary: Option<String>,
|
||||
pub escalation_reason: Option<String>,
|
||||
}
|
||||
|
||||
/// Current state of a recovery recipe attempt.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RecoveryAttemptState {
|
||||
Queued,
|
||||
Running,
|
||||
Succeeded,
|
||||
Failed,
|
||||
Exhausted,
|
||||
}
|
||||
|
||||
/// Machine-readable status projection for callers that need to
|
||||
/// distinguish an untouched scenario from an exhausted recovery.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RecoveryStatusReport {
|
||||
pub scenario: FailureScenario,
|
||||
pub attempted: bool,
|
||||
pub state: Option<RecoveryAttemptState>,
|
||||
pub attempt_count: u32,
|
||||
pub retry_limit: Option<u32>,
|
||||
pub attempts_remaining: Option<u32>,
|
||||
pub escalation_reason: Option<String>,
|
||||
}
|
||||
|
||||
/// Minimal context for tracking recovery state and emitting events.
|
||||
///
|
||||
/// Holds per-scenario attempt counts, a structured event log, a recovery
|
||||
/// attempt ledger, and an optional simulation knob for controlling step
|
||||
/// outcomes during tests.
|
||||
/// Holds per-scenario attempt counts, a structured event log, and an
|
||||
/// optional simulation knob for controlling step outcomes during tests.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct RecoveryContext {
|
||||
attempts: HashMap<FailureScenario, u32>,
|
||||
events: Vec<RecoveryEvent>,
|
||||
ledger: HashMap<FailureScenario, RecoveryLedgerEntry>,
|
||||
clock_tick: u64,
|
||||
/// Optional step index at which simulated execution fails.
|
||||
/// `None` means all steps succeed.
|
||||
fail_at_step: Option<usize>,
|
||||
@@ -232,51 +170,6 @@ impl RecoveryContext {
|
||||
pub fn attempt_count(&self, scenario: &FailureScenario) -> u32 {
|
||||
self.attempts.get(scenario).copied().unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Returns the machine-readable recovery ledger entry for a scenario.
|
||||
#[must_use]
|
||||
pub fn ledger_entry(&self, scenario: &FailureScenario) -> Option<&RecoveryLedgerEntry> {
|
||||
self.ledger.get(scenario)
|
||||
}
|
||||
|
||||
/// Returns all recovery ledger entries currently tracked by this context.
|
||||
#[must_use]
|
||||
pub fn ledger_entries(&self) -> Vec<&RecoveryLedgerEntry> {
|
||||
let mut entries: Vec<_> = self.ledger.values().collect();
|
||||
entries.sort_by(|left, right| left.recipe_id.cmp(&right.recipe_id));
|
||||
entries
|
||||
}
|
||||
|
||||
/// Returns a compact machine-readable recovery status for a scenario,
|
||||
/// including `attempted = false` when no ledger entry exists yet.
|
||||
#[must_use]
|
||||
pub fn status_report(&self, scenario: &FailureScenario) -> RecoveryStatusReport {
|
||||
self.ledger_entry(scenario).map_or(
|
||||
RecoveryStatusReport {
|
||||
scenario: *scenario,
|
||||
attempted: false,
|
||||
state: None,
|
||||
attempt_count: 0,
|
||||
retry_limit: None,
|
||||
attempts_remaining: None,
|
||||
escalation_reason: None,
|
||||
},
|
||||
|entry| RecoveryStatusReport {
|
||||
scenario: *scenario,
|
||||
attempted: entry.attempt_count > 0,
|
||||
state: Some(entry.state),
|
||||
attempt_count: entry.attempt_count,
|
||||
retry_limit: Some(entry.retry_limit),
|
||||
attempts_remaining: Some(entry.attempts_remaining),
|
||||
escalation_reason: entry.escalation_reason.clone(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn next_timestamp(&mut self) -> String {
|
||||
self.clock_tick += 1;
|
||||
format!("recovery-ledger-tick-{}", self.clock_tick)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the known recovery recipe for the given failure scenario.
|
||||
@@ -338,51 +231,18 @@ pub fn recipe_for(scenario: &FailureScenario) -> RecoveryRecipe {
|
||||
/// Looks up the recipe, enforces the one-attempt-before-escalation
|
||||
/// policy, simulates step execution (controlled by the context), and
|
||||
/// emits structured [`RecoveryEvent`]s for every attempt.
|
||||
#[allow(clippy::too_many_lines)]
|
||||
pub fn attempt_recovery(scenario: &FailureScenario, ctx: &mut RecoveryContext) -> RecoveryResult {
|
||||
let recipe = recipe_for(scenario);
|
||||
let recipe_id = scenario.to_string();
|
||||
ctx.ledger
|
||||
.entry(*scenario)
|
||||
.or_insert_with(|| RecoveryLedgerEntry {
|
||||
recipe_id: recipe_id.clone(),
|
||||
attempt_type: RecoveryAttemptType::Automatic,
|
||||
trigger: *scenario,
|
||||
attempt_count: 0,
|
||||
retry_limit: recipe.max_attempts,
|
||||
attempts_remaining: recipe.max_attempts,
|
||||
state: RecoveryAttemptState::Queued,
|
||||
started_at: None,
|
||||
finished_at: None,
|
||||
command_results: Vec::new(),
|
||||
result: None,
|
||||
last_failure_summary: None,
|
||||
escalation_reason: None,
|
||||
});
|
||||
|
||||
let current_attempts = ctx.attempt_count(scenario);
|
||||
let attempt_count = ctx.attempts.entry(*scenario).or_insert(0);
|
||||
|
||||
// Enforce one automatic recovery attempt before escalation.
|
||||
if current_attempts >= recipe.max_attempts {
|
||||
if *attempt_count >= recipe.max_attempts {
|
||||
let result = RecoveryResult::EscalationRequired {
|
||||
reason: format!(
|
||||
"max recovery attempts ({}) exceeded for {}",
|
||||
recipe.max_attempts, scenario
|
||||
),
|
||||
};
|
||||
let finished_at = ctx.next_timestamp();
|
||||
if let Some(entry) = ctx.ledger.get_mut(scenario) {
|
||||
entry.attempt_count = current_attempts;
|
||||
entry.attempts_remaining = 0;
|
||||
entry.state = RecoveryAttemptState::Exhausted;
|
||||
entry.finished_at = Some(finished_at);
|
||||
entry.result = Some(result.clone());
|
||||
let RecoveryResult::EscalationRequired { reason } = &result else {
|
||||
unreachable!("exhaustion always produces escalation");
|
||||
};
|
||||
entry.last_failure_summary = Some(reason.clone());
|
||||
entry.escalation_reason = Some(reason.clone());
|
||||
}
|
||||
ctx.events.push(RecoveryEvent::RecoveryAttempted {
|
||||
scenario: *scenario,
|
||||
recipe,
|
||||
@@ -392,44 +252,19 @@ pub fn attempt_recovery(scenario: &FailureScenario, ctx: &mut RecoveryContext) -
|
||||
return result;
|
||||
}
|
||||
|
||||
let updated_attempts = ctx.attempts.entry(*scenario).or_insert(0);
|
||||
*updated_attempts += 1;
|
||||
let updated_attempts = *updated_attempts;
|
||||
let started_at = ctx.next_timestamp();
|
||||
if let Some(entry) = ctx.ledger.get_mut(scenario) {
|
||||
entry.attempt_count = updated_attempts;
|
||||
entry.attempts_remaining = recipe.max_attempts.saturating_sub(updated_attempts);
|
||||
entry.state = RecoveryAttemptState::Running;
|
||||
entry.started_at = Some(started_at);
|
||||
entry.finished_at = None;
|
||||
entry.command_results.clear();
|
||||
entry.result = None;
|
||||
entry.last_failure_summary = None;
|
||||
entry.escalation_reason = None;
|
||||
}
|
||||
*attempt_count += 1;
|
||||
|
||||
// Execute steps, honoring the optional fail_at_step simulation.
|
||||
let fail_index = ctx.fail_at_step;
|
||||
let mut executed = Vec::new();
|
||||
let mut command_results = Vec::new();
|
||||
let mut failed = false;
|
||||
|
||||
for (i, step) in recipe.steps.iter().enumerate() {
|
||||
if fail_index == Some(i) {
|
||||
command_results.push(RecoveryCommandResult {
|
||||
command: step.clone(),
|
||||
status: RecoveryAttemptState::Failed,
|
||||
result: format!("step {i} failed for {scenario}"),
|
||||
});
|
||||
failed = true;
|
||||
break;
|
||||
}
|
||||
executed.push(step.clone());
|
||||
command_results.push(RecoveryCommandResult {
|
||||
command: step.clone(),
|
||||
status: RecoveryAttemptState::Succeeded,
|
||||
result: format!("step {i} succeeded for {scenario}"),
|
||||
});
|
||||
}
|
||||
|
||||
let result = if failed {
|
||||
@@ -451,29 +286,6 @@ pub fn attempt_recovery(scenario: &FailureScenario, ctx: &mut RecoveryContext) -
|
||||
};
|
||||
|
||||
// Emit the attempt as structured event data.
|
||||
let finished_at = ctx.next_timestamp();
|
||||
if let Some(entry) = ctx.ledger.get_mut(scenario) {
|
||||
entry.finished_at = Some(finished_at);
|
||||
entry.command_results = command_results;
|
||||
entry.result = Some(result.clone());
|
||||
match &result {
|
||||
RecoveryResult::Recovered { .. } => {
|
||||
entry.state = RecoveryAttemptState::Succeeded;
|
||||
}
|
||||
RecoveryResult::PartialRecovery { remaining, .. } => {
|
||||
entry.state = RecoveryAttemptState::Failed;
|
||||
entry.last_failure_summary = Some(format!(
|
||||
"{} step(s) remaining after partial recovery",
|
||||
remaining.len()
|
||||
));
|
||||
}
|
||||
RecoveryResult::EscalationRequired { reason } => {
|
||||
entry.state = RecoveryAttemptState::Exhausted;
|
||||
entry.last_failure_summary = Some(reason.clone());
|
||||
entry.escalation_reason = Some(reason.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx.events.push(RecoveryEvent::RecoveryAttempted {
|
||||
scenario: *scenario,
|
||||
recipe,
|
||||
@@ -685,126 +497,6 @@ mod tests {
|
||||
assert_eq!(ctx.attempt_count(&FailureScenario::PromptMisdelivery), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_context_exposes_machine_readable_ledger() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new();
|
||||
|
||||
// when
|
||||
let result = attempt_recovery(&FailureScenario::StaleBranch, &mut ctx);
|
||||
|
||||
// then
|
||||
assert_eq!(result, RecoveryResult::Recovered { steps_taken: 2 });
|
||||
let entry = ctx
|
||||
.ledger_entry(&FailureScenario::StaleBranch)
|
||||
.expect("stale branch ledger entry");
|
||||
assert_eq!(entry.recipe_id, "stale_branch");
|
||||
assert_eq!(entry.attempt_type, RecoveryAttemptType::Automatic);
|
||||
assert_eq!(entry.trigger, FailureScenario::StaleBranch);
|
||||
assert_eq!(entry.attempt_count, 1);
|
||||
assert_eq!(entry.retry_limit, 1);
|
||||
assert_eq!(entry.attempts_remaining, 0);
|
||||
assert_eq!(entry.state, RecoveryAttemptState::Succeeded);
|
||||
assert!(entry.started_at.is_some());
|
||||
assert!(entry.finished_at.is_some());
|
||||
assert_eq!(
|
||||
entry.result,
|
||||
Some(RecoveryResult::Recovered { steps_taken: 2 })
|
||||
);
|
||||
assert_eq!(entry.command_results.len(), 2);
|
||||
assert_eq!(entry.command_results[0].command, RecoveryStep::RebaseBranch);
|
||||
assert_eq!(
|
||||
entry.command_results[0].status,
|
||||
RecoveryAttemptState::Succeeded
|
||||
);
|
||||
assert_eq!(entry.last_failure_summary, None);
|
||||
assert_eq!(entry.escalation_reason, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_ledger_records_exhausted_escalation_reason() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new();
|
||||
let scenario = FailureScenario::PromptMisdelivery;
|
||||
|
||||
// when
|
||||
let _ = attempt_recovery(&scenario, &mut ctx);
|
||||
let result = attempt_recovery(&scenario, &mut ctx);
|
||||
|
||||
// then
|
||||
assert!(matches!(result, RecoveryResult::EscalationRequired { .. }));
|
||||
let entry = ctx.ledger_entry(&scenario).expect("ledger entry");
|
||||
assert_eq!(entry.state, RecoveryAttemptState::Exhausted);
|
||||
assert_eq!(entry.attempt_count, 1);
|
||||
assert_eq!(entry.attempts_remaining, 0);
|
||||
assert!(matches!(
|
||||
entry.result,
|
||||
Some(RecoveryResult::EscalationRequired { .. })
|
||||
));
|
||||
assert!(entry
|
||||
.escalation_reason
|
||||
.as_deref()
|
||||
.expect("escalation reason")
|
||||
.contains("max recovery attempts"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_status_report_distinguishes_not_attempted_from_exhausted() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new();
|
||||
let scenario = FailureScenario::PromptMisdelivery;
|
||||
|
||||
// then — no ledger entry is not the same as exhausted.
|
||||
let not_attempted = ctx.status_report(&scenario);
|
||||
assert!(!not_attempted.attempted);
|
||||
assert_eq!(not_attempted.state, None);
|
||||
assert_eq!(not_attempted.attempt_count, 0);
|
||||
assert_eq!(not_attempted.retry_limit, None);
|
||||
|
||||
// when — one allowed attempt then one extra attempt.
|
||||
let _ = attempt_recovery(&scenario, &mut ctx);
|
||||
let _ = attempt_recovery(&scenario, &mut ctx);
|
||||
|
||||
// then
|
||||
let exhausted = ctx.status_report(&scenario);
|
||||
assert!(exhausted.attempted);
|
||||
assert_eq!(exhausted.state, Some(RecoveryAttemptState::Exhausted));
|
||||
assert_eq!(exhausted.attempt_count, 1);
|
||||
assert_eq!(exhausted.retry_limit, Some(1));
|
||||
assert_eq!(exhausted.attempts_remaining, Some(0));
|
||||
assert!(exhausted
|
||||
.escalation_reason
|
||||
.as_deref()
|
||||
.is_some_and(|reason| reason.contains("max recovery attempts")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_ledger_records_failed_command_result() {
|
||||
// given
|
||||
let mut ctx = RecoveryContext::new().with_fail_at_step(1);
|
||||
let scenario = FailureScenario::PartialPluginStartup;
|
||||
|
||||
// when
|
||||
let result = attempt_recovery(&scenario, &mut ctx);
|
||||
|
||||
// then
|
||||
assert!(matches!(result, RecoveryResult::PartialRecovery { .. }));
|
||||
let entry = ctx.ledger_entry(&scenario).expect("ledger entry");
|
||||
assert_eq!(entry.state, RecoveryAttemptState::Failed);
|
||||
assert_eq!(entry.command_results.len(), 2);
|
||||
assert_eq!(
|
||||
entry.command_results[0].status,
|
||||
RecoveryAttemptState::Succeeded
|
||||
);
|
||||
assert_eq!(
|
||||
entry.command_results[1].status,
|
||||
RecoveryAttemptState::Failed
|
||||
);
|
||||
assert!(entry.command_results[1]
|
||||
.result
|
||||
.contains("partial_plugin_startup"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stale_branch_recipe_has_rebase_then_clean_build() {
|
||||
// given
|
||||
|
||||
@@ -1,552 +0,0 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
pub const REPORT_SCHEMA_V1: &str = "claw.report.v1";
|
||||
pub const DEFAULT_PROJECTION_POLICY_V1: &str = "claw.report.projection.v1";
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ClaimKind {
|
||||
ObservedFact,
|
||||
Inference,
|
||||
Hypothesis,
|
||||
Recommendation,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ReportConfidence {
|
||||
High,
|
||||
Medium,
|
||||
Low,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum SensitivityClass {
|
||||
Public,
|
||||
Internal,
|
||||
OperatorOnly,
|
||||
Secret,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum FieldDeltaState {
|
||||
Changed,
|
||||
Unchanged,
|
||||
Cleared,
|
||||
CarriedForward,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum NegativeFindingStatus {
|
||||
NotObservedInCheckedScope,
|
||||
UnknownNotChecked,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportClaim {
|
||||
pub id: String,
|
||||
pub kind: ClaimKind,
|
||||
pub text: String,
|
||||
pub confidence: ReportConfidence,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub evidence: Vec<String>,
|
||||
pub sensitivity: SensitivityClass,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct NegativeEvidence {
|
||||
pub id: String,
|
||||
pub status: NegativeFindingStatus,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub checked_surfaces: Vec<String>,
|
||||
pub query: String,
|
||||
pub window: String,
|
||||
pub sensitivity: SensitivityClass,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct FieldDelta {
|
||||
pub field: String,
|
||||
pub state: FieldDeltaState,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub previous_hash: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub current_hash: Option<String>,
|
||||
pub attribution: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportIdentity {
|
||||
pub report_id: String,
|
||||
pub content_hash: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct CanonicalReportV1 {
|
||||
pub schema_version: String,
|
||||
pub identity: ReportIdentity,
|
||||
pub generated_at: String,
|
||||
pub producer: String,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub claims: Vec<ReportClaim>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub negative_evidence: Vec<NegativeEvidence>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub field_deltas: Vec<FieldDelta>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ConsumerCapabilities {
|
||||
pub consumer: String,
|
||||
#[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
|
||||
pub schema_versions: BTreeSet<String>,
|
||||
#[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
|
||||
pub field_families: BTreeSet<String>,
|
||||
pub max_sensitivity: SensitivityClass,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct RedactionProvenance {
|
||||
pub field_path: String,
|
||||
pub reason: String,
|
||||
pub policy_id: String,
|
||||
pub original_hash: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ProjectionProvenance {
|
||||
pub policy_id: String,
|
||||
pub source_schema_version: String,
|
||||
pub source_report_id: String,
|
||||
pub source_content_hash: String,
|
||||
pub consumer: String,
|
||||
pub downgraded: bool,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub omitted_field_families: Vec<String>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub redactions: Vec<RedactionProvenance>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportProjectionV1 {
|
||||
pub schema_version: String,
|
||||
pub projection_id: String,
|
||||
pub view: String,
|
||||
pub provenance: ProjectionProvenance,
|
||||
pub payload: Value,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportSchemaField {
|
||||
pub id: String,
|
||||
pub description: String,
|
||||
pub required: bool,
|
||||
pub field_family: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct ReportSchemaRegistry {
|
||||
pub schema_version: String,
|
||||
pub compatibility: String,
|
||||
pub fields: Vec<ReportSchemaField>,
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn report_schema_v1_registry() -> ReportSchemaRegistry {
|
||||
ReportSchemaRegistry {
|
||||
schema_version: REPORT_SCHEMA_V1.to_string(),
|
||||
compatibility: "additive fields are compatible; missing required fields are breaking"
|
||||
.to_string(),
|
||||
fields: vec![
|
||||
field(
|
||||
"identity.report_id",
|
||||
"stable canonical report identity",
|
||||
true,
|
||||
"identity",
|
||||
),
|
||||
field(
|
||||
"identity.content_hash",
|
||||
"hash of canonical payload excluding identity",
|
||||
true,
|
||||
"identity",
|
||||
),
|
||||
field(
|
||||
"claims[].kind",
|
||||
"fact/inference/hypothesis/recommendation label",
|
||||
true,
|
||||
"claims",
|
||||
),
|
||||
field(
|
||||
"claims[].confidence",
|
||||
"confidence bucket for the claim",
|
||||
true,
|
||||
"claims",
|
||||
),
|
||||
field(
|
||||
"claims[].evidence",
|
||||
"evidence ids supporting a claim",
|
||||
false,
|
||||
"claims",
|
||||
),
|
||||
field(
|
||||
"negative_evidence[]",
|
||||
"searched-and-not-found findings with checked scope",
|
||||
false,
|
||||
"negative_evidence",
|
||||
),
|
||||
field(
|
||||
"field_deltas[]",
|
||||
"field-level changed/unchanged/cleared/carried-forward attribution",
|
||||
false,
|
||||
"field_deltas",
|
||||
),
|
||||
field(
|
||||
"projection.provenance.redactions[]",
|
||||
"redaction policy provenance for projected fields",
|
||||
false,
|
||||
"projection",
|
||||
),
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalizes a report into its canonical form: pins the schema version,
/// sorts the variable-length collections into a deterministic order, and
/// derives the identity fields from the content hash.
///
/// Statement order matters here: collections are sorted *before* the content
/// hash is computed so semantically equal reports hash identically, and
/// `content_hash` is assigned *after* hashing (the hash excludes identity
/// fields — see `report_content_hash`).
#[must_use]
pub fn canonicalize_report(mut report: CanonicalReportV1) -> CanonicalReportV1 {
    // Force the canonical schema version regardless of what the producer set.
    report.schema_version = REPORT_SCHEMA_V1.to_string();
    // Deterministic ordering: claims and negative evidence by id, deltas by field name.
    report.claims.sort_by(|a, b| a.id.cmp(&b.id));
    report.negative_evidence.sort_by(|a, b| a.id.cmp(&b.id));
    report.field_deltas.sort_by(|a, b| a.field.cmp(&b.field));
    let content_hash = report_content_hash(&report);
    // Only synthesize a report id when the producer did not supply one.
    if report.identity.report_id.is_empty() {
        report.identity.report_id = format!("report-{content_hash}");
    }
    report.identity.content_hash = content_hash;
    report
}
|
||||
|
||||
#[must_use]
|
||||
pub fn report_content_hash(report: &CanonicalReportV1) -> String {
|
||||
let mut hashable = report.clone();
|
||||
hashable.identity.report_id.clear();
|
||||
hashable.identity.content_hash.clear();
|
||||
stable_json_hash(&serde_json::to_value(hashable).expect("report should serialize"))
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn project_report(
|
||||
report: &CanonicalReportV1,
|
||||
capabilities: &ConsumerCapabilities,
|
||||
view: impl Into<String>,
|
||||
) -> ReportProjectionV1 {
|
||||
let view = view.into();
|
||||
let supports_schema = capabilities.schema_versions.contains(REPORT_SCHEMA_V1);
|
||||
let mut omitted_field_families = Vec::new();
|
||||
let mut redactions = Vec::new();
|
||||
let mut payload = serde_json::Map::new();
|
||||
|
||||
payload.insert(
|
||||
"identity".to_string(),
|
||||
serde_json::to_value(&report.identity).expect("identity serializes"),
|
||||
);
|
||||
payload.insert(
|
||||
"generated_at".to_string(),
|
||||
Value::String(report.generated_at.clone()),
|
||||
);
|
||||
payload.insert(
|
||||
"producer".to_string(),
|
||||
Value::String(report.producer.clone()),
|
||||
);
|
||||
|
||||
if supports_family(capabilities, "claims") {
|
||||
let claims = report
|
||||
.claims
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(index, claim)| redact_claim(index, claim, capabilities, &mut redactions))
|
||||
.collect::<Vec<_>>();
|
||||
payload.insert("claims".to_string(), Value::Array(claims));
|
||||
} else {
|
||||
omitted_field_families.push("claims".to_string());
|
||||
}
|
||||
|
||||
if supports_family(capabilities, "negative_evidence") {
|
||||
payload.insert(
|
||||
"negative_evidence".to_string(),
|
||||
serde_json::to_value(&report.negative_evidence).expect("negative evidence serializes"),
|
||||
);
|
||||
} else {
|
||||
omitted_field_families.push("negative_evidence".to_string());
|
||||
}
|
||||
|
||||
if supports_family(capabilities, "field_deltas") {
|
||||
payload.insert(
|
||||
"field_deltas".to_string(),
|
||||
serde_json::to_value(&report.field_deltas).expect("field deltas serialize"),
|
||||
);
|
||||
} else {
|
||||
omitted_field_families.push("field_deltas".to_string());
|
||||
}
|
||||
|
||||
let downgraded =
|
||||
!supports_schema || !omitted_field_families.is_empty() || !redactions.is_empty();
|
||||
let provenance = ProjectionProvenance {
|
||||
policy_id: DEFAULT_PROJECTION_POLICY_V1.to_string(),
|
||||
source_schema_version: report.schema_version.clone(),
|
||||
source_report_id: report.identity.report_id.clone(),
|
||||
source_content_hash: report.identity.content_hash.clone(),
|
||||
consumer: capabilities.consumer.clone(),
|
||||
downgraded,
|
||||
omitted_field_families,
|
||||
redactions,
|
||||
};
|
||||
let mut projection = ReportProjectionV1 {
|
||||
schema_version: REPORT_SCHEMA_V1.to_string(),
|
||||
projection_id: String::new(),
|
||||
view,
|
||||
provenance,
|
||||
payload: Value::Object(payload),
|
||||
};
|
||||
projection.projection_id = stable_json_hash(&serde_json::json!({
|
||||
"view": projection.view,
|
||||
"provenance": projection.provenance,
|
||||
"payload": projection.payload,
|
||||
}));
|
||||
projection
|
||||
}
|
||||
|
||||
fn field(id: &str, description: &str, required: bool, field_family: &str) -> ReportSchemaField {
|
||||
ReportSchemaField {
|
||||
id: id.to_string(),
|
||||
description: description.to_string(),
|
||||
required,
|
||||
field_family: field_family.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn supports_family(capabilities: &ConsumerCapabilities, family: &str) -> bool {
|
||||
capabilities.field_families.is_empty() || capabilities.field_families.contains(family)
|
||||
}
|
||||
|
||||
fn redact_claim(
|
||||
index: usize,
|
||||
claim: &ReportClaim,
|
||||
capabilities: &ConsumerCapabilities,
|
||||
redactions: &mut Vec<RedactionProvenance>,
|
||||
) -> Option<Value> {
|
||||
if claim.sensitivity <= capabilities.max_sensitivity {
|
||||
return Some(serde_json::to_value(claim).expect("claim serializes"));
|
||||
}
|
||||
if claim.sensitivity == SensitivityClass::Secret {
|
||||
redactions.push(RedactionProvenance {
|
||||
field_path: format!("claims[{index}]"),
|
||||
reason: "omitted: sensitivity exceeds consumer policy".to_string(),
|
||||
policy_id: DEFAULT_PROJECTION_POLICY_V1.to_string(),
|
||||
original_hash: stable_json_hash(
|
||||
&serde_json::to_value(claim).expect("claim serializes"),
|
||||
),
|
||||
});
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut redacted = claim.clone();
|
||||
let original_hash = stable_json_hash(&serde_json::to_value(claim).expect("claim serializes"));
|
||||
redacted.text = "<redacted>".to_string();
|
||||
redacted.evidence.clear();
|
||||
redactions.push(RedactionProvenance {
|
||||
field_path: format!("claims[{index}].text"),
|
||||
reason: "transformed: sensitivity exceeds consumer policy".to_string(),
|
||||
policy_id: DEFAULT_PROJECTION_POLICY_V1.to_string(),
|
||||
original_hash,
|
||||
});
|
||||
Some(serde_json::to_value(redacted).expect("redacted claim serializes"))
|
||||
}
|
||||
|
||||
fn stable_json_hash(value: &Value) -> String {
|
||||
let normalized = normalize_json(value);
|
||||
let bytes = serde_json::to_vec(&normalized).expect("normalized json should serialize");
|
||||
let digest = Sha256::digest(bytes);
|
||||
let mut hash = String::with_capacity(16);
|
||||
for byte in &digest[..8] {
|
||||
use std::fmt::Write as _;
|
||||
write!(&mut hash, "{byte:02x}").expect("writing to String should not fail");
|
||||
}
|
||||
hash
|
||||
}
|
||||
|
||||
/// Recursively normalizes a JSON value so structurally equal values
/// serialize to identical bytes: object keys are re-emitted in sorted order,
/// arrays are normalized element-wise without reordering, and scalars are
/// cloned as-is.
fn normalize_json(value: &Value) -> Value {
    match value {
        Value::Array(values) => Value::Array(values.iter().map(normalize_json).collect()),
        Value::Object(map) => {
            // The BTreeMap round-trip is what sorts the keys; it keeps the
            // serialized byte output stable even if the serde_json map type
            // preserves insertion order.
            let sorted = map
                .iter()
                .map(|(key, value)| (key.clone(), normalize_json(value)))
                .collect::<BTreeMap<_, _>>();
            serde_json::to_value(sorted).expect("sorted map should serialize")
        }
        other => other.clone(),
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{
        canonicalize_report, project_report, report_schema_v1_registry, CanonicalReportV1,
        ClaimKind, ConsumerCapabilities, FieldDelta, FieldDeltaState, NegativeEvidence,
        NegativeFindingStatus, ReportClaim, ReportConfidence, ReportIdentity, SensitivityClass,
        REPORT_SCHEMA_V1,
    };

    /// Canonicalized fixture with one claim per sensitivity class
    /// (secret / internal / public), one negative-evidence entry, and one
    /// cleared field delta. Claims are deliberately listed out of id order so
    /// `canonicalize_report`'s sorting is exercised.
    fn fixture_report() -> CanonicalReportV1 {
        canonicalize_report(CanonicalReportV1 {
            schema_version: String::new(),
            identity: ReportIdentity {
                report_id: String::new(),
                content_hash: String::new(),
            },
            generated_at: "2026-05-14T00:00:00Z".to_string(),
            producer: "worker-1".to_string(),
            claims: vec![
                ReportClaim {
                    id: "claim-secret".to_string(),
                    kind: ClaimKind::ObservedFact,
                    text: "secret token appeared in logs".to_string(),
                    confidence: ReportConfidence::High,
                    evidence: vec!["log:secret".to_string()],
                    sensitivity: SensitivityClass::Secret,
                },
                ReportClaim {
                    id: "claim-hypothesis".to_string(),
                    kind: ClaimKind::Hypothesis,
                    text: "transport restart likely caused the retry".to_string(),
                    confidence: ReportConfidence::Medium,
                    evidence: vec!["event:transport".to_string()],
                    sensitivity: SensitivityClass::Internal,
                },
                ReportClaim {
                    id: "claim-fact".to_string(),
                    kind: ClaimKind::ObservedFact,
                    text: "lane finished once".to_string(),
                    confidence: ReportConfidence::High,
                    evidence: vec!["event:lane.finished".to_string()],
                    sensitivity: SensitivityClass::Public,
                },
            ],
            negative_evidence: vec![NegativeEvidence {
                id: "neg-blocker".to_string(),
                status: NegativeFindingStatus::NotObservedInCheckedScope,
                checked_surfaces: vec!["lane_events".to_string(), "worker_status".to_string()],
                query: "current blocker".to_string(),
                window: "2026-05-14T00:00:00Z/2026-05-14T00:05:00Z".to_string(),
                sensitivity: SensitivityClass::Public,
            }],
            field_deltas: vec![FieldDelta {
                field: "blocker".to_string(),
                state: FieldDeltaState::Cleared,
                previous_hash: Some("prev123".to_string()),
                current_hash: None,
                attribution: "lane.failed reconciled to lane.finished".to_string(),
            }],
        })
    }

    /// Builds consumer capabilities for the given field families and
    /// sensitivity ceiling, always advertising schema v1 support.
    fn capabilities(families: &[&str], max_sensitivity: SensitivityClass) -> ConsumerCapabilities {
        ConsumerCapabilities {
            consumer: "clawhip".to_string(),
            schema_versions: [REPORT_SCHEMA_V1.to_string()].into_iter().collect(),
            field_families: families
                .iter()
                .map(|family| (*family).to_string())
                .collect(),
            max_sensitivity,
        }
    }

    /// The registry must advertise the v1 version and the key field ids.
    #[test]
    fn report_schema_registry_is_self_describing() {
        let registry = report_schema_v1_registry();
        assert_eq!(registry.schema_version, REPORT_SCHEMA_V1);
        assert!(registry
            .fields
            .iter()
            .any(|field| field.id == "claims[].kind"));
        assert!(registry
            .fields
            .iter()
            .any(|field| field.id == "negative_evidence[]"));
        assert!(registry
            .fields
            .iter()
            .any(|field| field.id == "projection.provenance.redactions[]"));
    }

    /// Canonicalization pins the schema version, derives identity from the
    /// content hash, and sorts claims by id (fact < hypothesis < secret).
    #[test]
    fn canonical_report_labels_claims_negative_evidence_and_deltas() {
        let report = fixture_report();
        assert_eq!(report.schema_version, REPORT_SCHEMA_V1);
        assert!(report.identity.report_id.starts_with("report-"));
        // stable_json_hash yields 16 hex chars.
        assert_eq!(report.identity.content_hash.len(), 16);
        assert_eq!(report.claims[0].id, "claim-fact");
        assert_eq!(report.claims[1].kind, ClaimKind::Hypothesis);
        assert_eq!(report.claims[1].confidence, ReportConfidence::Medium);
        assert_eq!(
            report.negative_evidence[0].status,
            NegativeFindingStatus::NotObservedInCheckedScope
        );
        assert_eq!(report.field_deltas[0].state, FieldDeltaState::Cleared);
    }

    /// Projecting twice with identical inputs must give identical output, and
    /// a Public-only consumer sees the Internal claim transformed
    /// (claims[1].text) and the Secret claim omitted (claims[2]).
    #[test]
    fn projections_are_deterministic_and_record_redaction_provenance() {
        let report = fixture_report();
        let capabilities = capabilities(
            &["claims", "negative_evidence", "field_deltas"],
            SensitivityClass::Public,
        );

        let first = project_report(&report, &capabilities, "delta_brief");
        let second = project_report(&report, &capabilities, "delta_brief");

        assert_eq!(first, second);
        assert_eq!(first.provenance.source_report_id, report.identity.report_id);
        assert_eq!(
            first.provenance.source_content_hash,
            report.identity.content_hash
        );
        assert!(first.provenance.downgraded);
        assert_eq!(first.provenance.redactions.len(), 2);
        assert!(first
            .provenance
            .redactions
            .iter()
            .any(|redaction| redaction.field_path == "claims[1].text"));
        assert!(first
            .provenance
            .redactions
            .iter()
            .any(|redaction| redaction.field_path == "claims[2]"));
    }

    /// A consumer that only negotiated the "claims" family gets the other
    /// families omitted from the payload and listed in the provenance.
    #[test]
    fn capability_negotiation_omits_unsupported_field_families() {
        let report = fixture_report();
        let capabilities = capabilities(&["claims"], SensitivityClass::Internal);
        let projection = project_report(&report, &capabilities, "legacy_clawhip");

        assert!(projection.provenance.downgraded);
        assert_eq!(
            projection.provenance.omitted_field_families,
            vec!["negative_evidence".to_string(), "field_deltas".to_string()]
        );
        assert!(projection.payload.get("claims").is_some());
        assert!(projection.payload.get("negative_evidence").is_none());
        assert!(projection.payload.get("field_deltas").is_none());
    }
}
|
||||
@@ -298,7 +298,8 @@ fn unshare_user_namespace_works() -> bool {
|
||||
.stdout(std::process::Stdio::null())
|
||||
.stderr(std::process::Stdio::null())
|
||||
.status()
|
||||
.is_ok_and(|status| status.success())
|
||||
.map(|s| s.success())
|
||||
.unwrap_or(false)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -8,14 +8,10 @@ use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::json::{JsonError, JsonValue};
|
||||
use crate::usage::TokenUsage;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
const SESSION_VERSION: u32 = 1;
|
||||
const ROTATE_AFTER_BYTES: u64 = 256 * 1024;
|
||||
const MAX_ROTATED_FILES: usize = 3;
|
||||
const MAX_JSONL_FIELD_CHARS: usize = 16 * 1024;
|
||||
const JSONL_TRUNCATION_MARKER: &str = "… [truncated for session JSONL]";
|
||||
const JSONL_REDACTION_MARKER: &str = "[redacted]";
|
||||
static SESSION_ID_COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
static LAST_TIMESTAMP_MS: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
@@ -34,10 +30,6 @@ pub enum ContentBlock {
|
||||
Text {
|
||||
text: String,
|
||||
},
|
||||
Thinking {
|
||||
thinking: String,
|
||||
signature: Option<String>,
|
||||
},
|
||||
ToolUse {
|
||||
id: String,
|
||||
name: String,
|
||||
@@ -86,25 +78,6 @@ struct SessionPersistence {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
/// Running-state liveness classification for a session heartbeat.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SessionLiveness {
    /// Transport is alive and the last health check is within the stall threshold.
    Healthy,
    /// Transport is alive but the last health check is older than the threshold.
    Stalled,
    /// The underlying transport is no longer alive.
    TransportDead,
    /// Transport is alive but no health check has been recorded yet.
    Unknown,
}
|
||||
|
||||
/// Heartbeat emitted from canonical session state, independent of terminal rendering.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SessionHeartbeat {
    /// Id of the session this heartbeat describes.
    pub session_id: String,
    /// Millisecond timestamp at which the heartbeat was taken.
    pub observed_at_ms: u64,
    /// Whether the underlying transport was alive at observation time.
    pub transport_alive: bool,
    /// Derived classification — see `SessionLiveness`.
    pub liveness: SessionLiveness,
}
|
||||
|
||||
/// Persisted conversational state for the runtime and CLI session manager.
|
||||
///
|
||||
/// `workspace_root` binds the session to the worktree it was created in. The
|
||||
@@ -273,35 +246,6 @@ impl Session {
|
||||
self.push_message(ConversationMessage::user_text(text))
|
||||
}
|
||||
|
||||
    /// Records the time of the latest successful health check and refreshes
    /// the session's last-modified state via `touch`.
    pub fn record_health_check(&mut self, timestamp_ms: u64) {
        self.last_health_check_ms = Some(timestamp_ms);
        self.touch();
    }
|
||||
|
||||
#[must_use]
|
||||
pub fn heartbeat_at(
|
||||
&self,
|
||||
now_ms: u64,
|
||||
stalled_after_ms: u64,
|
||||
transport_alive: bool,
|
||||
) -> SessionHeartbeat {
|
||||
let liveness = match (transport_alive, self.last_health_check_ms) {
|
||||
(false, _) => SessionLiveness::TransportDead,
|
||||
(true, Some(last)) if now_ms.saturating_sub(last) <= stalled_after_ms => {
|
||||
SessionLiveness::Healthy
|
||||
}
|
||||
(true, Some(_)) => SessionLiveness::Stalled,
|
||||
(true, None) => SessionLiveness::Unknown,
|
||||
};
|
||||
|
||||
SessionHeartbeat {
|
||||
session_id: self.session_id.clone(),
|
||||
observed_at_ms: now_ms,
|
||||
transport_alive,
|
||||
liveness,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record_compaction(&mut self, summary: impl Into<String>, removed_message_count: usize) {
|
||||
self.touch();
|
||||
let count = self.compaction.as_ref().map_or(1, |value| value.count + 1);
|
||||
@@ -793,22 +737,6 @@ impl ContentBlock {
|
||||
object.insert("type".to_string(), JsonValue::String("text".to_string()));
|
||||
object.insert("text".to_string(), JsonValue::String(text.clone()));
|
||||
}
|
||||
Self::Thinking {
|
||||
thinking,
|
||||
signature,
|
||||
} => {
|
||||
object.insert(
|
||||
"type".to_string(),
|
||||
JsonValue::String("thinking".to_string()),
|
||||
);
|
||||
object.insert("thinking".to_string(), JsonValue::String(thinking.clone()));
|
||||
if let Some(signature) = signature {
|
||||
object.insert(
|
||||
"signature".to_string(),
|
||||
JsonValue::String(signature.clone()),
|
||||
);
|
||||
}
|
||||
}
|
||||
Self::ToolUse { id, name, input } => {
|
||||
object.insert(
|
||||
"type".to_string(),
|
||||
@@ -855,13 +783,6 @@ impl ContentBlock {
|
||||
"text" => Ok(Self::Text {
|
||||
text: required_string(object, "text")?,
|
||||
}),
|
||||
"thinking" => Ok(Self::Thinking {
|
||||
thinking: required_string(object, "thinking")?,
|
||||
signature: object
|
||||
.get("signature")
|
||||
.and_then(JsonValue::as_str)
|
||||
.map(String::from),
|
||||
}),
|
||||
"tool_use" => Ok(Self::ToolUse {
|
||||
id: required_string(object, "id")?,
|
||||
name: required_string(object, "name")?,
|
||||
@@ -923,7 +844,7 @@ impl SessionCompaction {
|
||||
);
|
||||
object.insert(
|
||||
"summary".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(&self.summary)),
|
||||
JsonValue::String(self.summary.clone()),
|
||||
);
|
||||
Ok(JsonValue::Object(object))
|
||||
}
|
||||
@@ -983,10 +904,7 @@ impl SessionPromptEntry {
|
||||
"timestamp_ms".to_string(),
|
||||
JsonValue::Number(i64::try_from(self.timestamp_ms).unwrap_or(i64::MAX)),
|
||||
);
|
||||
object.insert(
|
||||
"text".to_string(),
|
||||
JsonValue::String(sanitize_jsonl_field(&self.text)),
|
||||
);
|
||||
object.insert("text".to_string(), JsonValue::String(self.text.clone()));
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
@@ -1004,165 +922,10 @@ impl SessionPromptEntry {
|
||||
fn message_record(message: &ConversationMessage) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
object.insert("type".to_string(), JsonValue::String("message".to_string()));
|
||||
object.insert("message".to_string(), persisted_message_json(message));
|
||||
object.insert("message".to_string(), message.to_json());
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
fn persisted_message_json(message: &ConversationMessage) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
object.insert(
|
||||
"role".to_string(),
|
||||
JsonValue::String(
|
||||
match message.role {
|
||||
MessageRole::System => "system",
|
||||
MessageRole::User => "user",
|
||||
MessageRole::Assistant => "assistant",
|
||||
MessageRole::Tool => "tool",
|
||||
}
|
||||
.to_string(),
|
||||
),
|
||||
);
|
||||
object.insert(
|
||||
"blocks".to_string(),
|
||||
JsonValue::Array(message.blocks.iter().map(persisted_block_json).collect()),
|
||||
);
|
||||
if let Some(usage) = message.usage {
|
||||
object.insert("usage".to_string(), usage_to_json(usage));
|
||||
}
|
||||
JsonValue::Object(object)
|
||||
}
|
||||
|
||||
/// Serializes a `ContentBlock` for the session JSONL file.
///
/// Free-text payloads (`text`, `thinking`, `signature`, tool `input`, tool
/// `output`, and the tool-use/result ids) pass through
/// `sanitize_jsonl_field` (secret redaction + truncation); the tool
/// `name` / `tool_name` identifiers are persisted verbatim.
fn persisted_block_json(block: &ContentBlock) -> JsonValue {
    let mut object = BTreeMap::new();
    match block {
        ContentBlock::Text { text } => {
            object.insert("type".to_string(), JsonValue::String("text".to_string()));
            object.insert(
                "text".to_string(),
                JsonValue::String(sanitize_jsonl_field(text)),
            );
        }
        ContentBlock::Thinking {
            thinking,
            signature,
        } => {
            object.insert(
                "type".to_string(),
                JsonValue::String("thinking".to_string()),
            );
            object.insert(
                "thinking".to_string(),
                JsonValue::String(sanitize_jsonl_field(thinking)),
            );
            // Optional signature: key omitted entirely when absent.
            if let Some(signature) = signature {
                object.insert(
                    "signature".to_string(),
                    JsonValue::String(sanitize_jsonl_field(signature)),
                );
            }
        }
        ContentBlock::ToolUse { id, name, input } => {
            object.insert(
                "type".to_string(),
                JsonValue::String("tool_use".to_string()),
            );
            object.insert(
                "id".to_string(),
                JsonValue::String(sanitize_jsonl_field(id)),
            );
            object.insert("name".to_string(), JsonValue::String(name.clone()));
            object.insert(
                "input".to_string(),
                JsonValue::String(sanitize_jsonl_field(input)),
            );
        }
        ContentBlock::ToolResult {
            tool_use_id,
            tool_name,
            output,
            is_error,
        } => {
            object.insert(
                "type".to_string(),
                JsonValue::String("tool_result".to_string()),
            );
            object.insert(
                "tool_use_id".to_string(),
                JsonValue::String(sanitize_jsonl_field(tool_use_id)),
            );
            object.insert(
                "tool_name".to_string(),
                JsonValue::String(tool_name.clone()),
            );
            object.insert(
                "output".to_string(),
                JsonValue::String(sanitize_jsonl_field(output)),
            );
            object.insert("is_error".to_string(), JsonValue::Bool(*is_error));
        }
    }
    JsonValue::Object(object)
}
|
||||
|
||||
fn sanitize_jsonl_field(value: &str) -> String {
|
||||
truncate_jsonl_field(&redact_jsonl_secrets(value))
|
||||
}
|
||||
|
||||
fn truncate_jsonl_field(value: &str) -> String {
|
||||
let char_count = value.chars().count();
|
||||
if char_count <= MAX_JSONL_FIELD_CHARS {
|
||||
return value.to_string();
|
||||
}
|
||||
|
||||
let keep = MAX_JSONL_FIELD_CHARS.saturating_sub(JSONL_TRUNCATION_MARKER.chars().count());
|
||||
let mut truncated = value.chars().take(keep).collect::<String>();
|
||||
truncated.push_str(JSONL_TRUNCATION_MARKER);
|
||||
truncated
|
||||
}
|
||||
|
||||
fn redact_jsonl_secrets(value: &str) -> String {
|
||||
let mut redacted = value.to_string();
|
||||
for marker in [
|
||||
"ANTHROPIC_API_KEY=",
|
||||
"ANTHROPIC_AUTH_TOKEN=",
|
||||
"OPENAI_API_KEY=",
|
||||
"DASHSCOPE_API_KEY=",
|
||||
"XAI_API_KEY=",
|
||||
"Authorization: Bearer ",
|
||||
"authorization: Bearer ",
|
||||
"Bearer sk-",
|
||||
"sk-ant-",
|
||||
] {
|
||||
redacted = redact_after_marker(&redacted, marker);
|
||||
}
|
||||
redacted
|
||||
}
|
||||
|
||||
/// Rewrites `value` so every occurrence of `marker` keeps the marker text
/// but has the secret that follows it replaced with
/// `JSONL_REDACTION_MARKER`.
///
/// The secret is taken to extend until the first whitespace character or one
/// of `'` `"` `,` `}` `]`; that delimiter itself is preserved in the output.
/// If no delimiter follows, the remainder of the string is treated as secret
/// material and dropped.
fn redact_after_marker(value: &str, marker: &str) -> String {
    let mut output = String::with_capacity(value.len());
    let mut rest = value;

    while let Some(index) = rest.find(marker) {
        // Copy everything before the marker, then the marker itself, then the
        // redaction placeholder in place of the secret.
        let (before, after_before) = rest.split_at(index);
        output.push_str(before);
        output.push_str(marker);
        output.push_str(JSONL_REDACTION_MARKER);

        // Skip the marker bytes, then scan past the secret up to (but not
        // including) the first delimiter character.
        let secret_start = marker.len();
        let after_marker = &after_before[secret_start..];
        let secret_end = after_marker
            .char_indices()
            .find_map(|(idx, ch)| {
                (ch.is_whitespace() || matches!(ch, '\'' | '"' | ',' | '}' | ']')).then_some(idx)
            })
            .unwrap_or(after_marker.len());
        rest = &after_marker[secret_end..];
    }

    output.push_str(rest);
    output
}
|
||||
|
||||
fn usage_to_json(usage: TokenUsage) -> JsonValue {
|
||||
let mut object = BTreeMap::new();
|
||||
object.insert(
|
||||
@@ -1445,36 +1208,6 @@ mod tests {
|
||||
assert_eq!(restored.session_id, session.session_id);
|
||||
}
|
||||
|
||||
    /// A `Thinking` block (including its optional signature) must survive a
    /// save/load round trip through the JSONL session file unchanged.
    #[test]
    fn persists_assistant_thinking_block_round_trip_through_jsonl() {
        // given
        let mut session = Session::new();
        session
            .push_message(ConversationMessage::assistant(vec![
                ContentBlock::Thinking {
                    thinking: "trace the path through session persistence".to_string(),
                    signature: Some("sig-123".to_string()),
                },
            ]))
            .expect("thinking block should append");
        let path = temp_session_path("thinking-jsonl");

        // when
        session.save_to_path(&path).expect("session should save");
        let restored = Session::load_from_path(&path).expect("session should load");
        fs::remove_file(&path).expect("temp file should be removable");

        // then: full-session equality plus a direct check on the block.
        assert_eq!(restored, session);
        assert_eq!(
            restored.messages[0].blocks[0],
            ContentBlock::Thinking {
                thinking: "trace the path through session persistence".to_string(),
                signature: Some("sig-123".to_string()),
            }
        );
    }
|
||||
|
||||
#[test]
|
||||
fn loads_legacy_session_json_object() {
|
||||
let path = temp_session_path("legacy");
|
||||
@@ -1525,54 +1258,6 @@ mod tests {
|
||||
assert_eq!(restored.messages[0], ConversationMessage::user_text("hi"));
|
||||
}
|
||||
|
||||
    /// Secrets embedded in tool input/output must never reach the persisted
    /// JSONL bytes, and oversized tool output must be truncated to the
    /// per-field character cap with the truncation marker appended.
    #[test]
    fn jsonl_persistence_redacts_and_truncates_oversized_payload_fields() {
        let path = temp_session_path("jsonl-safeguards");
        let secret = "sk-live-secret-should-not-persist";
        // Secret on the first line, then enough filler to exceed the cap.
        let oversized_output = format!(
            "OPENAI_API_KEY={secret}\n{}",
            "tool-output ".repeat(super::MAX_JSONL_FIELD_CHARS)
        );
        let mut session = Session::new();
        session
            .push_message(ConversationMessage::assistant(vec![
                ContentBlock::ToolUse {
                    id: "tool-1".to_string(),
                    name: "bash".to_string(),
                    input: format!("Authorization: Bearer {secret}"),
                },
            ]))
            .expect("tool use should append");
        session
            .push_message(ConversationMessage::tool_result(
                "tool-1",
                "bash",
                oversized_output,
                false,
            ))
            .expect("tool result should append");

        session.save_to_path(&path).expect("session should save");
        let persisted = fs::read_to_string(&path).expect("session jsonl should read");
        let restored = Session::load_from_path(&path).expect("session should load");
        fs::remove_file(&path).expect("temp file should be removable");

        // The raw on-disk bytes must carry the markers instead of the secret.
        assert!(
            !persisted.contains(secret),
            "secret leaked into JSONL: {persisted}"
        );
        assert!(persisted.contains(super::JSONL_REDACTION_MARKER));
        assert!(persisted.contains(super::JSONL_TRUNCATION_MARKER));

        // The restored tool result must also be redacted and capped.
        let ContentBlock::ToolResult { output, .. } = &restored.messages[1].blocks[0] else {
            panic!("restored second message should be a tool result");
        };
        assert!(!output.contains(secret));
        assert!(output.contains(super::JSONL_REDACTION_MARKER));
        assert!(output.ends_with(super::JSONL_TRUNCATION_MARKER));
        assert!(output.chars().count() <= super::MAX_JSONL_FIELD_CHARS);
    }
|
||||
|
||||
#[test]
|
||||
fn persists_compaction_metadata() {
|
||||
let path = temp_session_path("compaction");
|
||||
@@ -1857,26 +1542,4 @@ mod workspace_sessions_dir_tests {
|
||||
fs::remove_dir_all(&tmp_a).ok();
|
||||
fs::remove_dir_all(&tmp_b).ok();
|
||||
}
|
||||
    /// Exercises all four liveness classifications produced by `heartbeat_at`.
    #[test]
    fn session_heartbeat_classifies_healthy_stalled_transport_dead_and_unknown() {
        let mut session = Session::new();
        // No health check recorded yet → Unknown.
        assert_eq!(
            session.heartbeat_at(1_000, 500, true).liveness,
            SessionLiveness::Unknown
        );

        session.record_health_check(800);
        // 200ms since the last check, within the 500ms threshold → Healthy.
        assert_eq!(
            session.heartbeat_at(1_000, 500, true).liveness,
            SessionLiveness::Healthy
        );
        // 1_200ms since the last check, past the threshold → Stalled.
        assert_eq!(
            session.heartbeat_at(2_000, 500, true).liveness,
            SessionLiveness::Stalled
        );
        // A dead transport wins regardless of health-check recency.
        assert_eq!(
            session.heartbeat_at(1_000, 500, false).liveness,
            SessionLiveness::TransportDead
        );
    }
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user