47 changes: 47 additions & 0 deletions .cargo/audit.toml
@@ -0,0 +1,47 @@
# audit.toml - cargo-audit configuration for security audits
# https://github.com/rustsec/rustsec/blob/main/cargo-audit/audit.toml.example
#
# This file consolidates all RUSTSEC advisory exceptions in one place.
# These are known vulnerabilities in transitive dependencies that cannot be
# easily upgraded, typically due to deep dependency chains (e.g., wasmtime).

[advisories]
# These advisories are either:
# 1. False positives for our use case
# 2. In transitive dependencies we cannot easily update
# 3. Low severity issues being tracked for future resolution
#
# All wasmtime issues are due to using v29.0.1 via cortex-plugins.
# Upgrading wasmtime is a significant effort tracked separately.
ignore = [
# wasmtime v29.0.1 - Unsound API access to WebAssembly shared linear memory
# Severity: low (1.8)
# We don't expose raw WebAssembly memory APIs to untrusted code
"RUSTSEC-2025-0118",

# wasmtime v29.0.1 - Host panic with fd_renumber WASIp1 function
# Severity: low (3.3)
# Limited exposure - a panic here is a denial-of-service risk, not memory unsafety
"RUSTSEC-2025-0046",

# wasmtime v29.0.1 - Segfault with f64.copysign operator on x86-64
# Severity: medium (4.1)
# Tracked for wasmtime upgrade
"RUSTSEC-2026-0006",

# fxhash v0.2.1 - unmaintained
# Transitive dependency via selectors/scraper and wasmtime
"RUSTSEC-2025-0057",

# paste v1.0.15 - unmaintained
# Transitive dependency via wasmtime and ratatui
"RUSTSEC-2024-0436",

# lru v0.12.5 - unsound IterMut implementation
# Transitive dependency via ratatui
# We don't iterate LRU caches mutably
"RUSTSEC-2026-0002",
]

# Warn on informational advisories (unmaintained, unsound, etc.)
informational_warnings = ["unmaintained", "unsound", "notice"]
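
Since this file is now the single place where advisory exceptions live, one option is a small guard check that keeps the ignore list well-formed. The sketch below is illustrative only and not part of this PR; it assumes the `toml` crate is available as a dev-dependency and that tests run from the workspace root.

```rust
// Hypothetical guard test (not part of this PR): parse .cargo/audit.toml and
// check that every [advisories].ignore entry looks like a RUSTSEC advisory id.
// Assumes the `toml` crate is available and tests run from the workspace root.
#[test]
fn audit_ignore_entries_look_like_rustsec_ids() {
    let raw = std::fs::read_to_string(".cargo/audit.toml")
        .expect(".cargo/audit.toml should exist");
    let value: toml::Value = raw.parse().expect("audit.toml should be valid TOML");
    let ignored = value["advisories"]["ignore"]
        .as_array()
        .expect("advisories.ignore should be an array");
    for entry in ignored {
        let id = entry.as_str().expect("ignore entries should be strings");
        assert!(
            id.starts_with("RUSTSEC-"),
            "unexpected advisory id format: {id}"
        );
    }
}
```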
5 changes: 2 additions & 3 deletions .github/workflows/ci.yml
@@ -195,9 +195,6 @@ jobs:
audit:
name: Security Audit
runs-on: blacksmith-4vcpu-ubuntu-2404
# Security audit is informational - don't block CI on known vulnerabilities
# Issues are automatically created for any vulnerabilities found
continue-on-error: true
permissions:
contents: read
issues: write
@@ -210,6 +207,8 @@
- name: Install Rust stable
uses: dtolnay/rust-toolchain@stable

# Known RUSTSEC advisories are configured in .cargo/audit.toml
# See that file for detailed explanations of each exception
- uses: actions-rust-lang/audit@v1
name: Audit Rust Dependencies

83 changes: 1 addition & 82 deletions src/cortex-cli/src/models_cmd.rs
@@ -411,63 +411,6 @@ fn get_available_models() -> Vec<ModelInfo> {
input_cost_per_million: Some(0.55),
output_cost_per_million: Some(2.19),
},
// Ollama models (local)
ModelInfo {
id: "llama3.2".to_string(),
name: "Llama 3.2".to_string(),
provider: "ollama".to_string(),
capabilities: ModelCapabilities {
vision: false,
tools: true,
parallel_tools: false,
streaming: true,
json_mode: true,
},
input_cost_per_million: None, // Local model, no API cost
output_cost_per_million: None,
},
ModelInfo {
id: "llama3.2:1b".to_string(),
name: "Llama 3.2 1B".to_string(),
provider: "ollama".to_string(),
capabilities: ModelCapabilities {
vision: false,
tools: true,
parallel_tools: false,
streaming: true,
json_mode: true,
},
input_cost_per_million: None,
output_cost_per_million: None,
},
ModelInfo {
id: "codellama".to_string(),
name: "Code Llama".to_string(),
provider: "ollama".to_string(),
capabilities: ModelCapabilities {
vision: false,
tools: false,
parallel_tools: false,
streaming: true,
json_mode: false,
},
input_cost_per_million: None,
output_cost_per_million: None,
},
ModelInfo {
id: "mistral".to_string(),
name: "Mistral 7B".to_string(),
provider: "ollama".to_string(),
capabilities: ModelCapabilities {
vision: false,
tools: true,
parallel_tools: false,
streaming: true,
json_mode: true,
},
input_cost_per_million: None,
output_cost_per_million: None,
},
]
}
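
For context, here is a rough sketch of the types this function builds, inferred only from the fields visible in this diff; the real definitions live elsewhere in models_cmd.rs and may differ in derives, visibility, and documentation.

```rust
// Inferred sketch - field names and shapes are taken from the diff above, not
// from the actual source. `Default` is assumed because the tests below call
// `ModelCapabilities::default()`.
#[derive(Debug, Clone, Default)]
pub struct ModelCapabilities {
    pub vision: bool,
    pub tools: bool,
    pub parallel_tools: bool,
    pub streaming: bool,
    pub json_mode: bool,
}

#[derive(Debug, Clone)]
pub struct ModelInfo {
    pub id: String,
    pub name: String,
    pub provider: String,
    pub capabilities: ModelCapabilities,
    /// USD per million tokens; `None` marks a local model with no API cost.
    pub input_cost_per_million: Option<f64>,
    pub output_cost_per_million: Option<f64>,
}
```

Keeping the costs as `Option<f64>` lets local models (like the removed Ollama entries) report "no API cost" as `None` rather than `0.0`.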

@@ -799,7 +742,7 @@ mod tests {
let model = ModelInfo {
id: "local-model".to_string(),
name: "Local Model".to_string(),
provider: "ollama".to_string(),
provider: "local".to_string(),
capabilities: ModelCapabilities::default(),
input_cost_per_million: None,
output_cost_per_million: None,
@@ -881,30 +824,6 @@
assert!(!google_models.is_empty(), "Should have Google models");
}

#[test]
fn test_get_available_models_has_ollama() {
let models = get_available_models();
let ollama_models: Vec<_> = models.iter().filter(|m| m.provider == "ollama").collect();
assert!(!ollama_models.is_empty(), "Should have Ollama models");
}

#[test]
fn test_get_available_models_ollama_has_no_cost() {
let models = get_available_models();
for model in models.iter().filter(|m| m.provider == "ollama") {
assert!(
model.input_cost_per_million.is_none(),
"Ollama model {} should have no input cost",
model.id
);
assert!(
model.output_cost_per_million.is_none(),
"Ollama model {} should have no output cost",
model.id
);
}
}

#[test]
fn test_get_available_models_unique_ids() {
let models = get_available_models();