Skip to content

Commit 113a61e

Browse files
committed
refactor(cli): remove Ollama provider from models command
Remove all Ollama model definitions and related tests from cortex-cli. This simplifies the codebase by removing support for the local Ollama provider, focusing on cloud-based model providers.
1 parent 3d784b4 commit 113a61e

File tree

1 file changed

+1
-82
lines changed

1 file changed

+1
-82
lines changed

src/cortex-cli/src/models_cmd.rs

Lines changed: 1 addition & 82 deletions
Original file line number | Diff line number | Diff line change
@@ -411,63 +411,6 @@ fn get_available_models() -> Vec<ModelInfo> {
411411
input_cost_per_million: Some(0.55),
412412
output_cost_per_million: Some(2.19),
413413
},
414-
// Ollama models (local)
415-
ModelInfo {
416-
id: "llama3.2".to_string(),
417-
name: "Llama 3.2".to_string(),
418-
provider: "ollama".to_string(),
419-
capabilities: ModelCapabilities {
420-
vision: false,
421-
tools: true,
422-
parallel_tools: false,
423-
streaming: true,
424-
json_mode: true,
425-
},
426-
input_cost_per_million: None, // Local model, no API cost
427-
output_cost_per_million: None,
428-
},
429-
ModelInfo {
430-
id: "llama3.2:1b".to_string(),
431-
name: "Llama 3.2 1B".to_string(),
432-
provider: "ollama".to_string(),
433-
capabilities: ModelCapabilities {
434-
vision: false,
435-
tools: true,
436-
parallel_tools: false,
437-
streaming: true,
438-
json_mode: true,
439-
},
440-
input_cost_per_million: None,
441-
output_cost_per_million: None,
442-
},
443-
ModelInfo {
444-
id: "codellama".to_string(),
445-
name: "Code Llama".to_string(),
446-
provider: "ollama".to_string(),
447-
capabilities: ModelCapabilities {
448-
vision: false,
449-
tools: false,
450-
parallel_tools: false,
451-
streaming: true,
452-
json_mode: false,
453-
},
454-
input_cost_per_million: None,
455-
output_cost_per_million: None,
456-
},
457-
ModelInfo {
458-
id: "mistral".to_string(),
459-
name: "Mistral 7B".to_string(),
460-
provider: "ollama".to_string(),
461-
capabilities: ModelCapabilities {
462-
vision: false,
463-
tools: true,
464-
parallel_tools: false,
465-
streaming: true,
466-
json_mode: true,
467-
},
468-
input_cost_per_million: None,
469-
output_cost_per_million: None,
470-
},
471414
]
472415
}
473416

@@ -799,7 +742,7 @@ mod tests {
799742
let model = ModelInfo {
800743
id: "local-model".to_string(),
801744
name: "Local Model".to_string(),
802-
provider: "ollama".to_string(),
745+
provider: "local".to_string(),
803746
capabilities: ModelCapabilities::default(),
804747
input_cost_per_million: None,
805748
output_cost_per_million: None,
@@ -881,30 +824,6 @@ mod tests {
881824
assert!(!google_models.is_empty(), "Should have Google models");
882825
}
883826

884-
#[test]
885-
fn test_get_available_models_has_ollama() {
886-
let models = get_available_models();
887-
let ollama_models: Vec<_> = models.iter().filter(|m| m.provider == "ollama").collect();
888-
assert!(!ollama_models.is_empty(), "Should have Ollama models");
889-
}
890-
891-
#[test]
892-
fn test_get_available_models_ollama_has_no_cost() {
893-
let models = get_available_models();
894-
for model in models.iter().filter(|m| m.provider == "ollama") {
895-
assert!(
896-
model.input_cost_per_million.is_none(),
897-
"Ollama model {} should have no input cost",
898-
model.id
899-
);
900-
assert!(
901-
model.output_cost_per_million.is_none(),
902-
"Ollama model {} should have no output cost",
903-
model.id
904-
);
905-
}
906-
}
907-
908827
#[test]
909828
fn test_get_available_models_unique_ids() {
910829
let models = get_available_models();

0 commit comments

Comments (0)