hokusai/tests/test_config.py

221 lines
8.7 KiB
Python

"""Integration tests for bulkgen.config."""
from __future__ import annotations
from pathlib import Path
import pytest
import yaml
from bulkgen.config import (
Defaults,
TargetConfig,
infer_required_capabilities,
load_config,
resolve_model,
)
from bulkgen.providers.models import Capability
class TestLoadConfig:
    """End-to-end tests for parsing and validating YAML config files."""

    def test_minimal_config(self, project_dir: Path) -> None:
        raw = {"targets": {"out.txt": {"prompt": "hello"}}}
        path = project_dir / "test.bulkgen.yaml"
        _ = path.write_text(yaml.dump(raw))

        cfg = load_config(path)

        assert "out.txt" in cfg.targets
        assert cfg.targets["out.txt"].prompt == "hello"
        # Built-in defaults kick in when no `defaults` section is present.
        assert cfg.defaults.text_model == "mistral-large-latest"
        assert cfg.defaults.image_model == "flux-pro-1.1"

    def test_full_config_with_all_fields(self, project_dir: Path) -> None:
        banner_spec = {
            "prompt": "A wide banner",
            "model": "flux-dev",
            "width": 1920,
            "height": 480,
            "inputs": ["ref.png"],
            "reference_images": ["ref.png"],
            "control_images": ["ctrl.png"],
        }
        story_spec = {"prompt": "Write a story", "inputs": ["banner.png"]}
        raw = {
            "defaults": {
                "text_model": "custom-text",
                "image_model": "custom-image",
            },
            "targets": {"banner.png": banner_spec, "story.md": story_spec},
        }
        path = project_dir / "full.bulkgen.yaml"
        _ = path.write_text(yaml.dump(raw, default_flow_style=False))

        cfg = load_config(path)

        assert cfg.defaults.text_model == "custom-text"
        assert cfg.defaults.image_model == "custom-image"

        banner = cfg.targets["banner.png"]
        assert banner.model == "flux-dev"
        assert banner.width == 1920
        assert banner.height == 480
        assert banner.reference_images == ["ref.png"]
        assert banner.control_images == ["ctrl.png"]

        story = cfg.targets["story.md"]
        # `model` is optional per target; unset means "use the default".
        assert story.model is None
        assert story.inputs == ["banner.png"]

    def test_empty_targets_rejected(self, project_dir: Path) -> None:
        path = project_dir / "empty.bulkgen.yaml"
        _ = path.write_text(yaml.dump({"targets": {}}))
        with pytest.raises(Exception, match="At least one target"):
            _ = load_config(path)

    def test_missing_prompt_rejected(self, project_dir: Path) -> None:
        path = project_dir / "bad.bulkgen.yaml"
        _ = path.write_text(yaml.dump({"targets": {"out.txt": {}}}))
        # NOTE(review): broad Exception match — presumably a validation error
        # from the config model; tighten once the exact type is confirmed.
        with pytest.raises(Exception):
            _ = load_config(path)
class TestInferRequiredCapabilities:
    """Exercise capability inference driven by file extension and target fields."""

    def test_plain_image(self) -> None:
        caps = infer_required_capabilities("out.png", TargetConfig(prompt="x"))
        assert caps == frozenset({Capability.TEXT_TO_IMAGE})

    @pytest.mark.parametrize("name", ["out.png", "out.jpg", "out.jpeg", "out.webp"])
    def test_image_extensions(self, name: str) -> None:
        caps = infer_required_capabilities(name, TargetConfig(prompt="x"))
        assert Capability.TEXT_TO_IMAGE in caps

    @pytest.mark.parametrize("name", ["OUT.PNG", "OUT.JPG"])
    def test_case_insensitive(self, name: str) -> None:
        # Extension matching must ignore case.
        caps = infer_required_capabilities(name, TargetConfig(prompt="x"))
        assert Capability.TEXT_TO_IMAGE in caps

    def test_image_with_reference_images(self) -> None:
        cfg = TargetConfig(prompt="x", reference_images=["ref.png"])
        expected = frozenset({Capability.TEXT_TO_IMAGE, Capability.REFERENCE_IMAGES})
        assert infer_required_capabilities("out.png", cfg) == expected

    def test_image_with_control_images(self) -> None:
        cfg = TargetConfig(prompt="x", control_images=["ctrl.png"])
        expected = frozenset({Capability.TEXT_TO_IMAGE, Capability.CONTROL_IMAGES})
        assert infer_required_capabilities("out.png", cfg) == expected

    def test_image_with_both(self) -> None:
        cfg = TargetConfig(
            prompt="x", reference_images=["ref.png"], control_images=["ctrl.png"]
        )
        expected = frozenset(
            {
                Capability.TEXT_TO_IMAGE,
                Capability.REFERENCE_IMAGES,
                Capability.CONTROL_IMAGES,
            }
        )
        assert infer_required_capabilities("out.png", cfg) == expected

    @pytest.mark.parametrize("name", ["doc.md", "doc.txt"])
    def test_text_extensions(self, name: str) -> None:
        caps = infer_required_capabilities(name, TargetConfig(prompt="x"))
        assert caps == frozenset({Capability.TEXT_GENERATION})

    def test_text_with_text_inputs(self) -> None:
        # Text-only inputs do not add VISION.
        cfg = TargetConfig(prompt="x", inputs=["data.txt"])
        expected = frozenset({Capability.TEXT_GENERATION})
        assert infer_required_capabilities("out.md", cfg) == expected

    def test_text_with_image_input(self) -> None:
        cfg = TargetConfig(prompt="x", inputs=["photo.png"])
        expected = frozenset({Capability.TEXT_GENERATION, Capability.VISION})
        assert infer_required_capabilities("out.txt", cfg) == expected

    def test_text_with_image_reference(self) -> None:
        cfg = TargetConfig(prompt="x", reference_images=["ref.jpg"])
        expected = frozenset({Capability.TEXT_GENERATION, Capability.VISION})
        assert infer_required_capabilities("out.md", cfg) == expected

    def test_unsupported_extension_raises(self) -> None:
        with pytest.raises(ValueError, match="unsupported extension"):
            _ = infer_required_capabilities("data.csv", TargetConfig(prompt="x"))

    def test_no_extension_raises(self) -> None:
        with pytest.raises(ValueError, match="unsupported extension"):
            _ = infer_required_capabilities("Makefile", TargetConfig(prompt="x"))
class TestResolveModel:
    """Verify model resolution, including capability-driven fallback."""

    def test_explicit_model_wins(self) -> None:
        cfg = TargetConfig(prompt="x", model="mistral-small-latest")
        resolved = resolve_model("out.txt", cfg, Defaults())
        assert resolved.name == "mistral-small-latest"

    def test_default_text_model(self) -> None:
        resolved = resolve_model(
            "out.md",
            TargetConfig(prompt="x"),
            Defaults(text_model="mistral-large-latest"),
        )
        assert resolved.name == "mistral-large-latest"

    def test_default_image_model(self) -> None:
        resolved = resolve_model(
            "out.png", TargetConfig(prompt="x"), Defaults(image_model="flux-dev")
        )
        assert resolved.name == "flux-dev"

    def test_unknown_model_raises(self) -> None:
        cfg = TargetConfig(prompt="x", model="nonexistent-model")
        with pytest.raises(ValueError, match="Unknown model"):
            _ = resolve_model("out.txt", cfg, Defaults())

    def test_explicit_model_missing_capability_raises(self) -> None:
        # An explicitly requested model is never swapped out: flux-dev lacks
        # reference-image support, so resolution must fail loudly.
        cfg = TargetConfig(prompt="x", model="flux-dev", reference_images=["r.png"])
        with pytest.raises(ValueError, match="lacks required capabilities"):
            _ = resolve_model("out.png", cfg, Defaults())

    def test_default_fallback_for_reference_images(self) -> None:
        # Default flux-dev lacks reference_images; a capable model is chosen instead.
        cfg = TargetConfig(prompt="x", reference_images=["r.png"])
        resolved = resolve_model("out.png", cfg, Defaults(image_model="flux-dev"))
        assert Capability.REFERENCE_IMAGES in resolved.capabilities

    def test_default_fallback_for_vision(self) -> None:
        # Default mistral-large-latest lacks vision; fallback must supply it.
        cfg = TargetConfig(prompt="x", inputs=["photo.png"])
        resolved = resolve_model(
            "out.txt", cfg, Defaults(text_model="mistral-large-latest")
        )
        assert Capability.VISION in resolved.capabilities

    def test_default_preferred_when_capable(self) -> None:
        # A default that already satisfies the requirements is used as-is.
        cfg = TargetConfig(prompt="x", reference_images=["r.png"])
        resolved = resolve_model("out.png", cfg, Defaults(image_model="flux-2-pro"))
        assert resolved.name == "flux-2-pro"