test: add integration tests for all modules
Some checks failed
Continuous Integration / Build Package (push) Successful in 34s
Continuous Integration / Lint, Check & Test (push) Failing after 48s

- Add pytest-asyncio dev dependency and configure asyncio_mode=auto
- Add filterwarnings to suppress third-party PydanticDeprecatedSince20
- Add conftest.py with shared fixtures (project_dir, write_config, etc.)
- Add test_config.py: YAML loading, target type inference, model resolution
- Add test_graph.py: DAG construction, cycle detection, build ordering
- Add test_state.py: hash functions, state persistence, dirty checking
- Add test_builder.py: full build pipeline with FakeProvider, incremental
  builds, selective builds, error isolation, dependency cascading
- Add test_providers.py: ImageProvider and TextProvider with mocked clients
- Add test_cli.py: build/clean/graph commands via typer CliRunner
- All 94 tests pass with 0 basedpyright warnings
This commit is contained in:
Konstantin Fickel 2026-02-14 11:07:36 +01:00
parent 452b3c4eb0
commit eef9712924
Signed by: kfickel
GPG key ID: A793722F9933C1A5
10 changed files with 1662 additions and 0 deletions

View file

@@ -20,9 +20,16 @@ bulkgen = "bulkgen.cli:app"
requires = ["hatchling"]
build-backend = "hatchling.build"

+[tool.pytest.ini_options]
+asyncio_mode = "auto"
+filterwarnings = [
+    "ignore::pydantic.warnings.PydanticDeprecatedSince20",
+]

[dependency-groups]
dev = [
    "basedpyright>=1.38.0",
    "pytest>=9.0.2",
+    "pytest-asyncio>=0.25.0",
    "ruff>=0.15.1",
]

0
tests/__init__.py Normal file
View file

79
tests/conftest.py Normal file
View file

@ -0,0 +1,79 @@
"""Shared fixtures for bulkgen integration tests."""
from __future__ import annotations
from collections.abc import Callable
from pathlib import Path
import pytest
import yaml
from bulkgen.config import ProjectConfig, load_config
WriteConfig = Callable[[dict[str, object]], ProjectConfig]
@pytest.fixture
def project_dir(tmp_path: Path) -> Path:
    """Expose pytest's per-test temporary directory as the project root."""
    # Aliased so tests read as operating on a "project", not a bare tmp dir.
    return tmp_path
@pytest.fixture
def write_config(project_dir: Path) -> WriteConfig:
    """Return a callable that dumps a raw dict to YAML and loads it back.

    Usage::

        config = write_config({"targets": {"out.txt": {"prompt": "hello"}}})
    """

    def _dump_and_load(raw: dict[str, object]) -> ProjectConfig:
        target_path = project_dir / "project.bulkgen.yaml"
        serialized = yaml.dump(raw, default_flow_style=False)
        _ = target_path.write_text(serialized)
        return load_config(target_path)

    return _dump_and_load
@pytest.fixture
def simple_text_config(write_config: WriteConfig) -> ProjectConfig:
    """A one-target, text-only project config."""
    raw = {"targets": {"output.txt": {"prompt": "Generate something"}}}
    return write_config(raw)
@pytest.fixture
def multi_target_config(project_dir: Path, write_config: WriteConfig) -> ProjectConfig:
    """Config with a dependency chain plus one independent image target.

    input.txt (external file) -> summary.md -> final.txt
    hero.png has no dependencies.
    """
    # The external file must exist before the graph is validated.
    _ = (project_dir / "input.txt").write_text("Some input content")
    targets: dict[str, object] = {
        "summary.md": {
            "prompt": "Summarize the input",
            "inputs": ["input.txt"],
        },
        "final.txt": {
            "prompt": "Finalize the summary",
            "inputs": ["summary.md"],
        },
        "hero.png": {
            "prompt": "A heroic landscape",
            "width": 1024,
            "height": 768,
        },
    }
    return write_config({"targets": targets})
@pytest.fixture
def prompt_file(project_dir: Path) -> Path:
    """Write a small prompt file into the project and return its location."""
    path = project_dir / "my_prompt.txt"
    _ = path.write_text("This prompt comes from a file")
    return path

379
tests/test_builder.py Normal file
View file

@ -0,0 +1,379 @@
"""Integration tests for bulkgen.builder."""
from __future__ import annotations
from pathlib import Path
from typing import override
from unittest.mock import patch
import pytest
from bulkgen.builder import (
_collect_all_deps, # pyright: ignore[reportPrivateUsage]
_collect_dep_files, # pyright: ignore[reportPrivateUsage]
_collect_extra_params, # pyright: ignore[reportPrivateUsage]
_resolve_prompt, # pyright: ignore[reportPrivateUsage]
run_build,
)
from bulkgen.config import ProjectConfig, TargetConfig, TargetType
from bulkgen.providers import Provider
from bulkgen.state import load_state
from tests.conftest import WriteConfig
class FakeProvider(Provider):
    """Test double that records generation by writing a marker file."""

    @override
    async def generate(
        self,
        target_name: str,
        target_config: TargetConfig,
        resolved_prompt: str,
        resolved_model: str,
        project_dir: Path,
    ) -> None:
        # The marker encodes both target and prompt, so tests can check
        # that prompt resolution ran before generation.
        marker = f"generated:{target_name}:{resolved_prompt}"
        _ = (project_dir / target_name).write_text(marker)
class FailingProvider(Provider):
    """Test double whose generate call unconditionally fails."""

    @override
    async def generate(
        self,
        target_name: str,
        target_config: TargetConfig,
        resolved_prompt: str,
        resolved_model: str,
        project_dir: Path,
    ) -> None:
        raise RuntimeError(f"Simulated failure for {target_name}")
def _fake_providers() -> dict[TargetType, Provider]:
    """Map both target types to fresh FakeProvider instances."""
    return {kind: FakeProvider() for kind in (TargetType.TEXT, TargetType.IMAGE)}
class TestResolvePrompt:
    """Prompt resolution: file contents when the path exists, inline otherwise."""

    def test_inline_prompt(self, project_dir: Path) -> None:
        resolved = _resolve_prompt("Just a string", project_dir)
        assert resolved == "Just a string"

    def test_file_prompt(self, project_dir: Path, prompt_file: Path) -> None:
        # An existing filename is replaced by the file's contents.
        resolved = _resolve_prompt(prompt_file.name, project_dir)
        assert resolved == "This prompt comes from a file"

    def test_nonexistent_file_treated_as_inline(self, project_dir: Path) -> None:
        # A name that merely looks like a path stays literal.
        assert _resolve_prompt("no_such_file.txt", project_dir) == "no_such_file.txt"
class TestCollectHelpers:
    """Dependency and extra-parameter collection helpers."""

    def test_collect_dep_files(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        _ = (project_dir / "input.txt").write_text("data")
        _ = (project_dir / "ref.png").write_bytes(b"ref")
        config = write_config(
            {
                "targets": {
                    "out.png": {
                        "prompt": "x",
                        "inputs": ["input.txt"],
                        "reference_image": "ref.png",
                        "control_images": [],
                    }
                }
            }
        )
        collected = {
            dep.name for dep in _collect_dep_files("out.png", config, project_dir)
        }
        # Both the declared input and the reference image are dependencies.
        assert "input.txt" in collected
        assert "ref.png" in collected

    def test_collect_extra_params(self, write_config: WriteConfig) -> None:
        config = write_config(
            {
                "targets": {
                    "out.png": {
                        "prompt": "x",
                        "width": 512,
                        "height": 256,
                        "reference_image": "ref.png",
                    }
                }
            }
        )
        params = _collect_extra_params("out.png", config)
        expected = {"width": 512, "height": 256, "reference_image": "ref.png"}
        for key, value in expected.items():
            assert params[key] == value

    def test_collect_extra_params_empty(self, write_config: WriteConfig) -> None:
        config = write_config({"targets": {"out.txt": {"prompt": "x"}}})
        assert _collect_extra_params("out.txt", config) == {}

    def test_collect_all_deps(self, write_config: WriteConfig) -> None:
        config = write_config(
            {
                "targets": {
                    "out.png": {
                        "prompt": "x",
                        "inputs": ["a.txt"],
                        "reference_image": "ref.png",
                        "control_images": ["c1.png", "c2.png"],
                    }
                }
            }
        )
        # Order matters: inputs, then reference image, then control images.
        assert _collect_all_deps("out.png", config) == [
            "a.txt",
            "ref.png",
            "c1.png",
            "c2.png",
        ]
class TestRunBuild:
    """Integration tests for the full build pipeline with fake providers.

    All tests are async (pytest-asyncio auto mode); the builder's provider
    factory is patched out so no external API is ever contacted.
    """

    async def test_build_single_text_target(
        self, project_dir: Path, simple_text_config: ProjectConfig
    ) -> None:
        # One target, no deps: built once, nothing skipped, nothing failed.
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            result = await run_build(simple_text_config, project_dir)
            assert result.built == ["output.txt"]
            assert result.skipped == []
            assert result.failed == {}
            assert (project_dir / "output.txt").exists()

    async def test_build_chain_dependency(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        # input.txt -> summary.md -> final.txt plus independent hero.png:
        # every generated target is built and written to disk.
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            result = await run_build(multi_target_config, project_dir)
            assert "summary.md" in result.built
            assert "final.txt" in result.built
            assert "hero.png" in result.built
            assert result.failed == {}
            assert (project_dir / "summary.md").exists()
            assert (project_dir / "final.txt").exists()
            assert (project_dir / "hero.png").exists()

    async def test_incremental_build_skips_clean_targets(
        self, project_dir: Path, simple_text_config: ProjectConfig
    ) -> None:
        # Second run with identical config/inputs must be a no-op.
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            result1 = await run_build(simple_text_config, project_dir)
            assert result1.built == ["output.txt"]
            result2 = await run_build(simple_text_config, project_dir)
            assert result2.skipped == ["output.txt"]
            assert result2.built == []

    async def test_rebuild_after_prompt_change(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        # Changing only the prompt text must dirty the target.
        config1 = write_config({"targets": {"out.txt": {"prompt": "version 1"}}})
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            r1 = await run_build(config1, project_dir)
            assert r1.built == ["out.txt"]
            config2 = write_config({"targets": {"out.txt": {"prompt": "version 2"}}})
            r2 = await run_build(config2, project_dir)
            assert r2.built == ["out.txt"]

    async def test_rebuild_after_input_change(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        # Touching a declared input file must dirty the dependent target.
        _ = (project_dir / "data.txt").write_text("original")
        config = write_config(
            {"targets": {"out.md": {"prompt": "x", "inputs": ["data.txt"]}}}
        )
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            r1 = await run_build(config, project_dir)
            assert r1.built == ["out.md"]
            _ = (project_dir / "data.txt").write_text("modified")
            r2 = await run_build(config, project_dir)
            assert r2.built == ["out.md"]

    async def test_selective_build_single_target(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        # Building a named target must not build unrelated or downstream ones.
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            result = await run_build(
                multi_target_config, project_dir, target="summary.md"
            )
            assert "summary.md" in result.built
            assert "hero.png" not in result.built
            assert "final.txt" not in result.built

    async def test_selective_build_unknown_target_raises(
        self, project_dir: Path, simple_text_config: ProjectConfig
    ) -> None:
        # Requesting a name outside the config is an error, not a no-op.
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            with pytest.raises(ValueError, match="Unknown target"):
                _ = await run_build(
                    simple_text_config, project_dir, target="nonexistent.txt"
                )

    async def test_failed_target_isolates_independent(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        # A failure in one target must not stop an independent target
        # from building in the same run.
        config = write_config(
            {
                "targets": {
                    "fail.txt": {"prompt": "will fail"},
                    "ok.txt": {"prompt": "will succeed"},
                }
            }
        )
        fail_provider = FailingProvider()
        fake_provider = FakeProvider()

        # Route fail.txt to the failing provider and everything else to the
        # fake one, through a single shared provider instance.
        async def selective_generate(
            target_name: str,
            target_config: TargetConfig,
            resolved_prompt: str,
            resolved_model: str,
            project_dir: Path,
        ) -> None:
            if target_name == "fail.txt":
                await fail_provider.generate(
                    target_name,
                    target_config,
                    resolved_prompt,
                    resolved_model,
                    project_dir,
                )
            else:
                await fake_provider.generate(
                    target_name,
                    target_config,
                    resolved_prompt,
                    resolved_model,
                    project_dir,
                )

        routing_provider = FakeProvider()
        # Monkey-patch the method on this one instance only.
        routing_provider.generate = selective_generate  # type: ignore[assignment]
        providers_dict: dict[TargetType, Provider] = {
            TargetType.TEXT: routing_provider,
            TargetType.IMAGE: routing_provider,
        }
        with patch("bulkgen.builder._create_providers", return_value=providers_dict):
            result = await run_build(config, project_dir)
            assert "fail.txt" in result.failed
            assert "ok.txt" in result.built

    async def test_failed_dep_cascades(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        # When a dependency fails, its dependents are marked failed too.
        config = write_config(
            {
                "targets": {
                    "base.txt": {"prompt": "base"},
                    "child.txt": {"prompt": "child", "inputs": ["base.txt"]},
                }
            }
        )
        with patch("bulkgen.builder._create_providers") as mock_cp:
            mock_cp.return_value = {
                TargetType.TEXT: FailingProvider(),
                TargetType.IMAGE: FakeProvider(),
            }
            result = await run_build(config, project_dir)
            assert "base.txt" in result.failed
            assert "child.txt" in result.failed
            assert "Dependency failed" in result.failed["child.txt"]

    async def test_missing_provider_records_failure(
        self, project_dir: Path, simple_text_config: ProjectConfig
    ) -> None:
        # With no provider for TEXT, the failure message names the missing
        # credential instead of crashing the build.
        with patch(
            "bulkgen.builder._create_providers",
            return_value={},
        ):
            result = await run_build(simple_text_config, project_dir)
            assert "output.txt" in result.failed
            assert "MISTRAL_API_KEY" in result.failed["output.txt"]

    async def test_state_saved_after_each_generation(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        config = write_config(
            {
                "targets": {
                    "a.txt": {"prompt": "first"},
                    "b.txt": {"prompt": "second", "inputs": ["a.txt"]},
                }
            }
        )
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            _ = await run_build(config, project_dir)
            # Both targets must be recorded in the persisted state file.
            state = load_state(project_dir)
            assert "a.txt" in state.targets
            assert "b.txt" in state.targets

    async def test_prompt_file_resolution_in_build(
        self, project_dir: Path, prompt_file: Path, write_config: WriteConfig
    ) -> None:
        # A prompt that names an existing file is replaced by that file's
        # text before it reaches the provider (FakeProvider echoes it).
        config = write_config({"targets": {"out.txt": {"prompt": prompt_file.name}}})
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            result = await run_build(config, project_dir)
            assert result.built == ["out.txt"]
            content = (project_dir / "out.txt").read_text()
            assert "This prompt comes from a file" in content

    async def test_rebuild_after_output_deleted(
        self, project_dir: Path, simple_text_config: ProjectConfig
    ) -> None:
        # Deleting the generated artifact must force a rebuild.
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            r1 = await run_build(simple_text_config, project_dir)
            assert r1.built == ["output.txt"]
            (project_dir / "output.txt").unlink()
            r2 = await run_build(simple_text_config, project_dir)
            assert r2.built == ["output.txt"]

    async def test_diamond_dependency_all_built(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        # root -> (left, right) -> merge: all three generated targets build.
        _ = (project_dir / "root.txt").write_text("root data")
        config = write_config(
            {
                "targets": {
                    "left.md": {"prompt": "left", "inputs": ["root.txt"]},
                    "right.md": {"prompt": "right", "inputs": ["root.txt"]},
                    "merge.txt": {
                        "prompt": "merge",
                        "inputs": ["left.md", "right.md"],
                    },
                }
            }
        )
        with patch("bulkgen.builder._create_providers", return_value=_fake_providers()):
            result = await run_build(config, project_dir)
            assert set(result.built) == {"left.md", "right.md", "merge.txt"}
            assert result.failed == {}

228
tests/test_cli.py Normal file
View file

@ -0,0 +1,228 @@
"""Integration tests for bulkgen.cli.
Patching ``Path.cwd()`` produces Any-typed return values from mock objects.
"""
# pyright: reportAny=false
from __future__ import annotations
from pathlib import Path
from unittest.mock import AsyncMock, patch
import pytest
import yaml
from typer.testing import CliRunner
from bulkgen.builder import BuildResult
from bulkgen.cli import app
runner = CliRunner()
@pytest.fixture
def cli_project(tmp_path: Path) -> Path:
    """Write a two-target project config into a temp dir and return the dir."""
    raw = {
        "targets": {
            "output.txt": {"prompt": "Generate text"},
            "image.png": {"prompt": "Generate image"},
        }
    }
    serialized = yaml.dump(raw, default_flow_style=False)
    _ = (tmp_path / "project.bulkgen.yaml").write_text(serialized)
    return tmp_path
class TestFindConfig:
    """Config file discovery via the CLI entry point."""

    def test_no_config_file(self, tmp_path: Path) -> None:
        # cwd is patched so the CLI searches an empty directory.
        with patch("bulkgen.cli.Path") as mock_path_cls:
            mock_path_cls.cwd.return_value = tmp_path
            result = runner.invoke(app, ["build"])
        assert result.exit_code != 0
        assert "No .bulkgen.yaml file found" in result.output

    def test_multiple_config_files(self, tmp_path: Path) -> None:
        # Two candidate configs make discovery ambiguous.
        for stem, target in (("a", "x.txt"), ("b", "y.txt")):
            _ = (tmp_path / f"{stem}.bulkgen.yaml").write_text(
                yaml.dump({"targets": {target: {"prompt": stem}}})
            )
        with patch("bulkgen.cli.Path") as mock_path_cls:
            mock_path_cls.cwd.return_value = tmp_path
            result = runner.invoke(app, ["build"])
        assert result.exit_code != 0
        assert "Multiple .bulkgen.yaml files found" in result.output
class TestBuildCommand:
    """Test the build CLI command.

    ``run_build`` is replaced with an AsyncMock, so these tests cover only
    argument plumbing and output formatting, not the build itself.
    """

    def test_build_success(self, cli_project: Path) -> None:
        # All targets built: exit 0 and a summary line with the count.
        build_result = BuildResult(
            built=["output.txt", "image.png"], skipped=[], failed={}
        )
        with (
            patch("bulkgen.cli.Path") as mock_path_cls,
            patch(
                "bulkgen.cli.run_build",
                new_callable=AsyncMock,
                return_value=build_result,
            ),
        ):
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["build"])
            assert result.exit_code == 0
            assert "Built 2 target(s)" in result.output

    def test_build_with_skipped(self, cli_project: Path) -> None:
        # An entirely up-to-date project still exits 0.
        build_result = BuildResult(
            built=[], skipped=["output.txt", "image.png"], failed={}
        )
        with (
            patch("bulkgen.cli.Path") as mock_path_cls,
            patch(
                "bulkgen.cli.run_build",
                new_callable=AsyncMock,
                return_value=build_result,
            ),
        ):
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["build"])
            assert result.exit_code == 0
            assert "Skipped 2 target(s) (up to date)" in result.output

    def test_build_with_failures(self, cli_project: Path) -> None:
        # Any failed target turns the exit code non-zero.
        build_result = BuildResult(
            built=["output.txt"],
            skipped=[],
            failed={"image.png": "Missing BFL_API_KEY"},
        )
        with (
            patch("bulkgen.cli.Path") as mock_path_cls,
            patch(
                "bulkgen.cli.run_build",
                new_callable=AsyncMock,
                return_value=build_result,
            ),
        ):
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["build"])
            assert result.exit_code == 1
            assert "Failed 1 target(s)" in result.output

    def test_build_specific_target(self, cli_project: Path) -> None:
        build_result = BuildResult(built=["output.txt"], skipped=[], failed={})
        with (
            patch("bulkgen.cli.Path") as mock_path_cls,
            patch(
                "bulkgen.cli.run_build",
                new_callable=AsyncMock,
                return_value=build_result,
            ) as mock_run,
        ):
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["build", "output.txt"])
            assert result.exit_code == 0
            # The CLI forwards the target name as run_build's third
            # positional argument.
            call_args = mock_run.call_args
            assert call_args[0][2] == "output.txt"
class TestCleanCommand:
    """The clean CLI command removes generated artifacts and state."""

    def test_clean_removes_targets(self, cli_project: Path) -> None:
        _ = (cli_project / "output.txt").write_text("generated")
        _ = (cli_project / "image.png").write_bytes(b"\x89PNG")
        _ = (cli_project / ".bulkgen.state.yaml").write_text("targets: {}")
        with patch("bulkgen.cli.Path") as mock_path_cls:
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["clean"])
        assert result.exit_code == 0
        # Both generated outputs and the state file must be gone.
        for leftover in ("output.txt", "image.png", ".bulkgen.state.yaml"):
            assert not (cli_project / leftover).exists()
        assert "Cleaned 2 artifact(s)" in result.output

    def test_clean_no_artifacts(self, cli_project: Path) -> None:
        with patch("bulkgen.cli.Path") as mock_path_cls:
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["clean"])
        assert result.exit_code == 0
        assert "Cleaned 0 artifact(s)" in result.output

    def test_clean_partial_artifacts(self, cli_project: Path) -> None:
        # Only one of the two configured targets exists on disk.
        _ = (cli_project / "output.txt").write_text("generated")
        with patch("bulkgen.cli.Path") as mock_path_cls:
            mock_path_cls.cwd.return_value = cli_project
            result = runner.invoke(app, ["clean"])
        assert result.exit_code == 0
        assert "Cleaned 1 artifact(s)" in result.output
        assert not (cli_project / "output.txt").exists()
class TestGraphCommand:
    """The graph CLI command."""

    @staticmethod
    def _run_graph(tmp_path: Path, targets: dict[str, object]):
        """Write a config containing *targets* and invoke ``graph`` there."""
        _ = (tmp_path / "test.bulkgen.yaml").write_text(
            yaml.dump({"targets": targets}, default_flow_style=False)
        )
        with patch("bulkgen.cli.Path") as mock_path_cls:
            mock_path_cls.cwd.return_value = tmp_path
            return runner.invoke(app, ["graph"])

    def test_graph_single_target(self, tmp_path: Path) -> None:
        result = self._run_graph(tmp_path, {"out.txt": {"prompt": "hello"}})
        assert result.exit_code == 0
        assert "out.txt" in result.output

    def test_graph_with_dependencies(self, tmp_path: Path) -> None:
        _ = (tmp_path / "input.txt").write_text("data")
        result = self._run_graph(
            tmp_path,
            {
                "step1.md": {"prompt": "x", "inputs": ["input.txt"]},
                "step2.txt": {"prompt": "y", "inputs": ["step1.md"]},
            },
        )
        assert result.exit_code == 0
        for node in ("input.txt", "step1.md", "step2.txt"):
            assert node in result.output
        # Dependencies are rendered with an arrow.
        assert "<-" in result.output

    def test_graph_shows_stages(self, tmp_path: Path) -> None:
        _ = (tmp_path / "data.txt").write_text("data")
        result = self._run_graph(
            tmp_path,
            {
                "a.txt": {"prompt": "x", "inputs": ["data.txt"]},
                "b.txt": {"prompt": "y", "inputs": ["a.txt"]},
            },
        )
        assert result.exit_code == 0
        assert "Stage" in result.output

132
tests/test_config.py Normal file
View file

@ -0,0 +1,132 @@
"""Integration tests for bulkgen.config."""
from __future__ import annotations
from pathlib import Path
import pytest
import yaml
from bulkgen.config import (
Defaults,
TargetConfig,
TargetType,
infer_target_type,
load_config,
resolve_model,
)
class TestLoadConfig:
    """Test loading and validating YAML config files end-to-end."""

    def test_minimal_config(self, project_dir: Path) -> None:
        """A prompt-only target loads and library defaults are filled in."""
        config_path = project_dir / "test.bulkgen.yaml"
        _ = config_path.write_text(
            yaml.dump({"targets": {"out.txt": {"prompt": "hello"}}})
        )
        config = load_config(config_path)
        assert "out.txt" in config.targets
        assert config.targets["out.txt"].prompt == "hello"
        assert config.defaults.text_model == "mistral-large-latest"
        assert config.defaults.image_model == "flux-pro-1.1"

    def test_full_config_with_all_fields(self, project_dir: Path) -> None:
        """Every optional field survives a YAML round trip unchanged."""
        raw = {
            "defaults": {
                "text_model": "custom-text",
                "image_model": "custom-image",
            },
            "targets": {
                "banner.png": {
                    "prompt": "A wide banner",
                    "model": "flux-dev",
                    "width": 1920,
                    "height": 480,
                    "inputs": ["ref.png"],
                    "reference_image": "ref.png",
                    "control_images": ["ctrl.png"],
                },
                "story.md": {
                    "prompt": "Write a story",
                    "inputs": ["banner.png"],
                },
            },
        }
        config_path = project_dir / "full.bulkgen.yaml"
        _ = config_path.write_text(yaml.dump(raw, default_flow_style=False))
        config = load_config(config_path)
        assert config.defaults.text_model == "custom-text"
        assert config.defaults.image_model == "custom-image"
        banner = config.targets["banner.png"]
        assert banner.model == "flux-dev"
        assert banner.width == 1920
        assert banner.height == 480
        assert banner.reference_image == "ref.png"
        assert banner.control_images == ["ctrl.png"]
        story = config.targets["story.md"]
        # story.md sets no model, so resolution falls back to defaults later.
        assert story.model is None
        assert story.inputs == ["banner.png"]

    def test_empty_targets_rejected(self, project_dir: Path) -> None:
        """An empty targets mapping is a validation error."""
        config_path = project_dir / "empty.bulkgen.yaml"
        _ = config_path.write_text(yaml.dump({"targets": {}}))
        with pytest.raises(Exception, match="At least one target"):
            _ = load_config(config_path)

    def test_missing_prompt_rejected(self, project_dir: Path) -> None:
        """A target without a prompt is a validation error."""
        config_path = project_dir / "bad.bulkgen.yaml"
        _ = config_path.write_text(yaml.dump({"targets": {"out.txt": {}}}))
        # Anchor on the offending field name so an unrelated exception cannot
        # make this test pass (was a bare raises(Exception) — bugbear B017).
        with pytest.raises(Exception, match="prompt"):
            _ = load_config(config_path)
class TestInferTargetType:
    """Target type inference from file extensions."""

    @pytest.mark.parametrize(
        "name",
        ["photo.png", "photo.jpg", "photo.jpeg", "photo.webp"],
    )
    def test_image_extensions(self, name: str) -> None:
        # Raster extensions all map to the IMAGE target type.
        assert infer_target_type(name) is TargetType.IMAGE

    @pytest.mark.parametrize("name", ["PHOTO.PNG", "PHOTO.JPG"])
    def test_case_insensitive(self, name: str) -> None:
        # Extension matching must ignore case.
        assert infer_target_type(name) is TargetType.IMAGE

    @pytest.mark.parametrize("name", ["doc.md", "doc.txt"])
    def test_text_extensions(self, name: str) -> None:
        assert infer_target_type(name) is TargetType.TEXT

    def test_unsupported_extension_raises(self) -> None:
        with pytest.raises(ValueError, match="unsupported extension"):
            _ = infer_target_type("data.csv")

    def test_no_extension_raises(self) -> None:
        # A bare filename has no extension to classify.
        with pytest.raises(ValueError, match="unsupported extension"):
            _ = infer_target_type("Makefile")
class TestResolveModel:
    """Model resolution: an explicit target model beats the type default."""

    def test_explicit_model_wins(self) -> None:
        target = TargetConfig(prompt="x", model="my-model")
        resolved = resolve_model("out.txt", target, Defaults())
        assert resolved == "my-model"

    def test_default_text_model(self) -> None:
        defaults = Defaults(text_model="custom-text")
        resolved = resolve_model("out.md", TargetConfig(prompt="x"), defaults)
        assert resolved == "custom-text"

    def test_default_image_model(self) -> None:
        defaults = Defaults(image_model="custom-image")
        resolved = resolve_model("out.png", TargetConfig(prompt="x"), defaults)
        assert resolved == "custom-image"

194
tests/test_graph.py Normal file
View file

@ -0,0 +1,194 @@
"""Integration tests for bulkgen.graph."""
from __future__ import annotations
from pathlib import Path
import pytest
from bulkgen.config import ProjectConfig
from bulkgen.graph import build_graph, get_build_order, get_subgraph_for_target
from tests.conftest import WriteConfig
class TestBuildGraph:
    """Test dependency graph construction from real configs."""

    def test_single_target_no_deps(
        self, project_dir: Path, simple_text_config: ProjectConfig
    ) -> None:
        graph = build_graph(simple_text_config, project_dir)
        assert "output.txt" in graph.nodes
        assert graph.number_of_edges() == 0

    def test_chain_dependency(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        # Edges point from dependency to dependent; the chain must not be
        # short-circuited with a transitive edge.
        graph = build_graph(multi_target_config, project_dir)
        assert graph.has_edge("input.txt", "summary.md")
        assert graph.has_edge("summary.md", "final.txt")
        assert not graph.has_edge("input.txt", "final.txt")

    def test_external_file_as_node(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        # Plain input files (not generated targets) still appear as nodes.
        graph = build_graph(multi_target_config, project_dir)
        assert "input.txt" in graph.nodes

    def test_missing_dependency_raises(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        # An input that is neither a target nor a file on disk is an error.
        config = write_config(
            {"targets": {"out.txt": {"prompt": "x", "inputs": ["nonexistent.txt"]}}}
        )
        with pytest.raises(
            ValueError, match="neither a defined target nor an existing file"
        ):
            _ = build_graph(config, project_dir)

    def test_cycle_raises(self, project_dir: Path, write_config: WriteConfig) -> None:
        # a.txt and b.txt depend on each other: construction must fail.
        config = write_config(
            {
                "targets": {
                    "a.txt": {"prompt": "x", "inputs": ["b.txt"]},
                    "b.txt": {"prompt": "x", "inputs": ["a.txt"]},
                }
            }
        )
        with pytest.raises(ValueError, match="cycle"):
            _ = build_graph(config, project_dir)

    def test_reference_image_creates_edge(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        _ = (project_dir / "ref.png").write_bytes(b"\x89PNG")
        config = write_config(
            {"targets": {"out.png": {"prompt": "x", "reference_image": "ref.png"}}}
        )
        graph = build_graph(config, project_dir)
        assert graph.has_edge("ref.png", "out.png")

    def test_control_images_create_edges(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        _ = (project_dir / "ctrl1.png").write_bytes(b"\x89PNG")
        _ = (project_dir / "ctrl2.png").write_bytes(b"\x89PNG")
        config = write_config(
            {
                "targets": {
                    "out.png": {
                        "prompt": "x",
                        "control_images": ["ctrl1.png", "ctrl2.png"],
                    }
                }
            }
        )
        graph = build_graph(config, project_dir)
        assert graph.has_edge("ctrl1.png", "out.png")
        assert graph.has_edge("ctrl2.png", "out.png")

    def test_target_depending_on_another_target(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        config = write_config(
            {
                "targets": {
                    "base.txt": {"prompt": "base"},
                    "derived.txt": {"prompt": "derive", "inputs": ["base.txt"]},
                }
            }
        )
        graph = build_graph(config, project_dir)
        assert graph.has_edge("base.txt", "derived.txt")
class TestGetBuildOrder:
    """Topological generation ordering."""

    @staticmethod
    def _positions(order: list[list[str]]) -> dict[str, int]:
        """Map each node name to its index in the flattened order."""
        flat = [name for generation in order for name in generation]
        return {name: idx for idx, name in enumerate(flat)}

    def test_independent_targets_same_generation(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        config = write_config(
            {
                "targets": {
                    "a.txt": {"prompt": "x"},
                    "b.txt": {"prompt": "y"},
                    "c.png": {"prompt": "z"},
                }
            }
        )
        order = get_build_order(build_graph(config, project_dir))
        # No dependencies at all: everything fits in a single generation.
        assert len(order) == 1
        assert set(order[0]) == {"a.txt", "b.txt", "c.png"}

    def test_chain_produces_sequential_generations(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        order = get_build_order(build_graph(multi_target_config, project_dir))
        pos = self._positions(order)
        assert pos["input.txt"] < pos["summary.md"]
        assert pos["summary.md"] < pos["final.txt"]

    def test_diamond_dependency(
        self, project_dir: Path, write_config: WriteConfig
    ) -> None:
        _ = (project_dir / "root.txt").write_text("root")
        config = write_config(
            {
                "targets": {
                    "left.txt": {"prompt": "x", "inputs": ["root.txt"]},
                    "right.txt": {"prompt": "y", "inputs": ["root.txt"]},
                    "merge.txt": {
                        "prompt": "z",
                        "inputs": ["left.txt", "right.txt"],
                    },
                }
            }
        )
        pos = self._positions(get_build_order(build_graph(config, project_dir)))
        # Both branches come after the root and before the merge.
        assert pos["root.txt"] < pos["left.txt"] < pos["merge.txt"]
        assert pos["root.txt"] < pos["right.txt"] < pos["merge.txt"]
class TestGetSubgraphForTarget:
    """Selective subgraph extraction."""

    def test_subgraph_includes_transitive_deps(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        graph = build_graph(multi_target_config, project_dir)
        sub = get_subgraph_for_target(graph, "final.txt")
        # The whole ancestor chain comes along ...
        for node in ("final.txt", "summary.md", "input.txt"):
            assert node in sub.nodes
        # ... but the unrelated image target does not.
        assert "hero.png" not in sub.nodes

    def test_subgraph_leaf_target(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        graph = build_graph(multi_target_config, project_dir)
        sub = get_subgraph_for_target(graph, "hero.png")
        assert set(sub.nodes) == {"hero.png"}

    def test_subgraph_preserves_edges(
        self, project_dir: Path, multi_target_config: ProjectConfig
    ) -> None:
        sub = get_subgraph_for_target(
            build_graph(multi_target_config, project_dir), "final.txt"
        )
        assert sub.has_edge("input.txt", "summary.md")
        assert sub.has_edge("summary.md", "final.txt")

319
tests/test_providers.py Normal file
View file

@ -0,0 +1,319 @@
"""Integration tests for bulkgen.providers (image and text).
Mock-heavy tests produce many Any-typed expressions from MagicMock.
"""
# pyright: reportAny=false
from __future__ import annotations
import base64
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from bulkgen.config import TargetConfig
from bulkgen.providers.image import ImageProvider
from bulkgen.providers.image import (
_encode_image_b64 as encode_image_b64, # pyright: ignore[reportPrivateUsage]
)
from bulkgen.providers.text import TextProvider
def _make_bfl_mocks(
image_bytes: bytes,
) -> tuple[MagicMock, MagicMock]:
"""Return (mock_result, mock_http) for BFL image generation tests."""
mock_result = MagicMock()
mock_result.result = {"sample": "https://example.com/img.png"}
mock_response = MagicMock()
mock_response.content = image_bytes
mock_response.raise_for_status.return_value = None
mock_http = AsyncMock()
mock_http.get.return_value = mock_response
mock_http.__aenter__ = AsyncMock(return_value=mock_http)
mock_http.__aexit__ = AsyncMock(return_value=False)
return mock_result, mock_http
def _make_mistral_mock(response: MagicMock) -> AsyncMock:
"""Return a mock Mistral client."""
mock_client = AsyncMock()
mock_client.chat.complete_async.return_value = response
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
mock_client.__aexit__ = AsyncMock(return_value=False)
return mock_client
def _make_text_response(content: str | None) -> MagicMock:
"""Return a mock Mistral response with one choice."""
choice = MagicMock()
choice.message.content = content
response = MagicMock()
response.choices = [choice]
return response
class TestImageProvider:
"""Test ImageProvider with mocked BFL client and HTTP."""
    @pytest.fixture
    def image_bytes(self) -> bytes:
        """Minimal fake image payload: the 8-byte PNG signature plus padding."""
        return b"\x89PNG\r\n\x1a\n" + b"\x00" * 100
async def test_basic_image_generation(
self, project_dir: Path, image_bytes: bytes
) -> None:
target_config = TargetConfig(prompt="A red square")
mock_result, mock_http = _make_bfl_mocks(image_bytes)
with (
patch("bulkgen.providers.image.BFLClient") as mock_cls,
patch("bulkgen.providers.image.isinstance", return_value=True),
patch("bulkgen.providers.image.httpx.AsyncClient") as mock_http_cls,
):
mock_cls.return_value.generate.return_value = mock_result
mock_http_cls.return_value = mock_http
provider = ImageProvider(api_key="test-key")
await provider.generate(
target_name="out.png",
target_config=target_config,
resolved_prompt="A red square",
resolved_model="flux-pro-1.1",
project_dir=project_dir,
)
output = project_dir / "out.png"
assert output.exists()
assert output.read_bytes() == image_bytes
async def test_image_with_dimensions(
self, project_dir: Path, image_bytes: bytes
) -> None:
target_config = TargetConfig(prompt="A banner", width=1920, height=480)
mock_result, mock_http = _make_bfl_mocks(image_bytes)
with (
patch("bulkgen.providers.image.BFLClient") as mock_cls,
patch("bulkgen.providers.image.isinstance", return_value=True),
patch("bulkgen.providers.image.httpx.AsyncClient") as mock_http_cls,
):
client_instance = mock_cls.return_value
client_instance.generate.return_value = mock_result
mock_http_cls.return_value = mock_http
provider = ImageProvider(api_key="test-key")
await provider.generate(
target_name="banner.png",
target_config=target_config,
resolved_prompt="A banner",
resolved_model="flux-pro-1.1",
project_dir=project_dir,
)
call_args = client_instance.generate.call_args
inputs = call_args[0][1]
assert inputs["width"] == 1920
assert inputs["height"] == 480
async def test_image_with_reference_image(
self, project_dir: Path, image_bytes: bytes
) -> None:
ref_path = project_dir / "ref.png"
_ = ref_path.write_bytes(b"reference image data")
target_config = TargetConfig(prompt="Like this", reference_image="ref.png")
mock_result, mock_http = _make_bfl_mocks(image_bytes)
with (
patch("bulkgen.providers.image.BFLClient") as mock_cls,
patch("bulkgen.providers.image.isinstance", return_value=True),
patch("bulkgen.providers.image.httpx.AsyncClient") as mock_http_cls,
):
client_instance = mock_cls.return_value
client_instance.generate.return_value = mock_result
mock_http_cls.return_value = mock_http
provider = ImageProvider(api_key="test-key")
await provider.generate(
target_name="out.png",
target_config=target_config,
resolved_prompt="Like this",
resolved_model="flux-kontext-pro",
project_dir=project_dir,
)
call_args = client_instance.generate.call_args
inputs = call_args[0][1]
assert "image_prompt" in inputs
assert inputs["image_prompt"] == encode_image_b64(ref_path)
async def test_image_no_sample_url_raises(self, project_dir: Path) -> None:
target_config = TargetConfig(prompt="x")
mock_result = MagicMock()
mock_result.result = {}
with (
patch("bulkgen.providers.image.BFLClient") as mock_cls,
patch("bulkgen.providers.image.isinstance", return_value=True),
):
mock_cls.return_value.generate.return_value = mock_result
provider = ImageProvider(api_key="test-key")
with pytest.raises(RuntimeError, match="did not return an image URL"):
await provider.generate(
target_name="fail.png",
target_config=target_config,
resolved_prompt="x",
resolved_model="flux-pro",
project_dir=project_dir,
)
def test_encode_image_b64(self, project_dir: Path) -> None:
data = b"test image bytes"
f = project_dir / "test.png"
_ = f.write_bytes(data)
encoded = encode_image_b64(f)
assert base64.b64decode(encoded) == data
class TestTextProvider:
    """Test TextProvider with mocked Mistral client."""

    async def test_basic_text_generation(self, project_dir: Path) -> None:
        """The completion text is written verbatim to the target file."""
        target_config = TargetConfig(prompt="Write a poem")
        response = _make_text_response("Roses are red...")
        with patch("bulkgen.providers.text.Mistral") as mock_cls:
            mock_cls.return_value = _make_mistral_mock(response)
            provider = TextProvider(api_key="test-key")
            await provider.generate(
                target_name="poem.txt",
                target_config=target_config,
                resolved_prompt="Write a poem",
                resolved_model="mistral-large-latest",
                project_dir=project_dir,
            )
        output = project_dir / "poem.txt"
        assert output.exists()
        assert output.read_text() == "Roses are red..."

    async def test_text_with_text_input(self, project_dir: Path) -> None:
        """Text input files are inlined into the prompt with a header line."""
        _ = (project_dir / "source.txt").write_text("Source material here")
        target_config = TargetConfig(prompt="Summarize", inputs=["source.txt"])
        response = _make_text_response("Summary: ...")
        with patch("bulkgen.providers.text.Mistral") as mock_cls:
            mock_client = _make_mistral_mock(response)
            mock_cls.return_value = mock_client
            provider = TextProvider(api_key="test-key")
            await provider.generate(
                target_name="summary.md",
                target_config=target_config,
                resolved_prompt="Summarize",
                resolved_model="mistral-large-latest",
                project_dir=project_dir,
            )
        # Inspect the first chat message actually sent to the mocked client.
        call_args = mock_client.chat.complete_async.call_args
        messages = call_args.kwargs["messages"]
        prompt_text = messages[0].content
        assert "--- Contents of source.txt ---" in prompt_text
        assert "Source material here" in prompt_text

    async def test_text_with_image_input(self, project_dir: Path) -> None:
        """Image inputs are referenced by an attachment marker in the prompt."""
        _ = (project_dir / "photo.png").write_bytes(b"\x89PNG")
        target_config = TargetConfig(prompt="Describe this image", inputs=["photo.png"])
        response = _make_text_response("A beautiful photo")
        with patch("bulkgen.providers.text.Mistral") as mock_cls:
            mock_client = _make_mistral_mock(response)
            mock_cls.return_value = mock_client
            provider = TextProvider(api_key="test-key")
            await provider.generate(
                target_name="desc.txt",
                target_config=target_config,
                resolved_prompt="Describe this image",
                resolved_model="mistral-large-latest",
                project_dir=project_dir,
            )
        call_args = mock_client.chat.complete_async.call_args
        messages = call_args.kwargs["messages"]
        prompt_text = messages[0].content
        assert "[Attached image: photo.png]" in prompt_text

    async def test_text_no_choices_raises(self, project_dir: Path) -> None:
        """A response with an empty choices list raises RuntimeError."""
        target_config = TargetConfig(prompt="x")
        response = MagicMock()
        response.choices = []
        with patch("bulkgen.providers.text.Mistral") as mock_cls:
            mock_cls.return_value = _make_mistral_mock(response)
            provider = TextProvider(api_key="test-key")
            with pytest.raises(RuntimeError, match="no choices"):
                await provider.generate(
                    target_name="fail.txt",
                    target_config=target_config,
                    resolved_prompt="x",
                    resolved_model="mistral-large-latest",
                    project_dir=project_dir,
                )

    async def test_text_empty_content_raises(self, project_dir: Path) -> None:
        """A choice whose message content is None raises RuntimeError."""
        target_config = TargetConfig(prompt="x")
        response = _make_text_response(None)
        with patch("bulkgen.providers.text.Mistral") as mock_cls:
            mock_cls.return_value = _make_mistral_mock(response)
            provider = TextProvider(api_key="test-key")
            with pytest.raises(RuntimeError, match="empty content"):
                await provider.generate(
                    target_name="fail.txt",
                    target_config=target_config,
                    resolved_prompt="x",
                    resolved_model="mistral-large-latest",
                    project_dir=project_dir,
                )

    async def test_text_with_multiple_inputs(self, project_dir: Path) -> None:
        """Mixed text and image inputs all appear in the composed prompt."""
        _ = (project_dir / "a.txt").write_text("content A")
        _ = (project_dir / "b.txt").write_text("content B")
        _ = (project_dir / "c.png").write_bytes(b"\x89PNG")
        target_config = TargetConfig(
            prompt="Combine all", inputs=["a.txt", "b.txt", "c.png"]
        )
        response = _make_text_response("Combined")
        with patch("bulkgen.providers.text.Mistral") as mock_cls:
            mock_client = _make_mistral_mock(response)
            mock_cls.return_value = mock_client
            provider = TextProvider(api_key="test-key")
            await provider.generate(
                target_name="out.md",
                target_config=target_config,
                resolved_prompt="Combine all",
                resolved_model="mistral-large-latest",
                project_dir=project_dir,
            )
        call_args = mock_client.chat.complete_async.call_args
        prompt_text = call_args.kwargs["messages"][0].content
        assert "--- Contents of a.txt ---" in prompt_text
        assert "content A" in prompt_text
        assert "--- Contents of b.txt ---" in prompt_text
        assert "content B" in prompt_text
        assert "[Attached image: c.png]" in prompt_text

310
tests/test_state.py Normal file
View file

@@ -0,0 +1,310 @@
"""Integration tests for bulkgen.state."""
from __future__ import annotations
from pathlib import Path
import yaml
from bulkgen.state import (
BuildState,
TargetState,
hash_file,
hash_string,
is_target_dirty,
load_state,
record_target_state,
save_state,
)
class TestHashFunctions:
    """Test hashing helpers."""

    def test_hash_file_deterministic(self, project_dir: Path) -> None:
        # Hashing the same unchanged file twice must give the same digest.
        target = project_dir / "data.txt"
        _ = target.write_text("hello world")
        first, second = hash_file(target), hash_file(target)
        assert first == second

    def test_hash_file_changes_with_content(self, project_dir: Path) -> None:
        # Rewriting the file's content must change its digest.
        target = project_dir / "data.txt"
        _ = target.write_text("version 1")
        before = hash_file(target)
        _ = target.write_text("version 2")
        after = hash_file(target)
        assert before != after

    def test_hash_string_deterministic(self) -> None:
        first, second = hash_string("abc"), hash_string("abc")
        assert first == second

    def test_hash_string_differs(self) -> None:
        assert hash_string("abc") != hash_string("xyz")
class TestStatePersistence:
    """Test save/load round-trip of build state."""

    def test_load_missing_file_returns_empty(self, project_dir: Path) -> None:
        """Loading from a directory with no state file yields empty targets."""
        state = load_state(project_dir)
        assert state.targets == {}

    def test_save_and_load_round_trip(self, project_dir: Path) -> None:
        """All recorded target fields survive a save/load cycle."""
        state = BuildState(
            targets={
                "out.txt": TargetState(
                    input_hashes={"dep.txt": "abc123"},
                    prompt_hash="prompt_hash_val",
                    model="mistral-large-latest",
                    extra_hash="",
                )
            }
        )
        save_state(state, project_dir)
        loaded = load_state(project_dir)
        assert loaded.targets["out.txt"].model == "mistral-large-latest"
        assert loaded.targets["out.txt"].input_hashes == {"dep.txt": "abc123"}
        assert loaded.targets["out.txt"].prompt_hash == "prompt_hash_val"

    def test_load_empty_yaml(self, project_dir: Path) -> None:
        """An empty state file is treated the same as a missing one."""
        _ = (project_dir / ".bulkgen.state.yaml").write_text("")
        state = load_state(project_dir)
        assert state.targets == {}

    def test_save_overwrites_existing(self, project_dir: Path) -> None:
        """Saving replaces the previous state file rather than merging."""
        state1 = BuildState(
            targets={
                "a.txt": TargetState(input_hashes={}, prompt_hash="h1", model="m1")
            }
        )
        save_state(state1, project_dir)
        state2 = BuildState(
            targets={
                "b.txt": TargetState(input_hashes={}, prompt_hash="h2", model="m2")
            }
        )
        save_state(state2, project_dir)
        loaded = load_state(project_dir)
        assert "b.txt" in loaded.targets
        assert "a.txt" not in loaded.targets

    def test_state_file_is_valid_yaml(self, project_dir: Path) -> None:
        """The on-disk state file parses as a YAML mapping with a targets key."""
        state = BuildState(
            targets={
                "out.txt": TargetState(
                    input_hashes={"f.txt": "hash"},
                    prompt_hash="ph",
                    model="m",
                    extra_hash="eh",
                )
            }
        )
        save_state(state, project_dir)
        # Parse the raw file directly to verify the serialization format.
        raw: object = yaml.safe_load(  # pyright: ignore[reportAny]
            (project_dir / ".bulkgen.state.yaml").read_text()
        )
        assert isinstance(raw, dict)
        assert "targets" in raw
class TestIsDirty:
    """Test dirty-checking logic with real files."""

    def _setup_target(
        self, project_dir: Path, *, dep_content: str = "dep data"
    ) -> tuple[BuildState, list[Path]]:
        """Create a built target with one dependency and return (state, dep_files)."""
        dep = project_dir / "dep.txt"
        _ = dep.write_text(dep_content)
        output = project_dir / "out.txt"
        _ = output.write_text("generated output")
        state = BuildState()
        dep_files = [dep]
        # Record the freshly "built" target so the baseline is clean.
        record_target_state(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )
        return state, dep_files

    def test_clean_target_not_dirty(self, project_dir: Path) -> None:
        """A just-recorded target with unchanged inputs is clean."""
        state, dep_files = self._setup_target(project_dir)
        assert not is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )

    def test_missing_output_is_dirty(self, project_dir: Path) -> None:
        """Deleting the output file marks the target dirty."""
        state, dep_files = self._setup_target(project_dir)
        (project_dir / "out.txt").unlink()
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )

    def test_changed_dep_is_dirty(self, project_dir: Path) -> None:
        """Rewriting a dependency's content marks the target dirty."""
        state, dep_files = self._setup_target(project_dir)
        _ = (project_dir / "dep.txt").write_text("MODIFIED content")
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )

    def test_changed_prompt_is_dirty(self, project_dir: Path) -> None:
        """A different resolved prompt marks the target dirty."""
        state, dep_files = self._setup_target(project_dir)
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="DIFFERENT prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )

    def test_changed_model_is_dirty(self, project_dir: Path) -> None:
        """A different model identifier marks the target dirty."""
        state, dep_files = self._setup_target(project_dir)
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v2",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )

    def test_changed_extra_params_is_dirty(self, project_dir: Path) -> None:
        """Changed extra parameters (recorded as {}) mark the target dirty."""
        state, dep_files = self._setup_target(project_dir)
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={"width": 512},
            state=state,
            project_dir=project_dir,
        )

    def test_never_built_target_is_dirty(self, project_dir: Path) -> None:
        """A target absent from the state is dirty even if its output exists."""
        _ = (project_dir / "out.txt").write_text("exists but never recorded")
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=[],
            extra_params={},
            state=BuildState(),
            project_dir=project_dir,
        )

    def test_new_dep_added_is_dirty(self, project_dir: Path) -> None:
        """Adding a dependency not present at record time marks the target dirty."""
        state, dep_files = self._setup_target(project_dir)
        new_dep = project_dir / "extra.txt"
        _ = new_dep.write_text("extra dep")
        dep_files.append(new_dep)
        assert is_target_dirty(
            "out.txt",
            resolved_prompt="prompt",
            model="model-v1",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )
class TestRecordAndDirtyRoundTrip:
    """Test that recording state then checking produces consistent results."""

    def test_record_then_check_not_dirty(self, project_dir: Path) -> None:
        """Checking with the exact recorded inputs reports the target clean."""
        dep = project_dir / "input.txt"
        _ = dep.write_text("data")
        output = project_dir / "result.md"
        _ = output.write_text("result")
        state = BuildState()
        dep_files = [dep]
        record_target_state(
            "result.md",
            resolved_prompt="do the thing",
            model="mistral-large-latest",
            dep_files=dep_files,
            extra_params={"width": 100},
            state=state,
            project_dir=project_dir,
        )
        # Same prompt, model, deps, and extra params -> must not be dirty.
        assert not is_target_dirty(
            "result.md",
            resolved_prompt="do the thing",
            model="mistral-large-latest",
            dep_files=dep_files,
            extra_params={"width": 100},
            state=state,
            project_dir=project_dir,
        )

    def test_state_survives_save_load_cycle(self, project_dir: Path) -> None:
        """A recorded target stays clean after persisting and reloading state."""
        dep = project_dir / "input.txt"
        _ = dep.write_text("data")
        output = project_dir / "result.md"
        _ = output.write_text("result")
        state = BuildState()
        dep_files = [dep]
        record_target_state(
            "result.md",
            resolved_prompt="do the thing",
            model="mistral-large-latest",
            dep_files=dep_files,
            extra_params={},
            state=state,
            project_dir=project_dir,
        )
        # Round-trip through disk, then re-check against the loaded state.
        save_state(state, project_dir)
        loaded_state = load_state(project_dir)
        assert not is_target_dirty(
            "result.md",
            resolved_prompt="do the thing",
            model="mistral-large-latest",
            dep_files=dep_files,
            extra_params={},
            state=loaded_state,
            project_dir=project_dir,
        )

14
uv.lock generated
View file

@ -75,6 +75,7 @@ dependencies = [
dev = [ dev = [
{ name = "basedpyright" }, { name = "basedpyright" },
{ name = "pytest" }, { name = "pytest" },
{ name = "pytest-asyncio" },
{ name = "ruff" }, { name = "ruff" },
] ]
@ -92,6 +93,7 @@ requires-dist = [
dev = [ dev = [
{ name = "basedpyright", specifier = ">=1.38.0" }, { name = "basedpyright", specifier = ">=1.38.0" },
{ name = "pytest", specifier = ">=9.0.2" }, { name = "pytest", specifier = ">=9.0.2" },
{ name = "pytest-asyncio", specifier = ">=0.25.0" },
{ name = "ruff", specifier = ">=0.15.1" }, { name = "ruff", specifier = ">=0.15.1" },
] ]
@ -557,6 +559,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
] ]
[[package]]
name = "pytest-asyncio"
version = "1.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pytest" },
]
sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" },
]
[[package]] [[package]]
name = "python-dateutil" name = "python-dateutil"
version = "2.9.0.post0" version = "2.9.0.post0"