Skip to content

API Reference

IO

multidimensional_evaluation_engine.io.load_candidates

io/load_candidates.py: Load candidates from CSV.

load_candidates

load_candidates(
    csv_path: Path, factor_specs: list[FactorSpec]
) -> list[Candidate]

Load candidates from a CSV file.

Candidate factor values are typed using structural factor specifications.

Parameters:

Name Type Description Default
csv_path Path

Path to the CSV file.

required
factor_specs list[FactorSpec]

Structural factor definitions used to type CSV columns.

required

Returns:

Type Description
list[Candidate]

List of Candidate objects.

Raises:

Type Description
FileNotFoundError

If the CSV file does not exist.

ValueError

If required columns are missing, if unknown factor columns are present, or if a value cannot be coerced to the required type.

Source code in src/multidimensional_evaluation_engine/io/load_candidates.py
def load_candidates(
    csv_path: Path,
    factor_specs: list[FactorSpec],
) -> list[Candidate]:
    """Load candidates from a CSV file.

    Candidate factor values are typed using structural factor specifications.

    Args:
        csv_path: Path to the CSV file.
        factor_specs: Structural factor definitions used to type CSV columns.

    Returns:
        List of Candidate objects.

    Raises:
        FileNotFoundError: If the CSV file does not exist.
        ValueError: If required columns are missing, if unknown factor columns
            are present, or if a value cannot be coerced to the required type.
    """
    # Lazy %-style args avoid building the message when INFO is disabled.
    logger.info("Loading candidates from: %s", csv_path)

    if not csv_path.exists():
        raise FileNotFoundError(f"Candidate CSV file not found: {csv_path}")

    factor_specs_by_id = {spec.factor_id: spec for spec in factor_specs}
    candidates: list[Candidate] = []

    with csv_path.open("r", encoding="utf-8", newline="") as file:
        reader = csv.DictReader(file)

        if reader.fieldnames is None:
            raise ValueError("CSV file must include a header row.")

        # Headers are stripped here; row keys are stripped per-row below so
        # the two stay consistent.
        headers = [header.strip() for header in reader.fieldnames]

        missing = _REQUIRED_COLUMNS - set(headers)
        if missing:
            missing_str = ", ".join(sorted(missing))
            raise ValueError(f"CSV file is missing required columns: {missing_str}")

        # Any non-reserved column is treated as a factor column.
        factor_headers = [
            header for header in headers if header not in _RESERVED_COLUMNS
        ]

        unknown_headers = [
            header for header in factor_headers if header not in factor_specs_by_id
        ]
        if unknown_headers:
            unknown_str = ", ".join(sorted(unknown_headers))
            raise ValueError(
                f"CSV contains factor columns not defined in factor specs: {unknown_str}"
            )

        # start=2: row 1 is the header line, so data rows begin at line 2.
        for row_number, row in enumerate(reader, start=2):
            # Missing trailing cells arrive as None from DictReader; map them
            # to empty strings so coercion sees uniform text input.
            cleaned_row = {
                key.strip(): (value.strip() if value is not None else "")
                for key, value in row.items()
                if key is not None
            }

            factor_values: dict[str, FactorValue] = {}

            for factor_id in factor_headers:
                raw_text = cleaned_row.get(factor_id, "")
                spec = factor_specs_by_id[factor_id]
                typed_value = _coerce_candidate_value(
                    raw_text=raw_text,
                    factor_id=factor_id,
                    form=spec.form,
                    row_number=row_number,
                )
                factor_values[factor_id] = FactorValue(
                    factor_id=factor_id,
                    form=spec.form,
                    value=typed_value,
                )

            candidates.append(
                Candidate(
                    candidate_id=cleaned_row["candidate_id"],
                    candidate_name=cleaned_row["candidate_name"],
                    factor_values=factor_values,
                    notes=cleaned_row.get("notes", ""),
                )
            )

    logger.info("Loaded %d candidates.", len(candidates))
    return candidates

multidimensional_evaluation_engine.io.load_policy

io/load_policy.py: Load policy from TOML.

load_policy

load_policy(path: Path) -> Policy

Load a policy from a TOML file.

Parameters:

Name Type Description Default
path Path

Path to TOML policy file.

required

Returns:

Type Description
Policy

Policy instance.

Raises:

Type Description
FileNotFoundError

If the file does not exist.

TOMLDecodeError

If TOML parsing fails.

KeyError

If required policy sections are missing.

TypeError

If policy content has the wrong structural type.

ValueError

If policy content fails model validation.

Source code in src/multidimensional_evaluation_engine/io/load_policy.py
def load_policy(path: Path) -> Policy:
    """Load a policy from a TOML file.

    Args:
        path: Path to TOML policy file.

    Returns:
        Policy instance.

    Raises:
        FileNotFoundError: If the file does not exist.
        tomllib.TOMLDecodeError: If TOML parsing fails.
        KeyError: If required policy sections are missing.
        TypeError: If policy content has the wrong structural type.
        ValueError: If policy content fails model validation.
    """
    # Lazy %-style args avoid building the message when INFO is disabled.
    logger.info("Loading policy from: %s", path)

    if not path.exists():
        raise FileNotFoundError(f"Policy file not found: {path}")

    # tomllib requires a binary file handle.
    with path.open("rb") as file:
        data = tomllib.load(file)

    # Check top-level sections before handing off to model construction so
    # the caller gets one aggregated message instead of piecemeal KeyErrors.
    required = {
        "factor_specs",
        "constraint_rules",
        "score_rules",
        "interpretation",
    }
    missing = required - set(data)
    if missing:
        missing_str = ", ".join(sorted(missing))
        raise KeyError(f"Policy is missing required sections: {missing_str}")

    policy = Policy.from_dict(data)

    logger.info("Policy loaded successfully.")
    return policy

Evaluation

multidimensional_evaluation_engine.evaluation.evaluator

evaluation/evaluator.py: Evaluate candidates against policy.

evaluate_candidate

evaluate_candidate(
    candidate: Candidate, policy: Policy
) -> CandidateResult

Evaluate a candidate under a policy.

This bridge evaluator supports the newer typed policy model while consuming candidate.factor_values.

Current behavior:

- hard constraints are evaluated first
- scored findings are evaluated next
- weighted scores are summed into a single total
- the returned CandidateResult remains compatible with the existing score-dictionary shape

Returned score keys include:

- one entry per score rule, keyed by rule_id
- "total" for the aggregate weighted score
- "admissible" as 1.0 or 0.0
- an optional interpretation label as 1.0 for the highest satisfied label

Parameters:

Name Type Description Default
candidate Candidate

Candidate with factor values.

required
policy Policy

Policy defining factor specs, constraint rules, score rules, and aggregate interpretation thresholds.

required

Returns:

Type Description
CandidateResult

CandidateResult containing computed scores.

Raises:

Type Description
KeyError

If a required factor is missing from the candidate.

ValueError

If a factor value cannot be interpreted for its rule.

Source code in src/multidimensional_evaluation_engine/evaluation/evaluator.py
def evaluate_candidate(
    candidate: Candidate,
    policy: Policy,
) -> CandidateResult:
    """Evaluate a candidate under a policy.

    This bridge evaluator supports the newer typed policy model while
    consuming candidate.factor_values.

    Current behavior:
    - hard constraints are evaluated first
    - scored findings are evaluated next
    - weighted scores are summed into a single total
    - the returned CandidateResult remains compatible with the existing
      score-dictionary shape

    Returned score keys include:
    - one entry per score rule, keyed by rule_id
    - "total" for aggregate weighted score
    - "admissible" as 1.0 or 0.0
    - optional interpretation label as 1.0 for the highest satisfied label

    Args:
        candidate: Candidate with factor values.
        policy: Policy defining factor specs, constraint rules, score rules,
            and aggregate interpretation thresholds.

    Returns:
        CandidateResult containing computed scores.

    Raises:
        KeyError: If a required factor is missing from the candidate.
        ValueError: If a factor value cannot be interpreted for its rule.
    """
    scores: dict[str, float] = {}

    # Evaluate every hard constraint (no short-circuit, so a missing factor
    # on a later rule still raises); the candidate is admissible only when
    # all constraints pass.
    constraint_outcomes = [
        _evaluate_constraint(
            _get_candidate_value(candidate, rule.factor_id),
            rule.comparator,
            rule.threshold,
        )
        for rule in policy.constraint_rules
    ]
    admissible = all(constraint_outcomes)

    total = 0.0
    for score_rule in policy.score_rules:
        value = _get_candidate_value(candidate, score_rule.factor_id)
        raw_score = _evaluate_score_rule(value, score_rule)
        if raw_score is None:
            # Rule produced no score for this candidate; omit its key entirely.
            continue
        weighted = raw_score * score_rule.weight
        total += weighted
        scores[score_rule.rule_id] = round(weighted, 3)

    scores["total"] = round(total, 3)
    scores["admissible"] = 1.0 if admissible else 0.0

    # The unrounded total drives interpretation, matching the accumulation above.
    label = _interpret_total(total, policy.interpretation)
    if label:
        scores[f"interpretation:{label}"] = 1.0

    return CandidateResult(candidate=candidate, scores=scores)

Reporting

multidimensional_evaluation_engine.reporting.tables

reporting/tables.py: Simple text-table reporting for scored results.

format_results

format_results(results: list[CandidateResult]) -> str

Format evaluation results as a plain-text report.

Produces a deterministic, human-readable summary of scores for each candidate.

Parameters:

Name Type Description Default
results list[CandidateResult]

List of candidate evaluation results.

required

Returns:

Type Description
str

Formatted string suitable for console output or logging.

Source code in src/multidimensional_evaluation_engine/reporting/tables.py
def format_results(results: list[CandidateResult]) -> str:
    """Render evaluation results as a deterministic plain-text report.

    Each candidate gets a header line, one line per score (sorted by key for
    stable output), and an optional summary line of interpretation labels.

    Args:
        results: List of candidate evaluation results.

    Returns:
        Formatted string suitable for console output or logging.
    """
    prefix = "interpretation:"
    rendered: list[str] = []

    for result in results:
        who = result.candidate
        rendered.append(f"{who.candidate_id} | {who.candidate_name}")

        # Walk keys in sorted order once, routing interpretation keys to a
        # separate label list instead of the per-score lines. Sorting the
        # full keys also sorts the label suffixes, since the prefix is fixed.
        labels: list[str] = []
        for key in sorted(result.scores):
            if key.startswith(prefix):
                labels.append(key[len(prefix):])
            else:
                rendered.append(f"  - {key}: {result.scores[key]:.2f}")

        if labels:
            rendered.append(f"  * interpretation: {', '.join(labels)}")

        rendered.append("")

    # The trailing blank separator is stripped from the final report.
    return "\n".join(rendered).rstrip()

Domain Models

multidimensional_evaluation_engine.domain.candidates

domain/candidates.py: Domain models for evaluation inputs.

Candidate dataclass

A generic candidate for multidimensional evaluation.

A candidate represents an entity being evaluated under a policy. Domain-specific inputs are carried as raw factor values keyed by factor_id.

Attributes:

Name Type Description
candidate_id str

Stable identifier for the candidate.

candidate_name str

Human-readable name.

factor_values dict[str, FactorValue]

Mapping of factor_id to its recorded FactorValue.

notes str

Optional free-text annotation.

metadata dict[str, str]

Optional auxiliary key-value data not used in scoring.

Source code in src/multidimensional_evaluation_engine/domain/candidates.py
@dataclass(frozen=True)
class Candidate:
    """A generic candidate for multidimensional evaluation.

    A candidate represents an entity being evaluated under a policy.
    Domain-specific inputs are carried as typed factor values keyed by factor_id.

    Attributes:
        candidate_id: Stable identifier for the candidate.
        candidate_name: Human-readable name.
        factor_values: Mapping of factor_id to its recorded FactorValue.
        notes: Optional free-text annotation.
        metadata: Optional auxiliary key-value data not used in scoring.
    """

    candidate_id: str
    candidate_name: str
    # Mutable default via factory; the frozen dataclass prevents field
    # reassignment but not mutation of dict contents — treat as immutable.
    factor_values: dict[str, FactorValue] = field(
        default_factory=_default_factor_values
    )
    notes: str = ""
    metadata: dict[str, str] = field(default_factory=_default_metadata)

multidimensional_evaluation_engine.domain.factors

domain/factors.py: Core factor and recorded-value models.

EvidenceRef dataclass

Reference to supporting source material.

Source code in src/multidimensional_evaluation_engine/domain/factors.py
@dataclass(frozen=True)
class EvidenceRef:
    """Reference to supporting source material.

    Attributes:
        source_label: Short label identifying the source.
        locator: Optional position within the source (format not constrained here).
        note: Optional free-text remark about the evidence.
    """

    source_label: str
    locator: str = ""
    note: str = ""

FactorForm

Bases: StrEnum

Structural form of a factor value.

Source code in src/multidimensional_evaluation_engine/domain/factors.py
class FactorForm(StrEnum):
    """Structural form of a factor value."""

    BINARY = "binary"  # true/false value
    NUMERIC = "numeric"  # float value
    CATEGORICAL = "categorical"  # one of a set of string labels
    # Presumably a value backed by EvidenceRef entries — confirm usage.
    EVIDENCE = "evidence"

FactorSpec dataclass

Structural definition of a factor.

Source code in src/multidimensional_evaluation_engine/domain/factors.py
@dataclass(frozen=True)
class FactorSpec:
    """Structural definition of a factor.

    Attributes:
        factor_id: Stable identifier referenced by rules and candidate values.
        label: Human-readable name for the factor.
        form: Structural form (binary, numeric, categorical, or evidence).
        description: Optional free-text description.
        unit: Optional unit label for numeric factors.
        allowed_values: Optional closed set of labels for categorical factors.
    """

    factor_id: str
    label: str
    form: FactorForm
    description: str = ""
    unit: str = ""
    allowed_values: tuple[str, ...] = ()

FactorValue dataclass

Neutral recorded value for one factor.

Source code in src/multidimensional_evaluation_engine/domain/factors.py
@dataclass(frozen=True)
class FactorValue:
    """Neutral recorded value for one factor.

    Attributes:
        factor_id: Identifier of the factor this value belongs to.
        form: Structural form of the value.
        value: The recorded primitive (bool, float, or str per the form).
        evidence: Optional supporting references.
        note: Optional free-text remark.
    """

    factor_id: str
    form: FactorForm
    value: bool | float | str
    evidence: tuple[EvidenceRef, ...] = ()
    note: str = ""

multidimensional_evaluation_engine.domain.results

domain/results.py: Result models for candidate evaluation.

CandidateResult dataclass

Result of evaluating a candidate under a policy.

Contains computed scores and optional derived representations for interpretation or visualization.

Attributes:

Name Type Description
candidate Candidate

The evaluated candidate.

scores dict[str, float]

Mapping of label to numeric score.

levels dict[str, str]

Optional mapping of label to qualitative level (e.g., "low", "medium", "high").

visual_levels dict[str, str]

Optional mapping of label to presentation-oriented categories (e.g., for UI color or grouping).

Source code in src/multidimensional_evaluation_engine/domain/results.py
@dataclass(frozen=True)
class CandidateResult:
    """Result of evaluating a candidate under a policy.

    Contains computed scores and optional derived representations
    for interpretation or visualization.

    Attributes:
        candidate: The evaluated candidate.
        scores: Mapping of label to numeric score.
        levels: Optional mapping of label to qualitative level
            (e.g., "low", "medium", "high").
        visual_levels: Optional mapping of label to presentation-oriented
            categories (e.g., for UI color or grouping).
    """

    candidate: Candidate
    scores: dict[str, float]
    # Optional mappings default to empty via factories; not all evaluators
    # populate them.
    levels: dict[str, str] = field(default_factory=_default_levels)
    visual_levels: dict[str, str] = field(default_factory=_default_visual_levels)

Policy

multidimensional_evaluation_engine.policy.policy

policy/policy.py: Policy models for multidimensional evaluation.

Comparator

Bases: StrEnum

Comparison operator for threshold rules.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
class Comparator(StrEnum):
    """Comparison operator for threshold rules."""

    LT = "lt"  # less than
    LE = "le"  # less than or equal
    EQ = "eq"  # equal
    GE = "ge"  # greater than or equal
    GT = "gt"  # greater than
    IN = "in"  # membership test (threshold is typically a tuple of strings)

ConstraintRule dataclass

Rule for evaluating a hard constraint.

Attributes:

Name Type Description
rule_id str

Stable identifier for the rule.

factor_id str

Factor to which this rule applies.

comparator Comparator

Comparison operator.

threshold bool | float | str | tuple[str, ...]

Comparison target. May be bool, number, string, or tuple.

message str

Human-readable result text.

rationale str

Optional explanation of why the rule exists.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
@dataclass(frozen=True)
class ConstraintRule:
    """Rule for evaluating a hard constraint.

    Attributes:
        rule_id: Stable identifier for the rule.
        factor_id: Factor to which this rule applies.
        comparator: Comparison operator.
        threshold: Comparison target. May be bool, number, string, or tuple.
        message: Human-readable result text.
        rationale: Optional explanation of why the rule exists.
    """

    rule_id: str
    factor_id: str
    comparator: Comparator
    # Tuple form is presumably intended for Comparator.IN — confirm in evaluator.
    threshold: bool | float | str | tuple[str, ...]
    message: str
    rationale: str = ""

EvaluationRole

Bases: StrEnum

Role a factor plays in evaluation.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
class EvaluationRole(StrEnum):
    """Role a factor plays in evaluation."""

    HARD_CONSTRAINT = "hard_constraint"  # pass/fail gate on admissibility
    SCORED_FINDING = "scored_finding"  # contributes a weighted score

NumericBand dataclass

Numeric range mapped to a score and optional qualitative band.

Attributes:

Name Type Description
min_inclusive float

Inclusive lower bound.

max_inclusive float

Inclusive upper bound.

score float

Numeric score assigned to values in this band.

band str

Optional qualitative label such as 'low' or 'strong'.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
@dataclass(frozen=True)
class NumericBand:
    """Numeric range mapped to a score and optional qualitative band.

    Attributes:
        min_inclusive: Inclusive lower bound.
        max_inclusive: Inclusive upper bound.
        score: Numeric score assigned to values in this band.
        band: Optional qualitative label such as 'low' or 'strong'.
    """

    # Both bounds are inclusive; overlap between bands is not validated here.
    min_inclusive: float
    max_inclusive: float
    score: float
    band: str = ""

Policy dataclass

Configuration for multidimensional evaluation.

A policy defines:

- which factors are recognized
- which factors are hard constraints versus scored findings
- how raw factor values are mapped to pass/fail findings or scores
- how aggregate numeric results are interpreted

Note

factor_specs, constraint_rules, and score_rules are stored as lists for construction convenience. Callers should treat them as logically immutable; the frozen dataclass prevents field reassignment but does not prevent mutation of list contents.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
@dataclass(frozen=True)
class Policy:
    """Configuration for multidimensional evaluation.

    A policy defines:
    - which factors are recognized
    - which factors are hard constraints versus scored findings
    - how raw factor values are mapped to pass/fail findings or scores
    - how aggregate numeric results are interpreted

    Note:
        factor_specs, constraint_rules, and score_rules are stored as lists
        for construction convenience. Callers should treat them as logically
        immutable; the frozen dataclass prevents field reassignment but does
        not prevent mutation of list contents.
    """

    factor_specs: list[FactorSpec] = field(default_factory=_default_factor_specs)
    constraint_rules: list[ConstraintRule] = field(
        default_factory=_default_constraint_rules
    )
    score_rules: list[ScoreRule] = field(default_factory=_default_score_rules)
    # Mapping of interpretation label -> minimum total score threshold.
    interpretation: dict[str, float] = field(default_factory=_default_interpretation)
    metadata: dict[str, str] = field(default_factory=_default_metadata)

    def __post_init__(self) -> None:
        """Validate cross-references between factor specs and rules."""
        # Every rule must reference a factor declared in factor_specs;
        # fail fast at construction time rather than during evaluation.
        known_ids = {spec.factor_id for spec in self.factor_specs}

        for rule in self.constraint_rules:
            if rule.factor_id not in known_ids:
                raise ValueError(
                    f"ConstraintRule {rule.rule_id!r} references unknown "
                    f"factor_id {rule.factor_id!r}."
                )

        for rule in self.score_rules:
            if rule.factor_id not in known_ids:
                raise ValueError(
                    f"ScoreRule {rule.rule_id!r} references unknown "
                    f"factor_id {rule.factor_id!r}."
                )

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "Policy":
        """Create a Policy from a dictionary.

        Args:
            data: Parsed policy configuration, typically from TOML.

        Returns:
            Policy instance with typed factor and rule objects.
        """
        # Normalize the raw sections into lists of mappings up front so
        # structural TypeErrors surface before any model construction.
        raw_factor_specs = _as_list_of_maps(data.get("factor_specs", []))
        raw_constraint_rules = _as_list_of_maps(data.get("constraint_rules", []))
        raw_score_rules = _as_list_of_maps(data.get("score_rules", []))

        factor_specs = [
            FactorSpec(
                factor_id=str(item["factor_id"]),
                label=str(item["label"]),
                form=FactorForm(str(item["form"])),
                description=str(item.get("description", "")),
                unit=str(item.get("unit", "")),
                allowed_values=tuple(str(v) for v in item.get("allowed_values", ())),
            )
            for item in raw_factor_specs
        ]

        constraint_rules = [
            ConstraintRule(
                rule_id=str(item["rule_id"]),
                factor_id=str(item["factor_id"]),
                comparator=Comparator(str(item["comparator"])),
                threshold=_coerce_threshold(item["threshold"]),
                message=str(item["message"]),
                rationale=str(item.get("rationale", "")),
            )
            for item in raw_constraint_rules
        ]

        score_rules = [
            ScoreRule(
                rule_id=str(item["rule_id"]),
                factor_id=str(item["factor_id"]),
                weight=float(item["weight"]),
                categorical_scores=_as_string_float_map(
                    item.get("categorical_scores", {})
                ),
                numeric_bands=tuple(
                    NumericBand(
                        min_inclusive=float(band["min_inclusive"]),
                        max_inclusive=float(band["max_inclusive"]),
                        score=float(band["score"]),
                        # NOTE(review): unlike other string fields, band is not
                        # wrapped in str() — confirm intended.
                        band=band.get("band", ""),
                    )
                    for band in item.get("numeric_bands", [])
                ),
                binary_scores=_as_bool_float_map(item.get("binary_scores", {})),
                rationale=str(item.get("rationale", "")),
            )
            for item in raw_score_rules
        ]

        # cls(...) triggers __post_init__, which validates cross-references.
        return cls(
            factor_specs=factor_specs,
            constraint_rules=constraint_rules,
            score_rules=score_rules,
            interpretation=_as_string_float_map(data.get("interpretation", {})),
            metadata=_as_string_string_map(data.get("metadata", {})),
        )

__post_init__

__post_init__() -> None

Validate cross-references between factor specs and rules.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
def __post_init__(self) -> None:
    """Reject any rule that references a factor absent from factor_specs."""
    declared = {spec.factor_id for spec in self.factor_specs}

    # Constraint rules are checked first; construction fails on the first
    # offending rule in declaration order.
    for constraint in self.constraint_rules:
        if constraint.factor_id not in declared:
            raise ValueError(
                f"ConstraintRule {constraint.rule_id!r} references unknown "
                f"factor_id {constraint.factor_id!r}."
            )

    for score_rule in self.score_rules:
        if score_rule.factor_id not in declared:
            raise ValueError(
                f"ScoreRule {score_rule.rule_id!r} references unknown "
                f"factor_id {score_rule.factor_id!r}."
            )

from_dict classmethod

from_dict(data: dict[str, Any]) -> Policy

Create a Policy from a dictionary.

Parameters:

Name Type Description Default
data dict[str, Any]

Parsed policy configuration, typically from TOML.

required

Returns:

Type Description
Policy

Policy instance with typed factor and rule objects.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "Policy":
    """Create a Policy from a dictionary.

    Args:
        data: Parsed policy configuration, typically from TOML.

    Returns:
        Policy instance with typed factor and rule objects.
    """
    # Normalize raw sections first so structural errors surface before any
    # typed model construction begins.
    raw_specs = _as_list_of_maps(data.get("factor_specs", []))
    raw_constraints = _as_list_of_maps(data.get("constraint_rules", []))
    raw_scores = _as_list_of_maps(data.get("score_rules", []))

    def build_spec(item: dict[str, Any]) -> FactorSpec:
        # One factor definition; optional fields fall back to empty values.
        return FactorSpec(
            factor_id=str(item["factor_id"]),
            label=str(item["label"]),
            form=FactorForm(str(item["form"])),
            description=str(item.get("description", "")),
            unit=str(item.get("unit", "")),
            allowed_values=tuple(str(v) for v in item.get("allowed_values", ())),
        )

    def build_constraint(item: dict[str, Any]) -> ConstraintRule:
        return ConstraintRule(
            rule_id=str(item["rule_id"]),
            factor_id=str(item["factor_id"]),
            comparator=Comparator(str(item["comparator"])),
            threshold=_coerce_threshold(item["threshold"]),
            message=str(item["message"]),
            rationale=str(item.get("rationale", "")),
        )

    def build_band(band: dict[str, Any]) -> NumericBand:
        return NumericBand(
            min_inclusive=float(band["min_inclusive"]),
            max_inclusive=float(band["max_inclusive"]),
            score=float(band["score"]),
            band=band.get("band", ""),
        )

    def build_score_rule(item: dict[str, Any]) -> ScoreRule:
        return ScoreRule(
            rule_id=str(item["rule_id"]),
            factor_id=str(item["factor_id"]),
            weight=float(item["weight"]),
            categorical_scores=_as_string_float_map(
                item.get("categorical_scores", {})
            ),
            numeric_bands=tuple(
                build_band(band) for band in item.get("numeric_bands", [])
            ),
            binary_scores=_as_bool_float_map(item.get("binary_scores", {})),
            rationale=str(item.get("rationale", "")),
        )

    # Arguments are built left to right (specs, constraints, scores, then
    # interpretation and metadata); cls(...) runs __post_init__ validation.
    return cls(
        factor_specs=[build_spec(item) for item in raw_specs],
        constraint_rules=[build_constraint(item) for item in raw_constraints],
        score_rules=[build_score_rule(item) for item in raw_scores],
        interpretation=_as_string_float_map(data.get("interpretation", {})),
        metadata=_as_string_string_map(data.get("metadata", {})),
    )

ScoreRule dataclass

Rule for converting a factor value into a score.

Exactly one mapping style must be used for a given rule:

- categorical_scores for categorical factors
- numeric_bands for numeric factors
- binary_scores for binary factors

Raises:

Type Description
ValueError

If not exactly one mapping style is populated.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
@dataclass(frozen=True)
class ScoreRule:
    """Rule for converting a factor value into a score.

    Exactly one mapping style must be used for a given rule:
    - categorical_scores for categorical factors
    - numeric_bands for numeric factors
    - binary_scores for binary factors

    Attributes:
        rule_id: Stable identifier for the rule.
        factor_id: Factor to which this rule applies.
        weight: Multiplier applied to the mapped score.
        categorical_scores: Mapping of category label to score.
        numeric_bands: Numeric ranges mapped to scores.
        binary_scores: Mapping of boolean value to score.
        rationale: Optional explanation of why the rule exists.

    Raises:
        ValueError: If not exactly one mapping style is populated.
    """

    rule_id: str
    factor_id: str
    weight: float
    categorical_scores: dict[str, float] = field(
        default_factory=_default_string_float_map
    )

    numeric_bands: tuple[NumericBand, ...] = ()
    binary_scores: dict[bool, float] = field(default_factory=_default_bool_float_map)
    rationale: str = ""

    def __post_init__(self) -> None:
        """Validate that exactly one mapping style is populated."""
        # Count non-empty mapping styles; truthiness treats empty dicts and
        # the empty tuple as unpopulated.
        active = sum(
            (
                bool(self.categorical_scores),
                bool(self.numeric_bands),
                bool(self.binary_scores),
            )
        )
        if active != 1:
            raise ValueError(
                f"ScoreRule {self.rule_id!r} must specify exactly one mapping style "
                f"(categorical_scores, numeric_bands, or binary_scores); "
                f"{active} were populated."
            )

__post_init__

__post_init__() -> None

Validate that exactly one mapping style is populated.

Source code in src/multidimensional_evaluation_engine/policy/policy.py
def __post_init__(self) -> None:
    """Validate that exactly one mapping style is populated."""
    # Empty dicts and the empty tuple are falsy, so truthiness tells us
    # which mapping styles carry entries.
    styles = (self.categorical_scores, self.numeric_bands, self.binary_scores)
    active = sum(1 for style in styles if style)
    if active != 1:
        raise ValueError(
            f"ScoreRule {self.rule_id!r} must specify exactly one mapping style "
            f"(categorical_scores, numeric_bands, or binary_scores); "
            f"{active} were populated."
        )