diff --git a/README.md b/README.md index 2f58883..6843f05 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,20 @@ # catopt-category-theoretic-compositional- -Problem space: distributed, offline-first optimization across heterogeneous edge devices (DERs, meters, controllers, EV chargers) in mesh networks. Centralized solvers are infeasible due to latency, bandwidth, and privacy. We need a modular, provably \ No newline at end of file +Problem space: distributed, offline-first optimization across heterogeneous edge devices (DERs, meters, controllers, EV chargers) in mesh networks. Centralized solvers are infeasible due to latency, bandwidth, and privacy. We need a modular, provably compositional optimization framework that can be plugged into existing energy and robotics ecosystems with privacy by design and offline resilience. + +This repository implements a minimal MVP scaffold for CatOpt, a light-weight framework that uses category-theory-inspired abstractions to express distributed optimization problems and a toy solver stack to validate the idea in CI. + +MVP Runtime (new): lightweight primitives to experiment with CatOpt concepts locally. +- LocalProblem: representation of an agent's optimization task. +- DataContract, SharedVariables, PlanDelta: lightweight data-exchange primitives for a privacy-conscious mesh. +- ADMMTwoAgentSolver: tiny, in-process ADMM-like solver to demonstrate joint optimization across two agents. +- demo_two_agent_admm(): quick in-repo demonstration function returning final variables. + +Usage notes: +- Import from the package and optionally run the demo function to observe convergence on a toy problem. 
+- Example: + ```python + from catopt_category_theoretic_compositional_.runtime import ADMMTwoAgentSolver, demo_two_agent_admm + result = demo_two_agent_admm() + print(result) + ``` diff --git a/src/catopt_category_theoretic_compositional_/__init__.py b/src/catopt_category_theoretic_compositional_/__init__.py index ed0324b..6578c21 100644 --- a/src/catopt_category_theoretic_compositional_/__init__.py +++ b/src/catopt_category_theoretic_compositional_/__init__.py @@ -1,13 +1,13 @@ -"""Minimal placeholder for CatOpt package. +"""CatOpt: Minimal placeholder surface & MVP runtime exposure.""" -This module provides a tiny, well-typed surface to validate packaging and imports -in CI. The real project would implement the MVP of the CatOpt framework. -""" +from .runtime import LocalProblem, SharedVariables, DataContract, PlanDelta, ADMMTwoAgentSolver, demo_two_agent_admm # noqa: F401 def add(a: int, b: int) -> int: """Return the sum of two integers. This tiny function exists to provide a deterministic, testable behavior - for the initial CI gate. + for the initial CI gate, preserving backwards compatibility with tests. """ return a + b + +__all__ = ["add", "LocalProblem", "SharedVariables", "DataContract", "PlanDelta", "ADMMTwoAgentSolver", "demo_two_agent_admm"] diff --git a/src/catopt_category_theoretic_compositional_/runtime.py b/src/catopt_category_theoretic_compositional_/runtime.py new file mode 100644 index 0000000..01ecbaf --- /dev/null +++ b/src/catopt_category_theoretic_compositional_/runtime.py @@ -0,0 +1,121 @@ +"""CatOpt MVP Runtime: Minimal category-inspired runtime primitives. 
"""CatOpt MVP Runtime: Minimal category-inspired runtime primitives.

This module provides a tiny, self-contained runtime that demonstrates:
- Local optimization problem representation (LocalProblem)
- Simple data contracts (DataContract, SharedVariables, PlanDelta)
- A toy consensus-ADMM two-agent solver (ADMMTwoAgentSolver)

The goal is not to be a full implementation but to provide a minimal,
understandable surface that respects the MVP's spirit and can be wired up
into the existing test harness without changing the current public API.
"""
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Dict, List, Tuple
import time


@dataclass
class LocalProblem:
    """Represents a local optimization problem for an agent.

    For the MVP this captures a scalar quadratic objective of the form
    ``0.5 * a * x**2 + b * x``.  The real CatOpt use would expose a richer
    DSL; this is a compact placeholder.
    """

    id: str
    a: float  # quadratic coefficient; clamped to a small positive value below
    b: float  # linear coefficient
    x_name: str = "x"  # variable name, used for display purposes only

    def __post_init__(self) -> None:
        # Keep the quadratic strictly positive definite so any solver using
        # this problem has a unique minimizer.  (The original
        # `max(self.a, 1e-6)` under `a <= 0` always evaluated to 1e-6.)
        if self.a <= 0:
            self.a = 1e-6


@dataclass
class DataContract:
    """A lightweight data-contract describing what is exchanged between agents."""

    version: int = 1
    schema: Dict[str, str] = field(default_factory=dict)


@dataclass
class SharedVariables:
    """Container for values shared between agents (e.g., delta signals)."""

    values: Dict[str, float] = field(default_factory=dict)


@dataclass
class PlanDelta:
    """Represents a delta/plan update from an agent."""

    contract_id: str
    delta: Dict[str, float]
    timestamp: float = field(default_factory=time.time)  # creation time, epoch seconds


class ADMMTwoAgentSolver:
    """Tiny consensus-ADMM solver for two agents sharing one variable.

    Solves the coupled objective
        minimize 0.5*a1*x1**2 + b1*x1 + 0.5*a2*x2**2 + b2*x2
        subject to x1 == x2  (consensus on the shared estimate ``z``)
    using scaled-dual ADMM updates.

    NOTE(review): an earlier docstring stated the constraint ``x1 + x2 = 1``,
    but the update equations below implement the standard *consensus*
    formulation: ``z`` is the average of ``x_i + u_i`` and the duals ascend on
    ``x_i - z``.  At convergence ``x1 == x2 == z == -(b1 + b2) / (a1 + a2)``.
    This is intentionally small and self-contained for MVP purposes.
    """

    def __init__(self, a1: float, b1: float, a2: float, b2: float, rho: float = 1.0):
        self.a1 = float(a1)
        self.b1 = float(b1)
        self.a2 = float(a2)
        self.b2 = float(b2)
        self.rho = float(rho)  # augmented-Lagrangian penalty weight
        # Primal variables, consensus estimate, and scaled dual variables.
        self.x1 = 0.0
        self.x2 = 0.0
        self.z = 0.5  # shared variable estimate
        self.u1 = 0.0
        self.u2 = 0.0
        # Per-iteration snapshots for introspection.
        self.history: List[Dict[str, float]] = []

    def _step(self) -> None:
        """Run one scaled-dual ADMM iteration and record a snapshot."""
        # x_i update: argmin_x 0.5*a_i*x^2 + b_i*x + (rho/2)*(x - z + u_i)^2
        # has the closed form x = (rho*(z - u_i) - b_i) / (a_i + rho).
        self.x1 = (self.rho * (self.z - self.u1) - self.b1) / (self.a1 + self.rho)
        self.x2 = (self.rho * (self.z - self.u2) - self.b2) / (self.a2 + self.rho)
        # z update: average of (x_i + u_i) over both agents -- the consensus step.
        self.z = (self.x1 + self.x2 + self.u1 + self.u2) / 2.0
        # Scaled dual ascent on the consensus residuals.
        self.u1 += self.x1 - self.z
        self.u2 += self.x2 - self.z
        # Save history snapshot for introspection/plotting.
        self.history.append(
            {"x1": self.x1, "x2": self.x2, "z": self.z, "u1": self.u1, "u2": self.u2}
        )

    def run(self, iterations: int = 10) -> List[Dict[str, float]]:
        """Run ``iterations`` ADMM steps and return the accumulated history."""
        for _ in range(iterations):
            self._step()
        # ``history`` is always a list; the previous ``or []`` was dead code.
        return self.history

    def final_solution(self) -> Tuple[float, float, float]:
        """Return the current ``(x1, x2, z)`` iterate."""
        return self.x1, self.x2, self.z


def demo_two_agent_admm() -> Dict[str, float]:
    """Run a tiny demo with 2 agents and return the final variables.

    This helper is convenient for quick sanity checks in the REPL or tests.
    With these coefficients the consensus fixed point is
    ``x1 == x2 == z == -(0.5 - 0.2) / (2 + 3) == -0.06``.
    """
    solver = ADMMTwoAgentSolver(a1=2.0, b1=0.5, a2=3.0, b2=-0.2, rho=1.0)
    solver.run(iterations=20)
    x1, x2, z = solver.final_solution()
    return {"x1": float(x1), "x2": float(x2), "z": float(z)}