build(agent): new-agents-3#dd492b iteration
This commit is contained in:
parent
88805d1286
commit
132f7d0b29
41
README.md
41
README.md
|
|
@ -1,33 +1,16 @@
|
|||
# CatOpt-Grid MVP
|
||||
# CatOpt-Grid
|
||||
|
||||
A production-friendly MVP for a category-theoretic, cross-domain distributed optimization
|
||||
framework. This repository implements the core primitives and a tiny ADMM-lite solver to
|
||||
help validate the architecture and provide a concrete starting point for adapters and cross-domain
|
||||
integration.
|
||||
CatOpt-Grid is a modular, open-source framework that expresses distributed optimization problems across heterogeneous edge devices in a category-theory-inspired formalism. This repository contains a minimal, production-ready skeleton to bootstrap a Cross-Domain, Privacy-Preserving Distributed Edge Mesh MVP.
|
||||
|
||||
Key components
|
||||
- LocalProblem: per-asset optimization task with a convex quadratic objective and bound constraints.
|
||||
- SharedVariable: consensus variable used by all agents in the ADMM-like loop.
|
||||
- ADMMLiteSolver: lightweight solver implementing x-update, z-update, and dual variable updates with bound projection.
|
||||
What you’ll find here
|
||||
- Core primitives: LocalProblem (Objects), SharedVariables/DualVariables (Morphisms), adapters (Functors)
|
||||
- Lightweight ADMM-like solver (ADMM-lite) designed for delta-sync and offline-first operation
|
||||
- A tiny DSL scaffold for cross-domain adapters and a registry for future GoC (Graph of Contracts)
|
||||
- Minimal, testable story with two toy adapters and a couple of unit tests
|
||||
- Packaging scaffolding for standard Python packaging via pyproject.toml
|
||||
|
||||
Usage
|
||||
- Install dependencies and run tests with the provided test.sh.
|
||||
- This MVP focuses on correctness and stability for the ADMM-lite loop; cross-domain adapters and governance
|
||||
layers can be added in future iterations.
|
||||
Contributing
|
||||
- This is a stepwise MVP: start with the core solver and add adapters and governance in future iterations.
|
||||
- Tests live under tests/. Run with ./test.sh after dependencies are in place.
|
||||
|
||||
This repository is a stepping stone toward the CatOpt-Grid architecture described in AGENTS.md.
|
||||
|
||||
Architecture scaffolding: Bridge and Adapters
|
||||
- catopt_grid.bridge: lightweight interoperability layer with IRObject/IRMorphism and a tiny GraphOfContracts registry to version adapters and data schemas.
|
||||
- catopt_grid.adapters: starter adapters (rover_planner, habitat_module) that illustrate mapping local problems to the canonical IR and seed cross-domain interoperability.
|
||||
- This scaffolding is intentionally minimal and designed to evolve into a production-grade interop surface without altering core solver behavior.
|
||||
|
||||
Interop helper
|
||||
- lp_to_ir(lp): Convert a local problem instance to a vendor-agnostic IRObject for cross-domain exchange. Accepts various LocalProblem shapes (core.LocalProblem, admm_lite.LocalProblem) and returns an IRObject carrying dimension/id and lightweight metadata.
|
||||
|
||||
Roadmap
|
||||
- Bridge and adapters: evolve to a production-ready interoperability surface with a robust Graph-of-Contracts, versioned schemas, and recoverable delta-sync semantics.
|
||||
- Governance: implement per-message privacy budgets, audit logs, and a DID-based identity layer for secure messaging.
|
||||
- Cross-domain MVP: extend with more adapters (energy, water, mobility, robotics) and a reference SDK with Python/C++ bindings; support codegen or bindings for edge devices (Rust/C).
|
||||
- Global constraints: add a Limits/Colimits layer to enforce fleet policies without re-deriving global models; deterministic reconciliation on reconnects.
|
||||
- Evaluation: formal convergence guarantees for broader convex/weakly convex classes; HIL validation and privacy budget accounting.
|
||||
See the tests for usage examples and expected behavior.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,9 @@
|
|||
"""Toy Habitat Module Adapter (placeholder for MVP wiring)."""
|
||||
|
||||
class HabitatModuleAdapter:
    """Placeholder adapter for the habitat-module domain.

    Intended to translate habitat-domain data into a LocalProblem once the
    MVP wiring lands; for now only the adapter identity is implemented.
    """

    def __init__(self, adapter_id: str = "habitat_module"):
        # Identity used when registering this adapter (e.g. in a future registry).
        self.adapter_id = adapter_id

    def to_local_problem(self, data):
        # Mapping from habitat domain data to a LocalProblem is not implemented yet.
        raise NotImplementedError
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
"""Toy Rover Planner Adapter (placeholder for MVP wiring)."""
|
||||
|
||||
class RoverPlannerAdapter:
    """Placeholder adapter for the rover-planning domain.

    Will eventually convert rover planning data into a LocalProblem; currently
    it only carries an adapter identity for registry wiring.
    """

    def __init__(self, adapter_id: str = "rover_planner"):
        # Identity used when registering this adapter (e.g. in a future registry).
        self.adapter_id = adapter_id

    def to_local_problem(self, data):
        # Mapping from rover planning domain data to a LocalProblem is not implemented yet.
        raise NotImplementedError
|
||||
|
|
@ -1,7 +1,8 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, List, Optional
|
||||
from dataclasses import dataclass
|
||||
import numpy as _np
|
||||
|
||||
|
||||
|
||||
|
|
@ -24,17 +25,68 @@ class SharedVariable:
|
|||
version: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class LocalProblem:
|
||||
id: str
|
||||
dimension: int
|
||||
# Optional gradient function for the local objective: grad(x) -> list of length dimension
|
||||
objective_grad: Optional[Callable[[List[float]], List[float]]] = None
|
||||
# Optional per-asset offset that the gradient may push towards
|
||||
target: Optional[List[float]] = None
|
||||
data: Optional[dict] = None
|
||||
"""Flexible local optimization problem descriptor.
|
||||
|
||||
def __post_init__(self):
|
||||
Accepts both the legacy style used in tests (id, domain, n, Q, c)
|
||||
and a more generic style (dimension, objective_grad, etc.). It
|
||||
exposes attributes used by the solver:
|
||||
- dimension: int
|
||||
- Q: ndarray of shape (dimension, dimension)
|
||||
- c: ndarray of length dimension
|
||||
- objective_grad: callable f(x) -> gradient, length-d vector
|
||||
If objective_grad is not provided, a quadratic-gradient is derived from Q and c.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
id: str,
|
||||
domain: Optional[str] = None,
|
||||
n: Optional[int] = None,
|
||||
Q: Optional[List[List[float]]] = None,
|
||||
c: Optional[List[float]] = None,
|
||||
dimension: Optional[int] = None,
|
||||
objective_grad: Optional[Callable[[List[float]], List[float]]] = None,
|
||||
target: Optional[List[float]] = None,
|
||||
data: Optional[dict] = None,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
# Backwards compatibility: support both n/dimension and explicit dimension
|
||||
if n is None and dimension is None:
|
||||
raise ValueError("LocalProblem requires 'n' or 'dimension' to specify size")
|
||||
self.dimension = int(n if n is not None else dimension)
|
||||
|
||||
# Domain is optional; store for compatibility if provided
|
||||
self.id = id
|
||||
self.domain = domain
|
||||
|
||||
# Build Q and c from explicit kwargs
|
||||
if Q is not None:
|
||||
self.Q = _np.asarray(Q).reshape((self.dimension, self.dimension))
|
||||
else:
|
||||
# Default to zero quadratic term if not provided
|
||||
self.Q = _np.zeros((self.dimension, self.dimension))
|
||||
|
||||
if c is not None:
|
||||
self.c = _np.asarray(c).reshape((self.dimension,))
|
||||
else:
|
||||
self.c = _np.zeros((self.dimension,))
|
||||
|
||||
# User-provided gradient, if any
|
||||
self.objective_grad = objective_grad
|
||||
|
||||
if self.objective_grad is None:
|
||||
# Default gradient for quadratic objective: grad = Qx + c
|
||||
def _default_grad(x: List[float]) -> List[float]:
|
||||
x_arr = _np.asarray(x).reshape((self.dimension,))
|
||||
g = self.Q.dot(x_arr) + self.c
|
||||
return g.tolist()
|
||||
self.objective_grad = _default_grad
|
||||
|
||||
self.target = target
|
||||
self.data = data
|
||||
|
||||
# Basic validation to help catch obvious misuse
|
||||
if self.dimension <= 0:
|
||||
raise ValueError("dimension must be positive")
|
||||
if self.target is not None and len(self.target) != self.dimension:
|
||||
|
|
|
|||
|
|
@ -1,11 +1,18 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import List, Dict
|
||||
from typing import List, Dict, Optional
|
||||
import numpy as _np
|
||||
|
||||
from .core import LocalProblem
|
||||
|
||||
|
||||
def admm_lite(problems: List[LocalProblem], rho: float = 1.0, max_iter: int = 50) -> Dict:
|
||||
def admm_lite(
|
||||
problems: List[LocalProblem],
|
||||
rho: float = 1.0,
|
||||
max_iter: int = 50,
|
||||
max_iters: Optional[int] = None,
|
||||
tol: float = 1e-4,
|
||||
) -> 'AdmmLiteResult':
|
||||
"""
|
||||
A minimal ADMM-lite solver across a set of LocalProblem instances.
|
||||
|
||||
|
|
@ -18,8 +25,12 @@ def admm_lite(problems: List[LocalProblem], rho: float = 1.0, max_iter: int = 50
|
|||
The function returns a dict containing the final local variables and the consensus.
|
||||
"""
|
||||
|
||||
# Backwards/forwards-compatible handling for test harness that may pass max_iters
|
||||
if max_iters is not None:
|
||||
max_iter = int(max_iters)
|
||||
|
||||
if len(problems) == 0:
|
||||
return {"X": [], "Z": None, "iterations": 0}
|
||||
return AdmmLiteResult([], [], [])
|
||||
|
||||
dims = [p.dimension for p in problems]
|
||||
if not all(d == dims[0] for d in dims):
|
||||
|
|
@ -35,16 +46,48 @@ def admm_lite(problems: List[LocalProblem], rho: float = 1.0, max_iter: int = 50
|
|||
return [0.0 for _ in range(dim)]
|
||||
return p.objective_grad(x)
|
||||
|
||||
history: List[List[float]] = []
|
||||
# Initialize dual variables for ADMM (u_i per problem)
|
||||
U: List[List[float]] = [[0.0 for _ in range(dim)] for _ in problems]
|
||||
I = _np.eye(dim)
|
||||
for _ in range(max_iter):
|
||||
# Local update (proximal-like step towards consensus Z)
|
||||
# Local update via closed-form ADMM update: x_i = (Q_i + rho I)^{-1} (rho (z - u_i) - c_i)
|
||||
Z_arr = _np.asarray(Z)
|
||||
for i, p in enumerate(problems):
|
||||
g = _grad(p, X[i])
|
||||
# X[i] = X[i] - (1/rho) * g - (1/rho) * (X[i] - Z)
|
||||
for d in range(dim):
|
||||
X[i][d] = X[i][d] - (1.0 / max(1e-8, rho)) * g[d] - (1.0 / max(1e-8, rho)) * (X[i][d] - Z[d])
|
||||
|
||||
M = p.Q + rho * I
|
||||
rhs = rho * (Z_arr - _np.asarray(U[i])) - p.c
|
||||
xi = _np.linalg.solve(M, rhs)
|
||||
X[i] = xi.tolist()
|
||||
# Global consensus update (element-wise average)
|
||||
for d in range(dim):
|
||||
Z[d] = sum(X[i][d] for i in range(len(X))) / len(X)
|
||||
# Dual update
|
||||
for i in range(len(problems)):
|
||||
for d in range(dim):
|
||||
U[i][d] = U[i][d] + X[i][d] - Z[d]
|
||||
# record history for debugging/verification
|
||||
history.append(Z.copy())
|
||||
return AdmmLiteResult(X, Z, history)
|
||||
|
||||
return {"X": X, "Z": Z, "iterations": max_iter}
|
||||
|
||||
class AdmmLiteResult:
    """Lightweight container supporting both dict-like access and tuple unpacking.

    - res["X"] returns the local variables X
    - res["Z"] returns the consensus Z
    - res["history"] returns the per-iteration consensus snapshots
    - res["iterations"] returns the number of recorded iterations (kept for
      compatibility with the previous plain-dict return value)
    - Iterating over res yields (Z, history) to support `z, history = res` usage
    """

    def __init__(self, X, Z, history):
        self.X = X  # per-problem local variables
        self.Z = Z  # consensus variable
        self.history = history  # consensus value recorded after each iteration

    def __getitem__(self, key):
        # Dict-style access, backward-compatible with the old dict return shape.
        if key == "X":
            return self.X
        if key == "Z":
            return self.Z
        if key == "history":
            return self.history
        if key == "iterations":
            # One history entry is appended per completed iteration.
            return len(self.history)
        raise KeyError(key)

    def __iter__(self):
        # Tuple-unpacking support: z, history = admm_lite(...)
        yield self.Z
        yield self.history
|
||||
|
|
|
|||
|
|
@ -4,11 +4,10 @@ build-backend = "setuptools.build_meta"
|
|||
|
||||
[project]
|
||||
name = "catopt-grid"
|
||||
version = "0.0.1"
|
||||
description = "Category-theoretic compositional optimizer MVP for cross-domain distributed optimization"
|
||||
version = "0.1.0"
|
||||
description = "Category-Theoretic Compositional Optimizer for Cross-Domain, Privacy-Preserving Distributed Edge Meshes (MVP)"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.8"
|
||||
dependencies = ["numpy"]
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
where = ["."]
|
||||
where = ["src"]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,12 @@
|
|||
"""CatOpt-Grid core package"""
|
||||
|
||||
from .core import LocalProblem, SharedVariable, DualVariable, PlanDelta, PrivacyBudget, AuditLog
|
||||
|
||||
__all__ = [
|
||||
"LocalProblem",
|
||||
"SharedVariable",
|
||||
"DualVariable",
|
||||
"PlanDelta",
|
||||
"PrivacyBudget",
|
||||
"AuditLog",
|
||||
]
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
@dataclass
class LocalProblem:
    """Per-asset quadratic optimization task: 0.5 x^T Q x + c^T x over R^n."""

    id: str
    domain: str
    n: int  # dimension of decision variable x
    Q: List[List[float]]  # positive-definite matrix (n x n)
    c: List[float]  # linear term (length n)
    A: Optional[List[List[float]]] = None  # Optional linear constraints Ax <= b (not used in solver core yet)
    b: Optional[List[float]] = None

    def __post_init__(self):
        # Shape sanity checks so mismatched data fails fast at construction time.
        q_is_square = len(self.Q) == self.n and all(len(row) == self.n for row in self.Q)
        if not q_is_square:
            raise ValueError("Q must be an n x n matrix matching problem dimension n")
        if len(self.c) != self.n:
            raise ValueError("c must be of length n")
|
||||
|
||||
|
||||
@dataclass
class SharedVariable:
    # Consensus vector z shared by all agents in the ADMM-lite loop.
    value: List[float]
|
||||
|
||||
|
||||
@dataclass
class DualVariable:
    # Per-agent dual vector u_i; the solver updates it as u_i += x_i - z
    # (scaled-dual ADMM form).
    value: List[float]
|
||||
|
||||
|
||||
@dataclass
class PlanDelta:
    # Incremental plan update payload (delta-sync). Nothing in this module
    # produces or consumes one yet; it is a data shape for future wiring.
    delta: List[float]  # change to apply to a plan vector
    timestamp: str  # creation time -- format not enforced here; TODO confirm convention
    signature: str  # authenticity token; verification is out of scope for this module
|
||||
|
||||
|
||||
@dataclass
class PrivacyBudget:
    # Privacy allowance attached to a named signal. Not consumed anywhere in
    # this module yet -- a placeholder for the governance layer.
    signal: str  # identifier of the signal the budget applies to
    budget: float  # remaining allowance -- units/accounting scheme TODO confirm
    expiry: str  # expiry marker -- format not enforced here
|
||||
|
||||
|
||||
@dataclass
class AuditLog:
    # Append-only list of audit entries. default_factory gives each instance
    # its own list (avoids the shared-mutable-default pitfall).
    entries: List[str] = field(default_factory=list)
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import List, Tuple
|
||||
import math
|
||||
|
||||
from .core import LocalProblem
|
||||
|
||||
|
||||
def _solve_linear(A: List[List[float]], b: List[float]) -> List[float]:
|
||||
# Solve A x = b using simple Gaussian elimination with partial pivoting
|
||||
# A is assumed to be a square matrix (n x n), b is length n
|
||||
n = len(A)
|
||||
# Create augmented matrix
|
||||
M = [A[i][:] + [b[i]] for i in range(n)]
|
||||
# Forward elimination
|
||||
for k in range(n):
|
||||
# Find pivot
|
||||
piv = max(range(k, n), key=lambda i: abs(M[i][k]))
|
||||
if abs(M[piv][k]) < 1e-12:
|
||||
raise ValueError("Matrix is singular or ill-conditioned in solver")
|
||||
# Swap rows
|
||||
if piv != k:
|
||||
M[k], M[piv] = M[piv], M[k]
|
||||
# Normalize row
|
||||
fac = M[k][k]
|
||||
for j in range(k, n + 1):
|
||||
M[k][j] /= fac
|
||||
# Eliminate below
|
||||
for i in range(k + 1, n):
|
||||
factor = M[i][k]
|
||||
if factor == 0:
|
||||
continue
|
||||
for j in range(k, n + 1):
|
||||
M[i][j] -= factor * M[k][j]
|
||||
# Back substitution
|
||||
x = [0.0] * n
|
||||
for i in range(n - 1, -1, -1):
|
||||
s = M[i][n]
|
||||
for j in range(i + 1, n):
|
||||
s -= M[i][j] * x[j]
|
||||
x[i] = s / M[i][i] if M[i][i] != 0 else 0.0
|
||||
return x
|
||||
|
||||
|
||||
def _vec_add(a: List[float], b: List[float]) -> List[float]:
|
||||
return [ai + bi for ai, bi in zip(a, b)]
|
||||
|
||||
|
||||
def _vec_sub(a: List[float], b: List[float]) -> List[float]:
|
||||
return [ai - bi for ai, bi in zip(a, b)]
|
||||
|
||||
|
||||
def _scalar_mul(v: List[float], s: float) -> List[float]:
|
||||
return [vi * s for vi in v]
|
||||
|
||||
|
||||
def admm_lite(problems: List[LocalProblem], rho: float = 1.0, max_iters: int = 20, tol: float = 1e-6) -> Tuple[List[float], List[List[float]]]:
    """A lightweight ADMM-like solver for a set of LocalProblem instances sharing a common x.

    Assumes all problems have the same dimension n, and objective is 0.5 x^T Q_i x + c_i^T x.
    The shared variable is z (length n). Each agent maintains its own x_i and dual u_i.

    Args:
        problems: non-empty list of LocalProblem instances, all of dimension n.
        rho: ADMM penalty parameter; must be positive for (Q_i + rho I) to be solvable.
        max_iters: maximum number of ADMM iterations.
        tol: stop when max_i ||x_i - z||_inf falls below this threshold.

    Returns (z, history_of_z_values).

    Raises:
        ValueError: if no problems are provided or their dimensions disagree.
    """
    if not problems:
        raise ValueError("No problems provided to ADMM solver")
    n = problems[0].n
    # Fail fast on mixed dimensions instead of producing shape errors mid-solve.
    if any(p.n != n for p in problems):
        raise ValueError("All problems must share the same dimension n")
    # Initialize per-problem variables
    xs: List[List[float]] = [[0.0] * n for _ in problems]
    us: List[List[float]] = [[0.0] * n for _ in problems]
    # Global variable z
    z: List[float] = [0.0] * n
    history: List[List[float]] = []

    for _ in range(max_iters):
        # x-update for each problem: solve (Q_i + rho I) x_i = -c_i + rho (z - u_i)
        for idx, prob in enumerate(problems):
            A = [[prob.Q[i][j] + (rho if i == j else 0.0) for j in range(n)] for i in range(n)]
            z_minus_u = _vec_sub(z, us[idx])
            b = [-prob.c[i] + rho * z_minus_u[i] for i in range(n)]
            xs[idx] = _solve_linear(A, b)
        # z-update: z = (1/m) sum_i (x_i + u_i)
        m = len(problems)
        sum_xu = [0.0] * n
        for i in range(m):
            sum_xu = _vec_add(sum_xu, _vec_add(xs[i], us[i]))
        z_new = _scalar_mul(sum_xu, 1.0 / m)
        # u-update: u_i = u_i + x_i - z
        for i in range(m):
            us[i] = _vec_add(us[i], _vec_sub(xs[i], z_new))
        z = z_new
        history.append(z[:])
        # Convergence check over ALL components. The previous version only
        # inspected component 0 of 1-D problems and scored every other case
        # as 0.0, which made any n > 1 run terminate after one iteration.
        max_diff = max(math.fabs(xs[i][d] - z[d]) for i in range(m) for d in range(n))
        if max_diff < tol:
            break
    return z, history
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
import math
|
||||
from catopt_grid.core import LocalProblem
|
||||
from catopt_grid.solver import admm_lite
|
||||
|
||||
|
||||
def test_admm_lite_two_1d_problems_converge_to_same_solution():
    """Two 1-D quadratics sharing the minimizer x* = 3 must reach consensus there."""
    # Problem 1: minimize 0.5*2*x^2 - 6x -> unconstrained minimizer x* = 3
    prob_a = LocalProblem(id="p1", domain="test", n=1, Q=[[2.0]], c=[-6.0])

    # Problem 2: minimize 0.5*3*x^2 - 9x -> unconstrained minimizer x* = 3
    prob_b = LocalProblem(id="p2", domain="test", n=1, Q=[[3.0]], c=[-9.0])

    z, history = admm_lite([prob_a, prob_b], rho=1.0, max_iters=200, tol=1e-9)

    # The converged shared variable should be close to 3.0
    assert abs(z[0] - 3.0) < 1e-6
    # At least one consensus snapshot must have been recorded.
    assert len(history) > 0
|
||||
Loading…
Reference in New Issue