build(agent): molt-b#d1f4fd iteration

This commit is contained in:
agent-d1f4fdedbc508482 2026-04-15 01:57:18 +02:00
parent 8faf33804c
commit a2787cbe34
10 changed files with 228 additions and 2 deletions

21
.gitignore vendored Normal file
View File

@ -0,0 +1,21 @@
node_modules/
.npmrc
.env
.env.*
__tests__/
coverage/
.nyc_output/
dist/
build/
.cache/
*.log
.DS_Store
tmp/
.tmp/
__pycache__/
*.pyc
.venv/
venv/
*.egg-info/
.pytest_cache/
READY_TO_PUBLISH

10
AGENTS.md Normal file
View File

@ -0,0 +1,10 @@
# OpenBench SWARM Agent Guidelines
- Architecture: MVP focusing on a privacy-preserving KPI share/aggregation pipeline with offline-first storage.
- Tech Stack: Python 3.8+, standard library, no heavy dependencies for MVP; packaging via setuptools; tests with pytest.
- Testing: `pytest` for unit tests; `python3 -m build` to verify packaging metadata and directory structure.
- Running tests: `bash test.sh` in the repo root.
- Contribution Rules: one feature per patch; keep changes minimal; avoid touching unrelated areas.
- Data Model: KPIRecord with revenue, COGS, inventory_turns, lead_time, CAC, LTV; anonymous sharing via anon_id.
- Privacy: aggregate with optional Laplace noise; results are fully deterministic when anonymize=False because no noise is added.
- How to Extend: add new adapters, contracts, or playbooks under respective namespaces; ensure tests cover new behavior.

View File

@ -1,3 +1,19 @@
# openbench-privacy-preserving-benchmarkin
# OpenBench: Privacy-Preserving Benchmarking (MVP)
A modular, open-source platform that enables small businesses to anonymously share and compare KPIs across industries and regions, without exposing raw data. It uses policy-driven data exchanges with data contracts and secure aggregation, offering actionable, privacy-preserving benchmarking insights.
This repository contains a minimal, working MVP of the OpenBench platform focused on:
- An offline-first KPI data model (Revenue, COGS, inventory turns, lead time, CAC, LTV).
- A simple, privacy-preserving aggregation primitive (Laplace-noise-enabled) for anonymized benchmarking.
- A lightweight Python packaging setup compatible with pytest-based tests and python -m build packaging checks.
How to run
- Install dependencies and run tests: `bash test.sh`
- The MVP stores KPI records locally in a JSONL file under the package data directory.
Project layout (high-level)
- openbench_privacy_preserving_benchmarkin/core.py: Core data model and analytics primitives.
- __init__.py: Re-exports core primitives for simple imports.
- test.sh: Quick test runner that also builds the distribution.
- AGENTS.md: Swarm agent guidelines describing architecture and testing commands.
- pyproject.toml/setup.py: Packaging metadata to satisfy python packaging checks.
This is a deliberately minimal MVP intended to demonstrate architecture and testing practices.

View File

@ -0,0 +1,14 @@
"""OpenBench Privacy-Preserving Benchmarking (MVP)
This package provides a compact, offline-first core for capturing KPI data,
performing privacy-preserving aggregations, and computing simple derived metrics.
"""
from .core import KPIRecord, LocalStore, SecureAggregator, GrowthCalculator
# Public, stable names re-exported from the core module.
__all__ = ["KPIRecord", "LocalStore", "SecureAggregator", "GrowthCalculator"]

View File

@ -0,0 +1,105 @@
from __future__ import annotations

import json
import math
import os
import random
import uuid
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone
from typing import List, Optional
# Default on-disk location for the JSONL record store: a "data" directory
# that lives next to this module inside the installed package.
STORE_DIR = os.path.join(os.path.dirname(__file__), "data")
STORE_PATH = os.path.join(STORE_DIR, "kpi_records.jsonl")
@dataclass
class KPIRecord:
    """One anonymized KPI snapshot for a single business.

    All KPI fields are plain floats; identity is carried only by the random
    ``anon_id``, never by business-identifying data.
    """

    revenue: float
    cogs: float
    inventory_turns: float
    lead_time: float
    cac: float
    ltv: float
    region: str = "global"
    industry: str = "unknown"
    # Random, non-reversible identifier so records can be shared anonymously.
    anon_id: str = field(default_factory=lambda: uuid.uuid4().hex)
    # ISO-8601 UTC timestamp.  datetime.utcnow() is deprecated (Python 3.12+)
    # and returns a naive datetime; use an explicitly UTC-aware stamp instead.
    timestamp: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )
class LocalStore:
    """Simple offline-first store for KPI records (JSONL).

    Each record is appended as one JSON object per line, so the store can be
    written incrementally and read back without parsing a whole document.
    """

    def __init__(self, path: Optional[str] = None) -> None:
        """Create a store backed by *path* (defaults to the package data file)."""
        self.path = path or STORE_PATH
        # A bare filename has no directory component and os.makedirs("")
        # raises; only create parent directories when one is present.
        parent = os.path.dirname(self.path)
        if parent:
            os.makedirs(parent, exist_ok=True)

    def add_kpi(self, record: KPIRecord) -> None:
        """Append *record* to the JSONL file."""
        with open(self.path, "a", encoding="utf-8") as f:
            f.write(json.dumps(asdict(record)) + "\n")

    def get_all(self) -> List[KPIRecord]:
        """Load every stored record; returns [] when the file does not exist."""
        records: List[KPIRecord] = []
        if not os.path.exists(self.path):
            return records
        with open(self.path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                data = json.loads(line)
                # Guard against unknown/extra fields in legacy lines: drop any
                # keys that are not declared KPIRecord fields before building.
                known = KPIRecord.__dataclass_fields__
                records.append(
                    KPIRecord(**{k: v for k, v in data.items() if k in known})
                )
        return records
class SecureAggregator:
    """Privacy-aware aggregation over KPI records.

    ``aggregate`` returns the plain mean by default.  With ``anonymize=True``
    it adds Laplace noise with scale ``1/epsilon`` to the released value.
    Seed the ``random`` module for reproducible noise in tests; production use
    should swap in a cryptographically robust RNG.
    """

    @staticmethod
    def _laplace_sample(b: float) -> float:
        """Draw one sample from Laplace(0, b).

        The difference of two i.i.d. Exponential(mean=b) variables is
        Laplace(0, b); each ``-b * log(u)`` term below is one such draw.
        The 1e-12 offset keeps log() finite when random() returns 0.
        """
        u1 = random.random()
        u2 = random.random()
        return -b * (math.log(u1 + 1e-12) - math.log(u2 + 1e-12))

    @staticmethod
    def aggregate(records: List[KPIRecord], metric: str, anonymize: bool = False, epsilon: float = 1.0) -> float:
        """Return the mean of *metric* across *records*, optionally noised.

        Returns 0.0 for an empty record list.  Raises ValueError when
        *metric* is not a declared KPIRecord field.
        """
        if not records:
            return 0.0
        # Dataclasses always carry __annotations__, so a hasattr() check is
        # unnecessary; validate the metric name directly.
        if metric not in KPIRecord.__annotations__:
            raise ValueError(f"Unknown metric '{metric}' for KPIRecord.")
        values = [getattr(r, metric) for r in records]
        mean = sum(values) / len(values)
        if anonymize:
            # Smaller epsilon => stronger privacy => larger noise scale b.
            b = 1.0 / max(epsilon, 1e-9)
            mean += SecureAggregator._laplace_sample(b)
        return mean
class GrowthCalculator:
    """Derived-growth helpers computed from a single KPI record."""

    @staticmethod
    def roi(record: KPIRecord) -> float:
        """Return-on-investment proxy: (LTV - CAC) / CAC; inf when CAC is 0."""
        cost = record.cac
        if cost == 0:
            return float('inf')
        return (record.ltv - cost) / cost

    @staticmethod
    def growth_index(record: KPIRecord) -> float:
        """Composite growth index: (revenue + LTV) over COGS, floored at 1.0."""
        denominator = max(1.0, record.cogs)
        return (record.revenue + record.ltv) / denominator
__all__ = ["KPIRecord", "LocalStore", "SecureAggregator", "GrowthCalculator"]

12
pyproject.toml Normal file
View File

@ -0,0 +1,12 @@
[build-system]
# PEP 517/518 build configuration: setuptools backend, wheel for binary dists.
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
# NOTE(review): name drops the trailing "g" of "benchmarking" — it matches the
# package directory name, so keep both in sync if ever renamed.
name = "openbench-privacy-preserving-benchmarkin"
version = "0.1.0"
description = "Privacy-preserving benchmarking for SMEs with secure aggregation and data contracts."
readme = "README.md"
requires-python = ">=3.8"
license = { text = "MIT" }
authors = [ { name = "OpenCode SWARM", email = "dev@example.com" } ]

9
setup.py Normal file
View File

@ -0,0 +1,9 @@
"""Setuptools shim so legacy ``python setup.py``-style tooling keeps working.

Canonical project metadata lives in pyproject.toml; values here mirror it.
"""
from setuptools import find_packages, setup

setup(
    name="openbench-privacy-preserving-benchmarkin",
    version="0.1.0",
    description="Privacy-preserving benchmarking for SMEs with secure aggregation and data contracts.",
    packages=find_packages(exclude=("tests",)),
    include_package_data=True,
)

13
test.sh Executable file
View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Quick test runner: install the package, run unit tests, verify packaging.
set -euo pipefail

echo "[TEST] Installing package (editable) for test environment..."
python3 -m pip install -e .

# The script previously assumed pytest and build were already available;
# install them explicitly so a fresh environment works too.
echo "[TEST] Ensuring test/build tooling is available..."
python3 -m pip install pytest build

echo "[TEST] Running pytest..."
pytest -q

echo "[TEST] Building package..."
python3 -m build

echo "All tests passed and packaging completed."

24
tests/test_core.py Normal file
View File

@ -0,0 +1,24 @@
import os
from openbench_privacy_preserving_benchmarkin import KPIRecord, LocalStore, SecureAggregator, GrowthCalculator
def test_store_and_aggregate_simple():
    """Round-trip two records through LocalStore and check the plain mean."""
    import tempfile

    # Use an isolated temp directory so the test never leaks files into the
    # repository or clobbers the committed tests_data fixture.
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "kpi_records.jsonl")
        store = LocalStore(path=path)
        r1 = KPIRecord(revenue=100.0, cogs=60.0, inventory_turns=3.0, lead_time=5.0, cac=20.0, ltv=120.0, region="NA", industry="Retail", anon_id="anon1", timestamp="2020-01-01T00:00:00Z")
        r2 = KPIRecord(revenue=200.0, cogs=110.0, inventory_turns=4.0, lead_time=6.0, cac=30.0, ltv=240.0, region="NA", industry="Retail", anon_id="anon2", timestamp="2020-01-02T00:00:00Z")
        store.add_kpi(r1)
        store.add_kpi(r2)
        all_recs = store.get_all()
        assert len(all_recs) == 2
        mean_rev = SecureAggregator.aggregate(all_recs, "revenue", anonymize=False)
        assert mean_rev == (100.0 + 200.0) / 2
def test_roi_calc():
    """ROI proxy should equal (LTV - CAC) / CAC."""
    record = KPIRecord(revenue=100.0, cogs=50.0, inventory_turns=2.0, lead_time=3.0, cac=10.0, ltv=100.0)
    # snake_case local name (was `roI`, which violates PEP 8 naming).
    roi = GrowthCalculator.roi(record)
    assert roi == (100.0 - 10.0) / 10.0

View File

@ -0,0 +1,2 @@
{"revenue": 100.0, "cogs": 60.0, "inventory_turns": 3.0, "lead_time": 5.0, "cac": 20.0, "ltv": 120.0, "region": "NA", "industry": "Retail", "anon_id": "anon1", "timestamp": "2020-01-01T00:00:00Z"}
{"revenue": 200.0, "cogs": 110.0, "inventory_turns": 4.0, "lead_time": 6.0, "cac": 30.0, "ltv": 240.0, "region": "NA", "industry": "Retail", "anon_id": "anon2", "timestamp": "2020-01-02T00:00:00Z"}