build(agent): molt-a#3856f9 iteration
This commit is contained in:
parent
cb618e34b8
commit
9b4fa44b03
|
|
@ -11,5 +11,11 @@ Usage highlights:
|
|||
- Privacy-preserving mode via optional Gaussian noise added to aggregated updates.
|
||||
- Offline-first capability via local update caching (non-connected clients save updates to disk).
|
||||
|
||||
Privacy controls
|
||||
- The system supports DP-friendly clipping of updates to bound sensitivity.
|
||||
- Client.train accepts an optional clip_norm parameter (default None). If provided, per-update deltas are clipped to have L2 norm at most clip_norm before sending to the server.
|
||||
- Server.aggregate also supports an optional clip_norm parameter to clip all incoming updates prior to averaging, bounding each update's contribution (per-update sensitivity) even when clients did not clip locally.
|
||||
- You can combine clipping with Gaussian noise (noise_scale) for stronger privacy guarantees.
|
||||
|
||||
How to run tests:
|
||||
- This repository provides a test script via test.sh (see below).
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ class Client:
|
|||
self.w = [0.0 for _ in range(n_features)]
|
||||
return self.w
|
||||
|
||||
def train(self, w: List[float], lr: float = 0.01, epochs: int = 5) -> List[float]:
|
||||
def train(self, w: List[float], lr: float = 0.01, epochs: int = 5, clip_norm: Optional[float] = None) -> List[float]:
|
||||
if self.w is None:
|
||||
self.initialize(len(w))
|
||||
n_samples = len(self.y)
|
||||
|
|
@ -41,6 +41,13 @@ class Client:
|
|||
# update local weights
|
||||
self.w = [self.w[j] - lr * grad[j] for j in range(n_features)]
|
||||
update = [self.w[j] - w[j] for j in range(n_features)]
|
||||
# Optional per-update clipping for DP safety and bounded sensitivity
|
||||
if clip_norm is not None and clip_norm > 0.0:
|
||||
# Compute L2 norm of the delta update
|
||||
norm = sum(val * val for val in update) ** 0.5
|
||||
if norm > clip_norm:
|
||||
scale = clip_norm / (norm if norm != 0.0 else 1.0)
|
||||
update = [val * scale for val in update]
|
||||
if not self.connected:
|
||||
self._save_update(update)
|
||||
return update
|
||||
|
|
@ -70,7 +77,7 @@ class Server:
|
|||
else:
|
||||
self.w = list(initial_weights)
|
||||
|
||||
def aggregate(self, updates: List[List[float]], noise_scale: float = 0.0, seed: Optional[int] = None) -> List[float]:
|
||||
def aggregate(self, updates: List[List[float]], noise_scale: float = 0.0, seed: Optional[int] = None, clip_norm: Optional[float] = None) -> List[float]:
|
||||
if not updates:
|
||||
return self.w
|
||||
# Deterministic seed when provided for testability
|
||||
|
|
@ -81,6 +88,22 @@ class Server:
|
|||
for i in range(self.n_features):
|
||||
avg_delta[i] += upd[i]
|
||||
avg_delta = [d / len(updates) for d in avg_delta]
|
||||
# Optional per-update clipping before aggregation for DP safety
|
||||
if clip_norm is not None and clip_norm > 0.0:
|
||||
clipped_updates = []
|
||||
for upd in updates:
|
||||
norm = sum(x * x for x in upd) ** 0.5
|
||||
if norm > clip_norm:
|
||||
scale = clip_norm / (norm if norm != 0.0 else 1.0)
|
||||
clipped_updates.append([x * scale for x in upd])
|
||||
else:
|
||||
clipped_updates.append(list(upd))
|
||||
# Recompute average delta from clipped updates
|
||||
avg_delta = [0.0 for _ in range(self.n_features)]
|
||||
for upd in clipped_updates:
|
||||
for i in range(self.n_features):
|
||||
avg_delta[i] += upd[i]
|
||||
avg_delta = [d / len(updates) for d in avg_delta]
|
||||
# add noise if requested
|
||||
if noise_scale and noise_scale > 0.0:
|
||||
for i in range(self.n_features):
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
import math
|
||||
import random
|
||||
from interplanetary_edge_orchestrator_privacy import Client, Server
|
||||
|
||||
|
||||
def test_training_clipping_applies():
    """Training with clip_norm must return a delta whose L2 norm is bounded."""
    random.seed(0)

    # Large feature magnitudes push the raw weight delta well past the clip
    # bound, so the clipping branch is actually exercised.
    features = [[100.0, 100.0], [100.0, -100.0]]
    labels = [0.0, 0.0]

    client = Client(client_id=99, data_X=features, data_y=labels, connected=True)
    client.initialize(n_features=2)
    global_weights = [0.0, 0.0]

    # Request a small clip norm to enforce DP-like bounded sensitivity.
    delta = client.train(global_weights, lr=0.01, epochs=1, clip_norm=1.0)

    # The returned update must satisfy ||delta||_2 <= clip_norm (float slack).
    delta_norm = math.sqrt(sum(component * component for component in delta))
    assert delta_norm <= 1.0 + 1e-9
|
||||
Loading…
Reference in New Issue