// bp_core/control/protocol.rs

1//! JSON-RPC–style control protocol between the CLI and the daemon.
2//!
3//! Messages are newline-delimited JSON frames exchanged over a Unix socket.
4
5use crate::{
6    network::state::NodeInfo,
7    service::{ServiceInfo, ServiceType},
8};
9use serde::{Deserialize, Serialize};
10use std::collections::HashMap;
11
12/// Request sent from CLI → daemon.
13#[derive(Debug, Clone, Serialize, Deserialize)]
14#[serde(tag = "cmd", rename_all = "snake_case")]
15pub enum ControlRequest {
16    /// Spawn a new service of `service_type` in `network_id`.
17    Hatch {
18        service_type: ServiceType,
19        network_id: String,
20        /// Optional extra config (e.g. `{"storage_bytes": 10737418240}` for Pouch).
21        metadata: HashMap<String, serde_json::Value>,
22    },
23    /// Return all known peers and network summary.
24    Flock,
25    /// Kill a running service by ID.
26    Farewell { service_id: String },
27    /// Permanently evict a Pouch from the network.
28    ///
29    /// Announces to the network that this Pouch is going offline permanently,
30    /// records a reputation eviction, removes the service from `ServiceRegistry`,
31    /// and purges all on-disk fragment storage.
32    ///
33    /// Fragment redistribution to other peers is not yet automated — the
34    /// remaining network peers will detect missing fragments via Proof-of-Storage
35    /// challenges and trigger preventive recoding.
36    FarewellEvict { service_id: String },
37    /// Pause a running Pouch service for planned maintenance.
38    ///
39    /// Announces via gossip that peers should mark this node's fragments as
40    /// `temporarily_unavailable`.  If the service does not resume within
41    /// `eta_minutes`, the quality monitor will apply fault-score increments.
42    Pause {
43        /// Service ID to pause (UUID returned by `bp hatch`).
44        service_id: String,
45        /// Estimated downtime in minutes.
46        eta_minutes: u64,
47    },
48    /// Resume a previously paused service.
49    ///
50    /// Triggers an immediate gossip announcement and clears the Paused status.
51    Resume {
52        /// Service ID to resume.
53        service_id: String,
54    },
55    /// Join an existing network (subscribe to gossip topics).
56    Join { network_id: String },
57    /// Leave a network.
58    ///
59    /// If `force` is `true` the daemon automatically evicts all active services
60    /// on the network before leaving instead of returning a blocking error.
61    /// Pouch services are evicted permanently (equivalent to
62    /// `bp farewell --evict`); Bill and Post services are stopped gracefully
63    /// (equivalent to `bp farewell`).
64    Leave {
65        network_id: String,
66        /// Auto-evict all blocking services, then leave.
67        #[serde(default)]
68        force: bool,
69    },
70    /// Return info about this daemon (identity, services, networks).
71    Status,
72    /// Announce all active services via gossip and wait 2 s for propagation.
73    ///
74    /// Used by `bp status` to ensure the peer list is fresh before
75    /// displaying it.  Returns `Ok("announced")` once the wait is done.
76    AnnounceNow,
77    /// Ping — used to check if daemon is alive.
78    Ping,
79    /// Encode `chunk_data` with RLNC and store fragments in the local Pouch.
80    ///
81    /// `k` and `n` are derived automatically by the daemon from live QoS data
82    /// and the `ph` target recovery probability.
83    /// Returns a `chunk_id` (BLAKE3 hash prefix) that can be used with `GetFile`.
84    PutFile {
85        /// Raw bytes to encode and store.
86        chunk_data: Vec<u8>,
87        /// Target recovery probability (default: 0.999).
88        /// The daemon derives k/n from live peer QoS and this target.
89        ph: Option<f64>,
90        /// Redundancy overhead fraction (default: 1.0 = k extra fragments).
91        q_target: Option<f64>,
92        /// Network ID whose Pouch should hold the fragments.
93        network_id: String,
94        /// Optional file name to record in the local file registry.
95        /// Displayed by `bp ls`.
96        #[serde(default)]
97        file_name: Option<String>,
98    },
99    /// Retrieve and decode a stored file chunk by its `chunk_id`.
100    GetFile {
101        /// BLAKE3 chunk hash prefix (16 hex chars) returned by `PutFile`.
102        chunk_id: String,
103        /// Network ID to search for the chunk.
104        network_id: String,
105    },
106    /// Dial a relay node to enable NAT traversal via circuit relay v2.
107    ///
108    /// The daemon dials `relay_addr`, establishes a reservation, and
109    /// subsequently becomes reachable through the relay at
110    /// `/p2p-circuit` addresses.
111    ConnectRelay {
112        /// Full multiaddr of the relay node, e.g.
113        /// `/ip4/1.2.3.4/tcp/4001/p2p/12D3KooW...`
114        relay_addr: String,
115    },
116    /// Generate a signed + password-encrypted invite token for `network_id`.
117    ///
118    /// The token contains the `NetworkMetaKey` for the network, signed with
119    /// the daemon's Ed25519 key and encrypted with `invite_password`.  Share
120    /// the blob and the password with the invitee out-of-band.
121    CreateInvite {
122        /// Network to invite the recipient into.
123        network_id: String,
124        /// Optional: fingerprint of the specific invitee.  `None` = open invite.
125        invitee_fingerprint: Option<String>,
126        /// Password used to encrypt the token — shared out-of-band.
127        invite_password: String,
128        /// Token validity in hours (default: 24).
129        ttl_hours: Option<u64>,
130    },
131    /// Return storage statistics for all local Pouch services.
132    ///
133    /// Includes per-Pouch quota, usage, tier, plus aggregate totals and
134    /// total bytes uploaded by this node's Bill services.
135    StorageInfo {
136        /// Filter to a specific network (empty string = all networks).
137        #[serde(default)]
138        network_id: String,
139    },
140    /// List files uploaded by this node.
141    ///
142    /// Returns entries from the local file registry populated by `PutFile`.
143    ListFiles {
144        /// Filter to a specific network (empty string = all networks).
145        #[serde(default)]
146        network_id: String,
147    },
148}
149
/// Response sent from daemon → CLI.
///
/// Serialized with a `status` tag: `"ok"` (optionally carrying a `data`
/// payload) or `"error"` (carrying a human-readable `message`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "status", rename_all = "snake_case")]
pub enum ControlResponse {
    /// Success.  `data` typically holds one of the typed payload structs
    /// below, serialized to JSON; the field is omitted from the frame
    /// entirely when `None`.
    Ok {
        #[serde(skip_serializing_if = "Option::is_none")]
        data: Option<serde_json::Value>,
    },
    /// Failure with a human-readable error message.
    Error {
        message: String,
    },
}
162
163impl ControlResponse {
164    pub fn ok(data: impl Serialize) -> Self {
165        let v = serde_json::to_value(data).unwrap_or(serde_json::Value::Null);
166        ControlResponse::Ok { data: Some(v) }
167    }
168
169    pub fn ok_empty() -> Self {
170        ControlResponse::Ok { data: None }
171    }
172
173    pub fn err(msg: impl ToString) -> Self {
174        ControlResponse::Error {
175            message: msg.to_string(),
176        }
177    }
178}
179
180// ── Typed data payloads returned in ControlResponse::Ok.data ─────────────────
181
/// Returned by [`ControlRequest::Hatch`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HatchData {
    /// ID of the newly spawned service (UUID — see `Pause.service_id` docs).
    pub service_id: String,
    /// Type of service that was spawned.
    pub service_type: ServiceType,
    /// Network the service was spawned in.
    pub network_id: String,
    /// Human-readable summary.
    pub message: String,
}
189
/// Returned by [`ControlRequest::Flock`]: known peers and network summary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlockData {
    /// Services running on this daemon.
    pub local_services: Vec<ServiceInfo>,
    /// All peers currently known to this daemon.
    pub known_peers: Vec<NodeInfo>,
    /// Networks this daemon participates in.
    pub networks: Vec<String>,
    /// Known-peer count (presumably `known_peers.len()` — verify in daemon).
    pub peer_count: usize,
}
197
/// Returned by [`ControlRequest::Status`]: info about this daemon
/// (identity, services, networks).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StatusData {
    /// This daemon's peer ID.
    pub peer_id: String,
    /// Fingerprint of this daemon's identity key.
    pub fingerprint: String,
    /// Optional human-friendly alias for this node.
    pub alias: Option<String>,
    /// Services running on this daemon.
    pub local_services: Vec<ServiceInfo>,
    /// Networks this daemon has joined.
    pub networks: Vec<String>,
    /// Number of peers currently known.
    pub known_peers: usize,
    /// Daemon version string.
    pub version: String,
    /// This node's own reputation tier (R0–R4).
    pub reputation_tier: String,
    /// Continuous reputation score underlying the tier.
    pub reputation_score: i64,
    /// Storage stats for each active Pouch on this node.
    pub pouch_stats: Vec<PouchStat>,
    /// Summary of network-wide QoS (if any peers are known).
    pub network_qos: Option<NetworkQosSummary>,
}
216
/// Returned by [`ControlRequest::PutFile`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PutFileData {
    /// BLAKE3 chunk hash prefix identifying this chunk.
    /// Pass it to [`ControlRequest::GetFile`] to retrieve the data.
    pub chunk_id: String,
    /// Recovery threshold: minimum fragments needed to reconstruct.
    pub k: usize,
    /// Total fragments generated per chunk.
    pub n: usize,
    /// Effective redundancy overhead: `(n − k) / k`.
    pub q: f64,
    /// Target recovery probability declared at upload time.
    pub ph: f64,
    /// Rolling effective recovery probability at upload time.
    pub pe: f64,
    /// How many fragments were stored locally.
    pub fragments_stored: usize,
    /// How many fragments were pushed to remote Pouches.
    pub fragments_distributed: usize,
    /// Human-readable summary.
    pub message: String,
}
239
/// Returned by [`ControlRequest::GetFile`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GetFileData {
    /// BLAKE3 chunk hash prefix of the retrieved chunk.
    pub chunk_id: String,
    /// Recovered raw bytes (the original `chunk_data` passed to `PutFile`).
    pub data: Vec<u8>,
    /// How many fragments were used for decoding.
    pub fragments_used: usize,
    /// How many of those fragments came from remote Pouches.
    pub fragments_remote: usize,
}
252
/// Returned by [`ControlRequest::Leave`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeaveData {
    /// Network that was left (or whose leave was blocked).
    pub network_id: String,
    /// Whether the leave was blocked by active services (only when `force=false`).
    #[serde(default)]
    pub blocked: bool,
    /// Services that were automatically evicted (when `force=true`).
    #[serde(default)]
    pub services_auto_evicted: Vec<serde_json::Value>,
    /// Human-readable summary.
    pub message: String,
}
266
/// Returned by [`ControlRequest::CreateInvite`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InviteData {
    /// Hex-encoded invite blob to share with the invitee
    /// (alongside the out-of-band `invite_password`).
    pub blob: String,
    /// Network this invite grants access to.
    pub network_id: String,
    /// Unix timestamp when the token expires.
    // NOTE(review): units (seconds vs. millis) not stated here — confirm in daemon.
    pub expires_at: u64,
    /// Fingerprint of the inviter (for display).
    pub inviter_fingerprint: String,
}
279
280// ── Storage / file-listing types ────────────────────────────────────────────
281
/// Storage statistics for a single local Pouch service.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PouchStat {
    /// ID of the Pouch service.
    pub service_id: String,
    /// Network the Pouch serves.
    pub network_id: String,
    /// Named storage tier (e.g. "T2 — Stone"), if declared at hatch time.
    pub storage_tier: Option<String>,
    /// Bytes offered to the network at bid time.
    pub storage_bid_bytes: u64,
    /// Bytes currently occupied by stored fragments.
    pub storage_used_bytes: u64,
    /// Bytes still available for new fragments.
    pub available_bytes: u64,
}
296
297/// High-level QoS summary for the connected network peers.
298#[derive(Debug, Clone, Serialize, Deserialize)]
299pub struct NetworkQosSummary {
300    /// Number of peers with known QoS data.
301    pub observed_peers: usize,
302    /// Average stability score across all observed peers (0.0–1.0).
303    pub avg_stability: f64,
304    /// Number of peers at each reputation tier.
305    pub tier_counts: std::collections::HashMap<String, usize>,
306}
307
/// Returned by [`ControlRequest::StorageInfo`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageInfoData {
    /// Per-Pouch breakdown.
    pub pouches: Vec<PouchStat>,
    /// Sum of all Pouch bids.
    pub total_bid_bytes: u64,
    /// Sum of all fragment bytes currently stored.
    pub total_used_bytes: u64,
    /// Total bytes still available across all Pouches.
    pub total_available_bytes: u64,
    /// Total number of files in the local file registry.
    pub total_files_uploaded: usize,
    /// Total original bytes of all uploaded files.
    pub total_uploaded_bytes: u64,
}
324
/// A single file entry returned by [`ControlRequest::ListFiles`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileEntry {
    /// File name recorded at upload time (see `PutFile.file_name`).
    pub file_name: String,
    /// File size in bytes.
    pub size_bytes: u64,
    /// BLAKE3 chunk hash prefix usable with `GetFile`.
    pub chunk_id: String,
    /// Network whose Pouches hold the fragments.
    pub network_id: String,
    /// Unix timestamp (seconds) when the file was uploaded.
    pub uploaded_at: u64,
}
335
/// Returned by [`ControlRequest::ListFiles`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ListFilesData {
    /// Matching file entries.
    pub files: Vec<FileEntry>,
    /// Network filter that was applied (empty string = all networks).
    pub network_id: String,
    /// Number of entries (presumably `files.len()` — verify in daemon).
    pub total_files: usize,
    /// Aggregate bytes (presumably sum of `size_bytes` — verify in daemon).
    pub total_bytes: u64,
}